From 1f0dc94e6ac95940ac5fd0e0b5f62152b8f821a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Thu, 21 Nov 2024 14:10:19 +0100 Subject: [PATCH 01/10] chore: Basic object tracking (#3205) ## Changes - All proposals for basic object tracking tested - Added functions (and tested them) that allow us to use Golang's context to chosen usage tracking ## Next pr - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3205#discussion_r1850109561 --- .../helpers/information_schema_client.go | 44 +++++++ pkg/acceptance/helpers/test_client.go | 2 + pkg/acceptance/helpers/user_client.go | 8 ++ pkg/internal/tracking/context.go | 73 +++++++++++ pkg/internal/tracking/context_test.go | 45 +++++++ pkg/internal/tracking/query.go | 31 +++++ pkg/internal/tracking/query_test.go | 65 ++++++++++ pkg/resources/common.go | 46 +++++++ pkg/resources/schema.go | 17 +-- pkg/sdk/client.go | 31 +++-- pkg/sdk/context_functions.go | 2 + pkg/sdk/integration_test_imports.go | 16 +-- .../basic_object_tracking_integration_test.go | 113 ++++++++++++++++++ pkg/sdk/testint/client_integration_test.go | 63 ++++++++++ 14 files changed, 531 insertions(+), 25 deletions(-) create mode 100644 pkg/acceptance/helpers/information_schema_client.go create mode 100644 pkg/internal/tracking/context.go create mode 100644 pkg/internal/tracking/context_test.go create mode 100644 pkg/internal/tracking/query.go create mode 100644 pkg/internal/tracking/query_test.go create mode 100644 pkg/sdk/testint/basic_object_tracking_integration_test.go create mode 100644 pkg/sdk/testint/client_integration_test.go diff --git a/pkg/acceptance/helpers/information_schema_client.go b/pkg/acceptance/helpers/information_schema_client.go new file mode 100644 index 0000000000..9ed99e4e19 --- /dev/null +++ b/pkg/acceptance/helpers/information_schema_client.go @@ -0,0 +1,44 @@ +package helpers + +import ( + "context" + "fmt" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + 
"github.com/stretchr/testify/require" +) + +type InformationSchemaClient struct { + context *TestClientContext + ids *IdsGenerator +} + +func NewInformationSchemaClient(context *TestClientContext, idsGenerator *IdsGenerator) *InformationSchemaClient { + return &InformationSchemaClient{ + context: context, + ids: idsGenerator, + } +} + +func (c *InformationSchemaClient) client() *sdk.Client { + return c.context.client +} + +func (c *InformationSchemaClient) GetQueryTextByQueryId(t *testing.T, queryId string) string { + t.Helper() + result, err := c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT QUERY_TEXT FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => 20)) WHERE QUERY_ID = '%s'", queryId)) + require.NoError(t, err) + require.Len(t, result, 1) + require.NotNil(t, result[0]["QUERY_TEXT"]) + return (*result[0]["QUERY_TEXT"]).(string) +} + +func (c *InformationSchemaClient) GetQueryTagByQueryId(t *testing.T, queryId string) string { + t.Helper() + result, err := c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT QUERY_TAG FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => 20)) WHERE QUERY_ID = '%s'", queryId)) + require.NoError(t, err) + require.Len(t, result, 1) + require.NotNil(t, result[0]["QUERY_TAG"]) + return (*result[0]["QUERY_TAG"]).(string) +} diff --git a/pkg/acceptance/helpers/test_client.go b/pkg/acceptance/helpers/test_client.go index 53a9b6cb2d..8c2a3cccb1 100644 --- a/pkg/acceptance/helpers/test_client.go +++ b/pkg/acceptance/helpers/test_client.go @@ -36,6 +36,7 @@ type TestClient struct { FileFormat *FileFormatClient Function *FunctionClient Grant *GrantClient + InformationSchema *InformationSchemaClient MaskingPolicy *MaskingPolicyClient MaterializedView *MaterializedViewClient NetworkPolicy *NetworkPolicyClient @@ -108,6 +109,7 @@ func NewTestClient(c *sdk.Client, database string, schema string, warehouse stri FileFormat: NewFileFormatClient(context, idsGenerator), Function: 
NewFunctionClient(context, idsGenerator), Grant: NewGrantClient(context, idsGenerator), + InformationSchema: NewInformationSchemaClient(context, idsGenerator), MaskingPolicy: NewMaskingPolicyClient(context, idsGenerator), MaterializedView: NewMaterializedViewClient(context, idsGenerator), NetworkPolicy: NewNetworkPolicyClient(context, idsGenerator), diff --git a/pkg/acceptance/helpers/user_client.go b/pkg/acceptance/helpers/user_client.go index c64afcf723..20461ae6e5 100644 --- a/pkg/acceptance/helpers/user_client.go +++ b/pkg/acceptance/helpers/user_client.go @@ -68,6 +68,14 @@ func (c *UserClient) Alter(t *testing.T, id sdk.AccountObjectIdentifier, opts *s require.NoError(t, err) } +func (c *UserClient) AlterCurrentUser(t *testing.T, opts *sdk.AlterUserOptions) { + t.Helper() + id, err := c.context.client.ContextFunctions.CurrentUser(context.Background()) + require.NoError(t, err) + err = c.client().Alter(context.Background(), id, opts) + require.NoError(t, err) +} + func (c *UserClient) DropUserFunc(t *testing.T, id sdk.AccountObjectIdentifier) func() { t.Helper() ctx := context.Background() diff --git a/pkg/internal/tracking/context.go b/pkg/internal/tracking/context.go new file mode 100644 index 0000000000..9519bf1bb4 --- /dev/null +++ b/pkg/internal/tracking/context.go @@ -0,0 +1,73 @@ +package tracking + +import ( + "context" + "errors" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" +) + +const ( + ProviderVersion string = "v0.99.0" // TODO(SNOW-1814934): Currently hardcoded, make it computed + MetadataPrefix string = "terraform_provider_usage_tracking" +) + +type key struct{} + +var metadataContextKey key + +type Operation string + +const ( + CreateOperation Operation = "create" + ReadOperation Operation = "read" + UpdateOperation Operation = "update" + DeleteOperation Operation = "delete" + ImportOperation Operation = "import" + CustomDiffOperation Operation = "custom_diff" +) + +type Metadata struct { + Version string 
`json:"version,omitempty"` + Resource string `json:"resource,omitempty"` + Operation Operation `json:"operation,omitempty"` +} + +func (m Metadata) validate() error { + errs := make([]error, 0) + if m.Version == "" { + errs = append(errs, errors.New("version for metadata should not be empty")) + } + if m.Resource == "" { + errs = append(errs, errors.New("resource name for metadata should not be empty")) + } + if m.Operation == "" { + errs = append(errs, errors.New("operation for metadata should not be empty")) + } + return errors.Join(errs...) +} + +func NewMetadata(version string, resource resources.Resource, operation Operation) Metadata { + return Metadata{ + Version: version, + Resource: resource.String(), + Operation: operation, + } +} + +func NewVersionedMetadata(resource resources.Resource, operation Operation) Metadata { + return Metadata{ + Version: ProviderVersion, + Resource: resource.String(), + Operation: operation, + } +} + +func NewContext(ctx context.Context, metadata Metadata) context.Context { + return context.WithValue(ctx, metadataContextKey, metadata) +} + +func FromContext(ctx context.Context) (Metadata, bool) { + metadata, ok := ctx.Value(metadataContextKey).(Metadata) + return metadata, ok +} diff --git a/pkg/internal/tracking/context_test.go b/pkg/internal/tracking/context_test.go new file mode 100644 index 0000000000..96e38f75a3 --- /dev/null +++ b/pkg/internal/tracking/context_test.go @@ -0,0 +1,45 @@ +package tracking + +import ( + "context" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/stretchr/testify/require" +) + +func Test_Context(t *testing.T) { + metadata := NewMetadata("123", resources.Account, CreateOperation) + newMetadata := NewMetadata("321", resources.Database, UpdateOperation) + ctx := context.Background() + + // no metadata in context + value := ctx.Value(metadataContextKey) + require.Nil(t, value) + + retrievedMetadata, ok := FromContext(ctx) + 
require.False(t, ok) + require.Empty(t, retrievedMetadata) + + // add metadata by hand + ctx = context.WithValue(ctx, metadataContextKey, metadata) + + value = ctx.Value(metadataContextKey) + require.NotNil(t, value) + require.Equal(t, metadata, value) + + retrievedMetadata, ok = FromContext(ctx) + require.True(t, ok) + require.Equal(t, metadata, retrievedMetadata) + + // add metadata with NewContext function (overrides previous value) + ctx = NewContext(ctx, newMetadata) + + value = ctx.Value(metadataContextKey) + require.NotNil(t, value) + require.Equal(t, newMetadata, value) + + retrievedMetadata, ok = FromContext(ctx) + require.True(t, ok) + require.Equal(t, newMetadata, retrievedMetadata) +} diff --git a/pkg/internal/tracking/query.go b/pkg/internal/tracking/query.go new file mode 100644 index 0000000000..e49421b1a9 --- /dev/null +++ b/pkg/internal/tracking/query.go @@ -0,0 +1,31 @@ +package tracking + +import ( + "encoding/json" + "fmt" + "strings" +) + +func AppendMetadata(sql string, metadata Metadata) (string, error) { + bytes, err := json.Marshal(metadata) + if err != nil { + return "", fmt.Errorf("failed to marshal the metadata: %w", err) + } else { + return fmt.Sprintf("%s --%s %s", sql, MetadataPrefix, string(bytes)), nil + } +} + +func ParseMetadata(sql string) (Metadata, error) { + parts := strings.Split(sql, fmt.Sprintf("--%s", MetadataPrefix)) + if len(parts) != 2 { + return Metadata{}, fmt.Errorf("failed to parse metadata from sql, incorrect number of parts, expected: 2, got: %d", len(parts)) + } + var metadata Metadata + if err := json.Unmarshal([]byte(strings.TrimSpace(parts[1])), &metadata); err != nil { + return Metadata{}, fmt.Errorf("failed to unmarshal metadata from sql: %s, err = %w", sql, err) + } + if err := metadata.validate(); err != nil { + return Metadata{}, err + } + return metadata, nil +} diff --git a/pkg/internal/tracking/query_test.go b/pkg/internal/tracking/query_test.go new file mode 100644 index 0000000000..6d46162186 --- 
/dev/null +++ b/pkg/internal/tracking/query_test.go @@ -0,0 +1,65 @@ +package tracking + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/stretchr/testify/require" +) + +func TestAppendMetadata(t *testing.T) { + metadata := NewMetadata("123", resources.Account, CreateOperation) + sql := "SELECT 1" + + bytes, err := json.Marshal(metadata) + require.NoError(t, err) + + expectedSql := fmt.Sprintf("%s --%s %s", sql, MetadataPrefix, string(bytes)) + + newSql, err := AppendMetadata(sql, metadata) + require.NoError(t, err) + require.Equal(t, expectedSql, newSql) +} + +func TestParseMetadata(t *testing.T) { + metadata := NewMetadata("123", resources.Account, CreateOperation) + bytes, err := json.Marshal(metadata) + require.NoError(t, err) + sql := fmt.Sprintf("SELECT 1 --%s %s", MetadataPrefix, string(bytes)) + + parsedMetadata, err := ParseMetadata(sql) + require.NoError(t, err) + require.Equal(t, metadata, parsedMetadata) +} + +func TestParseInvalidMetadataKeys(t *testing.T) { + sql := fmt.Sprintf(`SELECT 1 --%s {"key": "value"}`, MetadataPrefix) + + parsedMetadata, err := ParseMetadata(sql) + require.ErrorContains(t, err, "version for metadata should not be empty") + require.ErrorContains(t, err, "resource name for metadata should not be empty") + require.ErrorContains(t, err, "operation for metadata should not be empty") + require.Equal(t, Metadata{}, parsedMetadata) +} + +func TestParseInvalidMetadataJson(t *testing.T) { + sql := fmt.Sprintf(`SELECT 1 --%s "key": "value"`, MetadataPrefix) + + parsedMetadata, err := ParseMetadata(sql) + require.ErrorContains(t, err, "failed to unmarshal metadata from sql") + require.Equal(t, Metadata{}, parsedMetadata) +} + +func TestParseMetadataFromInvalidSqlCommentPrefix(t *testing.T) { + metadata := NewMetadata("123", resources.Account, CreateOperation) + sql := "SELECT 1" + + bytes, err := json.Marshal(metadata) + require.NoError(t, 
err) + + parsedMetadata, err := ParseMetadata(fmt.Sprintf("%s --invalid_prefix %s", sql, string(bytes))) + require.ErrorContains(t, err, "failed to parse metadata from sql") + require.Equal(t, Metadata{}, parsedMetadata) +} diff --git a/pkg/resources/common.go b/pkg/resources/common.go index 8a5df06f11..36a1da648a 100644 --- a/pkg/resources/common.go +++ b/pkg/resources/common.go @@ -5,6 +5,10 @@ import ( "regexp" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/go-cty/cty" @@ -101,3 +105,45 @@ func ImportName[T sdk.AccountObjectIdentifier | sdk.DatabaseObjectIdentifier | s return []*schema.ResourceData{d}, nil } + +func TrackingImportWrapper(resourceName resources.Resource, importImplementation schema.StateContextFunc) schema.StateContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.ImportOperation)) + return importImplementation(ctx, d, meta) + } +} + +func TrackingCreateWrapper(resourceName resources.Resource, createImplementation schema.CreateContextFunc) schema.CreateContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.CreateOperation)) + return createImplementation(ctx, d, meta) + } +} + +func TrackingReadWrapper(resourceName resources.Resource, readImplementation schema.ReadContextFunc) schema.ReadContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.ReadOperation)) + return 
readImplementation(ctx, d, meta) + } +} + +func TrackingUpdateWrapper(resourceName resources.Resource, updateImplementation schema.UpdateContextFunc) schema.UpdateContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.UpdateOperation)) + return updateImplementation(ctx, d, meta) + } +} + +func TrackingDeleteWrapper(resourceName resources.Resource, deleteImplementation schema.DeleteContextFunc) schema.DeleteContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.DeleteOperation)) + return deleteImplementation(ctx, d, meta) + } +} + +func TrackingCustomDiffWrapper(resourceName resources.Resource, customdiffImplementation schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(ctx context.Context, diff *schema.ResourceDiff, meta any) error { + ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.CustomDiffOperation)) + return customdiffImplementation(ctx, diff, meta) + } +} diff --git a/pkg/resources/schema.go b/pkg/resources/schema.go index 27158b069d..e406eb31e2 100644 --- a/pkg/resources/schema.go +++ b/pkg/resources/schema.go @@ -8,6 +8,8 @@ import ( "slices" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -89,23 +91,24 @@ var schemaSchema = map[string]*schema.Schema{ // Schema returns a pointer to the resource representing a schema. 
func Schema() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSchema, - ReadContext: ReadContextSchema(true), - UpdateContext: UpdateContextSchema, - DeleteContext: DeleteContextSchema, + CreateContext: TrackingCreateWrapper(resources.Schema, CreateContextSchema), + ReadContext: TrackingReadWrapper(resources.Schema, ReadContextSchema(true)), + UpdateContext: TrackingUpdateWrapper(resources.Schema, UpdateContextSchema), + DeleteContext: TrackingDeleteWrapper(resources.Schema, DeleteContextSchema), Description: "Resource used to manage schema objects. For more information, check [schema documentation](https://docs.snowflake.com/en/sql-reference/sql/create-schema).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Schema, customdiff.All( ComputedIfAnyAttributeChanged(schemaSchema, ShowOutputAttributeName, "name", "comment", "with_managed_access", "is_transient"), ComputedIfAnyAttributeChanged(schemaSchema, DescribeOutputAttributeName, "name"), ComputedIfAnyAttributeChanged(schemaSchema, FullyQualifiedNameAttributeName, "name"), ComputedIfAnyAttributeChanged(schemaParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllSchemaParameters), strings.ToLower)...), + // TODO(SNOW-1804424 - next pr): handle custom context in parameters customdiff schemaParametersCustomDiff, - ), + )), Schema: collections.MergeMaps(schemaSchema, schemaParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: ImportSchema, + StateContext: TrackingImportWrapper(resources.Schema, ImportSchema), }, SchemaVersion: 2, diff --git a/pkg/sdk/client.go b/pkg/sdk/client.go index 8f6d66c8a4..134313439d 100644 --- a/pkg/sdk/client.go +++ b/pkg/sdk/client.go @@ -8,6 +8,8 @@ import ( "os" "slices" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/snowflakeenvs" "github.com/jmoiron/sqlx" 
"github.com/luna-duclos/instrumentedsql" @@ -132,7 +134,7 @@ func NewClient(cfg *gosnowflake.Config) (*Client, error) { logger := instrumentedsql.LoggerFunc(func(ctx context.Context, s string, kv ...interface{}) { switch s { case "sql-conn-query", "sql-conn-exec": - log.Printf("[DEBUG] %s: %v (%s)\n", s, kv, ctx.Value(snowflakeAccountLocatorContextKey)) + log.Printf("[DEBUG] %s: %v (%s)\n", s, kv, ctx.Value(SnowflakeAccountLocatorContextKey)) default: return } @@ -264,11 +266,9 @@ func (c *Client) Close() error { return nil } -type snowflakeAccountLocatorContext string +type ContextKey string -const ( - snowflakeAccountLocatorContextKey snowflakeAccountLocatorContext = "snowflake_account_locator" -) +const SnowflakeAccountLocatorContextKey ContextKey = "snowflake_account_locator" // Exec executes a query that does not return rows. func (c *Client) exec(ctx context.Context, sql string) (sql.Result, error) { @@ -277,7 +277,8 @@ func (c *Client) exec(ctx context.Context, sql string) (sql.Result, error) { log.Printf("[DEBUG] sql-conn-exec-dry: %v\n", sql) return nil, nil } - ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + sql = appendQueryMetadata(ctx, sql) result, err := c.db.ExecContext(ctx, sql) return result, decodeDriverError(err) } @@ -289,7 +290,8 @@ func (c *Client) query(ctx context.Context, dest interface{}, sql string) error log.Printf("[DEBUG] sql-conn-query-dry: %v\n", sql) return nil } - ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + sql = appendQueryMetadata(ctx, sql) return decodeDriverError(c.db.SelectContext(ctx, dest, sql)) } @@ -300,6 +302,19 @@ func (c *Client) queryOne(ctx context.Context, dest interface{}, sql string) err log.Printf("[DEBUG] sql-conn-query-one-dry: %v\n", sql) return nil } - ctx = 
context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + sql = appendQueryMetadata(ctx, sql) return decodeDriverError(c.db.GetContext(ctx, dest, sql)) } + +func appendQueryMetadata(ctx context.Context, sql string) string { + if metadata, ok := tracking.FromContext(ctx); ok { + newSql, err := tracking.AppendMetadata(sql, metadata) + if err != nil { + log.Printf("[ERROR] failed to append metadata tracking: %v\n", err) + return sql + } + return newSql + } + return sql +} diff --git a/pkg/sdk/context_functions.go b/pkg/sdk/context_functions.go index 1983189eb0..bbf39a23e3 100644 --- a/pkg/sdk/context_functions.go +++ b/pkg/sdk/context_functions.go @@ -20,6 +20,8 @@ type ContextFunctions interface { CurrentSession(ctx context.Context) (string, error) CurrentUser(ctx context.Context) (AccountObjectIdentifier, error) CurrentSessionDetails(ctx context.Context) (*CurrentSessionDetails, error) + + // TODO(SNOW-1805152): Remove this and utilize gosnowflake.WithQueryIDChan instead whenever query id is needed LastQueryId(ctx context.Context) (string, error) // Session Object functions. diff --git a/pkg/sdk/integration_test_imports.go b/pkg/sdk/integration_test_imports.go index aaf396b739..ab759dbce1 100644 --- a/pkg/sdk/integration_test_imports.go +++ b/pkg/sdk/integration_test_imports.go @@ -12,26 +12,22 @@ import ( // All the contents of this file were added to be able to use them outside the sdk package (i.e. integration tests package). // It was easier to do it that way, so that we do not include big rename changes in the first moving PR. 
-// ExecForTests is an exact copy of exec (that is unexported), that some integration tests/helpers were using +// ExecForTests is forwarding function for Client.exec (that is unexported), that some integration tests/helpers were using // TODO: remove after we have all usages covered by SDK (for now it means implementing stages, tables, and tags) func (c *Client) ExecForTests(ctx context.Context, sql string) (sql.Result, error) { - ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) - result, err := c.db.ExecContext(ctx, sql) - return result, decodeDriverError(err) + return c.exec(ctx, sql) } -// QueryOneForTests is an exact copy of queryOne (that is unexported), that some integration tests/helpers were using +// QueryOneForTests is forwarding function for Client.queryOne (that is unexported), that some integration tests/helpers were using // TODO: remove after introducing all resources using this func (c *Client) QueryOneForTests(ctx context.Context, dest interface{}, sql string) error { - ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) - return decodeDriverError(c.db.GetContext(ctx, dest, sql)) + return c.queryOne(ctx, dest, sql) } -// QueryForTests is an exact copy of query (that is unexported), that some integration tests/helpers were using +// QueryForTests is forwarding function for Client.query (that is unexported), that some integration tests/helpers were using // TODO: remove after introducing all resources using this func (c *Client) QueryForTests(ctx context.Context, dest interface{}, sql string) error { - ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) - return decodeDriverError(c.db.SelectContext(ctx, dest, sql)) + return c.query(ctx, dest, sql) } func ErrorsEqual(t *testing.T, expected error, actual error) { diff --git a/pkg/sdk/testint/basic_object_tracking_integration_test.go b/pkg/sdk/testint/basic_object_tracking_integration_test.go new file mode 
100644 index 0000000000..673eb31df3 --- /dev/null +++ b/pkg/sdk/testint/basic_object_tracking_integration_test.go @@ -0,0 +1,113 @@ +package testint + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/snowflakedb/gosnowflake" + "github.com/stretchr/testify/require" +) + +// Research for basic object tracking done as part of SNOW-1737787 + +// https://docs.snowflake.com/en/sql-reference/parameters#query-tag +func TestInt_ContextQueryTags(t *testing.T) { + client := testClient(t) + ctx := context.Background() + + // set query_tag on user level + userQueryTag := "user query tag" + testClientHelper().User.AlterCurrentUser(t, &sdk.AlterUserOptions{ + Set: &sdk.UserSet{ + SessionParameters: &sdk.SessionParameters{ + QueryTag: sdk.String(userQueryTag), + }, + }, + }) + t.Cleanup(func() { + testClientHelper().User.AlterCurrentUser(t, &sdk.AlterUserOptions{ + Unset: &sdk.UserUnset{ + SessionParameters: &sdk.SessionParametersUnset{ + QueryTag: sdk.Bool(true), + }, + }, + }) + }) + queryId := executeQueryAndReturnQueryId(t, context.Background(), client) + queryTagResult := testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) + require.Equal(t, userQueryTag, queryTagResult) + + // set query_tag on session level + sessionQueryTag := "session query tag" + require.NoError(t, client.Sessions.AlterSession(ctx, &sdk.AlterSessionOptions{ + Set: &sdk.SessionSet{ + SessionParameters: &sdk.SessionParameters{ + QueryTag: sdk.String(sessionQueryTag), + }, + }, + })) + t.Cleanup(func() { + require.NoError(t, client.Sessions.AlterSession(ctx, &sdk.AlterSessionOptions{ + Unset: &sdk.SessionUnset{ + SessionParametersUnset: &sdk.SessionParametersUnset{ + QueryTag: sdk.Bool(true), + }, + }, + })) + }) + queryId = executeQueryAndReturnQueryId(t, context.Background(), client) + queryTagResult = testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) + require.Equal(t, 
sessionQueryTag, queryTagResult) + + // set query_tag on query level + perQueryQueryTag := "per-query query tag" + ctxWithQueryTag := gosnowflake.WithQueryTag(context.Background(), perQueryQueryTag) + queryId = executeQueryAndReturnQueryId(t, ctxWithQueryTag, client) + queryTagResult = testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) + require.Equal(t, perQueryQueryTag, queryTagResult) +} + +func executeQueryAndReturnQueryId(t *testing.T, ctx context.Context, client *sdk.Client) string { + t.Helper() + queryIdChan := make(chan string, 1) + ctx = gosnowflake.WithQueryIDChan(ctx, queryIdChan) + + _, err := client.QueryUnsafe(ctx, "SELECT 1") + require.NoError(t, err) + + return <-queryIdChan +} + +// https://select.dev/posts/snowflake-query-tags#using-query-comments-instead-of-query-tags +func TestInt_QueryComment(t *testing.T) { + client := testClient(t) + ctx := context.Background() + + queryIdChan := make(chan string, 1) + metadata := `{"comment": "some comment"}` + _, err := client.QueryUnsafe(gosnowflake.WithQueryIDChan(ctx, queryIdChan), fmt.Sprintf(`SELECT 1; --%s`, metadata)) + require.NoError(t, err) + queryId := <-queryIdChan + + queryText := testClientHelper().InformationSchema.GetQueryTextByQueryId(t, queryId) + require.Equal(t, metadata, strings.Split(queryText, "--")[1]) +} + +func TestInt_AppName(t *testing.T) { + // https://community.snowflake.com/s/article/How-to-see-application-name-added-in-the-connection-string-in-Snowsight + t.Skip("there no way to check client application name by querying Snowflake's") + + version := "v0.99.0" + config := sdk.DefaultConfig() + config.Application = fmt.Sprintf("terraform-provider-snowflake:%s", version) + client, err := sdk.NewClient(config) + require.NoError(t, err) + + _, err = client.QueryUnsafe(context.Background(), "SELECT 1") + require.NoError(t, err) +} + +// TODO(SNOW-1805150): Document potential usage of connection string diff --git a/pkg/sdk/testint/client_integration_test.go 
b/pkg/sdk/testint/client_integration_test.go new file mode 100644 index 0000000000..47a38e5449 --- /dev/null +++ b/pkg/sdk/testint/client_integration_test.go @@ -0,0 +1,63 @@ +package testint + +import ( + "context" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/snowflakedb/gosnowflake" + "github.com/stretchr/testify/require" +) + +func TestInt_Client_AdditionalMetadata(t *testing.T) { + client := testClient(t) + metadata := tracking.NewMetadata("v1.13.1002-rc-test", resources.Database, tracking.CreateOperation) + + assertQueryMetadata := func(t *testing.T, queryId string) { + t.Helper() + queryText := testClientHelper().InformationSchema.GetQueryTextByQueryId(t, queryId) + parsedMetadata, err := tracking.ParseMetadata(queryText) + require.NoError(t, err) + require.Equal(t, metadata, parsedMetadata) + } + + t.Run("query one", func(t *testing.T) { + queryIdChan := make(chan string, 1) + ctx := context.Background() + ctx = tracking.NewContext(ctx, metadata) + ctx = gosnowflake.WithQueryIDChan(ctx, queryIdChan) + row := struct { + One int `db:"ONE"` + }{} + err := client.QueryOneForTests(ctx, &row, "SELECT 1 AS ONE") + require.NoError(t, err) + + assertQueryMetadata(t, <-queryIdChan) + }) + + t.Run("query", func(t *testing.T) { + queryIdChan := make(chan string, 1) + ctx := context.Background() + ctx = tracking.NewContext(ctx, metadata) + ctx = gosnowflake.WithQueryIDChan(ctx, queryIdChan) + var rows []struct { + One int `db:"ONE"` + } + err := client.QueryForTests(ctx, &rows, "SELECT 1 AS ONE") + require.NoError(t, err) + + assertQueryMetadata(t, <-queryIdChan) + }) + + t.Run("exec", func(t *testing.T) { + queryIdChan := make(chan string, 1) + ctx := context.Background() + ctx = tracking.NewContext(ctx, metadata) + ctx = gosnowflake.WithQueryIDChan(ctx, queryIdChan) + _, err := client.ExecForTests(ctx, "SELECT 1") + 
require.NoError(t, err) + + assertQueryMetadata(t, <-queryIdChan) + }) +} From 8907d9dfea69d6b8ac26fc0a9e249676f332f8b3 Mon Sep 17 00:00:00 2001 From: Jakub Michalak Date: Fri, 22 Nov 2024 10:32:28 +0100 Subject: [PATCH 02/10] feat: Add tags data source (#3211) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - add `tags` data source - add missing examples for `streams` data source ## Test Plan * [x] acceptance tests * [ ] … ## References https://docs.snowflake.com/en/sql-reference/sql/show-tags https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1372 --- MIGRATION_GUIDE.md | 7 + docs/data-sources/streams.md | 117 ++++++++++++- docs/data-sources/tags.md | 156 +++++++++++++++++ .../snowflake_streams/data-source.tf | 119 ++++++++++++- .../snowflake_tags/data-source.tf | 93 ++++++++++ .../tag_show_output_ext.go | 12 ++ pkg/datasources/tags.go | 72 ++++++++ pkg/datasources/tags_acceptance_test.go | 160 ++++++++++++++++++ .../testdata/TestAcc_Tags/basic/test.tf | 15 ++ .../testdata/TestAcc_Tags/basic/variables.tf | 19 +++ .../TestAcc_Tags/non_existing/test.tf | 10 ++ pkg/provider/provider.go | 1 + pkg/sdk/tags_dto.go | 4 +- pkg/sdk/tags_dto_builders.go | 4 +- pkg/sdk/tags_impl.go | 4 +- templates/data-sources/tags.md.tmpl | 24 +++ 16 files changed, 804 insertions(+), 13 deletions(-) create mode 100644 docs/data-sources/tags.md create mode 100644 examples/data-sources/snowflake_tags/data-source.tf create mode 100644 pkg/datasources/tags.go create mode 100644 pkg/datasources/tags_acceptance_test.go create mode 100644 pkg/datasources/testdata/TestAcc_Tags/basic/test.tf create mode 100644 pkg/datasources/testdata/TestAcc_Tags/basic/variables.tf create mode 100644 pkg/datasources/testdata/TestAcc_Tags/non_existing/test.tf create mode 100644 templates/data-sources/tags.md.tmpl diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md index 2c75e172d8..148dc15e45 100644 --- a/MIGRATION_GUIDE.md +++ b/MIGRATION_GUIDE.md @@ 
-9,6 +9,13 @@ across different versions. ## v0.98.0 ➞ v0.99.0 +### *(new feature)* snowflake_tags datasource +Added a new datasource enabling querying and filtering tags. Notes: +- all results are stored in `tags` field. +- `like` field enables tags filtering by name. +- `in` field enables tags filtering by `account`, `database`, `schema`, `application` and `application_package`. +- `SHOW TAGS` output is enclosed in `show_output` field inside `tags`. + ### snowflake_tag_masking_policy_association deprecation `snowflake_tag_masking_policy_association` is now deprecated in favor of `snowflake_tag` with a new `masking_policy` field. It will be removed with the v1 release. Please adjust your configuration files. diff --git a/docs/data-sources/streams.md b/docs/data-sources/streams.md index ee575faa06..62ca70cb22 100644 --- a/docs/data-sources/streams.md +++ b/docs/data-sources/streams.md @@ -14,9 +14,120 @@ Datasource used to get details of filtered streams. Filtering is aligned with th ## Example Usage ```terraform -data "snowflake_streams" "current" { - database = "MYDB" - schema = "MYSCHEMA" +# Simple usage +data "snowflake_streams" "simple" { +} + +output "simple_output" { + value = data.snowflake_streams.simple.streams +} + +# Filtering (like) +data "snowflake_streams" "like" { + like = "stream-name" +} + +output "like_output" { + value = data.snowflake_streams.like.streams +} + +# Filtering by prefix (like) +data "snowflake_streams" "like_prefix" { + like = "prefix%" +} + +output "like_prefix_output" { + value = data.snowflake_streams.like_prefix.streams +} + +# Filtering (limit) +data "snowflake_streams" "limit" { + limit { + rows = 10 + from = "prefix-" + } +} + +output "limit_output" { + value = data.snowflake_streams.limit.streams +} + +# Filtering (in) +data "snowflake_streams" "in_account" { + in { + account = true + } +} + +data "snowflake_streams" "in_database" { + in { + database = "" + } +} + +data "snowflake_streams" "in_schema" { + in { + schema = "." 
+ } +} + +data "snowflake_streams" "in_application" { + in { + application = "" + } +} + +data "snowflake_streams" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_streams.in_account.streams, + "database" : data.snowflake_streams.in_database.streams, + "schema" : data.snowflake_streams.in_schema.streams, + "application" : data.snowflake_streams.in_application.streams, + "application_package" : data.snowflake_streams.in_application_package.streams, + } +} + +output "in_output" { + value = data.snowflake_streams.in.streams +} + +# Without additional data (to limit the number of calls make for every found stream) +data "snowflake_streams" "only_show" { + # with_describe is turned on by default and it calls DESCRIBE STREAM for every stream found and attaches its output to streams.*.describe_output field + with_describe = false +} + +output "only_show_output" { + value = data.snowflake_streams.only_show.streams +} + +# Ensure the number of streams is equal to at least one element (with the use of postcondition) +data "snowflake_streams" "assert_with_postcondition" { + like = "stream-name%" + lifecycle { + postcondition { + condition = length(self.streams) > 0 + error_message = "there should be at least one stream" + } + } +} + +# Ensure the number of streams is equal to at exactly one element (with the use of check block) +check "stream_check" { + data "snowflake_streams" "assert_with_check_block" { + like = "stream-name" + } + + assert { + condition = length(data.snowflake_streams.assert_with_check_block.streams) == 1 + error_message = "streams filtered by '${data.snowflake_streams.assert_with_check_block.like}' returned ${length(data.snowflake_streams.assert_with_check_block.streams)} streams where one was expected" + } } ``` diff --git a/docs/data-sources/tags.md b/docs/data-sources/tags.md new file mode 100644 index 0000000000..bb8e360071 --- /dev/null +++ b/docs/data-sources/tags.md 
@@ -0,0 +1,156 @@ +--- +page_title: "snowflake_tags Data Source - terraform-provider-snowflake" +subcategory: "" +description: |- + Datasource used to get details of filtered tags. Filtering is aligned with the current possibilities for SHOW TAGS https://docs.snowflake.com/en/sql-reference/sql/show-tags query. The results of SHOW are encapsulated in one output collection tags. +--- + +!> **V1 release candidate** This data source is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the data source if needed. Any errors reported will be resolved with a higher priority. We encourage checking this data source out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. + +# snowflake_tags (Data Source) + +Datasource used to get details of filtered tags. Filtering is aligned with the current possibilities for [SHOW TAGS](https://docs.snowflake.com/en/sql-reference/sql/show-tags) query. The results of SHOW are encapsulated in one output collection `tags`. + +## Example Usage + +```terraform +# Simple usage +data "snowflake_tags" "simple" { +} + +output "simple_output" { + value = data.snowflake_tags.simple.tags +} + +# Filtering (like) +data "snowflake_tags" "like" { + like = "tag-name" +} + +output "like_output" { + value = data.snowflake_tags.like.tags +} + +# Filtering by prefix (like) +data "snowflake_tags" "like_prefix" { + like = "prefix%" +} + +output "like_prefix_output" { + value = data.snowflake_tags.like_prefix.tags +} + +# Filtering (in) +data "snowflake_tags" "in_account" { + in { + account = true + } +} + +data "snowflake_tags" "in_database" { + in { + database = "" + } +} + +data "snowflake_tags" "in_schema" { + in { + schema = "." 
+ } +} + +data "snowflake_tags" "in_application" { + in { + application = "" + } +} + +data "snowflake_tags" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_tags.in_account.tags, + "database" : data.snowflake_tags.in_database.tags, + "schema" : data.snowflake_tags.in_schema.tags, + "application" : data.snowflake_tags.in_application.tags, + "application_package" : data.snowflake_tags.in_application_package.tags, + } +} + +output "in_output" { + value = data.snowflake_tags.in.tags +} + +# Ensure the number of tags is equal to at least one element (with the use of postcondition) +data "snowflake_tags" "assert_with_postcondition" { + like = "tag-name%" + lifecycle { + postcondition { + condition = length(self.tags) > 0 + error_message = "there should be at least one tag" + } + } +} + +# Ensure the number of tags is equal to at exactly one element (with the use of check block) +check "tag_check" { + data "snowflake_tags" "assert_with_check_block" { + like = "tag-name" + } + + assert { + condition = length(data.snowflake_tags.assert_with_check_block.tags) == 1 + error_message = "tags filtered by '${data.snowflake_tags.assert_with_check_block.like}' returned ${length(data.snowflake_tags.assert_with_check_block.tags)} tags where one was expected" + } +} +``` + + +## Schema + +### Optional + +- `in` (Block List, Max: 1) IN clause to filter the list of objects (see [below for nested schema](#nestedblock--in)) +- `like` (String) Filters the output with **case-insensitive** pattern, with support for SQL wildcard characters (`%` and `_`). + +### Read-Only + +- `id` (String) The ID of this resource. +- `tags` (List of Object) Holds the aggregated output of all tags details queries. (see [below for nested schema](#nestedatt--tags)) + + +### Nested Schema for `in` + +Optional: + +- `account` (Boolean) Returns records for the entire account. 
+- `application` (String) Returns records for the specified application. +- `application_package` (String) Returns records for the specified application package. +- `database` (String) Returns records for the current database in use or for a specified database. +- `schema` (String) Returns records for the current schema in use or a specified schema. Use fully qualified name. + + + +### Nested Schema for `tags` + +Read-Only: + +- `show_output` (List of Object) (see [below for nested schema](#nestedobjatt--tags--show_output)) + + +### Nested Schema for `tags.show_output` + +Read-Only: + +- `allowed_values` (Set of String) +- `comment` (String) +- `created_on` (String) +- `database_name` (String) +- `name` (String) +- `owner` (String) +- `owner_role_type` (String) +- `schema_name` (String) diff --git a/examples/data-sources/snowflake_streams/data-source.tf b/examples/data-sources/snowflake_streams/data-source.tf index 00afc182f3..f275f36fb0 100644 --- a/examples/data-sources/snowflake_streams/data-source.tf +++ b/examples/data-sources/snowflake_streams/data-source.tf @@ -1,4 +1,115 @@ -data "snowflake_streams" "current" { - database = "MYDB" - schema = "MYSCHEMA" -} \ No newline at end of file +# Simple usage +data "snowflake_streams" "simple" { +} + +output "simple_output" { + value = data.snowflake_streams.simple.streams +} + +# Filtering (like) +data "snowflake_streams" "like" { + like = "stream-name" +} + +output "like_output" { + value = data.snowflake_streams.like.streams +} + +# Filtering by prefix (like) +data "snowflake_streams" "like_prefix" { + like = "prefix%" +} + +output "like_prefix_output" { + value = data.snowflake_streams.like_prefix.streams +} + +# Filtering (limit) +data "snowflake_streams" "limit" { + limit { + rows = 10 + from = "prefix-" + } +} + +output "limit_output" { + value = data.snowflake_streams.limit.streams +} + +# Filtering (in) +data "snowflake_streams" "in_account" { + in { + account = true + } +} + +data "snowflake_streams" 
"in_database" { + in { + database = "" + } +} + +data "snowflake_streams" "in_schema" { + in { + schema = "." + } +} + +data "snowflake_streams" "in_application" { + in { + application = "" + } +} + +data "snowflake_streams" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_streams.in_account.streams, + "database" : data.snowflake_streams.in_database.streams, + "schema" : data.snowflake_streams.in_schema.streams, + "application" : data.snowflake_streams.in_application.streams, + "application_package" : data.snowflake_streams.in_application_package.streams, + } +} + +output "in_output" { + value = data.snowflake_streams.in.streams +} + +# Without additional data (to limit the number of calls make for every found stream) +data "snowflake_streams" "only_show" { + # with_describe is turned on by default and it calls DESCRIBE STREAM for every stream found and attaches its output to streams.*.describe_output field + with_describe = false +} + +output "only_show_output" { + value = data.snowflake_streams.only_show.streams +} + +# Ensure the number of streams is equal to at least one element (with the use of postcondition) +data "snowflake_streams" "assert_with_postcondition" { + like = "stream-name%" + lifecycle { + postcondition { + condition = length(self.streams) > 0 + error_message = "there should be at least one stream" + } + } +} + +# Ensure the number of streams is equal to at exactly one element (with the use of check block) +check "stream_check" { + data "snowflake_streams" "assert_with_check_block" { + like = "stream-name" + } + + assert { + condition = length(data.snowflake_streams.assert_with_check_block.streams) == 1 + error_message = "streams filtered by '${data.snowflake_streams.assert_with_check_block.like}' returned ${length(data.snowflake_streams.assert_with_check_block.streams)} streams where one was expected" + } +} diff --git 
a/examples/data-sources/snowflake_tags/data-source.tf b/examples/data-sources/snowflake_tags/data-source.tf new file mode 100644 index 0000000000..0152d21715 --- /dev/null +++ b/examples/data-sources/snowflake_tags/data-source.tf @@ -0,0 +1,93 @@ +# Simple usage +data "snowflake_tags" "simple" { +} + +output "simple_output" { + value = data.snowflake_tags.simple.tags +} + +# Filtering (like) +data "snowflake_tags" "like" { + like = "tag-name" +} + +output "like_output" { + value = data.snowflake_tags.like.tags +} + +# Filtering by prefix (like) +data "snowflake_tags" "like_prefix" { + like = "prefix%" +} + +output "like_prefix_output" { + value = data.snowflake_tags.like_prefix.tags +} + +# Filtering (in) +data "snowflake_tags" "in_account" { + in { + account = true + } +} + +data "snowflake_tags" "in_database" { + in { + database = "" + } +} + +data "snowflake_tags" "in_schema" { + in { + schema = "." + } +} + +data "snowflake_tags" "in_application" { + in { + application = "" + } +} + +data "snowflake_tags" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_tags.in_account.tags, + "database" : data.snowflake_tags.in_database.tags, + "schema" : data.snowflake_tags.in_schema.tags, + "application" : data.snowflake_tags.in_application.tags, + "application_package" : data.snowflake_tags.in_application_package.tags, + } +} + +output "in_output" { + value = data.snowflake_tags.in.tags +} + +# Ensure the number of tags is equal to at least one element (with the use of postcondition) +data "snowflake_tags" "assert_with_postcondition" { + like = "tag-name%" + lifecycle { + postcondition { + condition = length(self.tags) > 0 + error_message = "there should be at least one tag" + } + } +} + +# Ensure the number of tags is equal to at exactly one element (with the use of check block) +check "tag_check" { + data "snowflake_tags" "assert_with_check_block" { + like = "tag-name" + } + + assert { + 
condition = length(data.snowflake_tags.assert_with_check_block.tags) == 1 + error_message = "tags filtered by '${data.snowflake_tags.assert_with_check_block.like}' returned ${length(data.snowflake_tags.assert_with_check_block.tags)} tags where one was expected" + } +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/tag_show_output_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/tag_show_output_ext.go index 9fb4de087b..46873a7668 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/tag_show_output_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/tag_show_output_ext.go @@ -3,10 +3,22 @@ package resourceshowoutputassert import ( "fmt" "strconv" + "testing" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" ) +// TagsDatasourceShowOutput is a temporary workaround to have better show output assertions in data source acceptance tests. +func TagsDatasourceShowOutput(t *testing.T, name string) *TagShowOutputAssert { + t.Helper() + + s := TagShowOutputAssert{ + ResourceAssert: assert.NewDatasourceAssert("data."+name, "show_output", "tags.0."), + } + s.AddAssertion(assert.ValueSet("show_output.#", "1")) + return &s +} + func (s *TagShowOutputAssert) HasCreatedOnNotEmpty() *TagShowOutputAssert { s.AddAssertion(assert.ResourceShowOutputValuePresent("created_on")) return s diff --git a/pkg/datasources/tags.go b/pkg/datasources/tags.go new file mode 100644 index 0000000000..05b551fc0b --- /dev/null +++ b/pkg/datasources/tags.go @@ -0,0 +1,72 @@ +package datasources + +import ( + "context" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var tagsSchema = map[string]*schema.Schema{ + "like": likeSchema, + "in": extendedInSchema, + "tags": { + Type: schema.TypeList, + Computed: true, + Description: "Holds the aggregated output of all tags details queries.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + resources.ShowOutputAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Holds the output of SHOW TAGS.", + Elem: &schema.Resource{ + Schema: schemas.ShowTagSchema, + }, + }, + }, + }, + }, +} + +func Tags() *schema.Resource { + return &schema.Resource{ + ReadContext: ReadTags, + Schema: tagsSchema, + Description: "Datasource used to get details of filtered tags. Filtering is aligned with the current possibilities for [SHOW TAGS](https://docs.snowflake.com/en/sql-reference/sql/show-tags) query. The results of SHOW are encapsulated in one output collection `tags`.", + } +} + +func ReadTags(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + client := meta.(*provider.Context).Client + req := sdk.ShowTagRequest{} + + handleLike(d, &req.Like) + err := handleExtendedIn(d, &req.In) + if err != nil { + return diag.FromErr(err) + } + + tags, err := client.Tags.Show(ctx, &req) + if err != nil { + return diag.FromErr(err) + } + d.SetId("tags_read") + + flattenedTags := make([]map[string]any, len(tags)) + for i, tag := range tags { + tag := tag + flattenedTags[i] = map[string]any{ + resources.ShowOutputAttributeName: []map[string]any{schemas.TagToSchema(&tag)}, + } + } + if err := d.Set("tags", flattenedTags); err != nil { + return diag.FromErr(err) + } + return nil +} diff --git a/pkg/datasources/tags_acceptance_test.go b/pkg/datasources/tags_acceptance_test.go new file mode 100644 index 0000000000..a29eacc082 --- /dev/null +++ b/pkg/datasources/tags_acceptance_test.go @@ -0,0 +1,160 @@ +package datasources_test + +import ( + "fmt" + "regexp" + "testing" + + acc 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + testconfig "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/snowflakeroles" + tfconfig "github.com/hashicorp/terraform-plugin-testing/config" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/tfversion" +) + +func TestAcc_Tags(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + + model := model.Tag("test", id.DatabaseName(), id.Name(), id.SchemaName()). + WithComment("foo"). 
+ WithAllowedValuesValue(tfconfig.ListVariable(tfconfig.StringVariable("foo"), tfconfig.StringVariable(""), tfconfig.StringVariable("bar"))) + + dsName := "data.snowflake_tags.test" + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Tags/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, model), + + Check: assert.AssertThat(t, + assert.Check(resource.TestCheckResourceAttr(dsName, "tags.#", "1")), + + resourceshowoutputassert.TagsDatasourceShowOutput(t, "snowflake_tags.test"). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasComment("foo"). + HasOwner(snowflakeroles.Accountadmin.Name()). + HasOwnerRoleType("ROLE"), + assert.Check(resource.TestCheckResourceAttr(model.ResourceReference(), "show_output.0.allowed_values.#", "3")), + assert.Check(resource.TestCheckTypeSetElemAttr(model.ResourceReference(), "show_output.0.allowed_values.*", "foo")), + assert.Check(resource.TestCheckTypeSetElemAttr(model.ResourceReference(), "show_output.0.allowed_values.*", "")), + assert.Check(resource.TestCheckTypeSetElemAttr(model.ResourceReference(), "show_output.0.allowed_values.*", "bar")), + ), + }, + }, + }) +} + +func tagsDatasource(like, resourceName string) string { + return fmt.Sprintf(` +data "snowflake_tags" "test" { + depends_on = [%s] + + like = "%s" +} +`, resourceName, like) +} + +func TestAcc_Tags_Filtering(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + prefix := random.AlphaN(4) + id1 := acc.TestClient().Ids.RandomSchemaObjectIdentifierWithPrefix(prefix) + id2 := 
acc.TestClient().Ids.RandomSchemaObjectIdentifierWithPrefix(prefix) + id3 := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + + model1 := model.Tag("test_1", id1.DatabaseName(), id1.Name(), id1.SchemaName()) + model2 := model.Tag("test_2", id2.DatabaseName(), id2.Name(), id2.SchemaName()) + model3 := model.Tag("test_3", id3.DatabaseName(), id3.Name(), id3.SchemaName()) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + PreCheck: func() { acc.TestAccPreCheck(t) }, + Steps: []resource.TestStep{ + { + Config: testconfig.FromModel(t, model1) + testconfig.FromModel(t, model2) + testconfig.FromModel(t, model3) + tagsDatasourceLike(id1.Name()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tags.test", "tags.#", "1"), + ), + }, + { + Config: testconfig.FromModel(t, model1) + testconfig.FromModel(t, model2) + testconfig.FromModel(t, model3) + tagsDatasourceLike(prefix+"%"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tags.test", "tags.#", "2"), + ), + }, + }, + }) +} + +func tagsDatasourceLike(like string) string { + return fmt.Sprintf(` +data "snowflake_tags" "test" { + depends_on = [snowflake_tag.test_1, snowflake_tag.test_2, snowflake_tag.test_3] + + like = "%s" +} +`, like) +} + +func TestAcc_Tags_emptyIn(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: tagsDatasourceEmptyIn(), + ExpectError: regexp.MustCompile("Invalid combination of arguments"), + }, + }, + }) +} + +func tagsDatasourceEmptyIn() string { + return ` 
+data "snowflake_tags" "test" { + in { + } +} +` +} + +func TestAcc_Tags_NotFound_WithPostConditions(t *testing.T) { + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Tags/non_existing"), + ExpectError: regexp.MustCompile("there should be at least one tag"), + }, + }, + }) +} diff --git a/pkg/datasources/testdata/TestAcc_Tags/basic/test.tf b/pkg/datasources/testdata/TestAcc_Tags/basic/test.tf new file mode 100644 index 0000000000..595bd930e1 --- /dev/null +++ b/pkg/datasources/testdata/TestAcc_Tags/basic/test.tf @@ -0,0 +1,15 @@ +resource "snowflake_tag" "test" { + name = var.name + schema = var.schema + database = var.database + + allowed_values = var.allowed_values + + comment = var.comment +} + +data "snowflake_tags" "test" { + depends_on = [snowflake_tag.test] + + like = var.name +} diff --git a/pkg/datasources/testdata/TestAcc_Tags/basic/variables.tf b/pkg/datasources/testdata/TestAcc_Tags/basic/variables.tf new file mode 100644 index 0000000000..8b9daeb051 --- /dev/null +++ b/pkg/datasources/testdata/TestAcc_Tags/basic/variables.tf @@ -0,0 +1,19 @@ +variable "name" { + type = string +} + +variable "database" { + type = string +} + +variable "schema" { + type = string +} + +variable "comment" { + type = string +} + +variable "allowed_values" { + type = set(string) +} diff --git a/pkg/datasources/testdata/TestAcc_Tags/non_existing/test.tf b/pkg/datasources/testdata/TestAcc_Tags/non_existing/test.tf new file mode 100644 index 0000000000..919b121905 --- /dev/null +++ b/pkg/datasources/testdata/TestAcc_Tags/non_existing/test.tf @@ -0,0 +1,10 @@ +data "snowflake_tags" "test" { + like = "non-existing-tag" + + lifecycle { + postcondition { + condition = length(self.tags) > 0 + error_message = "there should be at 
least one tag" + } + } +} diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go index ddc0512a20..94318dc77f 100644 --- a/pkg/provider/provider.go +++ b/pkg/provider/provider.go @@ -609,6 +609,7 @@ func getDataSources() map[string]*schema.Resource { "snowflake_system_get_privatelink_config": datasources.SystemGetPrivateLinkConfig(), "snowflake_system_get_snowflake_platform_info": datasources.SystemGetSnowflakePlatformInfo(), "snowflake_tables": datasources.Tables(), + "snowflake_tags": datasources.Tags(), "snowflake_tasks": datasources.Tasks(), "snowflake_users": datasources.Users(), "snowflake_views": datasources.Views(), diff --git a/pkg/sdk/tags_dto.go b/pkg/sdk/tags_dto.go index 4aa1a50df4..10580f9ceb 100644 --- a/pkg/sdk/tags_dto.go +++ b/pkg/sdk/tags_dto.go @@ -73,8 +73,8 @@ type TagUnsetRequest struct { } type ShowTagRequest struct { - like *Like - in *ExtendedIn + Like *Like + In *ExtendedIn } type DropTagRequest struct { diff --git a/pkg/sdk/tags_dto_builders.go b/pkg/sdk/tags_dto_builders.go index 4e8a155989..505d0ca854 100644 --- a/pkg/sdk/tags_dto_builders.go +++ b/pkg/sdk/tags_dto_builders.go @@ -195,14 +195,14 @@ func NewShowTagRequest() *ShowTagRequest { } func (s *ShowTagRequest) WithLike(pattern string) *ShowTagRequest { - s.like = &Like{ + s.Like = &Like{ Pattern: String(pattern), } return s } func (s *ShowTagRequest) WithIn(in *ExtendedIn) *ShowTagRequest { - s.in = in + s.In = in return s } diff --git a/pkg/sdk/tags_impl.go b/pkg/sdk/tags_impl.go index 9813b03803..d0e534ae57 100644 --- a/pkg/sdk/tags_impl.go +++ b/pkg/sdk/tags_impl.go @@ -116,8 +116,8 @@ func (s *AlterTagRequest) toOpts() *alterTagOptions { func (s *ShowTagRequest) toOpts() *showTagOptions { return &showTagOptions{ - Like: s.like, - In: s.in, + Like: s.Like, + In: s.In, } } diff --git a/templates/data-sources/tags.md.tmpl b/templates/data-sources/tags.md.tmpl new file mode 100644 index 0000000000..2aef1476da --- /dev/null +++ b/templates/data-sources/tags.md.tmpl @@ 
-0,0 +1,24 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ if gt (len (split .Description "")) 1 -}} +{{ index (split .Description "") 1 | plainmarkdown | trimspace | prefixlines " " }} +{{- else -}} +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +{{- end }} +--- + +!> **V1 release candidate** This data source is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the data source if needed. Any errors reported will be resolved with a higher priority. We encourage checking this data source out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. + +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile (printf "examples/data-sources/%s/data-source.tf" .Name)}} +{{- end }} + +{{ .SchemaMarkdown | trimspace }} From e44f2e1938807285ed4d521b56d2efeab7b927bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Mon, 25 Nov 2024 10:07:06 +0100 Subject: [PATCH 03/10] chore: basic object tracking part 2 (#3214) follow up to https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3205 ## Changes - applied comments from the previous pr (added test in resource and account locator key was changed to `struct{}`, etc.) - applied tracking wrappers to every resource that has implemented context method (in old ones we create context ourselves; I can go and add the context there too), also imports implemented as passthrough were skipped. 
- added missing resource names to `resource` type - added entry to our v1 guidelines to add wrapper around terraform functions - modified test client for information schema a bit ## Coming up - data sources (after pr that adds data source names similar to the `resrouces.Resource` interface) --- .../resourceassert/database_resource_gen.go | 257 +++++++++++++++++ .../resourceassert/schema_resource_gen.go | 267 ++++++++++++++++++ .../helpers/information_schema_client.go | 37 ++- pkg/provider/resources/resources.go | 9 + pkg/resources/account.go | 6 +- pkg/resources/account_role.go | 16 +- pkg/resources/alert.go | 12 +- ...tegration_with_authorization_code_grant.go | 16 +- ...ion_integration_with_client_credentials.go | 16 +- ...hentication_integration_with_jwt_bearer.go | 16 +- pkg/resources/authentication_policy.go | 12 +- pkg/resources/cortex_search_service.go | 10 +- pkg/resources/database.go | 16 +- pkg/resources/database_old.go | 4 +- pkg/resources/database_role.go | 16 +- pkg/resources/external_function.go | 10 +- pkg/resources/external_oauth_integration.go | 16 +- pkg/resources/external_volume.go | 16 +- pkg/resources/file_format.go | 6 +- pkg/resources/function.go | 14 +- pkg/resources/grant_account_role.go | 6 +- pkg/resources/grant_application_role.go | 12 +- pkg/resources/grant_database_role.go | 6 +- pkg/resources/grant_ownership.go | 10 +- .../grant_privileges_to_account_role.go | 12 +- .../grant_privileges_to_database_role.go | 12 +- pkg/resources/grant_privileges_to_share.go | 12 +- pkg/resources/masking_policy.go | 16 +- pkg/resources/materialized_view.go | 6 +- pkg/resources/network_policy.go | 16 +- pkg/resources/network_rule.go | 10 +- .../oauth_integration_for_custom_clients.go | 16 +- ...th_integration_for_partner_applications.go | 16 +- pkg/resources/password_policy.go | 6 +- pkg/resources/primary_connection.go | 16 +- pkg/resources/procedure.go | 14 +- pkg/resources/resource_monitor.go | 16 +- pkg/resources/row_access_policy.go | 16 +- 
pkg/resources/saml2_integration.go | 16 +- pkg/resources/schema.go | 1 - pkg/resources/scim_integration.go | 16 +- pkg/resources/secondary_connection.go | 16 +- pkg/resources/secondary_database.go | 16 +- .../secret_with_basic_authentication.go | 16 +- pkg/resources/secret_with_generic_string.go | 16 +- ...ret_with_oauth_authorization_code_grant.go | 16 +- .../secret_with_oauth_client_credentials.go | 16 +- pkg/resources/share.go | 4 +- pkg/resources/shared_database.go | 16 +- pkg/resources/stage.go | 10 +- pkg/resources/stream_on_directory_table.go | 16 +- pkg/resources/stream_on_external_table.go | 16 +- pkg/resources/stream_on_table.go | 16 +- pkg/resources/stream_on_view.go | 16 +- pkg/resources/streamlit.go | 16 +- pkg/resources/table.go | 6 +- pkg/resources/tag.go | 16 +- pkg/resources/tag_association.go | 10 +- .../tag_masking_policy_association.go | 10 +- pkg/resources/task.go | 6 +- pkg/resources/unsafe_execute.go | 6 +- .../usage_tracking_acceptance_test.go | 108 +++++++ pkg/resources/user.go | 44 +-- pkg/resources/view.go | 16 +- pkg/resources/warehouse.go | 16 +- pkg/sdk/client.go | 12 +- .../basic_object_tracking_integration_test.go | 14 +- pkg/sdk/testint/client_integration_test.go | 2 +- 68 files changed, 1135 insertions(+), 360 deletions(-) create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/database_resource_gen.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/schema_resource_gen.go create mode 100644 pkg/resources/usage_tracking_acceptance_test.go diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/database_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/database_resource_gen.go new file mode 100644 index 0000000000..4c3536308d --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/database_resource_gen.go @@ -0,0 +1,257 @@ +// Code generated by assertions generator; DO NOT EDIT. 
+ +package resourceassert + +import ( + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" +) + +type DatabaseResourceAssert struct { + *assert.ResourceAssert +} + +func DatabaseResource(t *testing.T, name string) *DatabaseResourceAssert { + t.Helper() + + return &DatabaseResourceAssert{ + ResourceAssert: assert.NewResourceAssert(name, "resource"), + } +} + +func ImportedDatabaseResource(t *testing.T, id string) *DatabaseResourceAssert { + t.Helper() + + return &DatabaseResourceAssert{ + ResourceAssert: assert.NewImportedResourceAssert(id, "imported resource"), + } +} + +/////////////////////////////////// +// Attribute value string checks // +/////////////////////////////////// + +func (d *DatabaseResourceAssert) HasCatalogString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("catalog", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasCommentString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("comment", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasDataRetentionTimeInDaysString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("data_retention_time_in_days", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasDefaultDdlCollationString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("default_ddl_collation", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasDropPublicSchemaOnCreationString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("drop_public_schema_on_creation", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasEnableConsoleOutputString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("enable_console_output", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasExternalVolumeString(expected string) *DatabaseResourceAssert { + 
d.AddAssertion(assert.ValueSet("external_volume", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasFullyQualifiedNameString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("fully_qualified_name", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasIsTransientString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("is_transient", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasLogLevelString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("log_level", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasMaxDataExtensionTimeInDaysString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("max_data_extension_time_in_days", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasNameString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("name", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasQuotedIdentifiersIgnoreCaseString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("quoted_identifiers_ignore_case", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasReplaceInvalidCharactersString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("replace_invalid_characters", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasReplicationString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("replication", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasStorageSerializationPolicyString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("storage_serialization_policy", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasSuspendTaskAfterNumFailuresString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("suspend_task_after_num_failures", expected)) + return d +} + +func (d 
*DatabaseResourceAssert) HasTaskAutoRetryAttemptsString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("task_auto_retry_attempts", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasTraceLevelString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("trace_level", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasUserTaskManagedInitialWarehouseSizeString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasUserTaskMinimumTriggerIntervalInSecondsString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("user_task_minimum_trigger_interval_in_seconds", expected)) + return d +} + +func (d *DatabaseResourceAssert) HasUserTaskTimeoutMsString(expected string) *DatabaseResourceAssert { + d.AddAssertion(assert.ValueSet("user_task_timeout_ms", expected)) + return d +} + +//////////////////////////// +// Attribute empty checks // +//////////////////////////// + +func (d *DatabaseResourceAssert) HasNoCatalog() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("catalog")) + return d +} + +func (d *DatabaseResourceAssert) HasNoComment() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("comment")) + return d +} + +func (d *DatabaseResourceAssert) HasNoDataRetentionTimeInDays() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("data_retention_time_in_days")) + return d +} + +func (d *DatabaseResourceAssert) HasNoDefaultDdlCollation() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("default_ddl_collation")) + return d +} + +func (d *DatabaseResourceAssert) HasNoDropPublicSchemaOnCreation() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("drop_public_schema_on_creation")) + return d +} + +func (d *DatabaseResourceAssert) HasNoEnableConsoleOutput() *DatabaseResourceAssert { + 
d.AddAssertion(assert.ValueNotSet("enable_console_output")) + return d +} + +func (d *DatabaseResourceAssert) HasNoExternalVolume() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("external_volume")) + return d +} + +func (d *DatabaseResourceAssert) HasNoFullyQualifiedName() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("fully_qualified_name")) + return d +} + +func (d *DatabaseResourceAssert) HasNoIsTransient() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("is_transient")) + return d +} + +func (d *DatabaseResourceAssert) HasNoLogLevel() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("log_level")) + return d +} + +func (d *DatabaseResourceAssert) HasNoMaxDataExtensionTimeInDays() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("max_data_extension_time_in_days")) + return d +} + +func (d *DatabaseResourceAssert) HasNoName() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("name")) + return d +} + +func (d *DatabaseResourceAssert) HasNoQuotedIdentifiersIgnoreCase() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("quoted_identifiers_ignore_case")) + return d +} + +func (d *DatabaseResourceAssert) HasNoReplaceInvalidCharacters() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("replace_invalid_characters")) + return d +} + +func (d *DatabaseResourceAssert) HasNoReplication() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("replication")) + return d +} + +func (d *DatabaseResourceAssert) HasNoStorageSerializationPolicy() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("storage_serialization_policy")) + return d +} + +func (d *DatabaseResourceAssert) HasNoSuspendTaskAfterNumFailures() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("suspend_task_after_num_failures")) + return d +} + +func (d *DatabaseResourceAssert) HasNoTaskAutoRetryAttempts() *DatabaseResourceAssert { + 
d.AddAssertion(assert.ValueNotSet("task_auto_retry_attempts")) + return d +} + +func (d *DatabaseResourceAssert) HasNoTraceLevel() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("trace_level")) + return d +} + +func (d *DatabaseResourceAssert) HasNoUserTaskManagedInitialWarehouseSize() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("user_task_managed_initial_warehouse_size")) + return d +} + +func (d *DatabaseResourceAssert) HasNoUserTaskMinimumTriggerIntervalInSeconds() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("user_task_minimum_trigger_interval_in_seconds")) + return d +} + +func (d *DatabaseResourceAssert) HasNoUserTaskTimeoutMs() *DatabaseResourceAssert { + d.AddAssertion(assert.ValueNotSet("user_task_timeout_ms")) + return d +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/schema_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/schema_resource_gen.go new file mode 100644 index 0000000000..f4249764f3 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/schema_resource_gen.go @@ -0,0 +1,267 @@ +// Code generated by assertions generator; DO NOT EDIT. 
+ +package resourceassert + +import ( + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" +) + +type SchemaResourceAssert struct { + *assert.ResourceAssert +} + +func SchemaResource(t *testing.T, name string) *SchemaResourceAssert { + t.Helper() + + return &SchemaResourceAssert{ + ResourceAssert: assert.NewResourceAssert(name, "resource"), + } +} + +func ImportedSchemaResource(t *testing.T, id string) *SchemaResourceAssert { + t.Helper() + + return &SchemaResourceAssert{ + ResourceAssert: assert.NewImportedResourceAssert(id, "imported resource"), + } +} + +/////////////////////////////////// +// Attribute value string checks // +/////////////////////////////////// + +func (s *SchemaResourceAssert) HasCatalogString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("catalog", expected)) + return s +} + +func (s *SchemaResourceAssert) HasCommentString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("comment", expected)) + return s +} + +func (s *SchemaResourceAssert) HasDataRetentionTimeInDaysString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("data_retention_time_in_days", expected)) + return s +} + +func (s *SchemaResourceAssert) HasDatabaseString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("database", expected)) + return s +} + +func (s *SchemaResourceAssert) HasDefaultDdlCollationString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("default_ddl_collation", expected)) + return s +} + +func (s *SchemaResourceAssert) HasEnableConsoleOutputString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("enable_console_output", expected)) + return s +} + +func (s *SchemaResourceAssert) HasExternalVolumeString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("external_volume", expected)) + return s +} + +func (s *SchemaResourceAssert) 
HasFullyQualifiedNameString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("fully_qualified_name", expected)) + return s +} + +func (s *SchemaResourceAssert) HasIsTransientString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("is_transient", expected)) + return s +} + +func (s *SchemaResourceAssert) HasLogLevelString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("log_level", expected)) + return s +} + +func (s *SchemaResourceAssert) HasMaxDataExtensionTimeInDaysString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("max_data_extension_time_in_days", expected)) + return s +} + +func (s *SchemaResourceAssert) HasNameString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("name", expected)) + return s +} + +func (s *SchemaResourceAssert) HasPipeExecutionPausedString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("pipe_execution_paused", expected)) + return s +} + +func (s *SchemaResourceAssert) HasQuotedIdentifiersIgnoreCaseString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("quoted_identifiers_ignore_case", expected)) + return s +} + +func (s *SchemaResourceAssert) HasReplaceInvalidCharactersString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("replace_invalid_characters", expected)) + return s +} + +func (s *SchemaResourceAssert) HasStorageSerializationPolicyString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("storage_serialization_policy", expected)) + return s +} + +func (s *SchemaResourceAssert) HasSuspendTaskAfterNumFailuresString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("suspend_task_after_num_failures", expected)) + return s +} + +func (s *SchemaResourceAssert) HasTaskAutoRetryAttemptsString(expected string) *SchemaResourceAssert { + 
s.AddAssertion(assert.ValueSet("task_auto_retry_attempts", expected)) + return s +} + +func (s *SchemaResourceAssert) HasTraceLevelString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("trace_level", expected)) + return s +} + +func (s *SchemaResourceAssert) HasUserTaskManagedInitialWarehouseSizeString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", expected)) + return s +} + +func (s *SchemaResourceAssert) HasUserTaskMinimumTriggerIntervalInSecondsString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("user_task_minimum_trigger_interval_in_seconds", expected)) + return s +} + +func (s *SchemaResourceAssert) HasUserTaskTimeoutMsString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("user_task_timeout_ms", expected)) + return s +} + +func (s *SchemaResourceAssert) HasWithManagedAccessString(expected string) *SchemaResourceAssert { + s.AddAssertion(assert.ValueSet("with_managed_access", expected)) + return s +} + +//////////////////////////// +// Attribute empty checks // +//////////////////////////// + +func (s *SchemaResourceAssert) HasNoCatalog() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("catalog")) + return s +} + +func (s *SchemaResourceAssert) HasNoComment() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("comment")) + return s +} + +func (s *SchemaResourceAssert) HasNoDataRetentionTimeInDays() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("data_retention_time_in_days")) + return s +} + +func (s *SchemaResourceAssert) HasNoDatabase() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("database")) + return s +} + +func (s *SchemaResourceAssert) HasNoDefaultDdlCollation() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("default_ddl_collation")) + return s +} + +func (s *SchemaResourceAssert) HasNoEnableConsoleOutput() *SchemaResourceAssert { + 
s.AddAssertion(assert.ValueNotSet("enable_console_output")) + return s +} + +func (s *SchemaResourceAssert) HasNoExternalVolume() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("external_volume")) + return s +} + +func (s *SchemaResourceAssert) HasNoFullyQualifiedName() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("fully_qualified_name")) + return s +} + +func (s *SchemaResourceAssert) HasNoIsTransient() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("is_transient")) + return s +} + +func (s *SchemaResourceAssert) HasNoLogLevel() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("log_level")) + return s +} + +func (s *SchemaResourceAssert) HasNoMaxDataExtensionTimeInDays() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("max_data_extension_time_in_days")) + return s +} + +func (s *SchemaResourceAssert) HasNoName() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("name")) + return s +} + +func (s *SchemaResourceAssert) HasNoPipeExecutionPaused() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("pipe_execution_paused")) + return s +} + +func (s *SchemaResourceAssert) HasNoQuotedIdentifiersIgnoreCase() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("quoted_identifiers_ignore_case")) + return s +} + +func (s *SchemaResourceAssert) HasNoReplaceInvalidCharacters() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("replace_invalid_characters")) + return s +} + +func (s *SchemaResourceAssert) HasNoStorageSerializationPolicy() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("storage_serialization_policy")) + return s +} + +func (s *SchemaResourceAssert) HasNoSuspendTaskAfterNumFailures() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("suspend_task_after_num_failures")) + return s +} + +func (s *SchemaResourceAssert) HasNoTaskAutoRetryAttempts() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("task_auto_retry_attempts")) + return s 
+} + +func (s *SchemaResourceAssert) HasNoTraceLevel() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("trace_level")) + return s +} + +func (s *SchemaResourceAssert) HasNoUserTaskManagedInitialWarehouseSize() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("user_task_managed_initial_warehouse_size")) + return s +} + +func (s *SchemaResourceAssert) HasNoUserTaskMinimumTriggerIntervalInSeconds() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("user_task_minimum_trigger_interval_in_seconds")) + return s +} + +func (s *SchemaResourceAssert) HasNoUserTaskTimeoutMs() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("user_task_timeout_ms")) + return s +} + +func (s *SchemaResourceAssert) HasNoWithManagedAccess() *SchemaResourceAssert { + s.AddAssertion(assert.ValueNotSet("with_managed_access")) + return s +} diff --git a/pkg/acceptance/helpers/information_schema_client.go b/pkg/acceptance/helpers/information_schema_client.go index 9ed99e4e19..972027cf76 100644 --- a/pkg/acceptance/helpers/information_schema_client.go +++ b/pkg/acceptance/helpers/information_schema_client.go @@ -5,6 +5,8 @@ import ( "fmt" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/stretchr/testify/require" ) @@ -25,20 +27,37 @@ func (c *InformationSchemaClient) client() *sdk.Client { return c.context.client } -func (c *InformationSchemaClient) GetQueryTextByQueryId(t *testing.T, queryId string) string { +type QueryHistory struct { + QueryId string + QueryText string + QueryTag string +} + +func (c *InformationSchemaClient) GetQueryHistory(t *testing.T, limit int) []QueryHistory { t.Helper() - result, err := c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT QUERY_TEXT FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => 20)) WHERE QUERY_ID = '%s'", queryId)) + result, err := 
c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT * FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => %d))", limit)) require.NoError(t, err) - require.Len(t, result, 1) - require.NotNil(t, result[0]["QUERY_TEXT"]) - return (*result[0]["QUERY_TEXT"]).(string) + return collections.Map(result, func(queryResult map[string]*any) QueryHistory { + return c.mapQueryHistory(t, queryResult) + }) } -func (c *InformationSchemaClient) GetQueryTagByQueryId(t *testing.T, queryId string) string { +func (c *InformationSchemaClient) GetQueryHistoryByQueryId(t *testing.T, limit int, queryId string) QueryHistory { t.Helper() - result, err := c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT QUERY_TAG FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => 20)) WHERE QUERY_ID = '%s'", queryId)) + result, err := c.client().QueryUnsafe(context.Background(), fmt.Sprintf("SELECT * FROM TABLE(INFORMATION_SCHEMA.QUERY_HISTORY(RESULT_LIMIT => %d)) WHERE QUERY_ID = '%s'", limit, queryId)) require.NoError(t, err) require.Len(t, result, 1) - require.NotNil(t, result[0]["QUERY_TAG"]) - return (*result[0]["QUERY_TAG"]).(string) + return c.mapQueryHistory(t, result[0]) +} + +func (c *InformationSchemaClient) mapQueryHistory(t *testing.T, queryResult map[string]*any) QueryHistory { + t.Helper() + require.NotNil(t, queryResult["QUERY_ID"]) + require.NotNil(t, queryResult["QUERY_TEXT"]) + require.NotNil(t, queryResult["QUERY_TAG"]) + return QueryHistory{ + QueryId: (*queryResult["QUERY_ID"]).(string), + QueryText: (*queryResult["QUERY_TEXT"]).(string), + QueryTag: (*queryResult["QUERY_TAG"]).(string), + } } diff --git a/pkg/provider/resources/resources.go b/pkg/provider/resources/resources.go index 05757d5967..dc4de69296 100644 --- a/pkg/provider/resources/resources.go +++ b/pkg/provider/resources/resources.go @@ -23,6 +23,13 @@ const ( ExternalVolume resource = "snowflake_external_volume" FailoverGroup resource = "snowflake_failover_group" FileFormat 
resource = "snowflake_file_format" + GrantAccountRole resource = "snowflake_grant_account_role" + GrantApplicationRole resource = "snowflake_grant_application_role" + GrantDatabaseRole resource = "snowflake_grant_database_role" + GrantOwnership resource = "snowflake_grant_ownership" + GrantPrivilegesToAccountRole resource = "snowflake_grant_privileges_to_account_role" + GrantPrivilegesToDatabaseRole resource = "snowflake_grant_privileges_to_database_role" + GrantPrivilegesToShare resource = "snowflake_grant_privileges_to_share" Function resource = "snowflake_function" LegacyServiceUser resource = "snowflake_legacy_service_user" ManagedAccount resource = "snowflake_managed_account" @@ -64,7 +71,9 @@ const ( Table resource = "snowflake_table" Tag resource = "snowflake_tag" TagAssociation resource = "snowflake_tag_association" + TagMaskingPolicyAssociation resource = "snowflake_tag_masking_policy_association" Task resource = "snowflake_task" + UnsafeExecute resource = "snowflake_unsafe_execute" User resource = "snowflake_user" View resource = "snowflake_view" Warehouse resource = "snowflake_warehouse" diff --git a/pkg/resources/account.go b/pkg/resources/account.go index 7ae047874b..dd58b80c5c 100644 --- a/pkg/resources/account.go +++ b/pkg/resources/account.go @@ -7,6 +7,8 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" @@ -216,9 +218,9 @@ func Account() *schema.Resource { Update: UpdateAccount, Delete: DeleteAccount, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Account, customdiff.All( ComputedIfAnyAttributeChanged(accountSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: accountSchema, Importer: &schema.ResourceImporter{ 
diff --git a/pkg/resources/account_role.go b/pkg/resources/account_role.go index 28922572db..760ed99d93 100644 --- a/pkg/resources/account_role.go +++ b/pkg/resources/account_role.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -43,19 +45,19 @@ func AccountRole() *schema.Resource { return &schema.Resource{ Schema: accountRoleSchema, - CreateContext: CreateAccountRole, - ReadContext: ReadAccountRole, - DeleteContext: DeleteAccountRole, - UpdateContext: UpdateAccountRole, + CreateContext: TrackingCreateWrapper(resources.AccountRole, CreateAccountRole), + ReadContext: TrackingReadWrapper(resources.AccountRole, ReadAccountRole), + DeleteContext: TrackingDeleteWrapper(resources.AccountRole, DeleteAccountRole), + UpdateContext: TrackingUpdateWrapper(resources.AccountRole, UpdateAccountRole), Description: "The resource is used for role management, where roles can be assigned privileges and, in turn, granted to users and other roles. When granted to roles they can create hierarchies of privilege structures. 
For more details, refer to the [official documentation](https://docs.snowflake.com/en/user-guide/security-access-control-overview).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.AccountRole, customdiff.All( ComputedIfAnyAttributeChanged(accountRoleSchema, ShowOutputAttributeName, "comment", "name"), ComputedIfAnyAttributeChanged(accountRoleSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.AccountRole, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/alert.go b/pkg/resources/alert.go index 15d7281937..2c16b7c8c7 100644 --- a/pkg/resources/alert.go +++ b/pkg/resources/alert.go @@ -8,6 +8,8 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" @@ -110,14 +112,14 @@ var alertSchema = map[string]*schema.Schema{ // Alert returns a pointer to the resource representing an alert. 
func Alert() *schema.Resource { return &schema.Resource{ - CreateContext: CreateAlert, - ReadContext: ReadAlert, - UpdateContext: UpdateAlert, - DeleteContext: DeleteAlert, + CreateContext: TrackingCreateWrapper(resources.Alert, CreateAlert), + ReadContext: TrackingReadWrapper(resources.Alert, ReadAlert), + UpdateContext: TrackingUpdateWrapper(resources.Alert, UpdateAlert), + DeleteContext: TrackingDeleteWrapper(resources.Alert, DeleteAlert), Schema: alertSchema, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: TrackingImportWrapper(resources.Alert, schema.ImportStatePassthroughContext), }, } } diff --git a/pkg/resources/api_authentication_integration_with_authorization_code_grant.go b/pkg/resources/api_authentication_integration_with_authorization_code_grant.go index c261d19305..0812406511 100644 --- a/pkg/resources/api_authentication_integration_with_authorization_code_grant.go +++ b/pkg/resources/api_authentication_integration_with_authorization_code_grant.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -36,13 +38,13 @@ var apiAuthAuthorizationCodeGrantSchema = func() map[string]*schema.Schema { func ApiAuthenticationIntegrationWithAuthorizationCodeGrant() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextApiAuthenticationIntegrationWithAuthorizationCodeGrant, - ReadContext: ReadContextApiAuthenticationIntegrationWithAuthorizationCodeGrant(true), - UpdateContext: UpdateContextApiAuthenticationIntegrationWithAuthorizationCodeGrant, - DeleteContext: DeleteContextApiAuthenticationIntegrationWithAuthorizationCodeGrant, + CreateContext: 
TrackingCreateWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, CreateContextApiAuthenticationIntegrationWithAuthorizationCodeGrant), + ReadContext: TrackingReadWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, ReadContextApiAuthenticationIntegrationWithAuthorizationCodeGrant(true)), + UpdateContext: TrackingUpdateWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, UpdateContextApiAuthenticationIntegrationWithAuthorizationCodeGrant), + DeleteContext: TrackingDeleteWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, DeleteContextApiAuthenticationIntegrationWithAuthorizationCodeGrant), Description: "Resource used to manage api authentication security integration objects with authorization code grant. For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-api-auth).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, customdiff.All( ForceNewIfChangeToEmptyString("oauth_token_endpoint"), ForceNewIfChangeToEmptyString("oauth_authorization_endpoint"), ForceNewIfChangeToEmptyString("oauth_client_auth_method"), @@ -50,10 +52,10 @@ func ApiAuthenticationIntegrationWithAuthorizationCodeGrant() *schema.Resource { ComputedIfAnyAttributeChanged(apiAuthAuthorizationCodeGrantSchema, DescribeOutputAttributeName, "enabled", "comment", "oauth_access_token_validity", "oauth_refresh_token_validity", "oauth_client_id", "oauth_client_auth_method", "oauth_authorization_endpoint", "oauth_token_endpoint", "oauth_allowed_scopes"), - ), + )), Schema: apiAuthAuthorizationCodeGrantSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportApiAuthenticationWithAuthorizationCodeGrant, + StateContext: TrackingImportWrapper(resources.ApiAuthenticationIntegrationWithAuthorizationCodeGrant, 
ImportApiAuthenticationWithAuthorizationCodeGrant), }, } } diff --git a/pkg/resources/api_authentication_integration_with_client_credentials.go b/pkg/resources/api_authentication_integration_with_client_credentials.go index 51c2209c3e..82f180e14b 100644 --- a/pkg/resources/api_authentication_integration_with_client_credentials.go +++ b/pkg/resources/api_authentication_integration_with_client_credentials.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -31,22 +33,22 @@ var apiAuthClientCredentialsSchema = func() map[string]*schema.Schema { func ApiAuthenticationIntegrationWithClientCredentials() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextApiAuthenticationIntegrationWithClientCredentials, - ReadContext: ReadContextApiAuthenticationIntegrationWithClientCredentials(true), - UpdateContext: UpdateContextApiAuthenticationIntegrationWithClientCredentials, - DeleteContext: DeleteContextApiAuthenticationIntegrationWithClientCredentials, + CreateContext: TrackingCreateWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, CreateContextApiAuthenticationIntegrationWithClientCredentials), + ReadContext: TrackingReadWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, ReadContextApiAuthenticationIntegrationWithClientCredentials(true)), + UpdateContext: TrackingUpdateWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, UpdateContextApiAuthenticationIntegrationWithClientCredentials), + DeleteContext: TrackingDeleteWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, DeleteContextApiAuthenticationIntegrationWithClientCredentials), Description: "Resource used to manage api authentication 
security integration objects with client credentials. For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-api-auth).", Schema: apiAuthClientCredentialsSchema, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, customdiff.All( ForceNewIfChangeToEmptyString("oauth_token_endpoint"), ForceNewIfChangeToEmptyString("oauth_client_auth_method"), ComputedIfAnyAttributeChanged(apiAuthClientCredentialsSchema, ShowOutputAttributeName, "enabled", "comment"), ComputedIfAnyAttributeChanged(apiAuthClientCredentialsSchema, DescribeOutputAttributeName, "enabled", "comment", "oauth_access_token_validity", "oauth_refresh_token_validity", "oauth_client_id", "oauth_client_auth_method", "oauth_token_endpoint", "oauth_allowed_scopes"), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportApiAuthenticationWithClientCredentials, + StateContext: TrackingImportWrapper(resources.ApiAuthenticationIntegrationWithClientCredentials, ImportApiAuthenticationWithClientCredentials), }, } } diff --git a/pkg/resources/api_authentication_integration_with_jwt_bearer.go b/pkg/resources/api_authentication_integration_with_jwt_bearer.go index 22703fe675..b896fab940 100644 --- a/pkg/resources/api_authentication_integration_with_jwt_bearer.go +++ b/pkg/resources/api_authentication_integration_with_jwt_bearer.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -34,24 +36,24 @@ var apiAuthJwtBearerSchema = func() map[string]*schema.Schema { func ApiAuthenticationIntegrationWithJwtBearer() *schema.Resource { return 
&schema.Resource{ - CreateContext: CreateContextApiAuthenticationIntegrationWithJwtBearer, - ReadContext: ReadContextApiAuthenticationIntegrationWithJwtBearer(true), - UpdateContext: UpdateContextApiAuthenticationIntegrationWithJwtBearer, - DeleteContext: DeleteContextApiAuthenticationIntegrationWithJwtBearer, + CreateContext: TrackingCreateWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, CreateContextApiAuthenticationIntegrationWithJwtBearer), + ReadContext: TrackingReadWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, ReadContextApiAuthenticationIntegrationWithJwtBearer(true)), + UpdateContext: TrackingUpdateWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, UpdateContextApiAuthenticationIntegrationWithJwtBearer), + DeleteContext: TrackingDeleteWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, DeleteContextApiAuthenticationIntegrationWithJwtBearer), Description: "Resource used to manage api authentication security integration objects with jwt bearer. 
For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-api-auth).", Schema: apiAuthJwtBearerSchema, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, customdiff.All( ForceNewIfChangeToEmptyString("oauth_token_endpoint"), ForceNewIfChangeToEmptyString("oauth_authorization_endpoint"), ForceNewIfChangeToEmptyString("oauth_client_auth_method"), ComputedIfAnyAttributeChanged(apiAuthJwtBearerSchema, ShowOutputAttributeName, "enabled", "comment"), ComputedIfAnyAttributeChanged(apiAuthJwtBearerSchema, DescribeOutputAttributeName, "enabled", "comment", "oauth_access_token_validity", "oauth_refresh_token_validity", "oauth_client_id", "oauth_client_auth_method", "oauth_authorization_endpoint", - "oauth_token_endpoint", "oauth_assertion_issuer"), + "oauth_token_endpoint", "oauth_assertion_issuer")), ), Importer: &schema.ResourceImporter{ - StateContext: ImportApiAuthenticationWithJwtBearer, + StateContext: TrackingImportWrapper(resources.ApiAuthenticationIntegrationWithJwtBearer, ImportApiAuthenticationWithJwtBearer), }, } } diff --git a/pkg/resources/authentication_policy.go b/pkg/resources/authentication_policy.go index 16f11927b6..85c00356a1 100644 --- a/pkg/resources/authentication_policy.go +++ b/pkg/resources/authentication_policy.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -108,15 +110,15 @@ var authenticationPolicySchema = map[string]*schema.Schema{ // AuthenticationPolicy returns a pointer to the resource representing an authentication policy. 
func AuthenticationPolicy() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextAuthenticationPolicy, - ReadContext: ReadContextAuthenticationPolicy, - UpdateContext: UpdateContextAuthenticationPolicy, - DeleteContext: DeleteContextAuthenticationPolicy, + CreateContext: TrackingCreateWrapper(resources.AuthenticationPolicy, CreateContextAuthenticationPolicy), + ReadContext: TrackingReadWrapper(resources.AuthenticationPolicy, ReadContextAuthenticationPolicy), + UpdateContext: TrackingUpdateWrapper(resources.AuthenticationPolicy, UpdateContextAuthenticationPolicy), + DeleteContext: TrackingDeleteWrapper(resources.AuthenticationPolicy, DeleteContextAuthenticationPolicy), Description: "Resource used to manage authentication policy objects. For more information, check [authentication policy documentation](https://docs.snowflake.com/en/sql-reference/sql/create-authentication-policy).", Schema: authenticationPolicySchema, Importer: &schema.ResourceImporter{ - StateContext: ImportAuthenticationPolicy, + StateContext: TrackingImportWrapper(resources.AuthenticationPolicy, ImportAuthenticationPolicy), }, } } diff --git a/pkg/resources/cortex_search_service.go b/pkg/resources/cortex_search_service.go index b48b2abb71..f9ad0a933f 100644 --- a/pkg/resources/cortex_search_service.go +++ b/pkg/resources/cortex_search_service.go @@ -7,6 +7,8 @@ import ( "log" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -81,10 +83,10 @@ var cortexSearchServiceSchema = map[string]*schema.Schema{ // CortexSearchService returns a pointer to the resource representing a Cortex search service. 
func CortexSearchService() *schema.Resource { return &schema.Resource{ - CreateContext: CreateCortexSearchService, - ReadContext: ReadCortexSearchService, - UpdateContext: UpdateCortexSearchService, - DeleteContext: DeleteCortexSearchService, + CreateContext: TrackingCreateWrapper(resources.CortexSearchService, CreateCortexSearchService), + ReadContext: TrackingReadWrapper(resources.CortexSearchService, ReadCortexSearchService), + UpdateContext: TrackingUpdateWrapper(resources.CortexSearchService, UpdateCortexSearchService), + DeleteContext: TrackingDeleteWrapper(resources.CortexSearchService, DeleteCortexSearchService), Schema: cortexSearchServiceSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/database.go b/pkg/resources/database.go index f9de40def7..b9b0935bcc 100644 --- a/pkg/resources/database.go +++ b/pkg/resources/database.go @@ -8,6 +8,8 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -90,21 +92,21 @@ var databaseSchema = map[string]*schema.Schema{ func Database() *schema.Resource { return &schema.Resource{ - CreateContext: CreateDatabase, - UpdateContext: UpdateDatabase, - ReadContext: ReadDatabase, - DeleteContext: DeleteDatabase, + CreateContext: TrackingCreateWrapper(resources.Database, CreateDatabase), + UpdateContext: TrackingUpdateWrapper(resources.Database, UpdateDatabase), + ReadContext: TrackingReadWrapper(resources.Database, ReadDatabase), + DeleteContext: TrackingDeleteWrapper(resources.Database, DeleteDatabase), Description: "Represents a standard database. 
If replication configuration is specified, the database is promoted to serve as a primary database for replication.", Schema: collections.MergeMaps(databaseSchema, databaseParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.Database, ImportName[sdk.AccountObjectIdentifier]), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Database, customdiff.All( ComputedIfAnyAttributeChanged(databaseSchema, FullyQualifiedNameAttributeName, "name"), databaseParametersCustomDiff, - ), + )), SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/database_old.go b/pkg/resources/database_old.go index ede9949eef..15d4fca440 100644 --- a/pkg/resources/database_old.go +++ b/pkg/resources/database_old.go @@ -7,6 +7,8 @@ import ( "slices" "strconv" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -96,7 +98,7 @@ func DatabaseOld() *schema.Resource { Schema: databaseOldSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.DatabaseOld, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/database_role.go b/pkg/resources/database_role.go index c070152ea9..8888c01f1d 100644 --- a/pkg/resources/database_role.go +++ b/pkg/resources/database_role.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -49,19 +51,19 @@ var databaseRoleSchema = map[string]*schema.Schema{ func DatabaseRole() *schema.Resource { return &schema.Resource{ - CreateContext: CreateDatabaseRole, - ReadContext: ReadDatabaseRole, - UpdateContext: UpdateDatabaseRole, - DeleteContext: DeleteDatabaseRole, + CreateContext: TrackingCreateWrapper(resources.DatabaseRole, CreateDatabaseRole), + ReadContext: TrackingReadWrapper(resources.DatabaseRole, ReadDatabaseRole), + UpdateContext: TrackingUpdateWrapper(resources.DatabaseRole, UpdateDatabaseRole), + DeleteContext: TrackingDeleteWrapper(resources.DatabaseRole, DeleteDatabaseRole), Schema: databaseRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.DatabaseObjectIdentifier], + StateContext: TrackingImportWrapper(resources.DatabaseRole, ImportName[sdk.DatabaseObjectIdentifier]), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.DatabaseRole, customdiff.All( ComputedIfAnyAttributeChanged(databaseRoleSchema, ShowOutputAttributeName, "comment", "name"), - ), + )), SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/external_function.go b/pkg/resources/external_function.go index 134e0e6c8f..9459adab6a 100644 --- a/pkg/resources/external_function.go +++ b/pkg/resources/external_function.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -185,10 +187,10 @@ func ExternalFunction() *schema.Resource { return &schema.Resource{ SchemaVersion: 2, - CreateContext: CreateContextExternalFunction, - ReadContext: ReadContextExternalFunction, - UpdateContext: UpdateContextExternalFunction, - DeleteContext: 
DeleteContextExternalFunction, + CreateContext: TrackingCreateWrapper(resources.ExternalFunction, CreateContextExternalFunction), + ReadContext: TrackingReadWrapper(resources.ExternalFunction, ReadContextExternalFunction), + UpdateContext: TrackingUpdateWrapper(resources.ExternalFunction, UpdateContextExternalFunction), + DeleteContext: TrackingDeleteWrapper(resources.ExternalFunction, DeleteContextExternalFunction), Schema: externalFunctionSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/external_oauth_integration.go b/pkg/resources/external_oauth_integration.go index b454e3fa67..8ec52d3327 100644 --- a/pkg/resources/external_oauth_integration.go +++ b/pkg/resources/external_oauth_integration.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" @@ -154,14 +156,14 @@ var externalOauthIntegrationSchema = map[string]*schema.Schema{ func ExternalOauthIntegration() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextExternalOauthIntegration, - ReadContext: ReadContextExternalOauthIntegration(true), - UpdateContext: UpdateContextExternalOauthIntegration, - DeleteContext: DeleteContextExternalOauthIntegration, + CreateContext: TrackingCreateWrapper(resources.ExternalOauthSecurityIntegration, CreateContextExternalOauthIntegration), + ReadContext: TrackingReadWrapper(resources.ExternalOauthSecurityIntegration, ReadContextExternalOauthIntegration(true)), + UpdateContext: TrackingUpdateWrapper(resources.ExternalOauthSecurityIntegration, UpdateContextExternalOauthIntegration), + DeleteContext: TrackingDeleteWrapper(resources.ExternalOauthSecurityIntegration, DeleteContextExternalOauthIntegration), Description: "Resource used to manage external oauth security integration objects. 
For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-oauth-external).", Schema: externalOauthIntegrationSchema, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ExternalOauthSecurityIntegration, customdiff.All( ForceNewIfChangeToEmptyString("external_oauth_rsa_public_key"), ForceNewIfChangeToEmptyString("external_oauth_rsa_public_key_2"), ForceNewIfChangeToEmptyString("external_oauth_scope_mapping_attribute"), @@ -172,9 +174,9 @@ func ExternalOauthIntegration() *schema.Resource { "external_oauth_rsa_public_key", "external_oauth_rsa_public_key_2", "external_oauth_blocked_roles_list", "external_oauth_allowed_roles_list", "external_oauth_audience_list", "external_oauth_token_user_mapping_claim", "external_oauth_snowflake_user_mapping_attribute", "external_oauth_scope_delimiter", "comment"), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportExternalOauthIntegration, + StateContext: TrackingImportWrapper(resources.ExternalOauthSecurityIntegration, ImportExternalOauthIntegration), }, SchemaVersion: 1, diff --git a/pkg/resources/external_volume.go b/pkg/resources/external_volume.go index 89a3fe5c2a..7486f779b1 100644 --- a/pkg/resources/external_volume.go +++ b/pkg/resources/external_volume.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -116,21 +118,21 @@ var externalVolumeSchema = map[string]*schema.Schema{ // ExternalVolume returns a pointer to the resource representing an external volume. 
func ExternalVolume() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextExternalVolume, - UpdateContext: UpdateContextExternalVolume, - ReadContext: ReadContextExternalVolume(true), - DeleteContext: DeleteContextExternalVolume, + CreateContext: TrackingCreateWrapper(resources.ExternalVolume, CreateContextExternalVolume), + UpdateContext: TrackingUpdateWrapper(resources.ExternalVolume, UpdateContextExternalVolume), + ReadContext: TrackingReadWrapper(resources.ExternalVolume, ReadContextExternalVolume(true)), + DeleteContext: TrackingDeleteWrapper(resources.ExternalVolume, DeleteContextExternalVolume), Description: "Resource used to manage external volume objects. For more information, check [external volume documentation](https://docs.snowflake.com/en/sql-reference/commands-data-loading#external-volume).", Schema: externalVolumeSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportExternalVolume, + StateContext: TrackingImportWrapper(resources.ExternalVolume, ImportExternalVolume), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ExternalVolume, customdiff.All( ComputedIfAnyAttributeChanged(externalVolumeSchema, ShowOutputAttributeName, "name", "allow_writes", "comment"), ComputedIfAnyAttributeChanged(externalVolumeSchema, DescribeOutputAttributeName, "name", "allow_writes", "comment", "storage_location"), - ), + )), } } diff --git a/pkg/resources/file_format.go b/pkg/resources/file_format.go index 35b2a08205..561212e487 100644 --- a/pkg/resources/file_format.go +++ b/pkg/resources/file_format.go @@ -7,6 +7,8 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -318,9 +320,9 @@ func FileFormat() *schema.Resource { Update: UpdateFileFormat, Delete: DeleteFileFormat, - CustomizeDiff: 
customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.FileFormat, customdiff.All( ComputedIfAnyAttributeChanged(fileFormatSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: fileFormatSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/function.go b/pkg/resources/function.go index 40709a0feb..314439b96d 100644 --- a/pkg/resources/function.go +++ b/pkg/resources/function.go @@ -7,6 +7,8 @@ import ( "regexp" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -166,15 +168,15 @@ func Function() *schema.Resource { return &schema.Resource{ SchemaVersion: 2, - CreateContext: CreateContextFunction, - ReadContext: ReadContextFunction, - UpdateContext: UpdateContextFunction, - DeleteContext: DeleteContextFunction, + CreateContext: TrackingCreateWrapper(resources.Function, CreateContextFunction), + ReadContext: TrackingReadWrapper(resources.Function, ReadContextFunction), + UpdateContext: TrackingUpdateWrapper(resources.Function, UpdateContextFunction), + DeleteContext: TrackingDeleteWrapper(resources.Function, DeleteContextFunction), - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Function, customdiff.All( // TODO(SNOW-1348103): add `arguments` to ComputedIfAnyAttributeChanged. This can't be done now because this function compares values without diff suppress. 
ComputedIfAnyAttributeChanged(functionSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: functionSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/grant_account_role.go b/pkg/resources/grant_account_role.go index e1e5906ccb..78e6b35f2c 100644 --- a/pkg/resources/grant_account_role.go +++ b/pkg/resources/grant_account_role.go @@ -6,6 +6,8 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -52,7 +54,7 @@ func GrantAccountRole() *schema.Resource { Delete: DeleteGrantAccountRole, Schema: grantAccountRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + StateContext: TrackingImportWrapper(resources.GrantAccountRole, func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), helpers.IDDelimiter) if len(parts) != 3 { return nil, fmt.Errorf("invalid ID specified: %v, expected ||", d.Id()) @@ -74,7 +76,7 @@ func GrantAccountRole() *schema.Resource { } return []*schema.ResourceData{d}, nil - }, + }), }, } } diff --git a/pkg/resources/grant_application_role.go b/pkg/resources/grant_application_role.go index 0f812fe7c5..d1f12ebf54 100644 --- a/pkg/resources/grant_application_role.go +++ b/pkg/resources/grant_application_role.go @@ -6,6 +6,8 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -51,12 +53,12 @@ var grantApplicationRoleSchema = map[string]*schema.Schema{ func GrantApplicationRole() *schema.Resource { return &schema.Resource{ - 
CreateContext: CreateContextGrantApplicationRole, - ReadContext: ReadContextGrantApplicationRole, - DeleteContext: DeleteContextGrantApplicationRole, + CreateContext: TrackingCreateWrapper(resources.GrantApplicationRole, CreateContextGrantApplicationRole), + ReadContext: TrackingReadWrapper(resources.GrantApplicationRole, ReadContextGrantApplicationRole), + DeleteContext: TrackingDeleteWrapper(resources.GrantApplicationRole, DeleteContextGrantApplicationRole), Schema: grantApplicationRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + StateContext: TrackingImportWrapper(resources.GrantApplicationRole, func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { parts := helpers.ParseResourceIdentifier(d.Id()) if len(parts) != 3 { return nil, fmt.Errorf("invalid ID specified: %v, expected ||", d.Id()) @@ -86,7 +88,7 @@ func GrantApplicationRole() *schema.Resource { } return []*schema.ResourceData{d}, nil - }, + }), }, } } diff --git a/pkg/resources/grant_database_role.go b/pkg/resources/grant_database_role.go index 132712fd48..1c845200af 100644 --- a/pkg/resources/grant_database_role.go +++ b/pkg/resources/grant_database_role.go @@ -5,6 +5,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -69,7 +71,7 @@ func GrantDatabaseRole() *schema.Resource { Delete: DeleteGrantDatabaseRole, Schema: grantDatabaseRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { + StateContext: TrackingImportWrapper(resources.GrantDatabaseRole, func(ctx context.Context, d *schema.ResourceData, m interface{}) 
([]*schema.ResourceData, error) { parts := helpers.ParseResourceIdentifier(d.Id()) if len(parts) != 3 { return nil, fmt.Errorf("invalid ID specified: %v, expected ||", d.Id()) @@ -113,7 +115,7 @@ func GrantDatabaseRole() *schema.Resource { } return []*schema.ResourceData{d}, nil - }, + }), }, } } diff --git a/pkg/resources/grant_ownership.go b/pkg/resources/grant_ownership.go index a79e9679ae..7173f18380 100644 --- a/pkg/resources/grant_ownership.go +++ b/pkg/resources/grant_ownership.go @@ -6,6 +6,8 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -155,14 +157,14 @@ func grantOwnershipBulkOperationSchema(branchName string) map[string]*schema.Sch func GrantOwnership() *schema.Resource { return &schema.Resource{ - CreateContext: CreateGrantOwnership, + CreateContext: TrackingCreateWrapper(resources.GrantOwnership, CreateGrantOwnership), // There's no Update, because every field is marked as ForceNew - DeleteContext: DeleteGrantOwnership, - ReadContext: ReadGrantOwnership, + DeleteContext: TrackingDeleteWrapper(resources.GrantOwnership, DeleteGrantOwnership), + ReadContext: TrackingReadWrapper(resources.GrantOwnership, ReadGrantOwnership), Schema: grantOwnershipSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportGrantOwnership(), + StateContext: TrackingImportWrapper(resources.GrantOwnership, ImportGrantOwnership()), }, } } diff --git a/pkg/resources/grant_privileges_to_account_role.go b/pkg/resources/grant_privileges_to_account_role.go index 9d8fd49a97..ccce92e80b 100644 --- a/pkg/resources/grant_privileges_to_account_role.go +++ b/pkg/resources/grant_privileges_to_account_role.go @@ -8,6 +8,8 @@ import ( "slices" "strings" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -296,14 +298,14 @@ func getGrantPrivilegesOnAccountRoleBulkOperationSchema(validGrantToObjectTypes func GrantPrivilegesToAccountRole() *schema.Resource { return &schema.Resource{ - CreateContext: CreateGrantPrivilegesToAccountRole, - UpdateContext: UpdateGrantPrivilegesToAccountRole, - DeleteContext: DeleteGrantPrivilegesToAccountRole, - ReadContext: ReadGrantPrivilegesToAccountRole, + CreateContext: TrackingCreateWrapper(resources.GrantPrivilegesToAccountRole, CreateGrantPrivilegesToAccountRole), + UpdateContext: TrackingUpdateWrapper(resources.GrantPrivilegesToAccountRole, UpdateGrantPrivilegesToAccountRole), + DeleteContext: TrackingDeleteWrapper(resources.GrantPrivilegesToAccountRole, DeleteGrantPrivilegesToAccountRole), + ReadContext: TrackingReadWrapper(resources.GrantPrivilegesToAccountRole, ReadGrantPrivilegesToAccountRole), Schema: grantPrivilegesToAccountRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportGrantPrivilegesToAccountRole(), + StateContext: TrackingImportWrapper(resources.GrantPrivilegesToAccountRole, ImportGrantPrivilegesToAccountRole()), }, } } diff --git a/pkg/resources/grant_privileges_to_database_role.go b/pkg/resources/grant_privileges_to_database_role.go index b69b089a97..04e4375c91 100644 --- a/pkg/resources/grant_privileges_to_database_role.go +++ b/pkg/resources/grant_privileges_to_database_role.go @@ -8,6 +8,8 @@ import ( "slices" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -250,14 +252,14 @@ func getGrantPrivilegesOnDatabaseRoleBulkOperationSchema(validGrantToObjectTypes func 
GrantPrivilegesToDatabaseRole() *schema.Resource { return &schema.Resource{ - CreateContext: CreateGrantPrivilegesToDatabaseRole, - UpdateContext: UpdateGrantPrivilegesToDatabaseRole, - DeleteContext: DeleteGrantPrivilegesToDatabaseRole, - ReadContext: ReadGrantPrivilegesToDatabaseRole, + CreateContext: TrackingCreateWrapper(resources.GrantPrivilegesToDatabaseRole, CreateGrantPrivilegesToDatabaseRole), + UpdateContext: TrackingUpdateWrapper(resources.GrantPrivilegesToDatabaseRole, UpdateGrantPrivilegesToDatabaseRole), + DeleteContext: TrackingDeleteWrapper(resources.GrantPrivilegesToDatabaseRole, DeleteGrantPrivilegesToDatabaseRole), + ReadContext: TrackingReadWrapper(resources.GrantPrivilegesToDatabaseRole, ReadGrantPrivilegesToDatabaseRole), Schema: grantPrivilegesToDatabaseRoleSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportGrantPrivilegesToDatabaseRole, + StateContext: TrackingImportWrapper(resources.GrantPrivilegesToDatabaseRole, ImportGrantPrivilegesToDatabaseRole), }, } } diff --git a/pkg/resources/grant_privileges_to_share.go b/pkg/resources/grant_privileges_to_share.go index ee3ab1eaab..30d4cc2e71 100644 --- a/pkg/resources/grant_privileges_to_share.go +++ b/pkg/resources/grant_privileges_to_share.go @@ -7,6 +7,8 @@ import ( "log" "slices" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -105,14 +107,14 @@ var grantPrivilegesToShareSchema = map[string]*schema.Schema{ func GrantPrivilegesToShare() *schema.Resource { return &schema.Resource{ - CreateContext: CreateGrantPrivilegesToShare, - UpdateContext: UpdateGrantPrivilegesToShare, - DeleteContext: DeleteGrantPrivilegesToShare, - ReadContext: ReadGrantPrivilegesToShare, + CreateContext: TrackingCreateWrapper(resources.GrantPrivilegesToShare, CreateGrantPrivilegesToShare), + UpdateContext: 
TrackingUpdateWrapper(resources.GrantPrivilegesToShare, UpdateGrantPrivilegesToShare), + DeleteContext: TrackingDeleteWrapper(resources.GrantPrivilegesToShare, DeleteGrantPrivilegesToShare), + ReadContext: TrackingReadWrapper(resources.GrantPrivilegesToShare, ReadGrantPrivilegesToShare), Schema: grantPrivilegesToShareSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportGrantPrivilegesToShare(), + StateContext: TrackingImportWrapper(resources.GrantPrivilegesToShare, ImportGrantPrivilegesToShare()), }, } } diff --git a/pkg/resources/masking_policy.go b/pkg/resources/masking_policy.go index 965945e708..282e8ce644 100644 --- a/pkg/resources/masking_policy.go +++ b/pkg/resources/masking_policy.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -116,21 +118,21 @@ func MaskingPolicy() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: CreateMaskingPolicy, - ReadContext: ReadMaskingPolicy(true), - UpdateContext: UpdateMaskingPolicy, - DeleteContext: DeleteMaskingPolicy, + CreateContext: TrackingCreateWrapper(resources.MaskingPolicy, CreateMaskingPolicy), + ReadContext: TrackingReadWrapper(resources.MaskingPolicy, ReadMaskingPolicy(true)), + UpdateContext: TrackingUpdateWrapper(resources.MaskingPolicy, UpdateMaskingPolicy), + DeleteContext: TrackingDeleteWrapper(resources.MaskingPolicy, DeleteMaskingPolicy), Description: "Resource used to manage masking policies. 
For more information, check [masking policies documentation](https://docs.snowflake.com/en/sql-reference/sql/create-masking-policy).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.MaskingPolicy, customdiff.All( ComputedIfAnyAttributeChanged(maskingPolicySchema, ShowOutputAttributeName, "name", "comment"), ComputedIfAnyAttributeChanged(maskingPolicySchema, DescribeOutputAttributeName, "name", "body"), ComputedIfAnyAttributeChanged(maskingPolicySchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: maskingPolicySchema, Importer: &schema.ResourceImporter{ - StateContext: ImportMaskingPolicy, + StateContext: TrackingImportWrapper(resources.MaskingPolicy, ImportMaskingPolicy), }, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/materialized_view.go b/pkg/resources/materialized_view.go index a280aef202..5151daf88a 100644 --- a/pkg/resources/materialized_view.go +++ b/pkg/resources/materialized_view.go @@ -5,6 +5,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -75,9 +77,9 @@ func MaterializedView() *schema.Resource { Update: UpdateMaterializedView, Delete: DeleteMaterializedView, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.MaterializedView, customdiff.All( ComputedIfAnyAttributeChanged(materializedViewSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: materializedViewSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/network_policy.go b/pkg/resources/network_policy.go index 981107abb0..07cab78f28 100644 --- a/pkg/resources/network_policy.go +++ b/pkg/resources/network_policy.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -87,13 +89,13 @@ func NetworkPolicy() *schema.Resource { return &schema.Resource{ Schema: networkPolicySchema, - CreateContext: CreateContextNetworkPolicy, - ReadContext: ReadContextNetworkPolicy, - UpdateContext: UpdateContextNetworkPolicy, - DeleteContext: DeleteContextNetworkPolicy, + CreateContext: TrackingCreateWrapper(resources.NetworkPolicy, CreateContextNetworkPolicy), + ReadContext: TrackingReadWrapper(resources.NetworkPolicy, ReadContextNetworkPolicy), + UpdateContext: TrackingUpdateWrapper(resources.NetworkPolicy, UpdateContextNetworkPolicy), + DeleteContext: TrackingDeleteWrapper(resources.NetworkPolicy, DeleteContextNetworkPolicy), Description: "Resource used to control network traffic. For more information, check an [official guide](https://docs.snowflake.com/en/user-guide/network-policies) on controlling network traffic with network policies.", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.NetworkPolicy, customdiff.All( // For now, allowed_network_rule_list and blocked_network_rule_list have to stay commented. // The main issue lays in the old Terraform SDK and how its handling DiffSuppression and CustomizeDiff // for complex types like Sets, Lists, and Maps. 
When every element of the Set is suppressed in custom diff, @@ -117,10 +119,10 @@ func NetworkPolicy() *schema.Resource { "blocked_ip_list", ), ComputedIfAnyAttributeChanged(networkPolicySchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.NetworkPolicy, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/network_rule.go b/pkg/resources/network_rule.go index 8cdac7d036..cc965da040 100644 --- a/pkg/resources/network_rule.go +++ b/pkg/resources/network_rule.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -65,10 +67,10 @@ var networkRuleSchema = map[string]*schema.Schema{ // NetworkRule returns a pointer to the resource representing a network rule. 
func NetworkRule() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextNetworkRule, - ReadContext: ReadContextNetworkRule, - UpdateContext: UpdateContextNetworkRule, - DeleteContext: DeleteContextNetworkRule, + CreateContext: TrackingCreateWrapper(resources.NetworkRule, CreateContextNetworkRule), + ReadContext: TrackingReadWrapper(resources.NetworkRule, ReadContextNetworkRule), + UpdateContext: TrackingUpdateWrapper(resources.NetworkRule, UpdateContextNetworkRule), + DeleteContext: TrackingDeleteWrapper(resources.NetworkRule, DeleteContextNetworkRule), Schema: networkRuleSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/oauth_integration_for_custom_clients.go b/pkg/resources/oauth_integration_for_custom_clients.go index 9669a37b53..8f737d8dc6 100644 --- a/pkg/resources/oauth_integration_for_custom_clients.go +++ b/pkg/resources/oauth_integration_for_custom_clients.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -153,13 +155,13 @@ func OauthIntegrationForCustomClients() *schema.Resource { return &schema.Resource{ Schema: oauthIntegrationForCustomClientsSchema, - CreateContext: CreateContextOauthIntegrationForCustomClients, - ReadContext: ReadContextOauthIntegrationForCustomClients(true), - UpdateContext: UpdateContextOauthIntegrationForCustomClients, - DeleteContext: DeleteContextOauthIntegrationForCustomClients, + CreateContext: TrackingCreateWrapper(resources.OauthIntegrationForCustomClients, CreateContextOauthIntegrationForCustomClients), + ReadContext: TrackingReadWrapper(resources.OauthIntegrationForCustomClients, ReadContextOauthIntegrationForCustomClients(true)), + UpdateContext: 
TrackingUpdateWrapper(resources.OauthIntegrationForCustomClients, UpdateContextOauthIntegrationForCustomClients), + DeleteContext: TrackingDeleteWrapper(resources.OauthIntegrationForCustomClients, DeleteContextOauthIntegrationForCustomClients), Description: "Resource used to manage oauth security integration for custom clients objects. For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-oauth-snowflake).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.OauthIntegrationForCustomClients, customdiff.All( ComputedIfAnyAttributeChanged( oauthIntegrationForCustomClientsSchema, ShowOutputAttributeName, @@ -184,10 +186,10 @@ func OauthIntegrationForCustomClients() *schema.Resource { "oauth_client_rsa_public_key_2", "comment", ), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportOauthForCustomClientsIntegration, + StateContext: TrackingImportWrapper(resources.OauthIntegrationForCustomClients, ImportOauthForCustomClientsIntegration), }, } } diff --git a/pkg/resources/oauth_integration_for_partner_applications.go b/pkg/resources/oauth_integration_for_partner_applications.go index 12c18c62ae..3fc0d5a586 100644 --- a/pkg/resources/oauth_integration_for_partner_applications.go +++ b/pkg/resources/oauth_integration_for_partner_applications.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -113,13 +115,13 @@ func OauthIntegrationForPartnerApplications() *schema.Resource { return &schema.Resource{ Schema: oauthIntegrationForPartnerApplicationsSchema, - CreateContext: CreateContextOauthIntegrationForPartnerApplications, - 
ReadContext: ReadContextOauthIntegrationForPartnerApplications(true), - UpdateContext: UpdateContextOauthIntegrationForPartnerApplications, - DeleteContext: DeleteContextSecurityIntegration, + CreateContext: TrackingCreateWrapper(resources.OauthIntegrationForPartnerApplications, CreateContextOauthIntegrationForPartnerApplications), + ReadContext: TrackingReadWrapper(resources.OauthIntegrationForPartnerApplications, ReadContextOauthIntegrationForPartnerApplications(true)), + UpdateContext: TrackingUpdateWrapper(resources.OauthIntegrationForPartnerApplications, UpdateContextOauthIntegrationForPartnerApplications), + DeleteContext: TrackingDeleteWrapper(resources.OauthIntegrationForPartnerApplications, DeleteContextSecurityIntegration), Description: "Resource used to manage oauth security integration for partner applications objects. For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-oauth-snowflake).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.OauthIntegrationForPartnerApplications, customdiff.All( ComputedIfAnyAttributeChanged( oauthIntegrationForPartnerApplicationsSchema, ShowOutputAttributeName, @@ -137,10 +139,10 @@ func OauthIntegrationForPartnerApplications() *schema.Resource { "blocked_roles_list", "comment", ), - ), + )), Importer: &schema.ResourceImporter{ - StateContext: ImportOauthForPartnerApplicationIntegration, + StateContext: TrackingImportWrapper(resources.OauthIntegrationForPartnerApplications, ImportOauthForPartnerApplicationIntegration), }, } } diff --git a/pkg/resources/password_policy.go b/pkg/resources/password_policy.go index 0aa5aaff98..0fba86bf06 100644 --- a/pkg/resources/password_policy.go +++ b/pkg/resources/password_policy.go @@ -3,6 +3,8 @@ package resources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -144,9 +146,9 @@ func PasswordPolicy() *schema.Resource { Update: UpdatePasswordPolicy, Delete: DeletePasswordPolicy, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.PasswordPolicy, customdiff.All( ComputedIfAnyAttributeChanged(passwordPolicySchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: passwordPolicySchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/primary_connection.go b/pkg/resources/primary_connection.go index 0966e6dc5a..3dc4f8444b 100644 --- a/pkg/resources/primary_connection.go +++ b/pkg/resources/primary_connection.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -54,20 +56,20 @@ var primaryConnectionSchema = map[string]*schema.Schema{ func PrimaryConnection() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextPrimaryConnection, - ReadContext: ReadContextPrimaryConnection, - UpdateContext: UpdateContextPrimaryConnection, - DeleteContext: DeleteContextPrimaryConnection, + CreateContext: TrackingCreateWrapper(resources.PrimaryConnection, CreateContextPrimaryConnection), + ReadContext: TrackingReadWrapper(resources.PrimaryConnection, ReadContextPrimaryConnection), + UpdateContext: TrackingUpdateWrapper(resources.PrimaryConnection, UpdateContextPrimaryConnection), + DeleteContext: TrackingDeleteWrapper(resources.PrimaryConnection, DeleteContextPrimaryConnection), - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.PrimaryConnection, customdiff.All( 
ComputedIfAnyAttributeChanged(primaryConnectionSchema, ShowOutputAttributeName, "comment", "is_primary", "enable_failover_to_accounts"), RecreateWhenResourceBoolFieldChangedExternally("is_primary", true), - ), + )), Description: "Resource used to manage primary connections. For managing replicated connection check resource [snowflake_secondary_connection](./secondary_connection). For more information, check [connection documentation](https://docs.snowflake.com/en/sql-reference/sql/create-connection.html).", Schema: primaryConnectionSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.PrimaryConnection, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/procedure.go b/pkg/resources/procedure.go index f1337acd2b..adb80061bb 100644 --- a/pkg/resources/procedure.go +++ b/pkg/resources/procedure.go @@ -8,6 +8,8 @@ import ( "slices" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -181,16 +183,16 @@ func Procedure() *schema.Resource { return &schema.Resource{ SchemaVersion: 2, - CreateContext: CreateContextProcedure, - ReadContext: ReadContextProcedure, - UpdateContext: UpdateContextProcedure, - DeleteContext: DeleteContextProcedure, + CreateContext: TrackingCreateWrapper(resources.Procedure, CreateContextProcedure), + ReadContext: TrackingReadWrapper(resources.Procedure, ReadContextProcedure), + UpdateContext: TrackingUpdateWrapper(resources.Procedure, UpdateContextProcedure), + DeleteContext: TrackingDeleteWrapper(resources.Procedure, DeleteContextProcedure), // TODO(SNOW-1348106): add `arguments` to ComputedIfAnyAttributeChanged for FullyQualifiedNameAttributeName. 
// This can't be done now because this function compares values without diff suppress. - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Procedure, customdiff.All( ComputedIfAnyAttributeChanged(procedureSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: procedureSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/resource_monitor.go b/pkg/resources/resource_monitor.go index ae8da17fc2..14e0a6557b 100644 --- a/pkg/resources/resource_monitor.go +++ b/pkg/resources/resource_monitor.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -97,22 +99,22 @@ var resourceMonitorSchema = map[string]*schema.Schema{ func ResourceMonitor() *schema.Resource { return &schema.Resource{ - CreateContext: CreateResourceMonitor, - ReadContext: ReadResourceMonitor(true), - UpdateContext: UpdateResourceMonitor, - DeleteContext: DeleteResourceMonitor, + CreateContext: TrackingCreateWrapper(resources.ResourceMonitor, CreateResourceMonitor), + ReadContext: TrackingReadWrapper(resources.ResourceMonitor, ReadResourceMonitor(true)), + UpdateContext: TrackingUpdateWrapper(resources.ResourceMonitor, UpdateResourceMonitor), + DeleteContext: TrackingDeleteWrapper(resources.ResourceMonitor, DeleteResourceMonitor), Schema: resourceMonitorSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportResourceMonitor, + StateContext: TrackingImportWrapper(resources.ResourceMonitor, ImportResourceMonitor), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ResourceMonitor, customdiff.All( ComputedIfAnyAttributeChanged(resourceMonitorSchema, ShowOutputAttributeName, "notify_users", 
"credit_quota", "frequency", "start_timestamp", "end_timestamp", "notify_triggers", "suspend_trigger", "suspend_immediate_trigger"), ForceNewIfAllKeysAreNotSet("notify_triggers", "notify_triggers", "suspend_trigger", "suspend_immediate_trigger"), ForceNewIfAllKeysAreNotSet("suspend_trigger", "notify_triggers", "suspend_trigger", "suspend_immediate_trigger"), ForceNewIfAllKeysAreNotSet("suspend_immediate_trigger", "notify_triggers", "suspend_trigger", "suspend_immediate_trigger"), - ), + )), } } diff --git a/pkg/resources/row_access_policy.go b/pkg/resources/row_access_policy.go index 08949fbbcb..f1028a7f82 100644 --- a/pkg/resources/row_access_policy.go +++ b/pkg/resources/row_access_policy.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -98,22 +100,22 @@ func RowAccessPolicy() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: CreateRowAccessPolicy, - ReadContext: ReadRowAccessPolicy, - UpdateContext: UpdateRowAccessPolicy, - DeleteContext: DeleteRowAccessPolicy, + CreateContext: TrackingCreateWrapper(resources.RowAccessPolicy, CreateRowAccessPolicy), + ReadContext: TrackingReadWrapper(resources.RowAccessPolicy, ReadRowAccessPolicy), + UpdateContext: TrackingUpdateWrapper(resources.RowAccessPolicy, UpdateRowAccessPolicy), + DeleteContext: TrackingDeleteWrapper(resources.RowAccessPolicy, DeleteRowAccessPolicy), Description: "Resource used to manage row access policy objects. 
For more information, check [row access policy documentation](https://docs.snowflake.com/en/sql-reference/sql/create-row-access-policy).", Schema: rowAccessPolicySchema, Importer: &schema.ResourceImporter{ - StateContext: ImportRowAccessPolicy, + StateContext: TrackingImportWrapper(resources.RowAccessPolicy, ImportRowAccessPolicy), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.RowAccessPolicy, customdiff.All( ComputedIfAnyAttributeChanged(rowAccessPolicySchema, ShowOutputAttributeName, "comment", "name"), ComputedIfAnyAttributeChanged(rowAccessPolicySchema, DescribeOutputAttributeName, "body", "name", "signature"), ComputedIfAnyAttributeChanged(rowAccessPolicySchema, FullyQualifiedNameAttributeName, "name"), - ), + )), StateUpgraders: []schema.StateUpgrader{ { diff --git a/pkg/resources/saml2_integration.go b/pkg/resources/saml2_integration.go index a921b96b7e..cde54cfd63 100644 --- a/pkg/resources/saml2_integration.go +++ b/pkg/resources/saml2_integration.go @@ -7,6 +7,8 @@ import ( "log" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" @@ -155,18 +157,18 @@ var saml2IntegrationSchema = map[string]*schema.Schema{ func SAML2Integration() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSAML2Integration, - ReadContext: ReadContextSAML2Integration(true), - UpdateContext: UpdateContextSAML2Integration, - DeleteContext: DeleteContextSAM2LIntegration, + CreateContext: TrackingCreateWrapper(resources.Saml2SecurityIntegration, CreateContextSAML2Integration), + ReadContext: TrackingReadWrapper(resources.Saml2SecurityIntegration, ReadContextSAML2Integration(true)), + UpdateContext: TrackingUpdateWrapper(resources.Saml2SecurityIntegration, UpdateContextSAML2Integration), + DeleteContext: 
TrackingDeleteWrapper(resources.Saml2SecurityIntegration, DeleteContextSAM2LIntegration), Description: "Resource used to manage saml2 security integration objects. For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-saml2).", Schema: saml2IntegrationSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportSaml2Integration, + StateContext: TrackingImportWrapper(resources.Saml2SecurityIntegration, ImportSaml2Integration), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Saml2SecurityIntegration, customdiff.All( ForceNewIfChangeToEmptySet("allowed_user_domains"), ForceNewIfChangeToEmptySet("allowed_email_patterns"), ForceNewIfChangeToEmptyString("saml2_snowflake_issuer_url"), @@ -177,7 +179,7 @@ func SAML2Integration() *schema.Resource { "saml2_sp_initiated_login_page_label", "saml2_enable_sp_initiated", "saml2_sign_request", "saml2_requtedted_nameid_format", "saml2_post_logout_redirect_url", "saml2_force_authn", "saml2_snowflake_issuer_url", "saml2_snowflake_acs_url", "allowed_user_domains", "allowed_email_patterns"), - ), + )), } } diff --git a/pkg/resources/schema.go b/pkg/resources/schema.go index e406eb31e2..66e4be7a28 100644 --- a/pkg/resources/schema.go +++ b/pkg/resources/schema.go @@ -102,7 +102,6 @@ func Schema() *schema.Resource { ComputedIfAnyAttributeChanged(schemaSchema, DescribeOutputAttributeName, "name"), ComputedIfAnyAttributeChanged(schemaSchema, FullyQualifiedNameAttributeName, "name"), ComputedIfAnyAttributeChanged(schemaParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllSchemaParameters), strings.ToLower)...), - // TODO(SNOW-1804424 - next pr): handle custom context in parameters customdiff schemaParametersCustomDiff, )), diff --git a/pkg/resources/scim_integration.go b/pkg/resources/scim_integration.go index 8ca1aeedb2..bf97e28f1a 100644 --- a/pkg/resources/scim_integration.go 
+++ b/pkg/resources/scim_integration.go @@ -7,6 +7,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" @@ -97,21 +99,21 @@ var scimIntegrationSchema = map[string]*schema.Schema{ func SCIMIntegration() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSCIMIntegration, - ReadContext: ReadContextSCIMIntegration(true), - UpdateContext: UpdateContextSCIMIntegration, - DeleteContext: DeleteContextSCIMIntegration, + CreateContext: TrackingCreateWrapper(resources.ScimSecurityIntegration, CreateContextSCIMIntegration), + ReadContext: TrackingReadWrapper(resources.ScimSecurityIntegration, ReadContextSCIMIntegration(true)), + UpdateContext: TrackingUpdateWrapper(resources.ScimSecurityIntegration, UpdateContextSCIMIntegration), + DeleteContext: TrackingDeleteWrapper(resources.ScimSecurityIntegration, DeleteContextSCIMIntegration), Description: "Resource used to manage scim security integration objects. 
For more information, check [security integrations documentation](https://docs.snowflake.com/en/sql-reference/sql/create-security-integration-scim).", Schema: scimIntegrationSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportScimIntegration, + StateContext: TrackingImportWrapper(resources.ScimSecurityIntegration, ImportScimIntegration), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ScimSecurityIntegration, customdiff.All( ComputedIfAnyAttributeChanged(scimIntegrationSchema, ShowOutputAttributeName, "enabled", "scim_client", "comment"), ComputedIfAnyAttributeChanged(scimIntegrationSchema, DescribeOutputAttributeName, "enabled", "comment", "network_policy", "run_as_role", "sync_password"), - ), + )), SchemaVersion: 2, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/secondary_connection.go b/pkg/resources/secondary_connection.go index c58b7a8ed3..6f9dbca91c 100644 --- a/pkg/resources/secondary_connection.go +++ b/pkg/resources/secondary_connection.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -52,20 +54,20 @@ var secondaryConnectionSchema = map[string]*schema.Schema{ func SecondaryConnection() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSecondaryConnection, - ReadContext: ReadContextSecondaryConnection, - UpdateContext: UpdateContextSecondaryConnection, - DeleteContext: DeleteContextSecondaryConnection, + CreateContext: TrackingCreateWrapper(resources.SecondaryConnection, CreateContextSecondaryConnection), + ReadContext: TrackingReadWrapper(resources.SecondaryConnection, ReadContextSecondaryConnection), + UpdateContext: 
TrackingUpdateWrapper(resources.SecondaryConnection, UpdateContextSecondaryConnection), + DeleteContext: TrackingDeleteWrapper(resources.SecondaryConnection, DeleteContextSecondaryConnection), Description: "Resource used to manage secondary (replicated) connections. To manage primary connection check resource [snowflake_primary_connection](./primary_connection). For more information, check [connection documentation](https://docs.snowflake.com/en/sql-reference/sql/create-connection.html).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecondaryConnection, customdiff.All( ComputedIfAnyAttributeChanged(secondaryConnectionSchema, ShowOutputAttributeName, "comment", "is_primary"), RecreateWhenResourceBoolFieldChangedExternally("is_primary", false), - ), + )), Schema: secondaryConnectionSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.SecondaryConnection, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/secondary_database.go b/pkg/resources/secondary_database.go index 30298a4084..448f9e5179 100644 --- a/pkg/resources/secondary_database.go +++ b/pkg/resources/secondary_database.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -46,19 +48,19 @@ var secondaryDatabaseSchema = map[string]*schema.Schema{ func SecondaryDatabase() *schema.Resource { return &schema.Resource{ - CreateContext: CreateSecondaryDatabase, - UpdateContext: UpdateSecondaryDatabase, - ReadContext: ReadSecondaryDatabase, - DeleteContext: DeleteSecondaryDatabase, + CreateContext: TrackingCreateWrapper(resources.SecondaryDatabase, 
CreateSecondaryDatabase), + UpdateContext: TrackingUpdateWrapper(resources.SecondaryDatabase, UpdateSecondaryDatabase), + ReadContext: TrackingReadWrapper(resources.SecondaryDatabase, ReadSecondaryDatabase), + DeleteContext: TrackingDeleteWrapper(resources.SecondaryDatabase, DeleteSecondaryDatabase), Description: "A secondary database creates a replica of an existing primary database (i.e. a secondary database). For more information about database replication, see [Introduction to database replication across multiple accounts](https://docs.snowflake.com/en/user-guide/db-replication-intro).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecondaryDatabase, customdiff.All( databaseParametersCustomDiff, ComputedIfAnyAttributeChanged(secondaryDatabaseSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: collections.MergeMaps(secondaryDatabaseSchema, databaseParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.SecondaryDatabase, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/secret_with_basic_authentication.go b/pkg/resources/secret_with_basic_authentication.go index 6bec639e38..4eb2025671 100644 --- a/pkg/resources/secret_with_basic_authentication.go +++ b/pkg/resources/secret_with_basic_authentication.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -36,21 +38,21 @@ var secretBasicAuthenticationSchema = func() map[string]*schema.Schema { func SecretWithBasicAuthentication() *schema.Resource { return &schema.Resource{ - CreateContext: 
CreateContextSecretWithBasicAuthentication, - ReadContext: ReadContextSecretWithBasicAuthentication, - UpdateContext: UpdateContextSecretWithBasicAuthentication, - DeleteContext: DeleteContextSecret, + CreateContext: TrackingCreateWrapper(resources.SecretWithBasicAuthentication, CreateContextSecretWithBasicAuthentication), + ReadContext: TrackingReadWrapper(resources.SecretWithBasicAuthentication, ReadContextSecretWithBasicAuthentication), + UpdateContext: TrackingUpdateWrapper(resources.SecretWithBasicAuthentication, UpdateContextSecretWithBasicAuthentication), + DeleteContext: TrackingDeleteWrapper(resources.SecretWithBasicAuthentication, DeleteContextSecret), Description: "Resource used to manage secret objects with Basic Authentication. For more information, check [secret documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecretWithBasicAuthentication, customdiff.All( ComputedIfAnyAttributeChanged(secretBasicAuthenticationSchema, ShowOutputAttributeName, "comment"), ComputedIfAnyAttributeChanged(secretBasicAuthenticationSchema, DescribeOutputAttributeName, "username"), RecreateWhenSecretTypeChangedExternally(sdk.SecretTypePassword), - ), + )), Schema: secretBasicAuthenticationSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportSecretWithBasicAuthentication, + StateContext: TrackingImportWrapper(resources.SecretWithBasicAuthentication, ImportSecretWithBasicAuthentication), }, } } diff --git a/pkg/resources/secret_with_generic_string.go b/pkg/resources/secret_with_generic_string.go index 7715abd818..224bcdad31 100644 --- a/pkg/resources/secret_with_generic_string.go +++ b/pkg/resources/secret_with_generic_string.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -30,21 +32,21 @@ var secretGenericStringSchema = func() map[string]*schema.Schema { func SecretWithGenericString() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSecretWithGenericString, - ReadContext: ReadContextSecretWithGenericString, - UpdateContext: UpdateContextSecretWithGenericString, - DeleteContext: DeleteContextSecret, + CreateContext: TrackingCreateWrapper(resources.SecretWithGenericString, CreateContextSecretWithGenericString), + ReadContext: TrackingReadWrapper(resources.SecretWithGenericString, ReadContextSecretWithGenericString), + UpdateContext: TrackingUpdateWrapper(resources.SecretWithGenericString, UpdateContextSecretWithGenericString), + DeleteContext: TrackingDeleteWrapper(resources.SecretWithGenericString, DeleteContextSecret), Description: "Resource used to manage secret objects with Generic String. 
For more information, check [secret documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecretWithGenericString, customdiff.All( ComputedIfAnyAttributeChanged(secretGenericStringSchema, ShowOutputAttributeName, "comment"), ComputedIfAnyAttributeChanged(secretGenericStringSchema, DescribeOutputAttributeName), RecreateWhenSecretTypeChangedExternally(sdk.SecretTypeGenericString), - ), + )), Schema: secretGenericStringSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportSecretWithGenericString, + StateContext: TrackingImportWrapper(resources.SecretWithGenericString, ImportSecretWithGenericString), }, } } diff --git a/pkg/resources/secret_with_oauth_authorization_code_grant.go b/pkg/resources/secret_with_oauth_authorization_code_grant.go index 7ce5493ecf..6a9eaa85cf 100644 --- a/pkg/resources/secret_with_oauth_authorization_code_grant.go +++ b/pkg/resources/secret_with_oauth_authorization_code_grant.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -43,22 +45,22 @@ var secretAuthorizationCodeGrantSchema = func() map[string]*schema.Schema { func SecretWithAuthorizationCodeGrant() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSecretWithAuthorizationCodeGrant, - ReadContext: ReadContextSecretWithAuthorizationCodeGrant(true), - UpdateContext: UpdateContextSecretWithAuthorizationCodeGrant, - DeleteContext: DeleteContextSecret, + CreateContext: TrackingCreateWrapper(resources.SecretWithAuthorizationCodeGrant, CreateContextSecretWithAuthorizationCodeGrant), + ReadContext: 
TrackingReadWrapper(resources.SecretWithAuthorizationCodeGrant, ReadContextSecretWithAuthorizationCodeGrant(true)), + UpdateContext: TrackingUpdateWrapper(resources.SecretWithAuthorizationCodeGrant, UpdateContextSecretWithAuthorizationCodeGrant), + DeleteContext: TrackingDeleteWrapper(resources.SecretWithAuthorizationCodeGrant, DeleteContextSecret), Description: "Resource used to manage secret objects with OAuth Authorization Code Grant. For more information, check [secret documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret).", Schema: secretAuthorizationCodeGrantSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportSecretWithAuthorizationCodeGrant, + StateContext: TrackingImportWrapper(resources.SecretWithAuthorizationCodeGrant, ImportSecretWithAuthorizationCodeGrant), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecretWithAuthorizationCodeGrant, customdiff.All( ComputedIfAnyAttributeChanged(secretAuthorizationCodeGrantSchema, ShowOutputAttributeName, "comment"), ComputedIfAnyAttributeChanged(secretAuthorizationCodeGrantSchema, DescribeOutputAttributeName, "oauth_refresh_token_expiry_time", "api_authentication"), RecreateWhenSecretTypeChangedExternally(sdk.SecretTypeOAuth2AuthorizationCodeGrant), - ), + )), } } diff --git a/pkg/resources/secret_with_oauth_client_credentials.go b/pkg/resources/secret_with_oauth_client_credentials.go index 1e46e40822..1df7c77feb 100644 --- a/pkg/resources/secret_with_oauth_client_credentials.go +++ b/pkg/resources/secret_with_oauth_client_credentials.go @@ -6,6 +6,8 @@ import ( "fmt" "reflect" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -37,21 +39,21 @@ var secretClientCredentialsSchema 
= func() map[string]*schema.Schema { func SecretWithClientCredentials() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextSecretWithClientCredentials, - ReadContext: ReadContextSecretWithClientCredentials, - UpdateContext: UpdateContextSecretWithClientCredentials, - DeleteContext: DeleteContextSecret, + CreateContext: TrackingCreateWrapper(resources.SecretWithClientCredentials, CreateContextSecretWithClientCredentials), + ReadContext: TrackingReadWrapper(resources.SecretWithClientCredentials, ReadContextSecretWithClientCredentials), + UpdateContext: TrackingUpdateWrapper(resources.SecretWithClientCredentials, UpdateContextSecretWithClientCredentials), + DeleteContext: TrackingDeleteWrapper(resources.SecretWithClientCredentials, DeleteContextSecret), Description: "Resource used to manage secret objects with OAuth Client Credentials. For more information, check [secret documentation](https://docs.snowflake.com/en/sql-reference/sql/create-secret).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SecretWithClientCredentials, customdiff.All( ComputedIfAnyAttributeChanged(secretClientCredentialsSchema, DescribeOutputAttributeName, "oauth_scopes", "api_authentication"), ComputedIfAnyAttributeChanged(secretClientCredentialsSchema, ShowOutputAttributeName, "comment"), RecreateWhenSecretTypeChangedExternally(sdk.SecretTypeOAuth2ClientCredentials), - ), + )), Schema: secretClientCredentialsSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportSecretWithClientCredentials, + StateContext: TrackingImportWrapper(resources.SecretWithClientCredentials, ImportSecretWithClientCredentials), }, } } diff --git a/pkg/resources/share.go b/pkg/resources/share.go index 7ebaf4a25d..8c4d5ebe33 100644 --- a/pkg/resources/share.go +++ b/pkg/resources/share.go @@ -7,6 +7,8 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -52,7 +54,7 @@ func Share() *schema.Resource { Schema: shareSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.Share, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/shared_database.go b/pkg/resources/shared_database.go index 18a882d9a8..537bafb03a 100644 --- a/pkg/resources/shared_database.go +++ b/pkg/resources/shared_database.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -47,19 +49,19 @@ var sharedDatabaseSchema = map[string]*schema.Schema{ func SharedDatabase() *schema.Resource { return &schema.Resource{ - CreateContext: CreateSharedDatabase, - UpdateContext: UpdateSharedDatabase, - ReadContext: ReadSharedDatabase, - DeleteContext: DeleteSharedDatabase, + CreateContext: TrackingCreateWrapper(resources.SharedDatabase, CreateSharedDatabase), + UpdateContext: TrackingUpdateWrapper(resources.SharedDatabase, UpdateSharedDatabase), + ReadContext: TrackingReadWrapper(resources.SharedDatabase, ReadSharedDatabase), + DeleteContext: TrackingDeleteWrapper(resources.SharedDatabase, DeleteSharedDatabase), Description: "A shared database creates a database from a share provided by another Snowflake account. 
For more information about shares, see [Introduction to Secure Data Sharing](https://docs.snowflake.com/en/user-guide/data-sharing-intro).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.SharedDatabase, customdiff.All( ComputedIfAnyAttributeChanged(sharedDatabaseSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: collections.MergeMaps(sharedDatabaseSchema, sharedDatabaseParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.AccountObjectIdentifier], + StateContext: TrackingImportWrapper(resources.SharedDatabase, ImportName[sdk.AccountObjectIdentifier]), }, } } diff --git a/pkg/resources/stage.go b/pkg/resources/stage.go index 07f9bea70f..314454716c 100644 --- a/pkg/resources/stage.go +++ b/pkg/resources/stage.go @@ -6,6 +6,8 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -100,10 +102,10 @@ var stageSchema = map[string]*schema.Schema{ // TODO (SNOW-1019005): Remove snowflake package that is used in Create and Update operations func Stage() *schema.Resource { return &schema.Resource{ - CreateContext: CreateStage, - ReadContext: ReadStage, - UpdateContext: UpdateStage, - DeleteContext: DeleteStage, + CreateContext: TrackingCreateWrapper(resources.Stage, CreateStage), + ReadContext: TrackingReadWrapper(resources.Stage, ReadStage), + UpdateContext: TrackingUpdateWrapper(resources.Stage, UpdateStage), + DeleteContext: TrackingDeleteWrapper(resources.Stage, DeleteStage), Schema: stageSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/stream_on_directory_table.go b/pkg/resources/stream_on_directory_table.go index 491692d7dd..c341f6b9f7 100644 --- a/pkg/resources/stream_on_directory_table.go +++ b/pkg/resources/stream_on_directory_table.go @@ -6,6 +6,8 @@ 
import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -32,23 +34,23 @@ var streamOnDirectoryTableSchema = func() map[string]*schema.Schema { func StreamOnDirectoryTable() *schema.Resource { return &schema.Resource{ - CreateContext: CreateStreamOnDirectoryTable(false), - ReadContext: ReadStreamOnDirectoryTable(true), - UpdateContext: UpdateStreamOnDirectoryTable, - DeleteContext: DeleteStreamContext, + CreateContext: TrackingCreateWrapper(resources.StreamOnDirectoryTable, CreateStreamOnDirectoryTable(false)), + ReadContext: TrackingReadWrapper(resources.StreamOnDirectoryTable, ReadStreamOnDirectoryTable(true)), + UpdateContext: TrackingUpdateWrapper(resources.StreamOnDirectoryTable, UpdateStreamOnDirectoryTable), + DeleteContext: TrackingDeleteWrapper(resources.StreamOnDirectoryTable, DeleteStreamContext), Description: "Resource used to manage streams on directory tables. 
For more information, check [stream documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.StreamOnDirectoryTable, customdiff.All( ComputedIfAnyAttributeChanged(streamOnDirectoryTableSchema, ShowOutputAttributeName, "stage", "comment"), ComputedIfAnyAttributeChanged(streamOnDirectoryTableSchema, DescribeOutputAttributeName, "stage", "comment"), RecreateWhenStreamIsStale(), RecreateWhenStreamTypeChangedExternally(sdk.StreamSourceTypeStage), - ), + )), Schema: streamOnDirectoryTableSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.SchemaObjectIdentifier], + StateContext: TrackingImportWrapper(resources.StreamOnDirectoryTable, ImportName[sdk.SchemaObjectIdentifier]), }, } } diff --git a/pkg/resources/stream_on_external_table.go b/pkg/resources/stream_on_external_table.go index bee62563d7..05d4b3289a 100644 --- a/pkg/resources/stream_on_external_table.go +++ b/pkg/resources/stream_on_external_table.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -43,23 +45,23 @@ var streamOnExternalTableSchema = func() map[string]*schema.Schema { func StreamOnExternalTable() *schema.Resource { return &schema.Resource{ - CreateContext: CreateStreamOnExternalTable(false), - ReadContext: ReadStreamOnExternalTable(true), - UpdateContext: UpdateStreamOnExternalTable, - DeleteContext: DeleteStreamContext, + CreateContext: TrackingCreateWrapper(resources.StreamOnExternalTable, CreateStreamOnExternalTable(false)), + ReadContext: TrackingReadWrapper(resources.StreamOnExternalTable, ReadStreamOnExternalTable(true)), + UpdateContext: TrackingUpdateWrapper(resources.StreamOnExternalTable, UpdateStreamOnExternalTable), + 
DeleteContext: TrackingDeleteWrapper(resources.StreamOnExternalTable, DeleteStreamContext), Description: "Resource used to manage streams on external tables. For more information, check [stream documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.StreamOnExternalTable, customdiff.All( ComputedIfAnyAttributeChanged(streamOnExternalTableSchema, ShowOutputAttributeName, "external_table", "insert_only", "comment"), ComputedIfAnyAttributeChanged(streamOnExternalTableSchema, DescribeOutputAttributeName, "external_table", "insert_only", "comment"), RecreateWhenStreamIsStale(), RecreateWhenStreamTypeChangedExternally(sdk.StreamSourceTypeExternalTable), - ), + )), Schema: streamOnExternalTableSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportStreamOnExternalTable, + StateContext: TrackingImportWrapper(resources.StreamOnExternalTable, ImportStreamOnExternalTable), }, } } diff --git a/pkg/resources/stream_on_table.go b/pkg/resources/stream_on_table.go index 18a99c1b55..cc9b56f371 100644 --- a/pkg/resources/stream_on_table.go +++ b/pkg/resources/stream_on_table.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -50,23 +52,23 @@ var streamOnTableSchema = func() map[string]*schema.Schema { func StreamOnTable() *schema.Resource { return &schema.Resource{ - CreateContext: CreateStreamOnTable(false), - ReadContext: ReadStreamOnTable(true), - UpdateContext: UpdateStreamOnTable, - DeleteContext: DeleteStreamContext, + CreateContext: TrackingCreateWrapper(resources.StreamOnTable, CreateStreamOnTable(false)), + ReadContext: TrackingReadWrapper(resources.StreamOnTable, ReadStreamOnTable(true)), + UpdateContext: 
TrackingUpdateWrapper(resources.StreamOnTable, UpdateStreamOnTable), + DeleteContext: TrackingDeleteWrapper(resources.StreamOnTable, DeleteStreamContext), Description: "Resource used to manage streams on tables. For more information, check [stream documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.StreamOnTable, customdiff.All( ComputedIfAnyAttributeChanged(streamOnTableSchema, ShowOutputAttributeName, "table", "append_only", "comment"), ComputedIfAnyAttributeChanged(streamOnTableSchema, DescribeOutputAttributeName, "table", "append_only", "comment"), RecreateWhenStreamIsStale(), RecreateWhenStreamTypeChangedExternally(sdk.StreamSourceTypeTable), - ), + )), Schema: streamOnTableSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportStreamOnTable, + StateContext: TrackingImportWrapper(resources.StreamOnTable, ImportStreamOnTable), }, } } diff --git a/pkg/resources/stream_on_view.go b/pkg/resources/stream_on_view.go index 3b6eb72941..b093ed726b 100644 --- a/pkg/resources/stream_on_view.go +++ b/pkg/resources/stream_on_view.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -50,23 +52,23 @@ var StreamOnViewSchema = func() map[string]*schema.Schema { func StreamOnView() *schema.Resource { return &schema.Resource{ - CreateContext: CreateStreamOnView(false), - ReadContext: ReadStreamOnView(true), - UpdateContext: UpdateStreamOnView, - DeleteContext: DeleteStreamContext, + CreateContext: TrackingCreateWrapper(resources.StreamOnView, CreateStreamOnView(false)), + ReadContext: TrackingReadWrapper(resources.StreamOnView, ReadStreamOnView(true)), + UpdateContext: TrackingUpdateWrapper(resources.StreamOnView, 
UpdateStreamOnView), + DeleteContext: TrackingDeleteWrapper(resources.StreamOnView, DeleteStreamContext), Description: "Resource used to manage streams on views. For more information, check [stream documentation](https://docs.snowflake.com/en/sql-reference/sql/create-stream).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.StreamOnView, customdiff.All( ComputedIfAnyAttributeChanged(StreamOnViewSchema, ShowOutputAttributeName, "view", "append_only", "comment"), ComputedIfAnyAttributeChanged(StreamOnViewSchema, DescribeOutputAttributeName, "view", "append_only", "comment"), RecreateWhenStreamIsStale(), RecreateWhenStreamTypeChangedExternally(sdk.StreamSourceTypeView), - ), + )), Schema: StreamOnViewSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportStreamOnView, + StateContext: TrackingImportWrapper(resources.StreamOnView, ImportStreamOnView), }, } } diff --git a/pkg/resources/streamlit.go b/pkg/resources/streamlit.go index 830e7c0924..c7b8f84ca1 100644 --- a/pkg/resources/streamlit.go +++ b/pkg/resources/streamlit.go @@ -6,6 +6,8 @@ import ( "fmt" "path" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/go-cty/cty" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -106,22 +108,22 @@ var streamlitSchema = map[string]*schema.Schema{ func Streamlit() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextStreamlit, - ReadContext: ReadContextStreamlit, - UpdateContext: UpdateContextStreamlit, - DeleteContext: DeleteContextStreamlit, + CreateContext: TrackingCreateWrapper(resources.Streamlit, CreateContextStreamlit), + ReadContext: TrackingReadWrapper(resources.Streamlit, ReadContextStreamlit), + UpdateContext: TrackingUpdateWrapper(resources.Streamlit, UpdateContextStreamlit), + DeleteContext: TrackingDeleteWrapper(resources.Streamlit, DeleteContextStreamlit), Description: "Resource used to manage streamlits objects. 
For more information, check [streamlit documentation](https://docs.snowflake.com/en/sql-reference/commands-streamlit).", Schema: streamlitSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportStreamlit, + StateContext: TrackingImportWrapper(resources.Streamlit, ImportStreamlit), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Streamlit, customdiff.All( ComputedIfAnyAttributeChanged(streamlitSchema, ShowOutputAttributeName, "name", "title", "comment", "query_warehouse"), ComputedIfAnyAttributeChanged(streamlitSchema, FullyQualifiedNameAttributeName, "name"), ComputedIfAnyAttributeChanged(streamlitSchema, DescribeOutputAttributeName, "title", "comment", "root_location", "main_file", "query_warehouse", "external_access_integrations"), - ), + )), SchemaVersion: 1, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/table.go b/pkg/resources/table.go index 100d89cffa..ce39f90765 100644 --- a/pkg/resources/table.go +++ b/pkg/resources/table.go @@ -7,6 +7,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -211,9 +213,9 @@ func Table() *schema.Resource { Update: UpdateTable, Delete: DeleteTable, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Table, customdiff.All( ComputedIfAnyAttributeChanged(tableSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: tableSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/tag.go b/pkg/resources/tag.go index 39ba4e9990..668617450e 100644 --- a/pkg/resources/tag.go +++ b/pkg/resources/tag.go @@ -6,6 +6,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/go-cty/cty" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -108,20 +110,20 @@ func Tag() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: CreateContextTag, - ReadContext: ReadContextTag, - UpdateContext: UpdateContextTag, - DeleteContext: DeleteContextTag, + CreateContext: TrackingCreateWrapper(resources.Tag, CreateContextTag), + ReadContext: TrackingReadWrapper(resources.Tag, ReadContextTag), + UpdateContext: TrackingUpdateWrapper(resources.Tag, UpdateContextTag), + DeleteContext: TrackingDeleteWrapper(resources.Tag, DeleteContextTag), Description: "Resource used to manage tags. For more information, check [tag documentation](https://docs.snowflake.com/en/sql-reference/sql/create-tag).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Tag, customdiff.All( ComputedIfAnyAttributeChanged(tagSchema, ShowOutputAttributeName, "name", "comment", "allowed_values"), ComputedIfAnyAttributeChanged(tagSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: tagSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportName[sdk.SchemaObjectIdentifier], + StateContext: TrackingImportWrapper(resources.Tag, ImportName[sdk.SchemaObjectIdentifier]), }, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/tag_association.go b/pkg/resources/tag_association.go index 629aa234f9..f9141ab1fe 100644 --- a/pkg/resources/tag_association.go +++ b/pkg/resources/tag_association.go @@ -8,6 +8,8 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -84,10 +86,10 @@ var tagAssociationSchema = map[string]*schema.Schema{ // TagAssociation returns a pointer to the resource representing a schema. 
func TagAssociation() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextTagAssociation, - ReadContext: ReadContextTagAssociation, - UpdateContext: UpdateContextTagAssociation, - DeleteContext: DeleteContextTagAssociation, + CreateContext: TrackingCreateWrapper(resources.TagAssociation, CreateContextTagAssociation), + ReadContext: TrackingReadWrapper(resources.TagAssociation, ReadContextTagAssociation), + UpdateContext: TrackingUpdateWrapper(resources.TagAssociation, UpdateContextTagAssociation), + DeleteContext: TrackingDeleteWrapper(resources.TagAssociation, DeleteContextTagAssociation), Schema: tagAssociationSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/tag_masking_policy_association.go b/pkg/resources/tag_masking_policy_association.go index 270ac027bd..84c4ed21bf 100644 --- a/pkg/resources/tag_masking_policy_association.go +++ b/pkg/resources/tag_masking_policy_association.go @@ -13,7 +13,7 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" - providerresources "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" ) @@ -77,16 +77,16 @@ func parseAttachmentID(id string) (*attachmentID, error) { // Schema returns a pointer to the resource representing a schema. 
func TagMaskingPolicyAssociation() *schema.Resource { return &schema.Resource{ - CreateContext: CreateContextTagMaskingPolicyAssociation, - ReadContext: ReadContextTagMaskingPolicyAssociation, - DeleteContext: DeleteContextTagMaskingPolicyAssociation, + CreateContext: TrackingCreateWrapper(resources.TagMaskingPolicyAssociation, CreateContextTagMaskingPolicyAssociation), + ReadContext: TrackingReadWrapper(resources.TagMaskingPolicyAssociation, ReadContextTagMaskingPolicyAssociation), + DeleteContext: TrackingDeleteWrapper(resources.TagMaskingPolicyAssociation, DeleteContextTagMaskingPolicyAssociation), Schema: mpAttachmentPolicySchema, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Description: "Attach a masking policy to a tag. Requires a current warehouse to be set. Either with SNOWFLAKE_WAREHOUSE env variable or in current session. If no warehouse is provided, a temporary warehouse will be created.", - DeprecationMessage: deprecatedResourceDescription(string(providerresources.Tag)), + DeprecationMessage: deprecatedResourceDescription(string(resources.Tag)), } } diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 078e29686d..325e6ee2a6 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -8,6 +8,8 @@ import ( "strconv" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" @@ -153,9 +155,9 @@ func Task() *schema.Resource { Read: ReadTask, Update: UpdateTask, Delete: DeleteTask, - CustomizeDiff: customdiff.ForceNewIfChange("when", func(ctx context.Context, old, new, meta any) bool { + CustomizeDiff: TrackingCustomDiffWrapper(resources.Task, customdiff.ForceNewIfChange("when", func(ctx context.Context, old, new, meta any) bool { return 
old.(string) != "" && new.(string) == "" - }), + })), Schema: taskSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/unsafe_execute.go b/pkg/resources/unsafe_execute.go index d09ffc3ba1..8d56630ea6 100644 --- a/pkg/resources/unsafe_execute.go +++ b/pkg/resources/unsafe_execute.go @@ -5,6 +5,8 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/hashicorp/go-uuid" @@ -53,7 +55,7 @@ func UnsafeExecute() *schema.Resource { Description: "Experimental resource allowing execution of ANY SQL statement. It may destroy resources if used incorrectly. It may behave incorrectly combined with other resources. Use at your own risk.", - CustomizeDiff: func(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + CustomizeDiff: TrackingCustomDiffWrapper(resources.UnsafeExecute, func(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { if diff.HasChange("query") { err := diff.SetNewComputed("query_results") if err != nil { @@ -61,7 +63,7 @@ func UnsafeExecute() *schema.Resource { } } return nil - }, + }), } } diff --git a/pkg/resources/usage_tracking_acceptance_test.go b/pkg/resources/usage_tracking_acceptance_test.go new file mode 100644 index 0000000000..a50dbc855f --- /dev/null +++ b/pkg/resources/usage_tracking_acceptance_test.go @@ -0,0 +1,108 @@ +package resources_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAcc_CompleteUsageTracking(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomDatabaseObjectIdentifier() + comment := random.Comment() + + schemaModel := model.Schema("test", id.DatabaseName(), id.Name()) + schemaModelWithComment := model.Schema("test", id.DatabaseName(), id.Name()).WithComment(comment) + + assertQueryMetadataExists := func(t *testing.T, operation tracking.Operation, query string) resource.TestCheckFunc { + t.Helper() + return func(state *terraform.State) error { + queryHistory := acc.TestClient().InformationSchema.GetQueryHistory(t, 60) + expectedMetadata := tracking.NewVersionedMetadata(resources.Schema, operation) + if _, err := collections.FindFirst(queryHistory, func(history helpers.QueryHistory) bool { + if metadata, err := tracking.ParseMetadata(history.QueryText); err == nil { + if expectedMetadata == metadata && strings.Contains(history.QueryText, query) { + return true + } + } + return false + }); err != nil { + return fmt.Errorf("query history does not contain query metadata: %v with query containing: %s", expectedMetadata, query) + } + return nil + } + } + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: 
acc.TestAccProtoV6ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + PreCheck: func() { acc.TestAccPreCheck(t) }, + CheckDestroy: acc.CheckDestroy(t, resources.Schema), + Steps: []resource.TestStep{ + // Create + { + Config: config.FromModel(t, schemaModel), + Check: assert.AssertThat(t, + resourceassert.SchemaResource(t, schemaModel.ResourceReference()). + HasNameString(id.Name()). + HasCommentString(""), + assert.Check(assertQueryMetadataExists(t, tracking.CreateOperation, fmt.Sprintf(`CREATE SCHEMA %s`, id.FullyQualifiedName()))), + ), + }, + // Import + { + ResourceName: schemaModel.ResourceReference(), + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceassert.ImportedSchemaResource(t, id.FullyQualifiedName()). + HasCommentString(""), + assert.CheckImport(func(states []*terraform.InstanceState) error { + return assertQueryMetadataExists(t, tracking.ImportOperation, fmt.Sprintf(`SHOW SCHEMAS LIKE '%s'`, id.Name()))(nil) + }), + ), + }, + // Update + CustomDiff (parameters) + Read + { + Config: config.FromModel(t, schemaModelWithComment), + Check: assert.AssertThat(t, + resourceassert.SchemaResource(t, schemaModelWithComment.ResourceReference()). + HasNameString(id.Name()). 
+ HasCommentString(comment), + assert.Check(assertQueryMetadataExists(t, tracking.UpdateOperation, fmt.Sprintf(`ALTER SCHEMA %s SET COMMENT = '%s'`, id.FullyQualifiedName(), comment))), + assert.Check(assertQueryMetadataExists(t, tracking.ReadOperation, fmt.Sprintf(`SHOW SCHEMAS LIKE '%s'`, id.Name()))), + assert.Check(assertQueryMetadataExists(t, tracking.CustomDiffOperation, fmt.Sprintf(`SHOW PARAMETERS IN SCHEMA %s`, id.FullyQualifiedName()))), + ), + }, + // Delete + { + Config: config.FromModel(t, schemaModelWithComment), + Destroy: true, + Check: assert.AssertThat(t, + assert.Check(assertQueryMetadataExists(t, tracking.DeleteOperation, fmt.Sprintf(`DROP SCHEMA IF EXISTS %s`, id.FullyQualifiedName()))), + ), + }, + }, + }) +} diff --git a/pkg/resources/user.go b/pkg/resources/user.go index 95ff6aeeea..8bf0fde1b0 100644 --- a/pkg/resources/user.go +++ b/pkg/resources/user.go @@ -7,6 +7,8 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" @@ -185,25 +187,25 @@ func User() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: GetCreateUserFunc(sdk.UserTypePerson), - UpdateContext: GetUpdateUserFunc(sdk.UserTypePerson), - ReadContext: GetReadUserFunc(sdk.UserTypePerson, true), - DeleteContext: DeleteUser, + CreateContext: TrackingCreateWrapper(resources.User, GetCreateUserFunc(sdk.UserTypePerson)), + UpdateContext: TrackingUpdateWrapper(resources.User, GetUpdateUserFunc(sdk.UserTypePerson)), + ReadContext: TrackingReadWrapper(resources.User, GetReadUserFunc(sdk.UserTypePerson, true)), + DeleteContext: TrackingDeleteWrapper(resources.User, DeleteUser), Description: "Resource used to manage user objects. 
For more information, check [user documentation](https://docs.snowflake.com/en/sql-reference/commands-user-role#user-management).", Schema: collections.MergeMaps(userSchema, userParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: GetImportUserFunc(sdk.UserTypePerson), + StateContext: TrackingImportWrapper(resources.User, GetImportUserFunc(sdk.UserTypePerson)), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.User, customdiff.All( // TODO [SNOW-1629468 - next pr]: test "default_role", "default_secondary_roles" ComputedIfAnyAttributeChanged(userSchema, ShowOutputAttributeName, userExternalChangesAttributes...), ComputedIfAnyAttributeChanged(userParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllUserParameters), strings.ToLower)...), ComputedIfAnyAttributeChanged(userSchema, FullyQualifiedNameAttributeName, "name"), userParametersCustomDiff, RecreateWhenUserTypeChangedExternally(sdk.UserTypePerson), - ), + )), StateUpgraders: []schema.StateUpgrader{ { @@ -218,47 +220,47 @@ func User() *schema.Resource { func ServiceUser() *schema.Resource { return &schema.Resource{ - CreateContext: GetCreateUserFunc(sdk.UserTypeService), - UpdateContext: GetUpdateUserFunc(sdk.UserTypeService), - ReadContext: GetReadUserFunc(sdk.UserTypeService, true), - DeleteContext: DeleteUser, + CreateContext: TrackingCreateWrapper(resources.ServiceUser, GetCreateUserFunc(sdk.UserTypeService)), + UpdateContext: TrackingUpdateWrapper(resources.ServiceUser, GetUpdateUserFunc(sdk.UserTypeService)), + ReadContext: TrackingReadWrapper(resources.ServiceUser, GetReadUserFunc(sdk.UserTypeService, true)), + DeleteContext: TrackingDeleteWrapper(resources.ServiceUser, DeleteUser), Description: "Resource used to manage service user objects. 
For more information, check [user documentation](https://docs.snowflake.com/en/sql-reference/commands-user-role#user-management).", Schema: collections.MergeMaps(serviceUserSchema, userParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: GetImportUserFunc(sdk.UserTypeService), + StateContext: TrackingImportWrapper(resources.ServiceUser, GetImportUserFunc(sdk.UserTypeService)), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.ServiceUser, customdiff.All( ComputedIfAnyAttributeChanged(userSchema, ShowOutputAttributeName, serviceUserExternalChangesAttributes...), ComputedIfAnyAttributeChanged(userParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllUserParameters), strings.ToLower)...), ComputedIfAnyAttributeChanged(userSchema, FullyQualifiedNameAttributeName, "name"), userParametersCustomDiff, RecreateWhenUserTypeChangedExternally(sdk.UserTypeService), - ), + )), } } func LegacyServiceUser() *schema.Resource { return &schema.Resource{ - CreateContext: GetCreateUserFunc(sdk.UserTypeLegacyService), - UpdateContext: GetUpdateUserFunc(sdk.UserTypeLegacyService), - ReadContext: GetReadUserFunc(sdk.UserTypeLegacyService, true), - DeleteContext: DeleteUser, + CreateContext: TrackingCreateWrapper(resources.LegacyServiceUser, GetCreateUserFunc(sdk.UserTypeLegacyService)), + UpdateContext: TrackingUpdateWrapper(resources.LegacyServiceUser, GetUpdateUserFunc(sdk.UserTypeLegacyService)), + ReadContext: TrackingReadWrapper(resources.LegacyServiceUser, GetReadUserFunc(sdk.UserTypeLegacyService, true)), + DeleteContext: TrackingDeleteWrapper(resources.LegacyServiceUser, DeleteUser), Description: "Resource used to manage legacy service user objects. 
For more information, check [user documentation](https://docs.snowflake.com/en/sql-reference/commands-user-role#user-management).", Schema: collections.MergeMaps(legacyServiceUserSchema, userParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: GetImportUserFunc(sdk.UserTypeLegacyService), + StateContext: TrackingImportWrapper(resources.LegacyServiceUser, GetImportUserFunc(sdk.UserTypeLegacyService)), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.LegacyServiceUser, customdiff.All( ComputedIfAnyAttributeChanged(userSchema, ShowOutputAttributeName, legacyServiceUserExternalChangesAttributes...), ComputedIfAnyAttributeChanged(userParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllUserParameters), strings.ToLower)...), ComputedIfAnyAttributeChanged(userSchema, FullyQualifiedNameAttributeName, "name"), userParametersCustomDiff, RecreateWhenUserTypeChangedExternally(sdk.UserTypeLegacyService), - ), + )), } } diff --git a/pkg/resources/view.go b/pkg/resources/view.go index 9c782a71bf..aaa77b53d3 100644 --- a/pkg/resources/view.go +++ b/pkg/resources/view.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -278,20 +280,20 @@ func View() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: CreateView(false), - ReadContext: ReadView(true), - UpdateContext: UpdateView, - DeleteContext: DeleteView, + CreateContext: TrackingCreateWrapper(resources.View, CreateView(false)), + ReadContext: TrackingReadWrapper(resources.View, ReadView(true)), + UpdateContext: TrackingUpdateWrapper(resources.View, UpdateView), + DeleteContext: 
TrackingDeleteWrapper(resources.View, DeleteView), Description: "Resource used to manage view objects. For more information, check [view documentation](https://docs.snowflake.com/en/sql-reference/sql/create-view).", - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.View, customdiff.All( ComputedIfAnyAttributeChanged(viewSchema, ShowOutputAttributeName, "comment", "change_tracking", "is_secure", "is_temporary", "is_recursive", "statement"), ComputedIfAnyAttributeChanged(viewSchema, FullyQualifiedNameAttributeName, "name"), - ), + )), Schema: viewSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportView, + StateContext: TrackingImportWrapper(resources.View, ImportView), }, StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/resources/warehouse.go b/pkg/resources/warehouse.go index 15222e5a61..2c6f5bd57a 100644 --- a/pkg/resources/warehouse.go +++ b/pkg/resources/warehouse.go @@ -7,6 +7,8 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -192,18 +194,18 @@ func Warehouse() *schema.Resource { return &schema.Resource{ SchemaVersion: 1, - CreateContext: CreateWarehouse, - UpdateContext: UpdateWarehouse, - ReadContext: GetReadWarehouseFunc(true), - DeleteContext: DeleteWarehouse, + CreateContext: TrackingCreateWrapper(resources.Warehouse, CreateWarehouse), + UpdateContext: TrackingUpdateWrapper(resources.Warehouse, UpdateWarehouse), + ReadContext: TrackingReadWrapper(resources.Warehouse, GetReadWarehouseFunc(true)), + DeleteContext: TrackingDeleteWrapper(resources.Warehouse, DeleteWarehouse), Description: "Resource used to manage warehouse objects. 
For more information, check [warehouse documentation](https://docs.snowflake.com/en/sql-reference/commands-warehouse).", Schema: warehouseSchema, Importer: &schema.ResourceImporter{ - StateContext: ImportWarehouse, + StateContext: TrackingImportWrapper(resources.Warehouse, ImportWarehouse), }, - CustomizeDiff: customdiff.All( + CustomizeDiff: TrackingCustomDiffWrapper(resources.Warehouse, customdiff.All( ComputedIfAnyAttributeChanged(warehouseSchema, ShowOutputAttributeName, "name", "warehouse_type", "warehouse_size", "max_cluster_count", "min_cluster_count", "scaling_policy", "auto_suspend", "auto_resume", "resource_monitor", "comment", "enable_query_acceleration", "query_acceleration_max_scale_factor"), ComputedIfAnyAttributeChanged(warehouseSchema, ParametersAttributeName, strings.ToLower(string(sdk.ObjectParameterMaxConcurrencyLevel)), strings.ToLower(string(sdk.ObjectParameterStatementQueuedTimeoutInSeconds)), strings.ToLower(string(sdk.ObjectParameterStatementTimeoutInSeconds))), ComputedIfAnyAttributeChanged(warehouseSchema, FullyQualifiedNameAttributeName, "name"), @@ -216,7 +218,7 @@ func Warehouse() *schema.Resource { parameter[sdk.AccountParameter]{sdk.AccountParameterMaxConcurrencyLevel, valueTypeInt, sdk.ParameterTypeWarehouse}, parameter[sdk.AccountParameter]{sdk.AccountParameterStatementQueuedTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeWarehouse}, parameter[sdk.AccountParameter]{sdk.AccountParameterStatementTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeWarehouse}, - ), + )), ), StateUpgraders: []schema.StateUpgrader{ diff --git a/pkg/sdk/client.go b/pkg/sdk/client.go index 134313439d..240162c2f1 100644 --- a/pkg/sdk/client.go +++ b/pkg/sdk/client.go @@ -134,7 +134,7 @@ func NewClient(cfg *gosnowflake.Config) (*Client, error) { logger := instrumentedsql.LoggerFunc(func(ctx context.Context, s string, kv ...interface{}) { switch s { case "sql-conn-query", "sql-conn-exec": - log.Printf("[DEBUG] %s: %v (%s)\n", s, kv, 
ctx.Value(SnowflakeAccountLocatorContextKey)) + log.Printf("[DEBUG] %s: %v (%s)\n", s, kv, ctx.Value(snowflakeAccountLocatorContextKey)) default: return } @@ -266,9 +266,9 @@ func (c *Client) Close() error { return nil } -type ContextKey string +type accountLocatorContextKey struct{} -const SnowflakeAccountLocatorContextKey ContextKey = "snowflake_account_locator" +var snowflakeAccountLocatorContextKey accountLocatorContextKey // Exec executes a query that does not return rows. func (c *Client) exec(ctx context.Context, sql string) (sql.Result, error) { @@ -277,7 +277,7 @@ func (c *Client) exec(ctx context.Context, sql string) (sql.Result, error) { log.Printf("[DEBUG] sql-conn-exec-dry: %v\n", sql) return nil, nil } - ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) sql = appendQueryMetadata(ctx, sql) result, err := c.db.ExecContext(ctx, sql) return result, decodeDriverError(err) @@ -290,7 +290,7 @@ func (c *Client) query(ctx context.Context, dest interface{}, sql string) error log.Printf("[DEBUG] sql-conn-query-dry: %v\n", sql) return nil } - ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) sql = appendQueryMetadata(ctx, sql) return decodeDriverError(c.db.SelectContext(ctx, dest, sql)) } @@ -302,7 +302,7 @@ func (c *Client) queryOne(ctx context.Context, dest interface{}, sql string) err log.Printf("[DEBUG] sql-conn-query-one-dry: %v\n", sql) return nil } - ctx = context.WithValue(ctx, SnowflakeAccountLocatorContextKey, c.accountLocator) + ctx = context.WithValue(ctx, snowflakeAccountLocatorContextKey, c.accountLocator) sql = appendQueryMetadata(ctx, sql) return decodeDriverError(c.db.GetContext(ctx, dest, sql)) } diff --git a/pkg/sdk/testint/basic_object_tracking_integration_test.go 
b/pkg/sdk/testint/basic_object_tracking_integration_test.go index 673eb31df3..2209fdd1dc 100644 --- a/pkg/sdk/testint/basic_object_tracking_integration_test.go +++ b/pkg/sdk/testint/basic_object_tracking_integration_test.go @@ -37,8 +37,8 @@ func TestInt_ContextQueryTags(t *testing.T) { }) }) queryId := executeQueryAndReturnQueryId(t, context.Background(), client) - queryTagResult := testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) - require.Equal(t, userQueryTag, queryTagResult) + queryTagResult := testClientHelper().InformationSchema.GetQueryHistoryByQueryId(t, 20, queryId) + require.Equal(t, userQueryTag, queryTagResult.QueryTag) // set query_tag on session level sessionQueryTag := "session query tag" @@ -59,15 +59,15 @@ func TestInt_ContextQueryTags(t *testing.T) { })) }) queryId = executeQueryAndReturnQueryId(t, context.Background(), client) - queryTagResult = testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) - require.Equal(t, sessionQueryTag, queryTagResult) + queryTagResult = testClientHelper().InformationSchema.GetQueryHistoryByQueryId(t, 20, queryId) + require.Equal(t, sessionQueryTag, queryTagResult.QueryTag) // set query_tag on query level perQueryQueryTag := "per-query query tag" ctxWithQueryTag := gosnowflake.WithQueryTag(context.Background(), perQueryQueryTag) queryId = executeQueryAndReturnQueryId(t, ctxWithQueryTag, client) - queryTagResult = testClientHelper().InformationSchema.GetQueryTagByQueryId(t, queryId) - require.Equal(t, perQueryQueryTag, queryTagResult) + queryTagResult = testClientHelper().InformationSchema.GetQueryHistoryByQueryId(t, 20, queryId) + require.Equal(t, perQueryQueryTag, queryTagResult.QueryTag) } func executeQueryAndReturnQueryId(t *testing.T, ctx context.Context, client *sdk.Client) string { @@ -92,7 +92,7 @@ func TestInt_QueryComment(t *testing.T) { require.NoError(t, err) queryId := <-queryIdChan - queryText := testClientHelper().InformationSchema.GetQueryTextByQueryId(t, queryId) 
+ queryText := testClientHelper().InformationSchema.GetQueryHistoryByQueryId(t, 20, queryId).QueryText require.Equal(t, metadata, strings.Split(queryText, "--")[1]) } diff --git a/pkg/sdk/testint/client_integration_test.go b/pkg/sdk/testint/client_integration_test.go index 47a38e5449..8cacd6b540 100644 --- a/pkg/sdk/testint/client_integration_test.go +++ b/pkg/sdk/testint/client_integration_test.go @@ -16,7 +16,7 @@ func TestInt_Client_AdditionalMetadata(t *testing.T) { assertQueryMetadata := func(t *testing.T, queryId string) { t.Helper() - queryText := testClientHelper().InformationSchema.GetQueryTextByQueryId(t, queryId) + queryText := testClientHelper().InformationSchema.GetQueryHistoryByQueryId(t, 20, queryId).QueryText parsedMetadata, err := tracking.ParseMetadata(queryText) require.NoError(t, err) require.Equal(t, metadata, parsedMetadata) From a3a44ae5a6eca2a9623369499d8cac4516a87004 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Tue, 26 Nov 2024 11:03:47 +0100 Subject: [PATCH 04/10] chore: Storage integration with custom protocol (#3213) Support all s3 protocols (ref: https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3212) --- docs/resources/storage_integration.md | 2 +- .../helpers/storage_integration_client.go | 8 +- pkg/resources/storage_integration.go | 52 ++++++------ pkg/sdk/storage_integration_def.go | 33 +++++++- .../storage_integration_dto_builders_gen.go | 82 ++++++++++--------- pkg/sdk/storage_integration_dto_gen.go | 3 +- pkg/sdk/storage_integration_gen.go | 6 +- pkg/sdk/storage_integration_gen_test.go | 56 ++++++++++++- pkg/sdk/storage_integration_impl_gen.go | 3 +- ...torage_integration_gen_integration_test.go | 60 ++++++++------ 10 files changed, 203 insertions(+), 102 deletions(-) diff --git a/docs/resources/storage_integration.md b/docs/resources/storage_integration.md index 2905735bf2..23e69ec9b6 100644 --- a/docs/resources/storage_integration.md +++ b/docs/resources/storage_integration.md @@ -42,7 
+42,7 @@ resource "snowflake_storage_integration" "integration" { - `name` (String) - `storage_allowed_locations` (List of String) Explicitly limits external stages that use the integration to reference one or more storage locations. -- `storage_provider` (String) +- `storage_provider` (String) Specifies the storage provider for the integration. Valid options are: `S3` | `S3GOV` | `S3CHINA` | `GCS` | `AZURE` ### Optional diff --git a/pkg/acceptance/helpers/storage_integration_client.go b/pkg/acceptance/helpers/storage_integration_client.go index 32041df07d..71203ce559 100644 --- a/pkg/acceptance/helpers/storage_integration_client.go +++ b/pkg/acceptance/helpers/storage_integration_client.go @@ -54,10 +54,10 @@ func (c *StorageIntegrationClient) CreateS3(t *testing.T, awsBucketUrl, awsRoleA id := c.ids.RandomAccountObjectIdentifier() req := sdk.NewCreateStorageIntegrationRequest(id, true, s3AllowedLocations). - WithIfNotExists(sdk.Bool(true)). - WithS3StorageProviderParams(sdk.NewS3StorageParamsRequest(awsRoleArn)). + WithIfNotExists(true). + WithS3StorageProviderParams(*sdk.NewS3StorageParamsRequest(sdk.RegularS3Protocol, awsRoleArn)). WithStorageBlockedLocations(s3BlockedLocations). 
- WithComment(sdk.String("some comment")) + WithComment("some comment") err := c.client().Create(ctx, req) require.NoError(t, err) @@ -73,7 +73,7 @@ func (c *StorageIntegrationClient) DropFunc(t *testing.T, id sdk.AccountObjectId ctx := context.Background() return func() { - err := c.client().Drop(ctx, sdk.NewDropStorageIntegrationRequest(id).WithIfExists(sdk.Bool(true))) + err := c.client().Drop(ctx, sdk.NewDropStorageIntegrationRequest(id).WithIfExists(true)) require.NoError(t, err) } } diff --git a/pkg/resources/storage_integration.go b/pkg/resources/storage_integration.go index c8a1be8cee..85a40729d3 100644 --- a/pkg/resources/storage_integration.go +++ b/pkg/resources/storage_integration.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "log" + "slices" "strings" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -52,12 +53,12 @@ var storageIntegrationSchema = map[string]*schema.Schema{ Optional: true, Description: "Explicitly prohibits external stages that use the integration from referencing one or more storage locations.", }, - // TODO (SNOW-1015282): Remove S3gov option before going into V1 "storage_provider": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"S3", "S3gov", "GCS", "AZURE", "S3GOV"}, false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: StringInSlice(sdk.AllStorageProviders, true), + Description: fmt.Sprintf("Specifies the storage provider for the integration. 
Valid options are: %s", possibleValuesListed(sdk.AllStorageProviders)), }, "storage_aws_external_id": { Type: schema.TypeString, @@ -140,7 +141,7 @@ func CreateStorageIntegration(d *schema.ResourceData, meta any) error { req := sdk.NewCreateStorageIntegrationRequest(name, enabled, storageAllowedLocations) if v, ok := d.GetOk("comment"); ok { - req.WithComment(sdk.String(v.(string))) + req.WithComment(v.(string)) } if _, ok := d.GetOk("storage_blocked_locations"); ok { @@ -154,28 +155,33 @@ func CreateStorageIntegration(d *schema.ResourceData, meta any) error { req.WithStorageBlockedLocations(storageBlockedLocations) } - storageProvider := d.Get("storage_provider").(string) + storageProvider := strings.ToUpper(d.Get("storage_provider").(string)) + + switch { + case slices.Contains(sdk.AllS3Protocols, sdk.S3Protocol(storageProvider)): + s3Protocol, err := sdk.ToS3Protocol(storageProvider) + if err != nil { + return err + } - switch storageProvider { - case "S3", "S3GOV", "S3gov": v, ok := d.GetOk("storage_aws_role_arn") if !ok { return fmt.Errorf("if you use the S3 storage provider you must specify a storage_aws_role_arn") } - s3Params := sdk.NewS3StorageParamsRequest(v.(string)) + s3Params := sdk.NewS3StorageParamsRequest(s3Protocol, v.(string)) if _, ok := d.GetOk("storage_aws_object_acl"); ok { - s3Params.WithStorageAwsObjectAcl(sdk.String(d.Get("storage_aws_object_acl").(string))) + s3Params.WithStorageAwsObjectAcl(d.Get("storage_aws_object_acl").(string)) } - req.WithS3StorageProviderParams(s3Params) - case "AZURE": + req.WithS3StorageProviderParams(*s3Params) + case storageProvider == "AZURE": v, ok := d.GetOk("azure_tenant_id") if !ok { return fmt.Errorf("if you use the Azure storage provider you must specify an azure_tenant_id") } - req.WithAzureStorageProviderParams(sdk.NewAzureStorageParamsRequest(sdk.String(v.(string)))) - case "GCS": - req.WithGCSStorageProviderParams(sdk.NewGCSStorageParamsRequest()) + 
req.WithAzureStorageProviderParams(*sdk.NewAzureStorageParamsRequest(sdk.String(v.(string)))) + case storageProvider == "GCS": + req.WithGCSStorageProviderParams(*sdk.NewGCSStorageParamsRequest()) default: return fmt.Errorf("unexpected provider %v", storageProvider) } @@ -295,7 +301,7 @@ func UpdateStorageIntegration(d *schema.ResourceData, meta any) error { if d.HasChange("comment") { runSetStatement = true - setReq.WithComment(sdk.String(d.Get("comment").(string))) + setReq.WithComment(d.Get("comment").(string)) } if d.HasChange("enabled") { @@ -320,7 +326,7 @@ func UpdateStorageIntegration(d *schema.ResourceData, meta any) error { v := d.Get("storage_blocked_locations").([]interface{}) if len(v) == 0 { if err := client.StorageIntegrations.Alter(ctx, sdk.NewAlterStorageIntegrationRequest(id). - WithUnset(sdk.NewStorageIntegrationUnsetRequest().WithStorageBlockedLocations(sdk.Bool(true)))); err != nil { + WithUnset(*sdk.NewStorageIntegrationUnsetRequest().WithStorageBlockedLocations(true))); err != nil { return fmt.Errorf("error unsetting storage_blocked_locations, err = %w", err) } } else { @@ -342,25 +348,25 @@ func UpdateStorageIntegration(d *schema.ResourceData, meta any) error { if d.HasChange("storage_aws_object_acl") { if v, ok := d.GetOk("storage_aws_object_acl"); ok { - s3SetParams.WithStorageAwsObjectAcl(sdk.String(v.(string))) + s3SetParams.WithStorageAwsObjectAcl(v.(string)) } else { if err := client.StorageIntegrations.Alter(ctx, sdk.NewAlterStorageIntegrationRequest(id). 
- WithUnset(sdk.NewStorageIntegrationUnsetRequest().WithStorageAwsObjectAcl(sdk.Bool(true)))); err != nil { + WithUnset(*sdk.NewStorageIntegrationUnsetRequest().WithStorageAwsObjectAcl(true))); err != nil { return fmt.Errorf("error unsetting storage_aws_object_acl, err = %w", err) } } } - setReq.WithS3Params(s3SetParams) + setReq.WithS3Params(*s3SetParams) } if d.HasChange("azure_tenant_id") { runSetStatement = true - setReq.WithAzureParams(sdk.NewSetAzureStorageParamsRequest(d.Get("azure_tenant_id").(string))) + setReq.WithAzureParams(*sdk.NewSetAzureStorageParamsRequest(d.Get("azure_tenant_id").(string))) } if runSetStatement { - if err := client.StorageIntegrations.Alter(ctx, sdk.NewAlterStorageIntegrationRequest(id).WithSet(setReq)); err != nil { + if err := client.StorageIntegrations.Alter(ctx, sdk.NewAlterStorageIntegrationRequest(id).WithSet(*setReq)); err != nil { return fmt.Errorf("error updating storage integration, err = %w", err) } } diff --git a/pkg/sdk/storage_integration_def.go b/pkg/sdk/storage_integration_def.go index bd41ad54b1..04e24c37da 100644 --- a/pkg/sdk/storage_integration_def.go +++ b/pkg/sdk/storage_integration_def.go @@ -1,9 +1,36 @@ package sdk -import g "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk/poc/generator" +import ( + "fmt" + "strings" + + g "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk/poc/generator" +) //go:generate go run ./poc/main.go +type S3Protocol string + +const ( + RegularS3Protocol S3Protocol = "S3" + GovS3Protocol S3Protocol = "S3GOV" + ChinaS3Protocol S3Protocol = "S3CHINA" +) + +var ( + AllS3Protocols = []S3Protocol{RegularS3Protocol, GovS3Protocol, ChinaS3Protocol} + AllStorageProviders = append(AsStringList(AllS3Protocols), "GCS", "AZURE") +) + +func ToS3Protocol(s string) (S3Protocol, error) { + switch protocol := S3Protocol(strings.ToUpper(s)); protocol { + case RegularS3Protocol, GovS3Protocol, ChinaS3Protocol: + return protocol, nil + default: + return "", 
fmt.Errorf("invalid S3 protocol: %s", s) + } +} + var StorageLocationDef = g.NewQueryStruct("StorageLocation").Text("Path", g.KeywordOptions().SingleQuotes().Required()) var StorageIntegrationDef = g.NewInterface( @@ -23,7 +50,7 @@ var StorageIntegrationDef = g.NewInterface( OptionalQueryStructField( "S3StorageProviderParams", g.NewQueryStruct("S3StorageParams"). - PredefinedQueryStructField("storageProvider", "string", g.StaticOptions().SQL("STORAGE_PROVIDER = 'S3'")). + PredefinedQueryStructField("Protocol", g.KindOfT[S3Protocol](), g.ParameterOptions().SQL("STORAGE_PROVIDER").SingleQuotes().Required()). TextAssignment("STORAGE_AWS_ROLE_ARN", g.ParameterOptions().SingleQuotes().Required()). OptionalTextAssignment("STORAGE_AWS_OBJECT_ACL", g.ParameterOptions().SingleQuotes()), g.KeywordOptions(), @@ -73,7 +100,7 @@ var StorageIntegrationDef = g.NewInterface( TextAssignment("AZURE_TENANT_ID", g.ParameterOptions().SingleQuotes().Required()), g.KeywordOptions(), ). - BooleanAssignment("ENABLED", g.ParameterOptions()). + OptionalBooleanAssignment("ENABLED", g.ParameterOptions()). ListAssignment("STORAGE_ALLOWED_LOCATIONS", "StorageLocation", g.ParameterOptions().Parentheses()). ListAssignment("STORAGE_BLOCKED_LOCATIONS", "StorageLocation", g.ParameterOptions().Parentheses()). 
OptionalComment(), diff --git a/pkg/sdk/storage_integration_dto_builders_gen.go b/pkg/sdk/storage_integration_dto_builders_gen.go index e9fedd8c01..f462b01cb1 100644 --- a/pkg/sdk/storage_integration_dto_builders_gen.go +++ b/pkg/sdk/storage_integration_dto_builders_gen.go @@ -16,28 +16,28 @@ func NewCreateStorageIntegrationRequest( return &s } -func (s *CreateStorageIntegrationRequest) WithOrReplace(OrReplace *bool) *CreateStorageIntegrationRequest { - s.OrReplace = OrReplace +func (s *CreateStorageIntegrationRequest) WithOrReplace(OrReplace bool) *CreateStorageIntegrationRequest { + s.OrReplace = &OrReplace return s } -func (s *CreateStorageIntegrationRequest) WithIfNotExists(IfNotExists *bool) *CreateStorageIntegrationRequest { - s.IfNotExists = IfNotExists +func (s *CreateStorageIntegrationRequest) WithIfNotExists(IfNotExists bool) *CreateStorageIntegrationRequest { + s.IfNotExists = &IfNotExists return s } -func (s *CreateStorageIntegrationRequest) WithS3StorageProviderParams(S3StorageProviderParams *S3StorageParamsRequest) *CreateStorageIntegrationRequest { - s.S3StorageProviderParams = S3StorageProviderParams +func (s *CreateStorageIntegrationRequest) WithS3StorageProviderParams(S3StorageProviderParams S3StorageParamsRequest) *CreateStorageIntegrationRequest { + s.S3StorageProviderParams = &S3StorageProviderParams return s } -func (s *CreateStorageIntegrationRequest) WithGCSStorageProviderParams(GCSStorageProviderParams *GCSStorageParamsRequest) *CreateStorageIntegrationRequest { - s.GCSStorageProviderParams = GCSStorageProviderParams +func (s *CreateStorageIntegrationRequest) WithGCSStorageProviderParams(GCSStorageProviderParams GCSStorageParamsRequest) *CreateStorageIntegrationRequest { + s.GCSStorageProviderParams = &GCSStorageProviderParams return s } -func (s *CreateStorageIntegrationRequest) WithAzureStorageProviderParams(AzureStorageProviderParams *AzureStorageParamsRequest) *CreateStorageIntegrationRequest { - s.AzureStorageProviderParams = 
AzureStorageProviderParams +func (s *CreateStorageIntegrationRequest) WithAzureStorageProviderParams(AzureStorageProviderParams AzureStorageParamsRequest) *CreateStorageIntegrationRequest { + s.AzureStorageProviderParams = &AzureStorageProviderParams return s } @@ -46,21 +46,23 @@ func (s *CreateStorageIntegrationRequest) WithStorageBlockedLocations(StorageBlo return s } -func (s *CreateStorageIntegrationRequest) WithComment(Comment *string) *CreateStorageIntegrationRequest { - s.Comment = Comment +func (s *CreateStorageIntegrationRequest) WithComment(Comment string) *CreateStorageIntegrationRequest { + s.Comment = &Comment return s } func NewS3StorageParamsRequest( + Protocol S3Protocol, StorageAwsRoleArn string, ) *S3StorageParamsRequest { s := S3StorageParamsRequest{} + s.Protocol = Protocol s.StorageAwsRoleArn = StorageAwsRoleArn return &s } -func (s *S3StorageParamsRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl *string) *S3StorageParamsRequest { - s.StorageAwsObjectAcl = StorageAwsObjectAcl +func (s *S3StorageParamsRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl string) *S3StorageParamsRequest { + s.StorageAwsObjectAcl = &StorageAwsObjectAcl return s } @@ -84,18 +86,18 @@ func NewAlterStorageIntegrationRequest( return &s } -func (s *AlterStorageIntegrationRequest) WithIfExists(IfExists *bool) *AlterStorageIntegrationRequest { - s.IfExists = IfExists +func (s *AlterStorageIntegrationRequest) WithIfExists(IfExists bool) *AlterStorageIntegrationRequest { + s.IfExists = &IfExists return s } -func (s *AlterStorageIntegrationRequest) WithSet(Set *StorageIntegrationSetRequest) *AlterStorageIntegrationRequest { - s.Set = Set +func (s *AlterStorageIntegrationRequest) WithSet(Set StorageIntegrationSetRequest) *AlterStorageIntegrationRequest { + s.Set = &Set return s } -func (s *AlterStorageIntegrationRequest) WithUnset(Unset *StorageIntegrationUnsetRequest) *AlterStorageIntegrationRequest { - s.Unset = Unset +func (s *AlterStorageIntegrationRequest) 
WithUnset(Unset StorageIntegrationUnsetRequest) *AlterStorageIntegrationRequest { + s.Unset = &Unset return s } @@ -113,13 +115,13 @@ func NewStorageIntegrationSetRequest() *StorageIntegrationSetRequest { return &StorageIntegrationSetRequest{} } -func (s *StorageIntegrationSetRequest) WithS3Params(S3Params *SetS3StorageParamsRequest) *StorageIntegrationSetRequest { - s.S3Params = S3Params +func (s *StorageIntegrationSetRequest) WithS3Params(S3Params SetS3StorageParamsRequest) *StorageIntegrationSetRequest { + s.S3Params = &S3Params return s } -func (s *StorageIntegrationSetRequest) WithAzureParams(AzureParams *SetAzureStorageParamsRequest) *StorageIntegrationSetRequest { - s.AzureParams = AzureParams +func (s *StorageIntegrationSetRequest) WithAzureParams(AzureParams SetAzureStorageParamsRequest) *StorageIntegrationSetRequest { + s.AzureParams = &AzureParams return s } @@ -138,8 +140,8 @@ func (s *StorageIntegrationSetRequest) WithStorageBlockedLocations(StorageBlocke return s } -func (s *StorageIntegrationSetRequest) WithComment(Comment *string) *StorageIntegrationSetRequest { - s.Comment = Comment +func (s *StorageIntegrationSetRequest) WithComment(Comment string) *StorageIntegrationSetRequest { + s.Comment = &Comment return s } @@ -151,8 +153,8 @@ func NewSetS3StorageParamsRequest( return &s } -func (s *SetS3StorageParamsRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl *string) *SetS3StorageParamsRequest { - s.StorageAwsObjectAcl = StorageAwsObjectAcl +func (s *SetS3StorageParamsRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl string) *SetS3StorageParamsRequest { + s.StorageAwsObjectAcl = &StorageAwsObjectAcl return s } @@ -168,23 +170,23 @@ func NewStorageIntegrationUnsetRequest() *StorageIntegrationUnsetRequest { return &StorageIntegrationUnsetRequest{} } -func (s *StorageIntegrationUnsetRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl *bool) *StorageIntegrationUnsetRequest { - s.StorageAwsObjectAcl = StorageAwsObjectAcl +func (s 
*StorageIntegrationUnsetRequest) WithStorageAwsObjectAcl(StorageAwsObjectAcl bool) *StorageIntegrationUnsetRequest { + s.StorageAwsObjectAcl = &StorageAwsObjectAcl return s } -func (s *StorageIntegrationUnsetRequest) WithEnabled(Enabled *bool) *StorageIntegrationUnsetRequest { - s.Enabled = Enabled +func (s *StorageIntegrationUnsetRequest) WithEnabled(Enabled bool) *StorageIntegrationUnsetRequest { + s.Enabled = &Enabled return s } -func (s *StorageIntegrationUnsetRequest) WithStorageBlockedLocations(StorageBlockedLocations *bool) *StorageIntegrationUnsetRequest { - s.StorageBlockedLocations = StorageBlockedLocations +func (s *StorageIntegrationUnsetRequest) WithStorageBlockedLocations(StorageBlockedLocations bool) *StorageIntegrationUnsetRequest { + s.StorageBlockedLocations = &StorageBlockedLocations return s } -func (s *StorageIntegrationUnsetRequest) WithComment(Comment *bool) *StorageIntegrationUnsetRequest { - s.Comment = Comment +func (s *StorageIntegrationUnsetRequest) WithComment(Comment bool) *StorageIntegrationUnsetRequest { + s.Comment = &Comment return s } @@ -196,8 +198,8 @@ func NewDropStorageIntegrationRequest( return &s } -func (s *DropStorageIntegrationRequest) WithIfExists(IfExists *bool) *DropStorageIntegrationRequest { - s.IfExists = IfExists +func (s *DropStorageIntegrationRequest) WithIfExists(IfExists bool) *DropStorageIntegrationRequest { + s.IfExists = &IfExists return s } @@ -205,8 +207,8 @@ func NewShowStorageIntegrationRequest() *ShowStorageIntegrationRequest { return &ShowStorageIntegrationRequest{} } -func (s *ShowStorageIntegrationRequest) WithLike(Like *Like) *ShowStorageIntegrationRequest { - s.Like = Like +func (s *ShowStorageIntegrationRequest) WithLike(Like Like) *ShowStorageIntegrationRequest { + s.Like = &Like return s } diff --git a/pkg/sdk/storage_integration_dto_gen.go b/pkg/sdk/storage_integration_dto_gen.go index 7612817825..27e0856970 100644 --- a/pkg/sdk/storage_integration_dto_gen.go +++ 
b/pkg/sdk/storage_integration_dto_gen.go @@ -24,7 +24,8 @@ type CreateStorageIntegrationRequest struct { } type S3StorageParamsRequest struct { - StorageAwsRoleArn string // required + Protocol S3Protocol // required + StorageAwsRoleArn string // required StorageAwsObjectAcl *string } diff --git a/pkg/sdk/storage_integration_gen.go b/pkg/sdk/storage_integration_gen.go index 82b02c4547..0aaa8eaf85 100644 --- a/pkg/sdk/storage_integration_gen.go +++ b/pkg/sdk/storage_integration_gen.go @@ -37,9 +37,9 @@ type StorageLocation struct { } type S3StorageParams struct { - storageProvider string `ddl:"static" sql:"STORAGE_PROVIDER = 'S3'"` - StorageAwsRoleArn string `ddl:"parameter,single_quotes" sql:"STORAGE_AWS_ROLE_ARN"` - StorageAwsObjectAcl *string `ddl:"parameter,single_quotes" sql:"STORAGE_AWS_OBJECT_ACL"` + Protocol S3Protocol `ddl:"parameter,single_quotes" sql:"STORAGE_PROVIDER"` + StorageAwsRoleArn string `ddl:"parameter,single_quotes" sql:"STORAGE_AWS_ROLE_ARN"` + StorageAwsObjectAcl *string `ddl:"parameter,single_quotes" sql:"STORAGE_AWS_OBJECT_ACL"` } type GCSStorageParams struct { diff --git a/pkg/sdk/storage_integration_gen_test.go b/pkg/sdk/storage_integration_gen_test.go index 2863dc49e0..61c74ddac2 100644 --- a/pkg/sdk/storage_integration_gen_test.go +++ b/pkg/sdk/storage_integration_gen_test.go @@ -1,6 +1,11 @@ package sdk -import "testing" +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) func TestStorageIntegrations_Create(t *testing.T) { id := randomAccountObjectIdentifier() @@ -10,6 +15,7 @@ func TestStorageIntegrations_Create(t *testing.T) { return &CreateStorageIntegrationOptions{ name: id, S3StorageProviderParams: &S3StorageParams{ + Protocol: RegularS3Protocol, StorageAwsRoleArn: "arn:aws:iam::001234567890:role/role", }, Enabled: true, @@ -52,10 +58,23 @@ func TestStorageIntegrations_Create(t *testing.T) { assertOptsValidAndSQLEquals(t, opts, `CREATE STORAGE INTEGRATION %s TYPE = EXTERNAL_STAGE STORAGE_PROVIDER = 'S3' 
STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::001234567890:role/role' ENABLED = true STORAGE_ALLOWED_LOCATIONS = ('allowed-loc-1', 'allowed-loc-2')`, id.FullyQualifiedName()) }) + t.Run("basic - s3 gov protocol", func(t *testing.T) { + opts := defaultOpts() + opts.S3StorageProviderParams.Protocol = GovS3Protocol + assertOptsValidAndSQLEquals(t, opts, `CREATE STORAGE INTEGRATION %s TYPE = EXTERNAL_STAGE STORAGE_PROVIDER = 'S3GOV' STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::001234567890:role/role' ENABLED = true STORAGE_ALLOWED_LOCATIONS = ('allowed-loc-1', 'allowed-loc-2')`, id.FullyQualifiedName()) + }) + + t.Run("basic - s3 china protocol", func(t *testing.T) { + opts := defaultOpts() + opts.S3StorageProviderParams.Protocol = ChinaS3Protocol + assertOptsValidAndSQLEquals(t, opts, `CREATE STORAGE INTEGRATION %s TYPE = EXTERNAL_STAGE STORAGE_PROVIDER = 'S3CHINA' STORAGE_AWS_ROLE_ARN = 'arn:aws:iam::001234567890:role/role' ENABLED = true STORAGE_ALLOWED_LOCATIONS = ('allowed-loc-1', 'allowed-loc-2')`, id.FullyQualifiedName()) + }) + t.Run("all options - s3", func(t *testing.T) { opts := defaultOpts() opts.IfNotExists = Bool(true) opts.S3StorageProviderParams = &S3StorageParams{ + Protocol: RegularS3Protocol, StorageAwsRoleArn: "arn:aws:iam::001234567890:role/role", StorageAwsObjectAcl: String("bucket-owner-full-control"), } @@ -283,3 +302,38 @@ func TestStorageIntegrations_Describe(t *testing.T) { assertOptsValidAndSQLEquals(t, opts, "DESCRIBE STORAGE INTEGRATION %s", id.FullyQualifiedName()) }) } + +func TestToS3Protocol(t *testing.T) { + testCases := []struct { + Name string + Input string + Expected S3Protocol + Error string + }{ + {Input: "S3", Expected: RegularS3Protocol}, + {Input: "s3", Expected: RegularS3Protocol}, + {Input: "S3gov", Expected: GovS3Protocol}, + {Input: "S3GOV", Expected: GovS3Protocol}, + {Input: "S3ChInA", Expected: ChinaS3Protocol}, + {Input: "S3CHINA", Expected: ChinaS3Protocol}, + {Name: "validation: incorrect s3 protocol", Input: "incorrect", Error: 
"invalid S3 protocol: incorrect"}, + {Name: "validation: empty input", Input: "", Error: "invalid S3 protocol: "}, + } + + for _, testCase := range testCases { + name := testCase.Name + if name == "" { + name = fmt.Sprintf("%v s3 protocol", testCase.Input) + } + t.Run(name, func(t *testing.T) { + value, err := ToS3Protocol(testCase.Input) + if testCase.Error != "" { + assert.Empty(t, value) + assert.ErrorContains(t, err, testCase.Error) + } else { + assert.NoError(t, err) + assert.Equal(t, testCase.Expected, value) + } + }) + } +} diff --git a/pkg/sdk/storage_integration_impl_gen.go b/pkg/sdk/storage_integration_impl_gen.go index 1361bfdfad..9ac54949d8 100644 --- a/pkg/sdk/storage_integration_impl_gen.go +++ b/pkg/sdk/storage_integration_impl_gen.go @@ -38,7 +38,7 @@ func (v *storageIntegrations) Show(ctx context.Context, request *ShowStorageInte } func (v *storageIntegrations) ShowByID(ctx context.Context, id AccountObjectIdentifier) (*StorageIntegration, error) { - storageIntegrations, err := v.Show(ctx, NewShowStorageIntegrationRequest().WithLike(&Like{ + storageIntegrations, err := v.Show(ctx, NewShowStorageIntegrationRequest().WithLike(Like{ Pattern: String(id.Name()), })) if err != nil { @@ -71,6 +71,7 @@ func (r *CreateStorageIntegrationRequest) toOpts() *CreateStorageIntegrationOpti } if r.S3StorageProviderParams != nil { opts.S3StorageProviderParams = &S3StorageParams{ + Protocol: r.S3StorageProviderParams.Protocol, StorageAwsRoleArn: r.S3StorageProviderParams.StorageAwsRoleArn, StorageAwsObjectAcl: r.S3StorageProviderParams.StorageAwsObjectAcl, } diff --git a/pkg/sdk/testint/storage_integration_gen_integration_test.go b/pkg/sdk/testint/storage_integration_gen_integration_test.go index 5fb1839705..1ff3aab9d9 100644 --- a/pkg/sdk/testint/storage_integration_gen_integration_test.go +++ b/pkg/sdk/testint/storage_integration_gen_integration_test.go @@ -151,15 +151,15 @@ func TestInt_StorageIntegrations(t *testing.T) { gcsBlockedLocations := 
blockedLocations(gcsBucketUrl) azureBlockedLocations := blockedLocations(azureBucketUrl) - createS3StorageIntegration := func(t *testing.T) sdk.AccountObjectIdentifier { + createS3StorageIntegration := func(t *testing.T, protocol sdk.S3Protocol) sdk.AccountObjectIdentifier { t.Helper() id := testClientHelper().Ids.RandomAccountObjectIdentifier() req := sdk.NewCreateStorageIntegrationRequest(id, true, s3AllowedLocations). - WithIfNotExists(sdk.Bool(true)). - WithS3StorageProviderParams(sdk.NewS3StorageParamsRequest(awsRoleARN)). + WithIfNotExists(true). + WithS3StorageProviderParams(*sdk.NewS3StorageParamsRequest(protocol, awsRoleARN)). WithStorageBlockedLocations(s3BlockedLocations). - WithComment(sdk.String("some comment")) + WithComment("some comment") err := client.StorageIntegrations.Create(ctx, req) require.NoError(t, err) @@ -177,10 +177,10 @@ func TestInt_StorageIntegrations(t *testing.T) { id := testClientHelper().Ids.RandomAccountObjectIdentifier() req := sdk.NewCreateStorageIntegrationRequest(id, true, gcsAllowedLocations). - WithIfNotExists(sdk.Bool(true)). - WithGCSStorageProviderParams(sdk.NewGCSStorageParamsRequest()). + WithIfNotExists(true). + WithGCSStorageProviderParams(*sdk.NewGCSStorageParamsRequest()). WithStorageBlockedLocations(gcsBlockedLocations). - WithComment(sdk.String("some comment")) + WithComment("some comment") err := client.StorageIntegrations.Create(ctx, req) require.NoError(t, err) @@ -198,10 +198,10 @@ func TestInt_StorageIntegrations(t *testing.T) { id := testClientHelper().Ids.RandomAccountObjectIdentifier() req := sdk.NewCreateStorageIntegrationRequest(id, true, azureAllowedLocations). - WithIfNotExists(sdk.Bool(true)). - WithAzureStorageProviderParams(sdk.NewAzureStorageParamsRequest(sdk.String(azureTenantId))). + WithIfNotExists(true). + WithAzureStorageProviderParams(*sdk.NewAzureStorageParamsRequest(sdk.String(azureTenantId))). WithStorageBlockedLocations(azureBlockedLocations). 
- WithComment(sdk.String("some comment")) + WithComment("some comment") err := client.StorageIntegrations.Create(ctx, req) require.NoError(t, err) @@ -215,7 +215,17 @@ func TestInt_StorageIntegrations(t *testing.T) { } t.Run("Create - S3", func(t *testing.T) { - id := createS3StorageIntegration(t) + id := createS3StorageIntegration(t, sdk.RegularS3Protocol) + + storageIntegration, err := client.StorageIntegrations.ShowByID(ctx, id) + require.NoError(t, err) + + assertStorageIntegrationShowResult(t, storageIntegration, id, "some comment") + }) + + t.Run("Create - S3GOV", func(t *testing.T) { + t.Skip("TODO(SNOW-1820099): Setup GOV accounts to be able to run this test on CI") + id := createS3StorageIntegration(t, sdk.GovS3Protocol) storageIntegration, err := client.StorageIntegrations.ShowByID(ctx, id) require.NoError(t, err) @@ -242,18 +252,18 @@ func TestInt_StorageIntegrations(t *testing.T) { }) t.Run("Alter - set - S3", func(t *testing.T) { - id := createS3StorageIntegration(t) + id := createS3StorageIntegration(t, sdk.RegularS3Protocol) changedS3AllowedLocations := append([]sdk.StorageLocation{{Path: awsBucketUrl + "/allowed-location3"}}, s3AllowedLocations...) changedS3BlockedLocations := append([]sdk.StorageLocation{{Path: awsBucketUrl + "/blocked-location3"}}, s3BlockedLocations...) req := sdk.NewAlterStorageIntegrationRequest(id). WithSet( - sdk.NewStorageIntegrationSetRequest(). - WithS3Params(sdk.NewSetS3StorageParamsRequest(awsRoleARN)). + *sdk.NewStorageIntegrationSetRequest(). + WithS3Params(*sdk.NewSetS3StorageParamsRequest(awsRoleARN)). WithEnabled(true). WithStorageAllowedLocations(changedS3AllowedLocations). WithStorageBlockedLocations(changedS3BlockedLocations). 
- WithComment(sdk.String("changed comment")), + WithComment("changed comment"), ) err := client.StorageIntegrations.Alter(ctx, req) require.NoError(t, err) @@ -271,12 +281,12 @@ func TestInt_StorageIntegrations(t *testing.T) { changedAzureBlockedLocations := append([]sdk.StorageLocation{{Path: azureBucketUrl + "/blocked-location3"}}, azureBlockedLocations...) req := sdk.NewAlterStorageIntegrationRequest(id). WithSet( - sdk.NewStorageIntegrationSetRequest(). - WithAzureParams(sdk.NewSetAzureStorageParamsRequest(azureTenantId)). + *sdk.NewStorageIntegrationSetRequest(). + WithAzureParams(*sdk.NewSetAzureStorageParamsRequest(azureTenantId)). WithEnabled(true). WithStorageAllowedLocations(changedAzureAllowedLocations). WithStorageBlockedLocations(changedAzureBlockedLocations). - WithComment(sdk.String("changed comment")), + WithComment("changed comment"), ) err := client.StorageIntegrations.Alter(ctx, req) require.NoError(t, err) @@ -288,15 +298,15 @@ func TestInt_StorageIntegrations(t *testing.T) { }) t.Run("Alter - unset", func(t *testing.T) { - id := createS3StorageIntegration(t) + id := createS3StorageIntegration(t, sdk.RegularS3Protocol) req := sdk.NewAlterStorageIntegrationRequest(id). WithUnset( - sdk.NewStorageIntegrationUnsetRequest(). - WithStorageAwsObjectAcl(sdk.Bool(true)). - WithEnabled(sdk.Bool(true)). - WithStorageBlockedLocations(sdk.Bool(true)). - WithComment(sdk.Bool(true)), + *sdk.NewStorageIntegrationUnsetRequest(). + WithStorageAwsObjectAcl(true). + WithEnabled(true). + WithStorageBlockedLocations(true). 
+ WithComment(true), ) err := client.StorageIntegrations.Alter(ctx, req) require.NoError(t, err) @@ -308,7 +318,7 @@ func TestInt_StorageIntegrations(t *testing.T) { }) t.Run("Describe - S3", func(t *testing.T) { - id := createS3StorageIntegration(t) + id := createS3StorageIntegration(t, sdk.RegularS3Protocol) desc, err := client.StorageIntegrations.Describe(ctx, id) require.NoError(t, err) From e2284d98d23586031514934d7bc7c67139f5e272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Tue, 26 Nov 2024 13:24:04 +0100 Subject: [PATCH 05/10] feat: Tasks v1 readiness (#3222) Pr that collects all the changes to tasks from previous prs: - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3202 - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3170 - https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3113 **Note**: look at the last commit, added very small changes to documentation and code (around 8 lines) --- MIGRATION_GUIDE.md | 143 +- docs/data-sources/tasks.md | 963 ++++- docs/resources/resource_monitor.md | 4 +- docs/resources/task.md | 1050 +++++- .../snowflake_tasks/data-source.tf | 124 +- examples/resources/snowflake_task/resource.tf | 150 +- pkg/acceptance/bettertestspoc/README.md | 1 + .../assert/objectassert/task_snowflake_ext.go | 70 +- .../assert/objectassert/task_snowflake_gen.go | 22 - .../task_parameters_snowflake_gen.go | 2 +- .../assert/resource_assertions.go | 10 + .../resourceassert/gen/resource_schema_def.go | 4 + .../resourceassert/task_resource_ext.go | 39 + .../resourceassert/task_resource_gen.go | 767 ++++ .../task_resource_parameters_ext.go | 82 + .../task_show_output_ext.go | 89 + .../task_show_output_gen.go | 141 + .../bettertestspoc/config/config.go | 13 + .../config/model/task_model_ext.go | 87 + .../config/model/task_model_gen.go | 857 +++++ pkg/acceptance/helpers/ids_generator.go | 4 + .../notification_integration_client.go | 25 + pkg/acceptance/helpers/task_client.go | 
8 + pkg/datasources/common.go | 58 + pkg/datasources/tasks.go | 121 +- pkg/datasources/tasks_acceptance_test.go | 272 +- .../collections/collection_helpers.go | 13 + .../collections/collection_helpers_test.go | 49 + .../grant_ownership_acceptance_test.go | 2 +- pkg/resources/resource_helpers_create.go | 11 + pkg/resources/resource_helpers_read.go | 14 + pkg/resources/resource_helpers_update.go | 15 + pkg/resources/resource_monitor.go | 1 + pkg/resources/task.go | 1022 +++--- pkg/resources/task_acceptance_test.go | 3167 +++++++++++++---- pkg/resources/task_parameters.go | 412 +++ pkg/resources/task_state_upgraders.go | 53 + .../TestAcc_GrantOwnership/OnAllTasks/test.tf | 2 + .../TestAcc_GrantOwnership/OnTask/test.tf | 1 + .../OnTask_Discussion2877/1/test.tf | 1 + .../OnTask_Discussion2877/2/test.tf | 4 +- .../OnTask_Discussion2877/3/test.tf | 1 + .../OnTask_Discussion2877/4/test.tf | 4 +- .../testdata/TestAcc_Task/basic/test.tf | 82 + .../testdata/TestAcc_Task/basic/variables.tf | 341 ++ .../TestAcc_Task/with_task_dependency/test.tf | 49 + .../with_task_dependency/variables.tf | 18 + .../testdata/TestAcc_Task_issue2036/1/test.tf | 9 - .../TestAcc_Task_issue2036/1/variables.tf | 15 - .../testdata/TestAcc_Task_issue2036/2/test.tf | 10 - .../TestAcc_Task_issue2036/2/variables.tf | 15 - .../testdata/TestAcc_Task_issue2207/1/test.tf | 20 - .../TestAcc_Task_issue2207/1/variables.tf | 23 - pkg/resources/user_parameters.go | 12 +- pkg/schemas/gen/README.md | 1 + pkg/schemas/task_gen.go | 58 +- pkg/schemas/task_parameters.go | 91 + pkg/sdk/grants_impl.go | 10 +- pkg/sdk/parameters.go | 64 + pkg/sdk/tasks_def.go | 35 +- pkg/sdk/tasks_dto_builders_gen.go | 19 +- pkg/sdk/tasks_dto_gen.go | 37 +- pkg/sdk/tasks_gen.go | 95 +- pkg/sdk/tasks_gen_test.go | 93 +- pkg/sdk/tasks_impl_gen.go | 77 +- pkg/sdk/tasks_validations_gen.go | 18 +- pkg/sdk/testint/tasks_gen_integration_test.go | 80 +- templates/data-sources/tasks.md.tmpl | 24 + templates/resources/task.md.tmpl | 35 + 
v1-preparations/ESSENTIAL_GA_OBJECTS.MD | 2 +- 70 files changed, 9519 insertions(+), 1692 deletions(-) create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go create mode 100644 pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go create mode 100644 pkg/acceptance/bettertestspoc/config/model/task_model_ext.go create mode 100644 pkg/acceptance/bettertestspoc/config/model/task_model_gen.go create mode 100644 pkg/resources/task_parameters.go create mode 100644 pkg/resources/task_state_upgraders.go create mode 100644 pkg/resources/testdata/TestAcc_Task/basic/test.tf create mode 100644 pkg/resources/testdata/TestAcc_Task/basic/variables.tf create mode 100644 pkg/resources/testdata/TestAcc_Task/with_task_dependency/test.tf create mode 100644 pkg/resources/testdata/TestAcc_Task/with_task_dependency/variables.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf delete mode 100644 pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf create mode 100644 pkg/schemas/task_parameters.go create mode 100644 templates/data-sources/tasks.md.tmpl create mode 100644 templates/resources/task.md.tmpl diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md index 148dc15e45..ad1af3ec80 100644 --- a/MIGRATION_GUIDE.md +++ b/MIGRATION_GUIDE.md @@ -6,9 +6,150 @@ across different 
versions. > [!TIP] > We highly recommend upgrading the versions one by one instead of bulk upgrades. - + ## v0.98.0 ➞ v0.99.0 +### snowflake_tasks data source changes + +New filtering options: +- `with_parameters` +- `like` +- `in` +- `starts_with` +- `root_only` +- `limit` + +New output fields +- `show_output` +- `parameters` + +Breaking changes: +- `database` and `schema` are right now under `in` field + +Before: +```terraform +data "snowflake_tasks" "old_tasks" { + database = "" + schema = "" +} +``` +After: +```terraform +data "snowflake_tasks" "new_tasks" { + in { + # for IN SCHEMA specify: + schema = "." + + # for IN DATABASE specify: + database = "" + } +} +``` +- `tasks` field now organizes output of show under `show_output` field and the output of show parameters under `parameters` field. + +Before: +```terraform +output "simple_output" { + value = data.snowflake_tasks.test.tasks[0].name +} +``` +After: +```terraform +output "simple_output" { + value = data.snowflake_tasks.test.tasks[0].show_output[0].name +} +``` + +### snowflake_task resource changes +New fields: +- `config` - enables to specify JSON-formatted metadata that can be retrieved in the `sql_statement` by using [SYSTEM$GET_TASK_GRAPH_CONFIG](https://docs.snowflake.com/en/sql-reference/functions/system_get_task_graph_config). +- `show_output` and `parameters` fields added for holding SHOW and SHOW PARAMETERS output (see [raw Snowflake output](./v1-preparations/CHANGES_BEFORE_V1.md#raw-snowflake-output)). +- Added support for finalizer tasks with `finalize` field. It conflicts with `after` and `schedule` (see [finalizer tasks](https://docs.snowflake.com/en/user-guide/tasks-graphs#release-and-cleanup-of-task-graphs)). + +Changes: +- `enabled` field changed to `started` and type changed to string with only boolean values available (see ["empty" values](./v1-preparations/CHANGES_BEFORE_V1.md#empty-values)). 
It is also now required field, so make sure it's explicitly set (previously it was optional with the default value set to `false`). +- `allow_overlapping_execution` type was changed to string with only boolean values available (see ["empty" values](./v1-preparations/CHANGES_BEFORE_V1.md#empty-values)). Previously, it had the default set to `false` which will be migrated. If nothing will be set the provider will plan the change to `default` value. If you want to make sure it's turned off, set it explicitly to `false`. + +Before: +```terraform +resource "snowflake_task" "example" { + # ... + enabled = true + # ... +} +``` +After: +```terraform +resource "snowflake_task" "example" { + # ... + started = true + # ... +} +``` +- `schedule` field changed from single value to a nested object that allows for specifying either minutes or cron + +Before: +```terraform +resource "snowflake_task" "example" { + # ... + schedule = "5 MINUTES" + # or + schedule = "USING CRON * * * * * UTC" + # ... +} +``` +After: +```terraform +resource "snowflake_task" "example" { + # ... + schedule { + minutes = 5 + # or + using_cron = "* * * * * UTC" + } + # ... +} +``` +- All task parameters defined in [the Snowflake documentation](https://docs.snowflake.com/en/sql-reference/parameters) added into the top-level schema and removed `session_parameters` map. + +Before: +```terraform +resource "snowflake_task" "example" { + # ... + session_parameters = { + QUERY_TAG = "" + } + # ... +} +``` +After: +```terraform +resource "snowflake_task" "example" { + # ... + query_tag = "" + # ... +} +``` + +- `after` field type was changed from `list` to `set` and the values were changed from names to fully qualified names. + +Before: +```terraform +resource "snowflake_task" "example" { + # ... + after = ["", snowflake_task.some_task.name] + # ... +} +``` +After: +```terraform +resource "snowflake_task" "example" { + # ... + after = ["..", snowflake_task.some_task.fully_qualified_name] + # ... 
+} +``` + ### *(new feature)* snowflake_tags datasource Added a new datasource enabling querying and filtering tags. Notes: - all results are stored in `tags` field. diff --git a/docs/data-sources/tasks.md b/docs/data-sources/tasks.md index ac557968e0..f035be55cd 100644 --- a/docs/data-sources/tasks.md +++ b/docs/data-sources/tasks.md @@ -2,42 +2,983 @@ page_title: "snowflake_tasks Data Source - terraform-provider-snowflake" subcategory: "" description: |- - + Data source used to get details of filtered tasks. Filtering is aligned with the current possibilities for SHOW TASKS https://docs.snowflake.com/en/sql-reference/sql/show-tasks query. The results of SHOW and SHOW PARAMETERS IN are encapsulated in one output collection tasks. --- -# snowflake_tasks (Data Source) +!> **V1 release candidate** This data source was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the data source if needed. Any errors reported will be resolved with a higher priority. We encourage checking this data source out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. +# snowflake_tasks (Data Source) +Data source used to get details of filtered tasks. Filtering is aligned with the current possibilities for [SHOW TASKS](https://docs.snowflake.com/en/sql-reference/sql/show-tasks) query. The results of SHOW and SHOW PARAMETERS IN are encapsulated in one output collection `tasks`. 
## Example Usage ```terraform -data "snowflake_tasks" "current" { - database = "MYDB" - schema = "MYSCHEMA" +# Simple usage +data "snowflake_tasks" "simple" { +} + +output "simple_output" { + value = data.snowflake_tasks.simple.tasks +} + +# Filtering (like) +data "snowflake_tasks" "like" { + like = "task-name" +} + +output "like_output" { + value = data.snowflake_tasks.like.tasks +} + +# Filtering (in - account - database - schema - application - application package) +data "snowflake_tasks" "in_account" { + in { + account = true + } +} + +data "snowflake_tasks" "in_database" { + in { + database = "" + } +} + +data "snowflake_tasks" "in_schema" { + in { + schema = "." + } +} + +data "snowflake_tasks" "in_application" { + in { + application = "" + } +} + +data "snowflake_tasks" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_tasks.in_account.tasks, + "database" : data.snowflake_tasks.in_database.tasks, + "schema" : data.snowflake_tasks.in_schema.tasks, + "application" : data.snowflake_tasks.in_application.tasks, + "application_package" : data.snowflake_tasks.in_application_package.tasks, + } +} + +# Filtering (root only tasks) +data "snowflake_tasks" "root_only" { + root_only = true +} + +output "root_only_output" { + value = data.snowflake_tasks.root_only.tasks +} + +# Filtering (starts_with) +data "snowflake_tasks" "starts_with" { + starts_with = "task-" +} + +output "starts_with_output" { + value = data.snowflake_tasks.starts_with.tasks +} + +# Filtering (limit) +data "snowflake_tasks" "limit" { + limit { + rows = 10 + from = "task-" + } +} + +output "limit_output" { + value = data.snowflake_tasks.limit.tasks +} + +# Without additional data (to limit the number of calls make for every found task) +data "snowflake_tasks" "only_show" { + # with_parameters is turned on by default and it calls SHOW PARAMETERS FOR task for every task found and attaches its output to tasks.*.parameters 
field
+  with_parameters = false
+}
+
+output "only_show_output" {
+  value = data.snowflake_tasks.only_show.tasks
+}
+
+# Ensure the number of tasks is equal to at least one element (with the use of postcondition)
+data "snowflake_tasks" "assert_with_postcondition" {
+  starts_with = "task-name"
+  lifecycle {
+    postcondition {
+      condition     = length(self.tasks) > 0
+      error_message = "there should be at least one task"
+    }
+  }
+}
+
+# Ensure the number of tasks is equal to exactly one element (with the use of check block)
+check "task_check" {
+  data "snowflake_tasks" "assert_with_check_block" {
+    like = "task-name"
+  }
+
+  assert {
+    condition     = length(data.snowflake_tasks.assert_with_check_block.tasks) == 1
+    error_message = "tasks filtered by '${data.snowflake_tasks.assert_with_check_block.like}' returned ${length(data.snowflake_tasks.assert_with_check_block.tasks)} tasks where one was expected"
+  }
+}
 ```

 ## Schema

-### Required
+### Optional

-- `database` (String) The database from which to return the schemas from.
-- `schema` (String) The schema from which to return the tasks from.
+- `in` (Block List, Max: 1) IN clause to filter the list of objects (see [below for nested schema](#nestedblock--in))
+- `like` (String) Filters the output with **case-insensitive** pattern, with support for SQL wildcard characters (`%` and `_`).
+- `limit` (Block List, Max: 1) Limits the number of rows returned. If the `limit.from` is set, then the limit will start from the first element matched by the expression. The expression is only used to match with the first element, later on the elements are not matched by the prefix, but you can enforce a certain pattern with `starts_with` or `like`. (see [below for nested schema](#nestedblock--limit))
+- `root_only` (Boolean) Filters the command output to return only root tasks (tasks with no predecessors).
+- `starts_with` (String) Filters the output with **case-sensitive** characters indicating the beginning of the object name.
+- `with_parameters` (Boolean) Runs SHOW PARAMETERS FOR TASK for each task returned by SHOW TASK and saves the output to the parameters field as a map. By default this value is set to true. ### Read-Only - `id` (String) The ID of this resource. -- `tasks` (List of Object) The tasks in the schema (see [below for nested schema](#nestedatt--tasks)) +- `tasks` (List of Object) Holds the aggregated output of all task details queries. (see [below for nested schema](#nestedatt--tasks)) + + +### Nested Schema for `in` + +Optional: + +- `account` (Boolean) Returns records for the entire account. +- `application` (String) Returns records for the specified application. +- `application_package` (String) Returns records for the specified application package. +- `database` (String) Returns records for the current database in use or for a specified database. +- `schema` (String) Returns records for the current schema in use or a specified schema. Use fully qualified name. + + + +### Nested Schema for `limit` + +Required: + +- `rows` (Number) The maximum number of rows to return. + +Optional: + +- `from` (String) Specifies a **case-sensitive** pattern that is used to match object name. After the first match, the limit on the number of rows will be applied. 
+ ### Nested Schema for `tasks` Read-Only: +- `parameters` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters)) +- `show_output` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--show_output)) + + +### Nested Schema for `tasks.parameters` + +Read-Only: + +- `abort_detached_query` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--abort_detached_query)) +- `autocommit` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--autocommit)) +- `binary_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--binary_input_format)) +- `binary_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--binary_output_format)) +- `client_memory_limit` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_memory_limit)) +- `client_metadata_request_use_connection_ctx` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_metadata_request_use_connection_ctx)) +- `client_prefetch_threads` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_prefetch_threads)) +- `client_result_chunk_size` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_result_chunk_size)) +- `client_result_column_case_insensitive` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_result_column_case_insensitive)) +- `client_session_keep_alive` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_session_keep_alive)) +- `client_session_keep_alive_heartbeat_frequency` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--client_session_keep_alive_heartbeat_frequency)) +- `client_timestamp_type_mapping` (List of Object) (see [below for nested 
schema](#nestedobjatt--tasks--parameters--client_timestamp_type_mapping)) +- `date_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--date_input_format)) +- `date_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--date_output_format)) +- `enable_unload_physical_type_optimization` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--enable_unload_physical_type_optimization)) +- `error_on_nondeterministic_merge` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--error_on_nondeterministic_merge)) +- `error_on_nondeterministic_update` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--error_on_nondeterministic_update)) +- `geography_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--geography_output_format)) +- `geometry_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--geometry_output_format)) +- `jdbc_treat_timestamp_ntz_as_utc` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--jdbc_treat_timestamp_ntz_as_utc)) +- `jdbc_use_session_timezone` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--jdbc_use_session_timezone)) +- `json_indent` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--json_indent)) +- `lock_timeout` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--lock_timeout)) +- `log_level` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--log_level)) +- `multi_statement_count` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--multi_statement_count)) +- `noorder_sequence_as_default` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--noorder_sequence_as_default)) +- 
`odbc_treat_decimal_as_int` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--odbc_treat_decimal_as_int)) +- `query_tag` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--query_tag)) +- `quoted_identifiers_ignore_case` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--quoted_identifiers_ignore_case)) +- `rows_per_resultset` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--rows_per_resultset)) +- `s3_stage_vpce_dns_name` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--s3_stage_vpce_dns_name)) +- `search_path` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--search_path)) +- `statement_queued_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--statement_queued_timeout_in_seconds)) +- `statement_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--statement_timeout_in_seconds)) +- `strict_json_output` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--strict_json_output)) +- `suspend_task_after_num_failures` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--suspend_task_after_num_failures)) +- `task_auto_retry_attempts` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--task_auto_retry_attempts)) +- `time_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--time_input_format)) +- `time_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--time_output_format)) +- `timestamp_day_is_always_24h` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_day_is_always_24h)) +- `timestamp_input_format` (List of Object) (see [below for nested 
schema](#nestedobjatt--tasks--parameters--timestamp_input_format)) +- `timestamp_ltz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_ltz_output_format)) +- `timestamp_ntz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_ntz_output_format)) +- `timestamp_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_output_format)) +- `timestamp_type_mapping` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_type_mapping)) +- `timestamp_tz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timestamp_tz_output_format)) +- `timezone` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--timezone)) +- `trace_level` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--trace_level)) +- `transaction_abort_on_error` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--transaction_abort_on_error)) +- `transaction_default_isolation_level` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--transaction_default_isolation_level)) +- `two_digit_century_start` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--two_digit_century_start)) +- `unsupported_ddl_action` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--unsupported_ddl_action)) +- `use_cached_result` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--use_cached_result)) +- `user_task_managed_initial_warehouse_size` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--user_task_managed_initial_warehouse_size)) +- `user_task_minimum_trigger_interval_in_seconds` (List of Object) (see [below for nested 
schema](#nestedobjatt--tasks--parameters--user_task_minimum_trigger_interval_in_seconds)) +- `user_task_timeout_ms` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--user_task_timeout_ms)) +- `week_of_year_policy` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--week_of_year_policy)) +- `week_start` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--parameters--week_start)) + + +### Nested Schema for `tasks.parameters.abort_detached_query` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.autocommit` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.binary_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.binary_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_memory_limit` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_metadata_request_use_connection_ctx` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_prefetch_threads` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_result_chunk_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` 
(String) + + + +### Nested Schema for `tasks.parameters.client_result_column_case_insensitive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_session_keep_alive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_session_keep_alive_heartbeat_frequency` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.client_timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.date_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.date_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.enable_unload_physical_type_optimization` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.error_on_nondeterministic_merge` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.error_on_nondeterministic_update` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.geography_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- 
`value` (String) + + + +### Nested Schema for `tasks.parameters.geometry_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.jdbc_treat_timestamp_ntz_as_utc` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.jdbc_use_session_timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.json_indent` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.lock_timeout` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.log_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.multi_statement_count` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.noorder_sequence_as_default` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.odbc_treat_decimal_as_int` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.query_tag` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.quoted_identifiers_ignore_case` + 
+Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.rows_per_resultset` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.s3_stage_vpce_dns_name` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.search_path` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.statement_queued_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.statement_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.strict_json_output` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.suspend_task_after_num_failures` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.task_auto_retry_attempts` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.time_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.time_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- 
`key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_day_is_always_24h` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_ltz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_ntz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timestamp_tz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.trace_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.transaction_abort_on_error` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### 
Nested Schema for `tasks.parameters.transaction_default_isolation_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.two_digit_century_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.unsupported_ddl_action` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.use_cached_result` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.user_task_managed_initial_warehouse_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.user_task_minimum_trigger_interval_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.user_task_timeout_ms` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.week_of_year_policy` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `tasks.parameters.week_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + + +### Nested Schema for `tasks.show_output` + +Read-Only: + +- `allow_overlapping_execution` (Boolean) +- `budget` (String) - `comment` (String) -- `database` (String) +- `condition` (String) +- `config` (String) +- `created_on` 
(String) +- `database_name` (String) +- `definition` (String) +- `error_integration` (String) +- `id` (String) +- `last_committed_on` (String) +- `last_suspended_on` (String) +- `last_suspended_reason` (String) - `name` (String) -- `schema` (String) +- `owner` (String) +- `owner_role_type` (String) +- `predecessors` (Set of String) +- `schedule` (String) +- `schema_name` (String) +- `state` (String) +- `task_relations` (List of Object) (see [below for nested schema](#nestedobjatt--tasks--show_output--task_relations)) - `warehouse` (String) + + +### Nested Schema for `tasks.show_output.task_relations` + +Read-Only: + +- `finalized_root_task` (String) +- `finalizer` (String) +- `predecessors` (List of String) diff --git a/docs/resources/resource_monitor.md b/docs/resources/resource_monitor.md index 03ade24eea..c5eb401268 100644 --- a/docs/resources/resource_monitor.md +++ b/docs/resources/resource_monitor.md @@ -2,7 +2,7 @@ page_title: "snowflake_resource_monitor Resource - terraform-provider-snowflake" subcategory: "" description: |- - + Resource used to manage resource monitor objects. For more information, check resource monitor documentation https://docs.snowflake.com/en/user-guide/resource-monitors. --- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. @@ -15,7 +15,7 @@ description: |- # snowflake_resource_monitor (Resource) - +Resource used to manage resource monitor objects. For more information, check [resource monitor documentation](https://docs.snowflake.com/en/user-guide/resource-monitors). 
## Example Usage diff --git a/docs/resources/task.md b/docs/resources/task.md index 8dd8acb62a..8343f80763 100644 --- a/docs/resources/task.md +++ b/docs/resources/task.md @@ -2,72 +2,147 @@ page_title: "snowflake_task Resource - terraform-provider-snowflake" subcategory: "" description: |- - + Resource used to manage task objects. For more information, check task documentation https://docs.snowflake.com/en/user-guide/tasks-intro. --- -# snowflake_task (Resource) +!> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. +# snowflake_task (Resource) +Resource used to manage task objects. For more information, check [task documentation](https://docs.snowflake.com/en/user-guide/tasks-intro). 
## Example Usage ```terraform +# Basic standalone task resource "snowflake_task" "task" { - comment = "my task" - database = "database" schema = "schema" + name = "task" warehouse = "warehouse" - - name = "task" - schedule = "10 MINUTE" - sql_statement = "select * from foo;" - - session_parameters = { - "foo" : "bar", + started = true + schedule { + minutes = 5 } - - user_task_timeout_ms = 10000 - after = "preceding_task" - when = "foo AND bar" - enabled = true + sql_statement = "select 1" } +# Basic serverless task resource "snowflake_task" "serverless_task" { - comment = "my serverless task" - - database = "db" - schema = "schema" - - name = "serverless_task" - schedule = "10 MINUTE" - sql_statement = "select * from foo;" - - session_parameters = { - "foo" : "bar", - } - - user_task_timeout_ms = 10000 + database = "database" + schema = "schema" + name = "task" user_task_managed_initial_warehouse_size = "XSMALL" - after = [snowflake_task.task.name] - when = "foo AND bar" - enabled = true + started = true + schedule { + minutes = 5 + } + sql_statement = "select 1" } -resource "snowflake_task" "test_task" { - comment = "task with allow_overlapping_execution" +# Basic child task +resource "snowflake_task" "child_task" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + # You can do it by referring to the task by the computed fully_qualified_name field or write the task name manually if it's not managed by Terraform + after = [snowflake_task.root_task.fully_qualified_name, ".."] + sql_statement = "select 1" +} - database = "database" - schema = "schema" +# Basic finalizer task +resource "snowflake_task" "finalizer_task" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + # You can do it by referring to the task by the computed fully_qualified_name field or write the task name manually if it's not managed by Terraform + finalize = 
snowflake_task.root_task.fully_qualified_name + sql_statement = "select 1" +} - name = "test_task" - sql_statement = "select 1 as c;" +# Complete standalone task +resource "snowflake_task" "test" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + sql_statement = "select 1" + config = "{\"key\":\"value\"}" allow_overlapping_execution = true - enabled = true + error_integration = "" + when = "SYSTEM$STREAM_HAS_DATA('')" + comment = "complete task" + + schedule { + minutes = 10 + } + + # Session Parameters + suspend_task_after_num_failures = 10 + task_auto_retry_attempts = 0 + user_task_managed_initial_warehouse_size = "Medium" + user_task_minimum_trigger_interval_in_seconds = 30 + user_task_timeout_ms = 3600000 + abort_detached_query = false + autocommit = true + binary_input_format = "HEX" + binary_output_format = "HEX" + client_memory_limit = 1536 + client_metadata_request_use_connection_ctx = false + client_prefetch_threads = 4 + client_result_chunk_size = 160 + client_result_column_case_insensitive = false + client_session_keep_alive = false + client_session_keep_alive_heartbeat_frequency = 3600 + client_timestamp_type_mapping = "TIMESTAMP_LTZ" + date_input_format = "AUTO" + date_output_format = "YYYY-MM-DD" + enable_unload_physical_type_optimization = true + error_on_nondeterministic_merge = true + error_on_nondeterministic_update = false + geography_output_format = "GeoJSON" + geometry_output_format = "GeoJSON" + jdbc_use_session_timezone = true + json_indent = 2 + lock_timeout = 43200 + log_level = "OFF" + multi_statement_count = 1 + noorder_sequence_as_default = true + odbc_treat_decimal_as_int = false + query_tag = "" + quoted_identifiers_ignore_case = false + rows_per_resultset = 0 + s3_stage_vpce_dns_name = "" + search_path = "$current, $public" + statement_queued_timeout_in_seconds = 0 + statement_timeout_in_seconds = 172800 + strict_json_output = false + timestamp_day_is_always_24h = false + 
timestamp_input_format = "AUTO" + timestamp_ltz_output_format = "" + timestamp_ntz_output_format = "YYYY-MM-DD HH24:MI:SS.FF3" + timestamp_output_format = "YYYY-MM-DD HH24:MI:SS.FF3 TZHTZM" + timestamp_type_mapping = "TIMESTAMP_NTZ" + timestamp_tz_output_format = "" + timezone = "America/Los_Angeles" + time_input_format = "AUTO" + time_output_format = "HH24:MI:SS" + trace_level = "OFF" + transaction_abort_on_error = false + transaction_default_isolation_level = "READ COMMITTED" + two_digit_century_start = 1970 + unsupported_ddl_action = "ignore" + use_cached_result = true + week_of_year_policy = 0 + week_start = 0 } ``` - -> **Note** Instead of using fully_qualified_name, you can reference objects managed outside Terraform by constructing a correct ID, consult [identifiers guide](https://registry.terraform.io/providers/Snowflake-Labs/snowflake/latest/docs/guides/identifiers#new-computed-fully-qualified-name-field-in-resources). @@ -76,30 +151,895 @@ resource "snowflake_task" "test_task" { ### Required -- `database` (String) The database in which to create the task. -- `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. -- `schema` (String) The schema in which to create the task. +- `database` (String) The database in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` +- `name` (String) Specifies the identifier for the task; must be unique for the database and schema in which the task is created. 
Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` +- `schema` (String) The schema in which to create the task. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` - `sql_statement` (String) Any single SQL statement, or a call to a stored procedure, executed when the task runs. +- `started` (Boolean) Specifies if the task should be started or suspended. ### Optional -- `after` (List of String) Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. -- `allow_overlapping_execution` (Boolean) By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap. +- `abort_detached_query` (Boolean) Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption). For more information, check [ABORT_DETACHED_QUERY docs](https://docs.snowflake.com/en/sql-reference/parameters#abort-detached-query). +- `after` (Set of String) Specifies one or more predecessor tasks for the current task. Use this option to [create a DAG](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-task-dag) of tasks or add this task to an existing DAG. 
A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` +- `allow_overlapping_execution` (String) By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap. Available options are: "true" or "false". When the value is not set in the configuration the provider will put "default" there which means to use the Snowflake default for this value. +- `autocommit` (Boolean) Specifies whether autocommit is enabled for the session. Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions). For more information, check [AUTOCOMMIT docs](https://docs.snowflake.com/en/sql-reference/parameters#autocommit). +- `binary_input_format` (String) The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output). For more information, check [BINARY_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#binary-input-format). +- `binary_output_format` (String) The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output). 
For more information, check [BINARY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#binary-output-format). +- `client_memory_limit` (Number) Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB). For more information, check [CLIENT_MEMORY_LIMIT docs](https://docs.snowflake.com/en/sql-reference/parameters#client-memory-limit). +- `client_metadata_request_use_connection_ctx` (Boolean) For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly. For more information, check [CLIENT_METADATA_REQUEST_USE_CONNECTION_CTX docs](https://docs.snowflake.com/en/sql-reference/parameters#client-metadata-request-use-connection-ctx). +- `client_prefetch_threads` (Number) Parameter that specifies the number of threads used by the client to pre-fetch large result sets. The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system’s resources) to improve performance. For more information, check [CLIENT_PREFETCH_THREADS docs](https://docs.snowflake.com/en/sql-reference/parameters#client-prefetch-threads). +- `client_result_chunk_size` (Number) Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). The JDBC driver downloads query results in chunks. For more information, check [CLIENT_RESULT_CHUNK_SIZE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-result-chunk-size). +- `client_result_column_case_insensitive` (Boolean) Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC. 
For more information, check [CLIENT_RESULT_COLUMN_CASE_INSENSITIVE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-result-column-case-insensitive). +- `client_session_keep_alive` (Boolean) Parameter that indicates whether to force a user to log in again after a period of inactivity in the session. For more information, check [CLIENT_SESSION_KEEP_ALIVE docs](https://docs.snowflake.com/en/sql-reference/parameters#client-session-keep-alive). +- `client_session_keep_alive_heartbeat_frequency` (Number) Number of seconds in-between client attempts to update the token for the session. For more information, check [CLIENT_SESSION_KEEP_ALIVE_HEARTBEAT_FREQUENCY docs](https://docs.snowflake.com/en/sql-reference/parameters#client-session-keep-alive-heartbeat-frequency). +- `client_timestamp_type_mapping` (String) Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data. For more information, check [CLIENT_TIMESTAMP_TYPE_MAPPING docs](https://docs.snowflake.com/en/sql-reference/parameters#client-timestamp-type-mapping). - `comment` (String) Specifies a comment for the task. -- `enabled` (Boolean) Specifies if the task should be started (enabled) after creation or should remain suspended (default). -- `error_integration` (String) Specifies the name of the notification integration used for error notifications. -- `schedule` (String) The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after) -- `session_parameters` (Map of String) Specifies session parameters to set for the session when the task runs. A task supports all session parameters. -- `suspend_task_after_num_failures` (Number) Specifies the number of consecutive failed task runs after which the current task is suspended automatically. 
The default is 0 (no automatic suspension). -- `user_task_managed_initial_warehouse_size` (String) Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. (Conflicts with warehouse) -- `user_task_timeout_ms` (Number) Specifies the time limit on a single run of the task before it times out (in milliseconds). -- `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size) -- `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. +- `config` (String) Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. Must be in JSON format. +- `date_input_format` (String) Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-input-format). +- `date_output_format` (String) Specifies the display format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [DATE_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#date-output-format). +- `enable_unload_physical_type_optimization` (Boolean) Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. 
the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table). For more information, check [ENABLE_UNLOAD_PHYSICAL_TYPE_OPTIMIZATION docs](https://docs.snowflake.com/en/sql-reference/parameters#enable-unload-physical-type-optimization). +- `error_integration` (String) Specifies the name of the notification integration used for error notifications. Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` +- `error_on_nondeterministic_merge` (Boolean) Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_MERGE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-merge). +- `error_on_nondeterministic_update` (Boolean) Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row. For more information, check [ERROR_ON_NONDETERMINISTIC_UPDATE docs](https://docs.snowflake.com/en/sql-reference/parameters#error-on-nondeterministic-update). +- `finalize` (String) Specifies the name of a root task that the finalizer task is associated with. Finalizer tasks run after all other tasks in the task graph run to completion. You can define the SQL of a finalizer task to handle notifications and the release and cleanup of resources that a task graph uses. 
For more information, see [Release and cleanup of task graphs](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-finalizer-task). Due to technical limitations (read more [here](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/docs/technical-documentation/identifiers_rework_design_decisions.md#known-limitations-and-identifier-recommendations)), avoid using the following characters: `|`, `.`, `"` +- `geography_output_format` (String) Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography). For more information, check [GEOGRAPHY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geography-output-format). +- `geometry_output_format` (String) Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry). For more information, check [GEOMETRY_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#geometry-output-format). +- `jdbc_treat_timestamp_ntz_as_utc` (Boolean) Specifies how JDBC processes TIMESTAMP_NTZ values. For more information, check [JDBC_TREAT_TIMESTAMP_NTZ_AS_UTC docs](https://docs.snowflake.com/en/sql-reference/parameters#jdbc-treat-timestamp-ntz-as-utc). +- `jdbc_use_session_timezone` (Boolean) Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class. For more information, check [JDBC_USE_SESSION_TIMEZONE docs](https://docs.snowflake.com/en/sql-reference/parameters#jdbc-use-session-timezone). +- `json_indent` (Number) Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element. 
For more information, check [JSON_INDENT docs](https://docs.snowflake.com/en/sql-reference/parameters#json-indent). +- `lock_timeout` (Number) Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement. For more information, check [LOCK_TIMEOUT docs](https://docs.snowflake.com/en/sql-reference/parameters#lock-timeout). +- `log_level` (String) Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level). For more information, check [LOG_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#log-level). +- `multi_statement_count` (Number) Number of statements to execute when using the multi-statement capability. For more information, check [MULTI_STATEMENT_COUNT docs](https://docs.snowflake.com/en/sql-reference/parameters#multi-statement-count). +- `noorder_sequence_as_default` (Boolean) Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values). For more information, check [NOORDER_SEQUENCE_AS_DEFAULT docs](https://docs.snowflake.com/en/sql-reference/parameters#noorder-sequence-as-default). +- `odbc_treat_decimal_as_int` (Boolean) Specifies how ODBC processes columns that have a scale of zero (0). For more information, check [ODBC_TREAT_DECIMAL_AS_INT docs](https://docs.snowflake.com/en/sql-reference/parameters#odbc-treat-decimal-as-int). 
+- `query_tag` (String) Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions. For more information, check [QUERY_TAG docs](https://docs.snowflake.com/en/sql-reference/parameters#query-tag). +- `quoted_identifiers_ignore_case` (Boolean) Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter). For more information, check [QUOTED_IDENTIFIERS_IGNORE_CASE docs](https://docs.snowflake.com/en/sql-reference/parameters#quoted-identifiers-ignore-case). +- `rows_per_resultset` (Number) Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum. For more information, check [ROWS_PER_RESULTSET docs](https://docs.snowflake.com/en/sql-reference/parameters#rows-per-resultset). +- `s3_stage_vpce_dns_name` (String) Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation). 
For more information, check [S3_STAGE_VPCE_DNS_NAME docs](https://docs.snowflake.com/en/sql-reference/parameters#s3-stage-vpce-dns-name). +- `schedule` (Block List, Max: 1) The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflicts with finalize and after; when set, one of the sub-fields `minutes` or `using_cron` should be set) (see [below for nested schema](#nestedblock--schedule)) +- `search_path` (String) Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name. For more information, check [SEARCH_PATH docs](https://docs.snowflake.com/en/sql-reference/parameters#search-path). +- `statement_queued_timeout_in_seconds` (Number) Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged. For more information, check [STATEMENT_QUEUED_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-queued-timeout-in-seconds). +- `statement_timeout_in_seconds` (Number) Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system. For more information, check [STATEMENT_TIMEOUT_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#statement-timeout-in-seconds). +- `strict_json_output` (Boolean) This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). 
By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON. For more information, check [STRICT_JSON_OUTPUT docs](https://docs.snowflake.com/en/sql-reference/parameters#strict-json-output). +- `suspend_task_after_num_failures` (Number) Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension). For more information, check [SUSPEND_TASK_AFTER_NUM_FAILURES docs](https://docs.snowflake.com/en/sql-reference/parameters#suspend-task-after-num-failures). +- `task_auto_retry_attempts` (Number) Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed. For more information, check [TASK_AUTO_RETRY_ATTEMPTS docs](https://docs.snowflake.com/en/sql-reference/parameters#task-auto-retry-attempts). +- `time_input_format` (String) Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session). For more information, check [TIME_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#time-input-format). +- `time_output_format` (String) Specifies the display format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). 
For more information, check [TIME_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#time-output-format). +- `timestamp_day_is_always_24h` (Boolean) Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days. For more information, check [TIMESTAMP_DAY_IS_ALWAYS_24H docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-day-is-always-24h). +- `timestamp_input_format` (String) Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session). For more information, check [TIMESTAMP_INPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-input-format). +- `timestamp_ltz_output_format` (String) Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_LTZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-ltz-output-format). +- `timestamp_ntz_output_format` (String) Specifies the display format for the TIMESTAMP_NTZ data type. For more information, check [TIMESTAMP_NTZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-ntz-output-format). +- `timestamp_output_format` (String) Specifies the display format for the TIMESTAMP data type alias. 
For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-output-format). +- `timestamp_type_mapping` (String) Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to. For more information, check [TIMESTAMP_TYPE_MAPPING docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-type-mapping). +- `timestamp_tz_output_format` (String) Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). For more information, check [TIMESTAMP_TZ_OUTPUT_FORMAT docs](https://docs.snowflake.com/en/sql-reference/parameters#timestamp-tz-output-format). +- `timezone` (String) Specifies the time zone for the session. You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.). For more information, check [TIMEZONE docs](https://docs.snowflake.com/en/sql-reference/parameters#timezone). +- `trace_level` (String) Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level). For more information, check [TRACE_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#trace-level). 
+- `transaction_abort_on_error` (Boolean) Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error. For more information, check [TRANSACTION_ABORT_ON_ERROR docs](https://docs.snowflake.com/en/sql-reference/parameters#transaction-abort-on-error). +- `transaction_default_isolation_level` (String) Specifies the isolation level for transactions in the user session. For more information, check [TRANSACTION_DEFAULT_ISOLATION_LEVEL docs](https://docs.snowflake.com/en/sql-reference/parameters#transaction-default-isolation-level). +- `two_digit_century_start` (Number) Specifies the “century start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits). For more information, check [TWO_DIGIT_CENTURY_START docs](https://docs.snowflake.com/en/sql-reference/parameters#two-digit-century-start). +- `unsupported_ddl_action` (String) Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error. For more information, check [UNSUPPORTED_DDL_ACTION docs](https://docs.snowflake.com/en/sql-reference/parameters#unsupported-ddl-action). +- `use_cached_result` (Boolean) Specifies whether to reuse persisted query results, if available, when a matching query is submitted. For more information, check [USE_CACHED_RESULT docs](https://docs.snowflake.com/en/sql-reference/parameters#use-cached-result). +- `user_task_managed_initial_warehouse_size` (String) Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. 
(Conflicts with warehouse) For more information, check [USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-managed-initial-warehouse-size). +- `user_task_minimum_trigger_interval_in_seconds` (Number) Minimum amount of time between Triggered Task executions in seconds. For more information, check [USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-minimum-trigger-interval-in-seconds). +- `user_task_timeout_ms` (Number) Specifies the time limit on a single run of the task before it times out (in milliseconds). For more information, check [USER_TASK_TIMEOUT_MS docs](https://docs.snowflake.com/en/sql-reference/parameters#user-task-timeout-ms). +- `warehouse` (String) The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. Due to Snowflake limitations warehouse identifier can consist of only upper-cased letters. (Conflicts with user_task_managed_initial_warehouse_size) +- `week_of_year_policy` (Number) Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. `1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year. For more information, check [WEEK_OF_YEAR_POLICY docs](https://docs.snowflake.com/en/sql-reference/parameters#week-of-year-policy). +- `week_start` (Number) Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week. For more information, check [WEEK_START docs](https://docs.snowflake.com/en/sql-reference/parameters#week-start). 
+- `when` (String) Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. When a task is triggered (based on its SCHEDULE or AFTER setting), it validates the conditions of the expression to determine whether to execute. If the conditions of the expression are not met, then the task skips the current run. Any tasks that identify this task as a predecessor also don’t run. ### Read-Only - `fully_qualified_name` (String) Fully qualified name of the resource. For more information, see [object name resolution](https://docs.snowflake.com/en/sql-reference/name-resolution). - `id` (String) The ID of this resource. +- `parameters` (List of Object) Outputs the result of `SHOW PARAMETERS IN TASK` for the given task. (see [below for nested schema](#nestedatt--parameters)) +- `show_output` (List of Object) Outputs the result of `SHOW TASKS` for the given task. (see [below for nested schema](#nestedatt--show_output)) + + +### Nested Schema for `schedule` + +Optional: + +- `minutes` (Number) Specifies an interval (in minutes) of wait time inserted between runs of the task. Accepts positive integers only. (conflicts with `using_cron`) +- `using_cron` (String) Specifies a cron expression and time zone for periodically running the task. Supports a subset of standard cron utility syntax. 
(conflicts with `minutes`) + + + +### Nested Schema for `parameters` + +Read-Only: + +- `abort_detached_query` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--abort_detached_query)) +- `autocommit` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--autocommit)) +- `binary_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--binary_input_format)) +- `binary_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--binary_output_format)) +- `client_memory_limit` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_memory_limit)) +- `client_metadata_request_use_connection_ctx` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_metadata_request_use_connection_ctx)) +- `client_prefetch_threads` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_prefetch_threads)) +- `client_result_chunk_size` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_result_chunk_size)) +- `client_result_column_case_insensitive` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_result_column_case_insensitive)) +- `client_session_keep_alive` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_session_keep_alive)) +- `client_session_keep_alive_heartbeat_frequency` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_session_keep_alive_heartbeat_frequency)) +- `client_timestamp_type_mapping` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--client_timestamp_type_mapping)) +- `date_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--date_input_format)) +- `date_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--date_output_format)) +- 
`enable_unload_physical_type_optimization` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--enable_unload_physical_type_optimization)) +- `error_on_nondeterministic_merge` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--error_on_nondeterministic_merge)) +- `error_on_nondeterministic_update` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--error_on_nondeterministic_update)) +- `geography_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--geography_output_format)) +- `geometry_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--geometry_output_format)) +- `jdbc_treat_timestamp_ntz_as_utc` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--jdbc_treat_timestamp_ntz_as_utc)) +- `jdbc_use_session_timezone` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--jdbc_use_session_timezone)) +- `json_indent` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--json_indent)) +- `lock_timeout` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--lock_timeout)) +- `log_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--log_level)) +- `multi_statement_count` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--multi_statement_count)) +- `noorder_sequence_as_default` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--noorder_sequence_as_default)) +- `odbc_treat_decimal_as_int` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--odbc_treat_decimal_as_int)) +- `query_tag` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--query_tag)) +- `quoted_identifiers_ignore_case` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--quoted_identifiers_ignore_case)) +- `rows_per_resultset` (List of Object) (see 
[below for nested schema](#nestedobjatt--parameters--rows_per_resultset)) +- `s3_stage_vpce_dns_name` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--s3_stage_vpce_dns_name)) +- `search_path` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--search_path)) +- `statement_queued_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--statement_queued_timeout_in_seconds)) +- `statement_timeout_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--statement_timeout_in_seconds)) +- `strict_json_output` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--strict_json_output)) +- `suspend_task_after_num_failures` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--suspend_task_after_num_failures)) +- `task_auto_retry_attempts` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--task_auto_retry_attempts)) +- `time_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--time_input_format)) +- `time_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--time_output_format)) +- `timestamp_day_is_always_24h` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_day_is_always_24h)) +- `timestamp_input_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_input_format)) +- `timestamp_ltz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_ltz_output_format)) +- `timestamp_ntz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_ntz_output_format)) +- `timestamp_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_output_format)) +- `timestamp_type_mapping` (List of Object) (see [below for nested 
schema](#nestedobjatt--parameters--timestamp_type_mapping)) +- `timestamp_tz_output_format` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timestamp_tz_output_format)) +- `timezone` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--timezone)) +- `trace_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--trace_level)) +- `transaction_abort_on_error` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--transaction_abort_on_error)) +- `transaction_default_isolation_level` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--transaction_default_isolation_level)) +- `two_digit_century_start` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--two_digit_century_start)) +- `unsupported_ddl_action` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--unsupported_ddl_action)) +- `use_cached_result` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--use_cached_result)) +- `user_task_managed_initial_warehouse_size` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_managed_initial_warehouse_size)) +- `user_task_minimum_trigger_interval_in_seconds` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_minimum_trigger_interval_in_seconds)) +- `user_task_timeout_ms` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--user_task_timeout_ms)) +- `week_of_year_policy` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--week_of_year_policy)) +- `week_start` (List of Object) (see [below for nested schema](#nestedobjatt--parameters--week_start)) + + +### Nested Schema for `parameters.abort_detached_query` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.autocommit` + 
+Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.binary_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.binary_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_memory_limit` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_metadata_request_use_connection_ctx` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_prefetch_threads` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_result_chunk_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_result_column_case_insensitive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_session_keep_alive` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_session_keep_alive_heartbeat_frequency` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.client_timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- 
`key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.date_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.date_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.enable_unload_physical_type_optimization` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.error_on_nondeterministic_merge` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.error_on_nondeterministic_update` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.geography_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.geometry_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.jdbc_treat_timestamp_ntz_as_utc` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.jdbc_use_session_timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.json_indent` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for 
`parameters.lock_timeout` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.log_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.multi_statement_count` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.noorder_sequence_as_default` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.odbc_treat_decimal_as_int` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.query_tag` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.quoted_identifiers_ignore_case` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.rows_per_resultset` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.s3_stage_vpce_dns_name` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.search_path` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.statement_queued_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` 
(String) + + + +### Nested Schema for `parameters.statement_timeout_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.strict_json_output` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.suspend_task_after_num_failures` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.task_auto_retry_attempts` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.time_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.time_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_day_is_always_24h` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_input_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_ltz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_ntz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_output_format` + +Read-Only: + +- `default` 
(String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_type_mapping` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timestamp_tz_output_format` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.timezone` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.trace_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.transaction_abort_on_error` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.transaction_default_isolation_level` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.two_digit_century_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.unsupported_ddl_action` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.use_cached_result` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.user_task_managed_initial_warehouse_size` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema 
for `parameters.user_task_minimum_trigger_interval_in_seconds` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.user_task_timeout_ms` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.week_of_year_policy` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + +### Nested Schema for `parameters.week_start` + +Read-Only: + +- `default` (String) +- `description` (String) +- `key` (String) +- `level` (String) +- `value` (String) + + + + +### Nested Schema for `show_output` + +Read-Only: + +- `allow_overlapping_execution` (Boolean) +- `budget` (String) +- `comment` (String) +- `condition` (String) +- `config` (String) +- `created_on` (String) +- `database_name` (String) +- `definition` (String) +- `error_integration` (String) +- `id` (String) +- `last_committed_on` (String) +- `last_suspended_on` (String) +- `last_suspended_reason` (String) +- `name` (String) +- `owner` (String) +- `owner_role_type` (String) +- `predecessors` (Set of String) +- `schedule` (String) +- `schema_name` (String) +- `state` (String) +- `task_relations` (List of Object) (see [below for nested schema](#nestedobjatt--show_output--task_relations)) +- `warehouse` (String) + + +### Nested Schema for `show_output.task_relations` + +Read-Only: + +- `finalized_root_task` (String) +- `finalizer` (String) +- `predecessors` (List of String) ## Import diff --git a/examples/data-sources/snowflake_tasks/data-source.tf b/examples/data-sources/snowflake_tasks/data-source.tf index dba5b39d70..c6dea29f1e 100644 --- a/examples/data-sources/snowflake_tasks/data-source.tf +++ b/examples/data-sources/snowflake_tasks/data-source.tf @@ -1,4 +1,120 @@ -data "snowflake_tasks" "current" { - database = "MYDB" - schema = 
"MYSCHEMA" -} \ No newline at end of file +# Simple usage +data "snowflake_tasks" "simple" { +} + +output "simple_output" { + value = data.snowflake_tasks.simple.tasks +} + +# Filtering (like) +data "snowflake_tasks" "like" { + like = "task-name" +} + +output "like_output" { + value = data.snowflake_tasks.like.tasks +} + +# Filtering (in - account - database - schema - application - application package) +data "snowflake_tasks" "in_account" { + in { + account = true + } +} + +data "snowflake_tasks" "in_database" { + in { + database = "" + } +} + +data "snowflake_tasks" "in_schema" { + in { + schema = "." + } +} + +data "snowflake_tasks" "in_application" { + in { + application = "" + } +} + +data "snowflake_tasks" "in_application_package" { + in { + application_package = "" + } +} + +output "in_output" { + value = { + "account" : data.snowflake_tasks.in_account.tasks, + "database" : data.snowflake_tasks.in_database.tasks, + "schema" : data.snowflake_tasks.in_schema.tasks, + "application" : data.snowflake_tasks.in_application.tasks, + "application_package" : data.snowflake_tasks.in_application_package.tasks, + } +} + +# Filtering (root only tasks) +data "snowflake_tasks" "root_only" { + root_only = true +} + +output "root_only_output" { + value = data.snowflake_tasks.root_only.tasks +} + +# Filtering (starts_with) +data "snowflake_tasks" "starts_with" { + starts_with = "task-" +} + +output "starts_with_output" { + value = data.snowflake_tasks.starts_with.tasks +} + +# Filtering (limit) +data "snowflake_tasks" "limit" { + limit { + rows = 10 + from = "task-" + } +} + +output "limit_output" { + value = data.snowflake_tasks.limit.tasks +} + +# Without additional data (to limit the number of calls make for every found task) +data "snowflake_tasks" "only_show" { + # with_parameters is turned on by default and it calls SHOW PARAMETERS FOR task for every task found and attaches its output to tasks.*.parameters field + with_parameters = false +} + +output "only_show_output" { 
+ value = data.snowflake_tasks.only_show.tasks +} + +# Ensure the number of tasks is equal to at least one element (with the use of postcondition) +data "snowflake_tasks" "assert_with_postcondition" { + starts_with = "task-name" + lifecycle { + postcondition { + condition = length(self.tasks) > 0 + error_message = "there should be at least one task" + } + } +} + +# Ensure the number of tasks is equal to at exactly one element (with the use of check block) +check "task_check" { + data "snowflake_tasks" "assert_with_check_block" { + like = "task-name" + } + + assert { + condition = length(data.snowflake_tasks.assert_with_check_block.tasks) == 1 + error_message = "tasks filtered by '${data.snowflake_tasks.assert_with_check_block.like}' returned ${length(data.snowflake_tasks.assert_with_check_block.tasks)} tasks where one was expected" + } +} diff --git a/examples/resources/snowflake_task/resource.tf b/examples/resources/snowflake_task/resource.tf index 76e8a3c7a6..da18d05a81 100644 --- a/examples/resources/snowflake_task/resource.tf +++ b/examples/resources/snowflake_task/resource.tf @@ -1,54 +1,128 @@ +# Basic standalone task resource "snowflake_task" "task" { - comment = "my task" - database = "database" schema = "schema" + name = "task" warehouse = "warehouse" - - name = "task" - schedule = "10 MINUTE" - sql_statement = "select * from foo;" - - session_parameters = { - "foo" : "bar", + started = true + schedule { + minutes = 5 } - - user_task_timeout_ms = 10000 - after = "preceding_task" - when = "foo AND bar" - enabled = true + sql_statement = "select 1" } +# Basic serverless task resource "snowflake_task" "serverless_task" { - comment = "my serverless task" - - database = "db" - schema = "schema" - - name = "serverless_task" - schedule = "10 MINUTE" - sql_statement = "select * from foo;" - - session_parameters = { - "foo" : "bar", - } - - user_task_timeout_ms = 10000 + database = "database" + schema = "schema" + name = "task" 
user_task_managed_initial_warehouse_size = "XSMALL" - after = [snowflake_task.task.name] - when = "foo AND bar" - enabled = true + started = true + schedule { + minutes = 5 + } + sql_statement = "select 1" } -resource "snowflake_task" "test_task" { - comment = "task with allow_overlapping_execution" +# Basic child task +resource "snowflake_task" "child_task" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + # You can do it by referring to task by computed fully_qualified_name field or write the task name in manually if it's not managed by Terraform + after = [snowflake_task.root_task.fully_qualified_name, ".."] + sql_statement = "select 1" +} - database = "database" - schema = "schema" +# Basic finalizer task +resource "snowflake_task" "child_task" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + # You can do it by referring to task by computed fully_qualified_name field or write the task name in manually if it's not managed by Terraform + finalize = snowflake_task.root_task.fully_qualified_name + sql_statement = "select 1" +} - name = "test_task" - sql_statement = "select 1 as c;" +# Complete standalone task +resource "snowflake_task" "test" { + database = "database" + schema = "schema" + name = "task" + warehouse = "warehouse" + started = true + sql_statement = "select 1" + config = "{\"key\":\"value\"}" allow_overlapping_execution = true - enabled = true + error_integration = "" + when = "SYSTEM$STREAM_HAS_DATA('')" + comment = "complete task" + + schedule { + minutes = 10 + } + + # Session Parameters + suspend_task_after_num_failures = 10 + task_auto_retry_attempts = 0 + user_task_managed_initial_warehouse_size = "Medium" + user_task_minimum_trigger_interval_in_seconds = 30 + user_task_timeout_ms = 3600000 + abort_detached_query = false + autocommit = true + binary_input_format = "HEX" + binary_output_format = "HEX" + client_memory_limit = 1536 + 
client_metadata_request_use_connection_ctx = false + client_prefetch_threads = 4 + client_result_chunk_size = 160 + client_result_column_case_insensitive = false + client_session_keep_alive = false + client_session_keep_alive_heartbeat_frequency = 3600 + client_timestamp_type_mapping = "TIMESTAMP_LTZ" + date_input_format = "AUTO" + date_output_format = "YYYY-MM-DD" + enable_unload_physical_type_optimization = true + error_on_nondeterministic_merge = true + error_on_nondeterministic_update = false + geography_output_format = "GeoJSON" + geometry_output_format = "GeoJSON" + jdbc_use_session_timezone = true + json_indent = 2 + lock_timeout = 43200 + log_level = "OFF" + multi_statement_count = 1 + noorder_sequence_as_default = true + odbc_treat_decimal_as_int = false + query_tag = "" + quoted_identifiers_ignore_case = false + rows_per_resultset = 0 + s3_stage_vpce_dns_name = "" + search_path = "$current, $public" + statement_queued_timeout_in_seconds = 0 + statement_timeout_in_seconds = 172800 + strict_json_output = false + timestamp_day_is_always_24h = false + timestamp_input_format = "AUTO" + timestamp_ltz_output_format = "" + timestamp_ntz_output_format = "YYYY-MM-DD HH24:MI:SS.FF3" + timestamp_output_format = "YYYY-MM-DD HH24:MI:SS.FF3 TZHTZM" + timestamp_type_mapping = "TIMESTAMP_NTZ" + timestamp_tz_output_format = "" + timezone = "America/Los_Angeles" + time_input_format = "AUTO" + time_output_format = "HH24:MI:SS" + trace_level = "OFF" + transaction_abort_on_error = false + transaction_default_isolation_level = "READ COMMITTED" + two_digit_century_start = 1970 + unsupported_ddl_action = "ignore" + use_cached_result = true + week_of_year_policy = 0 + week_start = 0 } diff --git a/pkg/acceptance/bettertestspoc/README.md b/pkg/acceptance/bettertestspoc/README.md index a24c6abe6d..7b96af6362 100644 --- a/pkg/acceptance/bettertestspoc/README.md +++ b/pkg/acceptance/bettertestspoc/README.md @@ -352,4 +352,5 @@ func (w *WarehouseDatasourceShowOutputAssert) IsEmpty() { 
2. `testing` is a package name that makes Go think that we want to have unnamed parameter there, but we just didn't generate the type for that field in the function argument. - generate assertions checking that time is not empty - we often do not compare time fields by value, but check if they are set - utilize `ContainsExactlyInAnyOrder` function in `pkg/acceptance/bettertestspoc/assert/commons.go` to create asserts on collections that are order independent +- Additional asserts for sets and lists that wouldn't rely on the order of items saved to the state (SNOW-1706544) - support generating provider config and use generated configs in `pkg/provider/provider_acceptance_test.go` diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go index 40f5894ba3..696d990459 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_ext.go @@ -4,9 +4,10 @@ import ( "errors" "fmt" "reflect" - "slices" "testing" + "github.com/stretchr/testify/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" ) @@ -32,21 +33,13 @@ func (t *TaskAssert) HasNotEmptyId() *TaskAssert { return t } -func (t *TaskAssert) HasPredecessors(ids ...sdk.SchemaObjectIdentifier) *TaskAssert { +func (t *TaskAssert) HasPredecessorsInAnyOrder(ids ...sdk.SchemaObjectIdentifier) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() - if len(o.Predecessors) != len(ids) { - return fmt.Errorf("expected %d (%v) predecessors, got %d (%v)", len(ids), ids, len(o.Predecessors), o.Predecessors) - } - var errs []error - for _, id := range ids { - if !slices.ContainsFunc(o.Predecessors, func(predecessorId sdk.SchemaObjectIdentifier) bool { - return predecessorId.FullyQualifiedName() == id.FullyQualifiedName() - }) { - errs = append(errs, fmt.Errorf("expected id: %s, to be in the 
list of predecessors: %v", id.FullyQualifiedName(), o.Predecessors)) - } + if !assert.ElementsMatch(t, ids, o.Predecessors) { + return fmt.Errorf("expected %v predecessors in task relations, got %v", ids, o.TaskRelations.Predecessors) } - return errors.Join(errs...) + return nil }) return t } @@ -54,21 +47,52 @@ func (t *TaskAssert) HasPredecessors(ids ...sdk.SchemaObjectIdentifier) *TaskAss func (t *TaskAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() - if len(o.TaskRelations.Predecessors) != len(expected.Predecessors) { - return fmt.Errorf("expected %d (%v) predecessors in task relations, got %d (%v)", len(expected.Predecessors), expected.Predecessors, len(o.TaskRelations.Predecessors), o.TaskRelations.Predecessors) - } - var errs []error - for _, id := range expected.Predecessors { - if !slices.ContainsFunc(o.TaskRelations.Predecessors, func(predecessorId sdk.SchemaObjectIdentifier) bool { - return predecessorId.FullyQualifiedName() == id.FullyQualifiedName() - }) { - errs = append(errs, fmt.Errorf("expected id: %s, to be in the list of predecessors in task relations: %v", id.FullyQualifiedName(), o.TaskRelations.Predecessors)) - } + errs := make([]error, 0) + if !assert.ElementsMatch(t, o.TaskRelations.Predecessors, expected.Predecessors) { + errs = append(errs, fmt.Errorf("expected %v predecessors in task relations, got %v", expected.Predecessors, o.TaskRelations.Predecessors)) } if !reflect.DeepEqual(expected.FinalizerTask, o.TaskRelations.FinalizerTask) { errs = append(errs, fmt.Errorf("expected finalizer task: %v; got: %v", expected.FinalizerTask, o.TaskRelations.FinalizerTask)) } + if expected.FinalizedRootTask != nil { + // This is not supported because we would have to traverse the task graph to find the root task. + errs = append(errs, fmt.Errorf("asserting FinalizedRootTask is not supported")) + } return errors.Join(errs...) 
}) return t } + +func (t *TaskAssert) HasWarehouse(expected *sdk.AccountObjectIdentifier) *TaskAssert { + t.AddAssertion(func(t *testing.T, o *sdk.Task) error { + t.Helper() + if o.Warehouse == nil && expected != nil { + return fmt.Errorf("expected warehouse to have value; got: nil") + } + if o.Warehouse != nil && expected == nil { + return fmt.Errorf("expected warehouse to no have value; got: %s", o.Warehouse.Name()) + } + if o.Warehouse != nil && expected != nil && o.Warehouse.Name() != expected.Name() { + return fmt.Errorf("expected warehouse: %v; got: %v", expected.Name(), o.Warehouse.Name()) + } + return nil + }) + return t +} + +func (t *TaskAssert) HasErrorIntegration(expected *sdk.AccountObjectIdentifier) *TaskAssert { + t.AddAssertion(func(t *testing.T, o *sdk.Task) error { + t.Helper() + if o.ErrorIntegration == nil && expected != nil { + return fmt.Errorf("expected error integration to have value; got: nil") + } + if o.ErrorIntegration != nil && expected == nil { + return fmt.Errorf("expected error integration to have no value; got: %s", o.ErrorIntegration.Name()) + } + if o.ErrorIntegration != nil && expected != nil && o.ErrorIntegration.Name() != expected.Name() { + return fmt.Errorf("expected error integration: %v; got: %v", expected.Name(), o.ErrorIntegration.Name()) + } + return nil + }) + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go index c0180747ab..0d2f32c3a0 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectassert/task_snowflake_gen.go @@ -107,17 +107,6 @@ func (t *TaskAssert) HasComment(expected string) *TaskAssert { return t } -func (t *TaskAssert) HasWarehouse(expected string) *TaskAssert { - t.AddAssertion(func(t *testing.T, o *sdk.Task) error { - t.Helper() - if o.Warehouse != expected { - return fmt.Errorf("expected warehouse: %v; 
got: %v", expected, o.Warehouse) - } - return nil - }) - return t -} - func (t *TaskAssert) HasSchedule(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() @@ -173,17 +162,6 @@ func (t *TaskAssert) HasAllowOverlappingExecution(expected bool) *TaskAssert { return t } -func (t *TaskAssert) HasErrorIntegration(expected *sdk.AccountObjectIdentifier) *TaskAssert { - t.AddAssertion(func(t *testing.T, o *sdk.Task) error { - t.Helper() - if o.ErrorIntegration != expected { - return fmt.Errorf("expected error integration: %v; got: %v", expected, o.ErrorIntegration) - } - return nil - }) - return t -} - func (t *TaskAssert) HasLastCommittedOn(expected string) *TaskAssert { t.AddAssertion(func(t *testing.T, o *sdk.Task) error { t.Helper() diff --git a/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go b/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go index 24ac1f78bd..b5c571149d 100644 --- a/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go +++ b/pkg/acceptance/bettertestspoc/assert/objectparametersassert/task_parameters_snowflake_gen.go @@ -1027,7 +1027,7 @@ func (t *TaskParametersAssert) HasDefaultTaskAutoRetryAttemptsValueExplicit() *T } func (t *TaskParametersAssert) HasDefaultUserTaskManagedInitialWarehouseSizeValueExplicit() *TaskParametersAssert { - return t.HasUserTaskManagedInitialWarehouseSize("Medium") + return t.HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSize("Medium")) } func (t *TaskParametersAssert) HasDefaultUserTaskMinimumTriggerIntervalInSecondsValueExplicit() *TaskParametersAssert { diff --git a/pkg/acceptance/bettertestspoc/assert/resource_assertions.go b/pkg/acceptance/bettertestspoc/assert/resource_assertions.go index 79f4e47ac0..09c8c875cc 100644 --- a/pkg/acceptance/bettertestspoc/assert/resource_assertions.go +++ 
b/pkg/acceptance/bettertestspoc/assert/resource_assertions.go @@ -62,6 +62,7 @@ const ( resourceAssertionTypeValuePresent = "VALUE_PRESENT" resourceAssertionTypeValueSet = "VALUE_SET" resourceAssertionTypeValueNotSet = "VALUE_NOT_SET" + resourceAssertionTypeSetElem = "SET_ELEM" ) type ResourceAssertion struct { @@ -75,6 +76,10 @@ func (r *ResourceAssert) AddAssertion(assertion ResourceAssertion) { r.assertions = append(r.assertions, assertion) } +func SetElem(fieldName string, expected string) ResourceAssertion { + return ResourceAssertion{fieldName: fieldName, expectedValue: expected, resourceAssertionType: resourceAssertionTypeSetElem} +} + func ValuePresent(fieldName string) ResourceAssertion { return ResourceAssertion{fieldName: fieldName, resourceAssertionType: resourceAssertionTypeValuePresent} } @@ -152,6 +157,11 @@ func (r *ResourceAssert) ToTerraformTestCheckFunc(t *testing.T) resource.TestChe for i, a := range r.assertions { switch a.resourceAssertionType { + case resourceAssertionTypeSetElem: + if err := resource.TestCheckTypeSetElemAttr(r.name, a.fieldName, a.expectedValue)(s); err != nil { + errCut, _ := strings.CutPrefix(err.Error(), fmt.Sprintf("%s: ", r.name)) + result = append(result, fmt.Errorf("%s %s assertion [%d/%d]: failed with error: %s", r.name, r.prefix, i+1, len(r.assertions), errCut)) + } case resourceAssertionTypeValueSet: if err := resource.TestCheckResourceAttr(r.name, a.fieldName, a.expectedValue)(s); err != nil { errCut, _ := strings.CutPrefix(err.Error(), fmt.Sprintf("%s: ", r.name)) diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go index 144b0d5975..bcbe79ed5b 100644 --- a/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/gen/resource_schema_def.go @@ -109,4 +109,8 @@ var allResourceSchemaDefs = []ResourceSchemaDef{ name: 
"Tag", schema: resources.Tag().Schema, }, + { + name: "Task", + schema: resources.Task().Schema, + }, } diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go new file mode 100644 index 0000000000..f948594340 --- /dev/null +++ b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_ext.go @@ -0,0 +1,39 @@ +package resourceassert + +import ( + "strconv" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" +) + +func (t *TaskResourceAssert) HasAfter(ids ...sdk.SchemaObjectIdentifier) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("after.#", strconv.FormatInt(int64(len(ids)), 10))) + for _, id := range ids { + t.AddAssertion(assert.SetElem("after.*", id.FullyQualifiedName())) + } + return t +} + +func (t *TaskResourceAssert) HasScheduleMinutes(minutes int) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("schedule.#", "1")) + t.AddAssertion(assert.ValueSet("schedule.0.minutes", strconv.Itoa(minutes))) + return t +} + +func (t *TaskResourceAssert) HasScheduleCron(cron string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("schedule.#", "1")) + t.AddAssertion(assert.ValueSet("schedule.0.using_cron", cron)) + return t +} + +func (t *TaskResourceAssert) HasNoScheduleSet() *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("schedule.#", "0")) + return t +} + +func (t *TaskResourceAssert) HasUserTaskManagedInitialWarehouseSizeEnum(size sdk.WarehouseSize) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", string(size))) + return t +} diff --git a/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go new file mode 100644 index 0000000000..27dd1a43ec --- /dev/null +++ 
b/pkg/acceptance/bettertestspoc/assert/resourceassert/task_resource_gen.go @@ -0,0 +1,767 @@ +// Code generated by assertions generator; DO NOT EDIT. + +package resourceassert + +import ( + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" +) + +type TaskResourceAssert struct { + *assert.ResourceAssert +} + +func TaskResource(t *testing.T, name string) *TaskResourceAssert { + t.Helper() + + return &TaskResourceAssert{ + ResourceAssert: assert.NewResourceAssert(name, "resource"), + } +} + +func ImportedTaskResource(t *testing.T, id string) *TaskResourceAssert { + t.Helper() + + return &TaskResourceAssert{ + ResourceAssert: assert.NewImportedResourceAssert(id, "imported resource"), + } +} + +/////////////////////////////////// +// Attribute value string checks // +/////////////////////////////////// + +func (t *TaskResourceAssert) HasAbortDetachedQueryString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("abort_detached_query", expected)) + return t +} + +func (t *TaskResourceAssert) HasAfterString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("after", expected)) + return t +} + +func (t *TaskResourceAssert) HasAllowOverlappingExecutionString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("allow_overlapping_execution", expected)) + return t +} + +func (t *TaskResourceAssert) HasAutocommitString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("autocommit", expected)) + return t +} + +func (t *TaskResourceAssert) HasBinaryInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("binary_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasBinaryOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("binary_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientMemoryLimitString(expected string) 
*TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_memory_limit", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientMetadataRequestUseConnectionCtxString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_metadata_request_use_connection_ctx", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientPrefetchThreadsString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_prefetch_threads", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientResultChunkSizeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_result_chunk_size", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientResultColumnCaseInsensitiveString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_result_column_case_insensitive", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientSessionKeepAliveString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_session_keep_alive", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientSessionKeepAliveHeartbeatFrequencyString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_session_keep_alive_heartbeat_frequency", expected)) + return t +} + +func (t *TaskResourceAssert) HasClientTimestampTypeMappingString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("client_timestamp_type_mapping", expected)) + return t +} + +func (t *TaskResourceAssert) HasCommentString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("comment", expected)) + return t +} + +func (t *TaskResourceAssert) HasConfigString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("config", expected)) + return t +} + +func (t *TaskResourceAssert) HasDatabaseString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("database", expected)) + return t +} 
+ +func (t *TaskResourceAssert) HasDateInputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("date_input_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasDateOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("date_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasEnableUnloadPhysicalTypeOptimizationString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("enable_unload_physical_type_optimization", expected)) + return t +} + +func (t *TaskResourceAssert) HasErrorIntegrationString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_integration", expected)) + return t +} + +func (t *TaskResourceAssert) HasErrorOnNondeterministicMergeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_on_nondeterministic_merge", expected)) + return t +} + +func (t *TaskResourceAssert) HasErrorOnNondeterministicUpdateString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("error_on_nondeterministic_update", expected)) + return t +} + +func (t *TaskResourceAssert) HasFinalizeString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("finalize", expected)) + return t +} + +func (t *TaskResourceAssert) HasFullyQualifiedNameString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("fully_qualified_name", expected)) + return t +} + +func (t *TaskResourceAssert) HasGeographyOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("geography_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasGeometryOutputFormatString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("geometry_output_format", expected)) + return t +} + +func (t *TaskResourceAssert) HasJdbcTreatTimestampNtzAsUtcString(expected string) *TaskResourceAssert { + 
t.AddAssertion(assert.ValueSet("jdbc_treat_timestamp_ntz_as_utc", expected)) + return t +} + +func (t *TaskResourceAssert) HasJdbcUseSessionTimezoneString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("jdbc_use_session_timezone", expected)) + return t +} + +func (t *TaskResourceAssert) HasJsonIndentString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("json_indent", expected)) + return t +} + +func (t *TaskResourceAssert) HasLockTimeoutString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("lock_timeout", expected)) + return t +} + +func (t *TaskResourceAssert) HasLogLevelString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("log_level", expected)) + return t +} + +func (t *TaskResourceAssert) HasMultiStatementCountString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("multi_statement_count", expected)) + return t +} + +func (t *TaskResourceAssert) HasNameString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("name", expected)) + return t +} + +func (t *TaskResourceAssert) HasNoorderSequenceAsDefaultString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("noorder_sequence_as_default", expected)) + return t +} + +func (t *TaskResourceAssert) HasOdbcTreatDecimalAsIntString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("odbc_treat_decimal_as_int", expected)) + return t +} + +func (t *TaskResourceAssert) HasQueryTagString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("query_tag", expected)) + return t +} + +func (t *TaskResourceAssert) HasQuotedIdentifiersIgnoreCaseString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("quoted_identifiers_ignore_case", expected)) + return t +} + +func (t *TaskResourceAssert) HasRowsPerResultsetString(expected string) *TaskResourceAssert { + t.AddAssertion(assert.ValueSet("rows_per_resultset", 
////////////////////////////
// Attribute value checks //
////////////////////////////

// Fluent, generator-style assertion builders for the snowflake_task resource.
// Each Has<Attr>String method asserts that the named Terraform attribute is
// set to the expected string value in state; each HasNo<Attr> method asserts
// the attribute is not set at all. All methods return the receiver so calls
// can be chained.
// NOTE(review): reconstructed from a line-wrapped patch; bodies are
// token-identical to the original, only layout and comments were adjusted.

func (t *TaskResourceAssert) HasS3StageVpceDnsNameString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("s3_stage_vpce_dns_name", expected)); return t }
func (t *TaskResourceAssert) HasScheduleString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("schedule", expected)); return t }
func (t *TaskResourceAssert) HasSchemaString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("schema", expected)); return t }
func (t *TaskResourceAssert) HasSearchPathString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("search_path", expected)); return t }
func (t *TaskResourceAssert) HasSqlStatementString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("sql_statement", expected)); return t }
func (t *TaskResourceAssert) HasStartedString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("started", expected)); return t }
func (t *TaskResourceAssert) HasStatementQueuedTimeoutInSecondsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("statement_queued_timeout_in_seconds", expected)); return t }
func (t *TaskResourceAssert) HasStatementTimeoutInSecondsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("statement_timeout_in_seconds", expected)); return t }
func (t *TaskResourceAssert) HasStrictJsonOutputString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("strict_json_output", expected)); return t }
func (t *TaskResourceAssert) HasSuspendTaskAfterNumFailuresString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("suspend_task_after_num_failures", expected)); return t }
func (t *TaskResourceAssert) HasTaskAutoRetryAttemptsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("task_auto_retry_attempts", expected)); return t }
func (t *TaskResourceAssert) HasTimeInputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("time_input_format", expected)); return t }
func (t *TaskResourceAssert) HasTimeOutputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("time_output_format", expected)); return t }
func (t *TaskResourceAssert) HasTimestampDayIsAlways24hString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_day_is_always_24h", expected)); return t }
func (t *TaskResourceAssert) HasTimestampInputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_input_format", expected)); return t }
func (t *TaskResourceAssert) HasTimestampLtzOutputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_ltz_output_format", expected)); return t }
func (t *TaskResourceAssert) HasTimestampNtzOutputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_ntz_output_format", expected)); return t }
func (t *TaskResourceAssert) HasTimestampOutputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_output_format", expected)); return t }
func (t *TaskResourceAssert) HasTimestampTypeMappingString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_type_mapping", expected)); return t }
func (t *TaskResourceAssert) HasTimestampTzOutputFormatString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timestamp_tz_output_format", expected)); return t }
func (t *TaskResourceAssert) HasTimezoneString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("timezone", expected)); return t }
func (t *TaskResourceAssert) HasTraceLevelString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("trace_level", expected)); return t }
func (t *TaskResourceAssert) HasTransactionAbortOnErrorString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("transaction_abort_on_error", expected)); return t }
func (t *TaskResourceAssert) HasTransactionDefaultIsolationLevelString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("transaction_default_isolation_level", expected)); return t }
func (t *TaskResourceAssert) HasTwoDigitCenturyStartString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("two_digit_century_start", expected)); return t }
func (t *TaskResourceAssert) HasUnsupportedDdlActionString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("unsupported_ddl_action", expected)); return t }
func (t *TaskResourceAssert) HasUseCachedResultString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("use_cached_result", expected)); return t }
func (t *TaskResourceAssert) HasUserTaskManagedInitialWarehouseSizeString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("user_task_managed_initial_warehouse_size", expected)); return t }
func (t *TaskResourceAssert) HasUserTaskMinimumTriggerIntervalInSecondsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("user_task_minimum_trigger_interval_in_seconds", expected)); return t }
func (t *TaskResourceAssert) HasUserTaskTimeoutMsString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("user_task_timeout_ms", expected)); return t }
func (t *TaskResourceAssert) HasWarehouseString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("warehouse", expected)); return t }
func (t *TaskResourceAssert) HasWeekOfYearPolicyString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("week_of_year_policy", expected)); return t }
func (t *TaskResourceAssert) HasWeekStartString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("week_start", expected)); return t }
func (t *TaskResourceAssert) HasWhenString(expected string) *TaskResourceAssert { t.AddAssertion(assert.ValueSet("when", expected)); return t }

////////////////////////////
// Attribute empty checks //
////////////////////////////

func (t *TaskResourceAssert) HasNoAbortDetachedQuery() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("abort_detached_query")); return t }
func (t *TaskResourceAssert) HasNoAfter() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("after")); return t }
func (t *TaskResourceAssert) HasNoAllowOverlappingExecution() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("allow_overlapping_execution")); return t }
func (t *TaskResourceAssert) HasNoAutocommit() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("autocommit")); return t }
func (t *TaskResourceAssert) HasNoBinaryInputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("binary_input_format")); return t }
func (t *TaskResourceAssert) HasNoBinaryOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("binary_output_format")); return t }
func (t *TaskResourceAssert) HasNoClientMemoryLimit() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_memory_limit")); return t }
func (t *TaskResourceAssert) HasNoClientMetadataRequestUseConnectionCtx() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_metadata_request_use_connection_ctx")); return t }
func (t *TaskResourceAssert) HasNoClientPrefetchThreads() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_prefetch_threads")); return t }
func (t *TaskResourceAssert) HasNoClientResultChunkSize() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_result_chunk_size")); return t }
func (t *TaskResourceAssert) HasNoClientResultColumnCaseInsensitive() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_result_column_case_insensitive")); return t }
func (t *TaskResourceAssert) HasNoClientSessionKeepAlive() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_session_keep_alive")); return t }
func (t *TaskResourceAssert) HasNoClientSessionKeepAliveHeartbeatFrequency() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_session_keep_alive_heartbeat_frequency")); return t }
func (t *TaskResourceAssert) HasNoClientTimestampTypeMapping() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("client_timestamp_type_mapping")); return t }
func (t *TaskResourceAssert) HasNoComment() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("comment")); return t }
func (t *TaskResourceAssert) HasNoConfig() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("config")); return t }
func (t *TaskResourceAssert) HasNoDatabase() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("database")); return t }
func (t *TaskResourceAssert) HasNoDateInputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("date_input_format")); return t }
func (t *TaskResourceAssert) HasNoDateOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("date_output_format")); return t }
func (t *TaskResourceAssert) HasNoEnableUnloadPhysicalTypeOptimization() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("enable_unload_physical_type_optimization")); return t }
func (t *TaskResourceAssert) HasNoErrorIntegration() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("error_integration")); return t }
func (t *TaskResourceAssert) HasNoErrorOnNondeterministicMerge() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("error_on_nondeterministic_merge")); return t }
func (t *TaskResourceAssert) HasNoErrorOnNondeterministicUpdate() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("error_on_nondeterministic_update")); return t }
func (t *TaskResourceAssert) HasNoFinalize() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("finalize")); return t }
func (t *TaskResourceAssert) HasNoFullyQualifiedName() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("fully_qualified_name")); return t }
func (t *TaskResourceAssert) HasNoGeographyOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("geography_output_format")); return t }
func (t *TaskResourceAssert) HasNoGeometryOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("geometry_output_format")); return t }
func (t *TaskResourceAssert) HasNoJdbcTreatTimestampNtzAsUtc() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("jdbc_treat_timestamp_ntz_as_utc")); return t }
func (t *TaskResourceAssert) HasNoJdbcUseSessionTimezone() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("jdbc_use_session_timezone")); return t }
func (t *TaskResourceAssert) HasNoJsonIndent() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("json_indent")); return t }
func (t *TaskResourceAssert) HasNoLockTimeout() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("lock_timeout")); return t }
func (t *TaskResourceAssert) HasNoLogLevel() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("log_level")); return t }
func (t *TaskResourceAssert) HasNoMultiStatementCount() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("multi_statement_count")); return t }
func (t *TaskResourceAssert) HasNoName() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("name")); return t }
func (t *TaskResourceAssert) HasNoNoorderSequenceAsDefault() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("noorder_sequence_as_default")); return t }
func (t *TaskResourceAssert) HasNoOdbcTreatDecimalAsInt() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("odbc_treat_decimal_as_int")); return t }
func (t *TaskResourceAssert) HasNoQueryTag() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("query_tag")); return t }
func (t *TaskResourceAssert) HasNoQuotedIdentifiersIgnoreCase() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("quoted_identifiers_ignore_case")); return t }
func (t *TaskResourceAssert) HasNoRowsPerResultset() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("rows_per_resultset")); return t }
func (t *TaskResourceAssert) HasNoS3StageVpceDnsName() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("s3_stage_vpce_dns_name")); return t }
func (t *TaskResourceAssert) HasNoSchedule() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("schedule")); return t }
func (t *TaskResourceAssert) HasNoSchema() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("schema")); return t }
func (t *TaskResourceAssert) HasNoSearchPath() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("search_path")); return t }
func (t *TaskResourceAssert) HasNoSqlStatement() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("sql_statement")); return t }
func (t *TaskResourceAssert) HasNoStarted() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("started")); return t }
func (t *TaskResourceAssert) HasNoStatementQueuedTimeoutInSeconds() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("statement_queued_timeout_in_seconds")); return t }
func (t *TaskResourceAssert) HasNoStatementTimeoutInSeconds() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("statement_timeout_in_seconds")); return t }
func (t *TaskResourceAssert) HasNoStrictJsonOutput() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("strict_json_output")); return t }
func (t *TaskResourceAssert) HasNoSuspendTaskAfterNumFailures() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("suspend_task_after_num_failures")); return t }
func (t *TaskResourceAssert) HasNoTaskAutoRetryAttempts() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("task_auto_retry_attempts")); return t }
func (t *TaskResourceAssert) HasNoTimeInputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("time_input_format")); return t }
func (t *TaskResourceAssert) HasNoTimeOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("time_output_format")); return t }
func (t *TaskResourceAssert) HasNoTimestampDayIsAlways24h() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_day_is_always_24h")); return t }
func (t *TaskResourceAssert) HasNoTimestampInputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_input_format")); return t }
func (t *TaskResourceAssert) HasNoTimestampLtzOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_ltz_output_format")); return t }
func (t *TaskResourceAssert) HasNoTimestampNtzOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_ntz_output_format")); return t }
func (t *TaskResourceAssert) HasNoTimestampOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_output_format")); return t }
func (t *TaskResourceAssert) HasNoTimestampTypeMapping() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_type_mapping")); return t }
func (t *TaskResourceAssert) HasNoTimestampTzOutputFormat() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timestamp_tz_output_format")); return t }
func (t *TaskResourceAssert) HasNoTimezone() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("timezone")); return t }
func (t *TaskResourceAssert) HasNoTraceLevel() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("trace_level")); return t }
func (t *TaskResourceAssert) HasNoTransactionAbortOnError() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("transaction_abort_on_error")); return t }
func (t *TaskResourceAssert) HasNoTransactionDefaultIsolationLevel() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("transaction_default_isolation_level")); return t }
func (t *TaskResourceAssert) HasNoTwoDigitCenturyStart() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("two_digit_century_start")); return t }
func (t *TaskResourceAssert) HasNoUnsupportedDdlAction() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("unsupported_ddl_action")); return t }
func (t *TaskResourceAssert) HasNoUseCachedResult() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("use_cached_result")); return t }
func (t *TaskResourceAssert) HasNoUserTaskManagedInitialWarehouseSize() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("user_task_managed_initial_warehouse_size")); return t }
func (t *TaskResourceAssert) HasNoUserTaskMinimumTriggerIntervalInSeconds() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("user_task_minimum_trigger_interval_in_seconds")); return t }
func (t *TaskResourceAssert) HasNoUserTaskTimeoutMs() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("user_task_timeout_ms")); return t }
func (t *TaskResourceAssert) HasNoWarehouse() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("warehouse")); return t }
func (t *TaskResourceAssert) HasNoWeekOfYearPolicy() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("week_of_year_policy")); return t }
func (t *TaskResourceAssert) HasNoWeekStart() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("week_start")); return t }
func (t *TaskResourceAssert) HasNoWhen() *TaskResourceAssert { t.AddAssertion(assert.ValueNotSet("when")); return t }
// ----- pkg/acceptance/bettertestspoc/assert/resourceparametersassert/task_resource_parameters_ext.go -----

package resourceparametersassert

import (
	"strings"
	"testing"

	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert"
	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
)

// TaskDatasourceParameters is a temporary workaround to have better parameter assertions in data source acceptance tests.
// It anchors the assertions at "data.<name>" with the "tasks.0." prefix and
// additionally asserts that exactly one "parameters" object is present.
func TaskDatasourceParameters(t *testing.T, name string) *TaskResourceParametersAssert {
	t.Helper()

	taskAssert := TaskResourceParametersAssert{
		ResourceAssert: assert.NewDatasourceAssert("data."+name, "parameters", "tasks.0."),
	}
	taskAssert.AddAssertion(assert.ValueSet("parameters.#", "1"))
	return &taskAssert
}

// HasAllDefaults chains assertions for every task parameter at the value this
// test suite treats as the default. The concrete values below are fixed here;
// NOTE(review): presumably these mirror Snowflake account defaults — confirm
// against the Snowflake parameters documentation before relying on them.
func (u *TaskResourceParametersAssert) HasAllDefaults() *TaskResourceParametersAssert {
	return u.
		HasSuspendTaskAfterNumFailures(10).
		HasTaskAutoRetryAttempts(0).
		HasUserTaskManagedInitialWarehouseSize("Medium").
		HasUserTaskMinimumTriggerIntervalInSeconds(30).
		HasUserTaskTimeoutMs(3600000).
		HasAbortDetachedQuery(false).
		HasAutocommit(true).
		HasBinaryInputFormat(sdk.BinaryInputFormatHex).
		HasBinaryOutputFormat(sdk.BinaryOutputFormatHex).
		HasClientMemoryLimit(1536).
		HasClientMetadataRequestUseConnectionCtx(false).
		HasClientPrefetchThreads(4).
		HasClientResultChunkSize(160).
		HasClientResultColumnCaseInsensitive(false).
		HasClientSessionKeepAlive(false).
		HasClientSessionKeepAliveHeartbeatFrequency(3600).
		HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingLtz).
		HasDateInputFormat("AUTO").
		HasDateOutputFormat("YYYY-MM-DD").
		HasEnableUnloadPhysicalTypeOptimization(true).
		HasErrorOnNondeterministicMerge(true).
		HasErrorOnNondeterministicUpdate(false).
		HasGeographyOutputFormat(sdk.GeographyOutputFormatGeoJSON).
		HasGeometryOutputFormat(sdk.GeometryOutputFormatGeoJSON).
		HasJdbcTreatTimestampNtzAsUtc(false).
		HasJdbcUseSessionTimezone(true).
		HasJsonIndent(2).
		HasLockTimeout(43200).
		HasLogLevel(sdk.LogLevelOff).
		HasMultiStatementCount(1).
		HasNoorderSequenceAsDefault(true).
		HasOdbcTreatDecimalAsInt(false).
		HasQueryTag("").
		HasQuotedIdentifiersIgnoreCase(false).
		HasRowsPerResultset(0).
		HasS3StageVpceDnsName("").
		HasSearchPath("$current, $public").
		HasStatementQueuedTimeoutInSeconds(0).
		HasStatementTimeoutInSeconds(172800).
		HasStrictJsonOutput(false).
		HasTimestampDayIsAlways24h(false).
		HasTimestampInputFormat("AUTO").
		HasTimestampLtzOutputFormat("").
		HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS.FF3").
		HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS.FF3 TZHTZM").
		HasTimestampTypeMapping(sdk.TimestampTypeMappingNtz).
		HasTimestampTzOutputFormat("").
		HasTimezone("America/Los_Angeles").
		HasTimeInputFormat("AUTO").
		HasTimeOutputFormat("HH24:MI:SS").
		HasTraceLevel(sdk.TraceLevelOff).
		HasTransactionAbortOnError(false).
		HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted).
		HasTwoDigitCenturyStart(1970).
		// SHOW PARAMETERS reports this value lower-cased, hence the ToLower round-trip.
		HasUnsupportedDdlAction(sdk.UnsupportedDDLAction(strings.ToLower(string(sdk.UnsupportedDDLActionIgnore)))).
		HasUseCachedResult(true).
		HasWeekOfYearPolicy(0).
		HasWeekStart(0)
}

// ----- pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_ext.go -----

package resourceshowoutputassert

import (
	"fmt"
	"strconv"
	"testing"

	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert"
	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
)

// TaskDatasourceShowOutput is a temporary workaround to have better show output assertions in data source acceptance tests.
func TaskDatasourceShowOutput(t *testing.T, name string) *TaskShowOutputAssert {
	t.Helper()

	taskAssert := TaskShowOutputAssert{
		ResourceAssert: assert.NewDatasourceAssert("data."+name, "show_output", "tasks.0."),
	}
	taskAssert.AddAssertion(assert.ValueSet("show_output.#", "1"))
	return &taskAssert
}

// HasErrorIntegrationEmpty asserts the error_integration field is the empty string.
func (t *TaskShowOutputAssert) HasErrorIntegrationEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", ""))
	return t
}

// HasCreatedOnNotEmpty asserts created_on is present, without pinning its value.
func (t *TaskShowOutputAssert) HasCreatedOnNotEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValuePresent("created_on"))
	return t
}

func (t *TaskShowOutputAssert) HasIdNotEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValuePresent("id"))
	return t
}

func (t *TaskShowOutputAssert) HasOwnerNotEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValuePresent("owner"))
	return t
}

func (t *TaskShowOutputAssert) HasLastCommittedOnNotEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValuePresent("last_committed_on"))
	return t
}

func (t *TaskShowOutputAssert) HasLastSuspendedOnNotEmpty() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValuePresent("last_suspended_on"))
	return t
}

// HasPredecessors asserts the predecessors list has exactly the given
// identifiers, in order, compared by fully qualified name.
func (t *TaskShowOutputAssert) HasPredecessors(predecessors ...sdk.SchemaObjectIdentifier) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("predecessors.#", strconv.Itoa(len(predecessors))))
	for i, predecessor := range predecessors {
		t.AddAssertion(assert.ResourceShowOutputValueSet(fmt.Sprintf("predecessors.%d", i), predecessor.FullyQualifiedName()))
	}
	return t
}

// HasTaskRelations asserts the single task_relations object: its predecessor
// list (count and per-index FQNs) and, only when set to a non-empty
// identifier, the finalizer and finalized_root_task fields.
func (t *TaskShowOutputAssert) HasTaskRelations(expected sdk.TaskRelations) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.#", "1"))
	t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.predecessors.#", strconv.Itoa(len(expected.Predecessors))))
	for i, predecessor := range expected.Predecessors {
		t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet(fmt.Sprintf("task_relations.0.predecessors.%d", i), predecessor.FullyQualifiedName()))
	}
	if expected.FinalizerTask != nil && len(expected.FinalizerTask.Name()) > 0 {
		t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalizer", expected.FinalizerTask.FullyQualifiedName()))
	}
	if expected.FinalizedRootTask != nil && len(expected.FinalizedRootTask.Name()) > 0 {
		t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("task_relations.0.finalized_root_task", expected.FinalizedRootTask.FullyQualifiedName()))
	}
	return t
}

// HasNoSchedule asserts schedule is set to the empty string (not merely absent).
func (t *TaskShowOutputAssert) HasNoSchedule() *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("schedule", ""))
	return t
}

// HasScheduleMinutes asserts an interval schedule of the form "<n> MINUTE".
func (t *TaskShowOutputAssert) HasScheduleMinutes(minutes int) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("schedule", fmt.Sprintf("%d MINUTE", minutes)))
	return t
}

// HasScheduleCron asserts a cron schedule of the form "USING CRON <expr>".
func (t *TaskShowOutputAssert) HasScheduleCron(cron string) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("schedule", fmt.Sprintf("USING CRON %s", cron)))
	return t
}

// ----- pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert/task_show_output_gen.go -----

// Code generated by assertions generator; DO NOT EDIT.

package resourceshowoutputassert

import (
	"testing"

	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert"
	"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk"
)

// to ensure sdk package is used
var _ = sdk.Object{}

// TaskShowOutputAssert builds assertions over the show_output block of a task resource.
type TaskShowOutputAssert struct {
	*assert.ResourceAssert
}

// TaskShowOutput anchors show_output assertions at the given resource address.
func TaskShowOutput(t *testing.T, name string) *TaskShowOutputAssert {
	t.Helper()

	task := TaskShowOutputAssert{
		ResourceAssert: assert.NewResourceAssert(name, "show_output"),
	}
	task.AddAssertion(assert.ValueSet("show_output.#", "1"))
	return &task
}

// ImportedTaskShowOutput anchors show_output assertions at an imported resource id.
func ImportedTaskShowOutput(t *testing.T, id string) *TaskShowOutputAssert {
	t.Helper()

	task := TaskShowOutputAssert{
		ResourceAssert: assert.NewImportedResourceAssert(id, "show_output"),
	}
	task.AddAssertion(assert.ValueSet("show_output.#", "1"))
	return &task
}

////////////////////////////
// Attribute value checks //
////////////////////////////

func (t *TaskShowOutputAssert) HasCreatedOn(expected string) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("created_on", expected))
	return t
}

func (t *TaskShowOutputAssert) HasName(expected string) *TaskShowOutputAssert {
	t.AddAssertion(assert.ResourceShowOutputValueSet("name", expected))
	return t
}
// Generated show_output value assertions for the snowflake_task resource.
// Each method pins one SHOW TASKS column to an expected value and returns the
// receiver for chaining.
// NOTE(review): reconstructed from a line-wrapped patch; bodies are
// token-identical to the original, only layout and comments were adjusted.

func (t *TaskShowOutputAssert) HasId(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("id", expected)); return t }
func (t *TaskShowOutputAssert) HasDatabaseName(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("database_name", expected)); return t }
func (t *TaskShowOutputAssert) HasSchemaName(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("schema_name", expected)); return t }
func (t *TaskShowOutputAssert) HasOwner(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("owner", expected)); return t }
func (t *TaskShowOutputAssert) HasComment(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("comment", expected)); return t }

// HasWarehouse compares by the identifier's bare name, not its fully qualified form.
func (t *TaskShowOutputAssert) HasWarehouse(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("warehouse", expected.Name())); return t }
func (t *TaskShowOutputAssert) HasSchedule(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("schedule", expected)); return t }
func (t *TaskShowOutputAssert) HasState(expected sdk.TaskState) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("state", expected)); return t }
func (t *TaskShowOutputAssert) HasDefinition(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("definition", expected)); return t }
func (t *TaskShowOutputAssert) HasCondition(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("condition", expected)); return t }
func (t *TaskShowOutputAssert) HasAllowOverlappingExecution(expected bool) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputBoolValueSet("allow_overlapping_execution", expected)); return t }

// HasErrorIntegration compares by the identifier's bare name.
func (t *TaskShowOutputAssert) HasErrorIntegration(expected sdk.AccountObjectIdentifier) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputStringUnderlyingValueSet("error_integration", expected.Name())); return t }
func (t *TaskShowOutputAssert) HasLastCommittedOn(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("last_committed_on", expected)); return t }
func (t *TaskShowOutputAssert) HasLastSuspendedOn(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_on", expected)); return t }
func (t *TaskShowOutputAssert) HasOwnerRoleType(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("owner_role_type", expected)); return t }
func (t *TaskShowOutputAssert) HasConfig(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("config", expected)); return t }
func (t *TaskShowOutputAssert) HasBudget(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("budget", expected)); return t }
func (t *TaskShowOutputAssert) HasLastSuspendedReason(expected string) *TaskShowOutputAssert { t.AddAssertion(assert.ResourceShowOutputValueSet("last_suspended_reason", expected)); return t }
+func ConfigVariablesFromModels(t *testing.T, variableName string, models ...ResourceModel) tfconfig.Variables { + t.Helper() + allVariables := make([]tfconfig.Variable, 0) + for _, model := range models { + allVariables = append(allVariables, tfconfig.ObjectVariable(ConfigVariablesFromModel(t, model))) + } + return tfconfig.Variables{ + variableName: tfconfig.ListVariable(allVariables...), + } +} + type nullVariable struct{} // MarshalJSON returns the JSON encoding of nullVariable. diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go new file mode 100644 index 0000000000..a61ea00f0c --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_ext.go @@ -0,0 +1,87 @@ +package model + +import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + tfconfig "github.com/hashicorp/terraform-plugin-testing/config" +) + +func TaskWithId(resourceName string, id sdk.SchemaObjectIdentifier, started bool, sqlStatement string) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} + t.WithDatabase(id.DatabaseName()) + t.WithSchema(id.SchemaName()) + t.WithName(id.Name()) + t.WithStarted(started) + t.WithSqlStatement(sqlStatement) + return t +} + +func (t *TaskModel) WithBinaryInputFormatEnum(binaryInputFormat sdk.BinaryInputFormat) *TaskModel { + t.BinaryInputFormat = tfconfig.StringVariable(string(binaryInputFormat)) + return t +} + +func (t *TaskModel) WithBinaryOutputFormatEnum(binaryOutputFormat sdk.BinaryOutputFormat) *TaskModel { + t.BinaryOutputFormat = tfconfig.StringVariable(string(binaryOutputFormat)) + return t +} + +func (t *TaskModel) WithClientTimestampTypeMappingEnum(clientTimestampTypeMapping sdk.ClientTimestampTypeMapping) *TaskModel { + 
t.ClientTimestampTypeMapping = tfconfig.StringVariable(string(clientTimestampTypeMapping)) + return t +} + +func (t *TaskModel) WithGeographyOutputFormatEnum(geographyOutputFormat sdk.GeographyOutputFormat) *TaskModel { + t.GeographyOutputFormat = tfconfig.StringVariable(string(geographyOutputFormat)) + return t +} + +func (t *TaskModel) WithGeometryOutputFormatEnum(geometryOutputFormat sdk.GeometryOutputFormat) *TaskModel { + t.GeometryOutputFormat = tfconfig.StringVariable(string(geometryOutputFormat)) + return t +} + +func (t *TaskModel) WithLogLevelEnum(logLevel sdk.LogLevel) *TaskModel { + t.LogLevel = tfconfig.StringVariable(string(logLevel)) + return t +} + +func (t *TaskModel) WithTimestampTypeMappingEnum(timestampTypeMapping sdk.TimestampTypeMapping) *TaskModel { + t.TimestampTypeMapping = tfconfig.StringVariable(string(timestampTypeMapping)) + return t +} + +func (t *TaskModel) WithTraceLevelEnum(traceLevel sdk.TraceLevel) *TaskModel { + t.TraceLevel = tfconfig.StringVariable(string(traceLevel)) + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevelEnum(transactionDefaultIsolationLevel sdk.TransactionDefaultIsolationLevel) *TaskModel { + t.TransactionDefaultIsolationLevel = tfconfig.StringVariable(string(transactionDefaultIsolationLevel)) + return t +} + +func (t *TaskModel) WithUnsupportedDdlActionEnum(unsupportedDdlAction sdk.UnsupportedDDLAction) *TaskModel { + t.UnsupportedDdlAction = tfconfig.StringVariable(string(unsupportedDdlAction)) + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSizeEnum(warehouseSize sdk.WarehouseSize) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = tfconfig.StringVariable(string(warehouseSize)) + return t +} + +func (t *TaskModel) WithScheduleMinutes(minutes int) *TaskModel { + t.Schedule = tfconfig.MapVariable(map[string]tfconfig.Variable{ + "minutes": tfconfig.IntegerVariable(minutes), + }) + return t +} + +func (t *TaskModel) WithScheduleCron(cron string) *TaskModel { + 
t.Schedule = tfconfig.MapVariable(map[string]tfconfig.Variable{ + "cron": tfconfig.StringVariable(cron), + }) + return t +} diff --git a/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go new file mode 100644 index 0000000000..306c525fad --- /dev/null +++ b/pkg/acceptance/bettertestspoc/config/model/task_model_gen.go @@ -0,0 +1,857 @@ +// Code generated by config model builder generator; DO NOT EDIT. + +package model + +import ( + tfconfig "github.com/hashicorp/terraform-plugin-testing/config" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" +) + +type TaskModel struct { + AbortDetachedQuery tfconfig.Variable `json:"abort_detached_query,omitempty"` + After tfconfig.Variable `json:"after,omitempty"` + AllowOverlappingExecution tfconfig.Variable `json:"allow_overlapping_execution,omitempty"` + Autocommit tfconfig.Variable `json:"autocommit,omitempty"` + BinaryInputFormat tfconfig.Variable `json:"binary_input_format,omitempty"` + BinaryOutputFormat tfconfig.Variable `json:"binary_output_format,omitempty"` + ClientMemoryLimit tfconfig.Variable `json:"client_memory_limit,omitempty"` + ClientMetadataRequestUseConnectionCtx tfconfig.Variable `json:"client_metadata_request_use_connection_ctx,omitempty"` + ClientPrefetchThreads tfconfig.Variable `json:"client_prefetch_threads,omitempty"` + ClientResultChunkSize tfconfig.Variable `json:"client_result_chunk_size,omitempty"` + ClientResultColumnCaseInsensitive tfconfig.Variable `json:"client_result_column_case_insensitive,omitempty"` + ClientSessionKeepAlive tfconfig.Variable `json:"client_session_keep_alive,omitempty"` + ClientSessionKeepAliveHeartbeatFrequency tfconfig.Variable `json:"client_session_keep_alive_heartbeat_frequency,omitempty"` + ClientTimestampTypeMapping tfconfig.Variable 
`json:"client_timestamp_type_mapping,omitempty"` + Comment tfconfig.Variable `json:"comment,omitempty"` + Config tfconfig.Variable `json:"config,omitempty"` + Database tfconfig.Variable `json:"database,omitempty"` + DateInputFormat tfconfig.Variable `json:"date_input_format,omitempty"` + DateOutputFormat tfconfig.Variable `json:"date_output_format,omitempty"` + EnableUnloadPhysicalTypeOptimization tfconfig.Variable `json:"enable_unload_physical_type_optimization,omitempty"` + ErrorIntegration tfconfig.Variable `json:"error_integration,omitempty"` + ErrorOnNondeterministicMerge tfconfig.Variable `json:"error_on_nondeterministic_merge,omitempty"` + ErrorOnNondeterministicUpdate tfconfig.Variable `json:"error_on_nondeterministic_update,omitempty"` + Finalize tfconfig.Variable `json:"finalize,omitempty"` + FullyQualifiedName tfconfig.Variable `json:"fully_qualified_name,omitempty"` + GeographyOutputFormat tfconfig.Variable `json:"geography_output_format,omitempty"` + GeometryOutputFormat tfconfig.Variable `json:"geometry_output_format,omitempty"` + JdbcTreatTimestampNtzAsUtc tfconfig.Variable `json:"jdbc_treat_timestamp_ntz_as_utc,omitempty"` + JdbcUseSessionTimezone tfconfig.Variable `json:"jdbc_use_session_timezone,omitempty"` + JsonIndent tfconfig.Variable `json:"json_indent,omitempty"` + LockTimeout tfconfig.Variable `json:"lock_timeout,omitempty"` + LogLevel tfconfig.Variable `json:"log_level,omitempty"` + MultiStatementCount tfconfig.Variable `json:"multi_statement_count,omitempty"` + Name tfconfig.Variable `json:"name,omitempty"` + NoorderSequenceAsDefault tfconfig.Variable `json:"noorder_sequence_as_default,omitempty"` + OdbcTreatDecimalAsInt tfconfig.Variable `json:"odbc_treat_decimal_as_int,omitempty"` + QueryTag tfconfig.Variable `json:"query_tag,omitempty"` + QuotedIdentifiersIgnoreCase tfconfig.Variable `json:"quoted_identifiers_ignore_case,omitempty"` + RowsPerResultset tfconfig.Variable `json:"rows_per_resultset,omitempty"` + S3StageVpceDnsName 
tfconfig.Variable `json:"s3_stage_vpce_dns_name,omitempty"` + Schedule tfconfig.Variable `json:"schedule,omitempty"` + Schema tfconfig.Variable `json:"schema,omitempty"` + SearchPath tfconfig.Variable `json:"search_path,omitempty"` + SqlStatement tfconfig.Variable `json:"sql_statement,omitempty"` + Started tfconfig.Variable `json:"started,omitempty"` + StatementQueuedTimeoutInSeconds tfconfig.Variable `json:"statement_queued_timeout_in_seconds,omitempty"` + StatementTimeoutInSeconds tfconfig.Variable `json:"statement_timeout_in_seconds,omitempty"` + StrictJsonOutput tfconfig.Variable `json:"strict_json_output,omitempty"` + SuspendTaskAfterNumFailures tfconfig.Variable `json:"suspend_task_after_num_failures,omitempty"` + TaskAutoRetryAttempts tfconfig.Variable `json:"task_auto_retry_attempts,omitempty"` + TimeInputFormat tfconfig.Variable `json:"time_input_format,omitempty"` + TimeOutputFormat tfconfig.Variable `json:"time_output_format,omitempty"` + TimestampDayIsAlways24h tfconfig.Variable `json:"timestamp_day_is_always_24h,omitempty"` + TimestampInputFormat tfconfig.Variable `json:"timestamp_input_format,omitempty"` + TimestampLtzOutputFormat tfconfig.Variable `json:"timestamp_ltz_output_format,omitempty"` + TimestampNtzOutputFormat tfconfig.Variable `json:"timestamp_ntz_output_format,omitempty"` + TimestampOutputFormat tfconfig.Variable `json:"timestamp_output_format,omitempty"` + TimestampTypeMapping tfconfig.Variable `json:"timestamp_type_mapping,omitempty"` + TimestampTzOutputFormat tfconfig.Variable `json:"timestamp_tz_output_format,omitempty"` + Timezone tfconfig.Variable `json:"timezone,omitempty"` + TraceLevel tfconfig.Variable `json:"trace_level,omitempty"` + TransactionAbortOnError tfconfig.Variable `json:"transaction_abort_on_error,omitempty"` + TransactionDefaultIsolationLevel tfconfig.Variable `json:"transaction_default_isolation_level,omitempty"` + TwoDigitCenturyStart tfconfig.Variable `json:"two_digit_century_start,omitempty"` + 
UnsupportedDdlAction tfconfig.Variable `json:"unsupported_ddl_action,omitempty"` + UseCachedResult tfconfig.Variable `json:"use_cached_result,omitempty"` + UserTaskManagedInitialWarehouseSize tfconfig.Variable `json:"user_task_managed_initial_warehouse_size,omitempty"` + UserTaskMinimumTriggerIntervalInSeconds tfconfig.Variable `json:"user_task_minimum_trigger_interval_in_seconds,omitempty"` + UserTaskTimeoutMs tfconfig.Variable `json:"user_task_timeout_ms,omitempty"` + Warehouse tfconfig.Variable `json:"warehouse,omitempty"` + WeekOfYearPolicy tfconfig.Variable `json:"week_of_year_policy,omitempty"` + WeekStart tfconfig.Variable `json:"week_start,omitempty"` + When tfconfig.Variable `json:"when,omitempty"` + + *config.ResourceModelMeta +} + +///////////////////////////////////////////////// +// Basic builders (resource name and required) // +///////////////////////////////////////////////// + +func Task( + resourceName string, + database string, + name string, + schema string, + sqlStatement string, + started bool, +) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.Meta(resourceName, resources.Task)} + t.WithDatabase(database) + t.WithName(name) + t.WithSchema(schema) + t.WithSqlStatement(sqlStatement) + t.WithStarted(started) + return t +} + +func TaskWithDefaultMeta( + database string, + name string, + schema string, + sqlStatement string, + started bool, +) *TaskModel { + t := &TaskModel{ResourceModelMeta: config.DefaultMeta(resources.Task)} + t.WithDatabase(database) + t.WithName(name) + t.WithSchema(schema) + t.WithSqlStatement(sqlStatement) + t.WithStarted(started) + return t +} + +///////////////////////////////// +// below all the proper values // +///////////////////////////////// + +func (t *TaskModel) WithAbortDetachedQuery(abortDetachedQuery bool) *TaskModel { + t.AbortDetachedQuery = tfconfig.BoolVariable(abortDetachedQuery) + return t +} + +// after attribute type is not yet supported, so WithAfter can't be generated + +func (t *TaskModel) 
WithAllowOverlappingExecution(allowOverlappingExecution string) *TaskModel { + t.AllowOverlappingExecution = tfconfig.StringVariable(allowOverlappingExecution) + return t +} + +func (t *TaskModel) WithAutocommit(autocommit bool) *TaskModel { + t.Autocommit = tfconfig.BoolVariable(autocommit) + return t +} + +func (t *TaskModel) WithBinaryInputFormat(binaryInputFormat string) *TaskModel { + t.BinaryInputFormat = tfconfig.StringVariable(binaryInputFormat) + return t +} + +func (t *TaskModel) WithBinaryOutputFormat(binaryOutputFormat string) *TaskModel { + t.BinaryOutputFormat = tfconfig.StringVariable(binaryOutputFormat) + return t +} + +func (t *TaskModel) WithClientMemoryLimit(clientMemoryLimit int) *TaskModel { + t.ClientMemoryLimit = tfconfig.IntegerVariable(clientMemoryLimit) + return t +} + +func (t *TaskModel) WithClientMetadataRequestUseConnectionCtx(clientMetadataRequestUseConnectionCtx bool) *TaskModel { + t.ClientMetadataRequestUseConnectionCtx = tfconfig.BoolVariable(clientMetadataRequestUseConnectionCtx) + return t +} + +func (t *TaskModel) WithClientPrefetchThreads(clientPrefetchThreads int) *TaskModel { + t.ClientPrefetchThreads = tfconfig.IntegerVariable(clientPrefetchThreads) + return t +} + +func (t *TaskModel) WithClientResultChunkSize(clientResultChunkSize int) *TaskModel { + t.ClientResultChunkSize = tfconfig.IntegerVariable(clientResultChunkSize) + return t +} + +func (t *TaskModel) WithClientResultColumnCaseInsensitive(clientResultColumnCaseInsensitive bool) *TaskModel { + t.ClientResultColumnCaseInsensitive = tfconfig.BoolVariable(clientResultColumnCaseInsensitive) + return t +} + +func (t *TaskModel) WithClientSessionKeepAlive(clientSessionKeepAlive bool) *TaskModel { + t.ClientSessionKeepAlive = tfconfig.BoolVariable(clientSessionKeepAlive) + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveHeartbeatFrequency(clientSessionKeepAliveHeartbeatFrequency int) *TaskModel { + t.ClientSessionKeepAliveHeartbeatFrequency = 
tfconfig.IntegerVariable(clientSessionKeepAliveHeartbeatFrequency) + return t +} + +func (t *TaskModel) WithClientTimestampTypeMapping(clientTimestampTypeMapping string) *TaskModel { + t.ClientTimestampTypeMapping = tfconfig.StringVariable(clientTimestampTypeMapping) + return t +} + +func (t *TaskModel) WithComment(comment string) *TaskModel { + t.Comment = tfconfig.StringVariable(comment) + return t +} + +func (t *TaskModel) WithConfig(config string) *TaskModel { + t.Config = tfconfig.StringVariable(config) + return t +} + +func (t *TaskModel) WithDatabase(database string) *TaskModel { + t.Database = tfconfig.StringVariable(database) + return t +} + +func (t *TaskModel) WithDateInputFormat(dateInputFormat string) *TaskModel { + t.DateInputFormat = tfconfig.StringVariable(dateInputFormat) + return t +} + +func (t *TaskModel) WithDateOutputFormat(dateOutputFormat string) *TaskModel { + t.DateOutputFormat = tfconfig.StringVariable(dateOutputFormat) + return t +} + +func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimization(enableUnloadPhysicalTypeOptimization bool) *TaskModel { + t.EnableUnloadPhysicalTypeOptimization = tfconfig.BoolVariable(enableUnloadPhysicalTypeOptimization) + return t +} + +func (t *TaskModel) WithErrorIntegration(errorIntegration string) *TaskModel { + t.ErrorIntegration = tfconfig.StringVariable(errorIntegration) + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicMerge(errorOnNondeterministicMerge bool) *TaskModel { + t.ErrorOnNondeterministicMerge = tfconfig.BoolVariable(errorOnNondeterministicMerge) + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicUpdate(errorOnNondeterministicUpdate bool) *TaskModel { + t.ErrorOnNondeterministicUpdate = tfconfig.BoolVariable(errorOnNondeterministicUpdate) + return t +} + +func (t *TaskModel) WithFinalize(finalize string) *TaskModel { + t.Finalize = tfconfig.StringVariable(finalize) + return t +} + +func (t *TaskModel) WithFullyQualifiedName(fullyQualifiedName string) *TaskModel 
{ + t.FullyQualifiedName = tfconfig.StringVariable(fullyQualifiedName) + return t +} + +func (t *TaskModel) WithGeographyOutputFormat(geographyOutputFormat string) *TaskModel { + t.GeographyOutputFormat = tfconfig.StringVariable(geographyOutputFormat) + return t +} + +func (t *TaskModel) WithGeometryOutputFormat(geometryOutputFormat string) *TaskModel { + t.GeometryOutputFormat = tfconfig.StringVariable(geometryOutputFormat) + return t +} + +func (t *TaskModel) WithJdbcTreatTimestampNtzAsUtc(jdbcTreatTimestampNtzAsUtc bool) *TaskModel { + t.JdbcTreatTimestampNtzAsUtc = tfconfig.BoolVariable(jdbcTreatTimestampNtzAsUtc) + return t +} + +func (t *TaskModel) WithJdbcUseSessionTimezone(jdbcUseSessionTimezone bool) *TaskModel { + t.JdbcUseSessionTimezone = tfconfig.BoolVariable(jdbcUseSessionTimezone) + return t +} + +func (t *TaskModel) WithJsonIndent(jsonIndent int) *TaskModel { + t.JsonIndent = tfconfig.IntegerVariable(jsonIndent) + return t +} + +func (t *TaskModel) WithLockTimeout(lockTimeout int) *TaskModel { + t.LockTimeout = tfconfig.IntegerVariable(lockTimeout) + return t +} + +func (t *TaskModel) WithLogLevel(logLevel string) *TaskModel { + t.LogLevel = tfconfig.StringVariable(logLevel) + return t +} + +func (t *TaskModel) WithMultiStatementCount(multiStatementCount int) *TaskModel { + t.MultiStatementCount = tfconfig.IntegerVariable(multiStatementCount) + return t +} + +func (t *TaskModel) WithName(name string) *TaskModel { + t.Name = tfconfig.StringVariable(name) + return t +} + +func (t *TaskModel) WithNoorderSequenceAsDefault(noorderSequenceAsDefault bool) *TaskModel { + t.NoorderSequenceAsDefault = tfconfig.BoolVariable(noorderSequenceAsDefault) + return t +} + +func (t *TaskModel) WithOdbcTreatDecimalAsInt(odbcTreatDecimalAsInt bool) *TaskModel { + t.OdbcTreatDecimalAsInt = tfconfig.BoolVariable(odbcTreatDecimalAsInt) + return t +} + +func (t *TaskModel) WithQueryTag(queryTag string) *TaskModel { + t.QueryTag = tfconfig.StringVariable(queryTag) + return t 
+} + +func (t *TaskModel) WithQuotedIdentifiersIgnoreCase(quotedIdentifiersIgnoreCase bool) *TaskModel { + t.QuotedIdentifiersIgnoreCase = tfconfig.BoolVariable(quotedIdentifiersIgnoreCase) + return t +} + +func (t *TaskModel) WithRowsPerResultset(rowsPerResultset int) *TaskModel { + t.RowsPerResultset = tfconfig.IntegerVariable(rowsPerResultset) + return t +} + +func (t *TaskModel) WithS3StageVpceDnsName(s3StageVpceDnsName string) *TaskModel { + t.S3StageVpceDnsName = tfconfig.StringVariable(s3StageVpceDnsName) + return t +} + +// schedule attribute type is not yet supported, so WithSchedule can't be generated + +func (t *TaskModel) WithSchema(schema string) *TaskModel { + t.Schema = tfconfig.StringVariable(schema) + return t +} + +func (t *TaskModel) WithSearchPath(searchPath string) *TaskModel { + t.SearchPath = tfconfig.StringVariable(searchPath) + return t +} + +func (t *TaskModel) WithSqlStatement(sqlStatement string) *TaskModel { + t.SqlStatement = tfconfig.StringVariable(sqlStatement) + return t +} + +func (t *TaskModel) WithStarted(started bool) *TaskModel { + t.Started = tfconfig.BoolVariable(started) + return t +} + +func (t *TaskModel) WithStatementQueuedTimeoutInSeconds(statementQueuedTimeoutInSeconds int) *TaskModel { + t.StatementQueuedTimeoutInSeconds = tfconfig.IntegerVariable(statementQueuedTimeoutInSeconds) + return t +} + +func (t *TaskModel) WithStatementTimeoutInSeconds(statementTimeoutInSeconds int) *TaskModel { + t.StatementTimeoutInSeconds = tfconfig.IntegerVariable(statementTimeoutInSeconds) + return t +} + +func (t *TaskModel) WithStrictJsonOutput(strictJsonOutput bool) *TaskModel { + t.StrictJsonOutput = tfconfig.BoolVariable(strictJsonOutput) + return t +} + +func (t *TaskModel) WithSuspendTaskAfterNumFailures(suspendTaskAfterNumFailures int) *TaskModel { + t.SuspendTaskAfterNumFailures = tfconfig.IntegerVariable(suspendTaskAfterNumFailures) + return t +} + +func (t *TaskModel) WithTaskAutoRetryAttempts(taskAutoRetryAttempts int) 
*TaskModel { + t.TaskAutoRetryAttempts = tfconfig.IntegerVariable(taskAutoRetryAttempts) + return t +} + +func (t *TaskModel) WithTimeInputFormat(timeInputFormat string) *TaskModel { + t.TimeInputFormat = tfconfig.StringVariable(timeInputFormat) + return t +} + +func (t *TaskModel) WithTimeOutputFormat(timeOutputFormat string) *TaskModel { + t.TimeOutputFormat = tfconfig.StringVariable(timeOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampDayIsAlways24h(timestampDayIsAlways24h bool) *TaskModel { + t.TimestampDayIsAlways24h = tfconfig.BoolVariable(timestampDayIsAlways24h) + return t +} + +func (t *TaskModel) WithTimestampInputFormat(timestampInputFormat string) *TaskModel { + t.TimestampInputFormat = tfconfig.StringVariable(timestampInputFormat) + return t +} + +func (t *TaskModel) WithTimestampLtzOutputFormat(timestampLtzOutputFormat string) *TaskModel { + t.TimestampLtzOutputFormat = tfconfig.StringVariable(timestampLtzOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampNtzOutputFormat(timestampNtzOutputFormat string) *TaskModel { + t.TimestampNtzOutputFormat = tfconfig.StringVariable(timestampNtzOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampOutputFormat(timestampOutputFormat string) *TaskModel { + t.TimestampOutputFormat = tfconfig.StringVariable(timestampOutputFormat) + return t +} + +func (t *TaskModel) WithTimestampTypeMapping(timestampTypeMapping string) *TaskModel { + t.TimestampTypeMapping = tfconfig.StringVariable(timestampTypeMapping) + return t +} + +func (t *TaskModel) WithTimestampTzOutputFormat(timestampTzOutputFormat string) *TaskModel { + t.TimestampTzOutputFormat = tfconfig.StringVariable(timestampTzOutputFormat) + return t +} + +func (t *TaskModel) WithTimezone(timezone string) *TaskModel { + t.Timezone = tfconfig.StringVariable(timezone) + return t +} + +func (t *TaskModel) WithTraceLevel(traceLevel string) *TaskModel { + t.TraceLevel = tfconfig.StringVariable(traceLevel) + return t +} + +func (t 
*TaskModel) WithTransactionAbortOnError(transactionAbortOnError bool) *TaskModel { + t.TransactionAbortOnError = tfconfig.BoolVariable(transactionAbortOnError) + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevel(transactionDefaultIsolationLevel string) *TaskModel { + t.TransactionDefaultIsolationLevel = tfconfig.StringVariable(transactionDefaultIsolationLevel) + return t +} + +func (t *TaskModel) WithTwoDigitCenturyStart(twoDigitCenturyStart int) *TaskModel { + t.TwoDigitCenturyStart = tfconfig.IntegerVariable(twoDigitCenturyStart) + return t +} + +func (t *TaskModel) WithUnsupportedDdlAction(unsupportedDdlAction string) *TaskModel { + t.UnsupportedDdlAction = tfconfig.StringVariable(unsupportedDdlAction) + return t +} + +func (t *TaskModel) WithUseCachedResult(useCachedResult bool) *TaskModel { + t.UseCachedResult = tfconfig.BoolVariable(useCachedResult) + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSize(userTaskManagedInitialWarehouseSize string) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = tfconfig.StringVariable(userTaskManagedInitialWarehouseSize) + return t +} + +func (t *TaskModel) WithUserTaskMinimumTriggerIntervalInSeconds(userTaskMinimumTriggerIntervalInSeconds int) *TaskModel { + t.UserTaskMinimumTriggerIntervalInSeconds = tfconfig.IntegerVariable(userTaskMinimumTriggerIntervalInSeconds) + return t +} + +func (t *TaskModel) WithUserTaskTimeoutMs(userTaskTimeoutMs int) *TaskModel { + t.UserTaskTimeoutMs = tfconfig.IntegerVariable(userTaskTimeoutMs) + return t +} + +func (t *TaskModel) WithWarehouse(warehouse string) *TaskModel { + t.Warehouse = tfconfig.StringVariable(warehouse) + return t +} + +func (t *TaskModel) WithWeekOfYearPolicy(weekOfYearPolicy int) *TaskModel { + t.WeekOfYearPolicy = tfconfig.IntegerVariable(weekOfYearPolicy) + return t +} + +func (t *TaskModel) WithWeekStart(weekStart int) *TaskModel { + t.WeekStart = tfconfig.IntegerVariable(weekStart) + return t +} + +func (t 
*TaskModel) WithWhen(when string) *TaskModel { + t.When = tfconfig.StringVariable(when) + return t +} + +////////////////////////////////////////// +// below it's possible to set any value // +////////////////////////////////////////// + +func (t *TaskModel) WithAbortDetachedQueryValue(value tfconfig.Variable) *TaskModel { + t.AbortDetachedQuery = value + return t +} + +func (t *TaskModel) WithAfterValue(value tfconfig.Variable) *TaskModel { + t.After = value + return t +} + +func (t *TaskModel) WithAllowOverlappingExecutionValue(value tfconfig.Variable) *TaskModel { + t.AllowOverlappingExecution = value + return t +} + +func (t *TaskModel) WithAutocommitValue(value tfconfig.Variable) *TaskModel { + t.Autocommit = value + return t +} + +func (t *TaskModel) WithBinaryInputFormatValue(value tfconfig.Variable) *TaskModel { + t.BinaryInputFormat = value + return t +} + +func (t *TaskModel) WithBinaryOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.BinaryOutputFormat = value + return t +} + +func (t *TaskModel) WithClientMemoryLimitValue(value tfconfig.Variable) *TaskModel { + t.ClientMemoryLimit = value + return t +} + +func (t *TaskModel) WithClientMetadataRequestUseConnectionCtxValue(value tfconfig.Variable) *TaskModel { + t.ClientMetadataRequestUseConnectionCtx = value + return t +} + +func (t *TaskModel) WithClientPrefetchThreadsValue(value tfconfig.Variable) *TaskModel { + t.ClientPrefetchThreads = value + return t +} + +func (t *TaskModel) WithClientResultChunkSizeValue(value tfconfig.Variable) *TaskModel { + t.ClientResultChunkSize = value + return t +} + +func (t *TaskModel) WithClientResultColumnCaseInsensitiveValue(value tfconfig.Variable) *TaskModel { + t.ClientResultColumnCaseInsensitive = value + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveValue(value tfconfig.Variable) *TaskModel { + t.ClientSessionKeepAlive = value + return t +} + +func (t *TaskModel) WithClientSessionKeepAliveHeartbeatFrequencyValue(value tfconfig.Variable) 
*TaskModel { + t.ClientSessionKeepAliveHeartbeatFrequency = value + return t +} + +func (t *TaskModel) WithClientTimestampTypeMappingValue(value tfconfig.Variable) *TaskModel { + t.ClientTimestampTypeMapping = value + return t +} + +func (t *TaskModel) WithCommentValue(value tfconfig.Variable) *TaskModel { + t.Comment = value + return t +} + +func (t *TaskModel) WithConfigValue(value tfconfig.Variable) *TaskModel { + t.Config = value + return t +} + +func (t *TaskModel) WithDatabaseValue(value tfconfig.Variable) *TaskModel { + t.Database = value + return t +} + +func (t *TaskModel) WithDateInputFormatValue(value tfconfig.Variable) *TaskModel { + t.DateInputFormat = value + return t +} + +func (t *TaskModel) WithDateOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.DateOutputFormat = value + return t +} + +func (t *TaskModel) WithEnableUnloadPhysicalTypeOptimizationValue(value tfconfig.Variable) *TaskModel { + t.EnableUnloadPhysicalTypeOptimization = value + return t +} + +func (t *TaskModel) WithErrorIntegrationValue(value tfconfig.Variable) *TaskModel { + t.ErrorIntegration = value + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicMergeValue(value tfconfig.Variable) *TaskModel { + t.ErrorOnNondeterministicMerge = value + return t +} + +func (t *TaskModel) WithErrorOnNondeterministicUpdateValue(value tfconfig.Variable) *TaskModel { + t.ErrorOnNondeterministicUpdate = value + return t +} + +func (t *TaskModel) WithFinalizeValue(value tfconfig.Variable) *TaskModel { + t.Finalize = value + return t +} + +func (t *TaskModel) WithFullyQualifiedNameValue(value tfconfig.Variable) *TaskModel { + t.FullyQualifiedName = value + return t +} + +func (t *TaskModel) WithGeographyOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.GeographyOutputFormat = value + return t +} + +func (t *TaskModel) WithGeometryOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.GeometryOutputFormat = value + return t +} + +func (t *TaskModel) 
WithJdbcTreatTimestampNtzAsUtcValue(value tfconfig.Variable) *TaskModel { + t.JdbcTreatTimestampNtzAsUtc = value + return t +} + +func (t *TaskModel) WithJdbcUseSessionTimezoneValue(value tfconfig.Variable) *TaskModel { + t.JdbcUseSessionTimezone = value + return t +} + +func (t *TaskModel) WithJsonIndentValue(value tfconfig.Variable) *TaskModel { + t.JsonIndent = value + return t +} + +func (t *TaskModel) WithLockTimeoutValue(value tfconfig.Variable) *TaskModel { + t.LockTimeout = value + return t +} + +func (t *TaskModel) WithLogLevelValue(value tfconfig.Variable) *TaskModel { + t.LogLevel = value + return t +} + +func (t *TaskModel) WithMultiStatementCountValue(value tfconfig.Variable) *TaskModel { + t.MultiStatementCount = value + return t +} + +func (t *TaskModel) WithNameValue(value tfconfig.Variable) *TaskModel { + t.Name = value + return t +} + +func (t *TaskModel) WithNoorderSequenceAsDefaultValue(value tfconfig.Variable) *TaskModel { + t.NoorderSequenceAsDefault = value + return t +} + +func (t *TaskModel) WithOdbcTreatDecimalAsIntValue(value tfconfig.Variable) *TaskModel { + t.OdbcTreatDecimalAsInt = value + return t +} + +func (t *TaskModel) WithQueryTagValue(value tfconfig.Variable) *TaskModel { + t.QueryTag = value + return t +} + +func (t *TaskModel) WithQuotedIdentifiersIgnoreCaseValue(value tfconfig.Variable) *TaskModel { + t.QuotedIdentifiersIgnoreCase = value + return t +} + +func (t *TaskModel) WithRowsPerResultsetValue(value tfconfig.Variable) *TaskModel { + t.RowsPerResultset = value + return t +} + +func (t *TaskModel) WithS3StageVpceDnsNameValue(value tfconfig.Variable) *TaskModel { + t.S3StageVpceDnsName = value + return t +} + +func (t *TaskModel) WithScheduleValue(value tfconfig.Variable) *TaskModel { + t.Schedule = value + return t +} + +func (t *TaskModel) WithSchemaValue(value tfconfig.Variable) *TaskModel { + t.Schema = value + return t +} + +func (t *TaskModel) WithSearchPathValue(value tfconfig.Variable) *TaskModel { + t.SearchPath 
= value + return t +} + +func (t *TaskModel) WithSqlStatementValue(value tfconfig.Variable) *TaskModel { + t.SqlStatement = value + return t +} + +func (t *TaskModel) WithStartedValue(value tfconfig.Variable) *TaskModel { + t.Started = value + return t +} + +func (t *TaskModel) WithStatementQueuedTimeoutInSecondsValue(value tfconfig.Variable) *TaskModel { + t.StatementQueuedTimeoutInSeconds = value + return t +} + +func (t *TaskModel) WithStatementTimeoutInSecondsValue(value tfconfig.Variable) *TaskModel { + t.StatementTimeoutInSeconds = value + return t +} + +func (t *TaskModel) WithStrictJsonOutputValue(value tfconfig.Variable) *TaskModel { + t.StrictJsonOutput = value + return t +} + +func (t *TaskModel) WithSuspendTaskAfterNumFailuresValue(value tfconfig.Variable) *TaskModel { + t.SuspendTaskAfterNumFailures = value + return t +} + +func (t *TaskModel) WithTaskAutoRetryAttemptsValue(value tfconfig.Variable) *TaskModel { + t.TaskAutoRetryAttempts = value + return t +} + +func (t *TaskModel) WithTimeInputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimeInputFormat = value + return t +} + +func (t *TaskModel) WithTimeOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimeOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampDayIsAlways24hValue(value tfconfig.Variable) *TaskModel { + t.TimestampDayIsAlways24h = value + return t +} + +func (t *TaskModel) WithTimestampInputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampInputFormat = value + return t +} + +func (t *TaskModel) WithTimestampLtzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampLtzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampNtzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampNtzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimestampOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampOutputFormat = value + return t +} + +func (t *TaskModel) 
WithTimestampTypeMappingValue(value tfconfig.Variable) *TaskModel { + t.TimestampTypeMapping = value + return t +} + +func (t *TaskModel) WithTimestampTzOutputFormatValue(value tfconfig.Variable) *TaskModel { + t.TimestampTzOutputFormat = value + return t +} + +func (t *TaskModel) WithTimezoneValue(value tfconfig.Variable) *TaskModel { + t.Timezone = value + return t +} + +func (t *TaskModel) WithTraceLevelValue(value tfconfig.Variable) *TaskModel { + t.TraceLevel = value + return t +} + +func (t *TaskModel) WithTransactionAbortOnErrorValue(value tfconfig.Variable) *TaskModel { + t.TransactionAbortOnError = value + return t +} + +func (t *TaskModel) WithTransactionDefaultIsolationLevelValue(value tfconfig.Variable) *TaskModel { + t.TransactionDefaultIsolationLevel = value + return t +} + +func (t *TaskModel) WithTwoDigitCenturyStartValue(value tfconfig.Variable) *TaskModel { + t.TwoDigitCenturyStart = value + return t +} + +func (t *TaskModel) WithUnsupportedDdlActionValue(value tfconfig.Variable) *TaskModel { + t.UnsupportedDdlAction = value + return t +} + +func (t *TaskModel) WithUseCachedResultValue(value tfconfig.Variable) *TaskModel { + t.UseCachedResult = value + return t +} + +func (t *TaskModel) WithUserTaskManagedInitialWarehouseSizeValue(value tfconfig.Variable) *TaskModel { + t.UserTaskManagedInitialWarehouseSize = value + return t +} + +func (t *TaskModel) WithUserTaskMinimumTriggerIntervalInSecondsValue(value tfconfig.Variable) *TaskModel { + t.UserTaskMinimumTriggerIntervalInSeconds = value + return t +} + +func (t *TaskModel) WithUserTaskTimeoutMsValue(value tfconfig.Variable) *TaskModel { + t.UserTaskTimeoutMs = value + return t +} + +func (t *TaskModel) WithWarehouseValue(value tfconfig.Variable) *TaskModel { + t.Warehouse = value + return t +} + +func (t *TaskModel) WithWeekOfYearPolicyValue(value tfconfig.Variable) *TaskModel { + t.WeekOfYearPolicy = value + return t +} + +func (t *TaskModel) WithWeekStartValue(value tfconfig.Variable) 
*TaskModel { + t.WeekStart = value + return t +} + +func (t *TaskModel) WithWhenValue(value tfconfig.Variable) *TaskModel { + t.When = value + return t +} diff --git a/pkg/acceptance/helpers/ids_generator.go b/pkg/acceptance/helpers/ids_generator.go index 42e247e6d5..7cb9c767ec 100644 --- a/pkg/acceptance/helpers/ids_generator.go +++ b/pkg/acceptance/helpers/ids_generator.go @@ -81,6 +81,10 @@ func (c *IdsGenerator) RandomSchemaObjectIdentifierInSchema(schemaId sdk.Databas return sdk.NewSchemaObjectIdentifierInSchema(schemaId, c.Alpha()) } +func (c *IdsGenerator) RandomSchemaObjectIdentifierInSchemaWithPrefix(prefix string, schemaId sdk.DatabaseObjectIdentifier) sdk.SchemaObjectIdentifier { + return sdk.NewSchemaObjectIdentifierInSchema(schemaId, c.AlphaWithPrefix(prefix)) +} + func (c *IdsGenerator) RandomSchemaObjectIdentifierWithArgumentsOld(arguments ...sdk.DataType) sdk.SchemaObjectIdentifier { return sdk.NewSchemaObjectIdentifierWithArgumentsOld(c.SchemaId().DatabaseName(), c.SchemaId().Name(), c.Alpha(), arguments) } diff --git a/pkg/acceptance/helpers/notification_integration_client.go b/pkg/acceptance/helpers/notification_integration_client.go index b453156c07..e8686a234d 100644 --- a/pkg/acceptance/helpers/notification_integration_client.go +++ b/pkg/acceptance/helpers/notification_integration_client.go @@ -8,6 +8,9 @@ import ( "github.com/stretchr/testify/require" ) +// TODO [SNOW-1017580]: replace with real value +const gcpPubsubSubscriptionName = "projects/project-1234/subscriptions/sub2" + type NotificationIntegrationClient struct { context *TestClientContext ids *IdsGenerator @@ -24,6 +27,15 @@ func (c *NotificationIntegrationClient) client() sdk.NotificationIntegrations { return c.context.client.NotificationIntegrations } +func (c *NotificationIntegrationClient) CreateWithGcpPubSub(t *testing.T) (*sdk.NotificationIntegration, func()) { + t.Helper() + return c.CreateWithRequest(t, 
sdk.NewCreateNotificationIntegrationRequest(c.ids.RandomAccountObjectIdentifier(), true). + WithAutomatedDataLoadsParams(sdk.NewAutomatedDataLoadsParamsRequest(). + WithGoogleAutoParams(sdk.NewGoogleAutoParamsRequest(gcpPubsubSubscriptionName)), + ), + ) +} + func (c *NotificationIntegrationClient) Create(t *testing.T) (*sdk.NotificationIntegration, func()) { t.Helper() ctx := context.Background() @@ -43,6 +55,19 @@ func (c *NotificationIntegrationClient) Create(t *testing.T) (*sdk.NotificationI return integration, c.DropFunc(t, id) } +func (c *NotificationIntegrationClient) CreateWithRequest(t *testing.T, request *sdk.CreateNotificationIntegrationRequest) (*sdk.NotificationIntegration, func()) { + t.Helper() + ctx := context.Background() + + err := c.client().Create(ctx, request) + require.NoError(t, err) + + networkRule, err := c.client().ShowByID(ctx, request.GetName()) + require.NoError(t, err) + + return networkRule, c.DropFunc(t, request.GetName()) +} + func (c *NotificationIntegrationClient) DropFunc(t *testing.T, id sdk.AccountObjectIdentifier) func() { t.Helper() ctx := context.Background() diff --git a/pkg/acceptance/helpers/task_client.go b/pkg/acceptance/helpers/task_client.go index 968662e436..98592b5eec 100644 --- a/pkg/acceptance/helpers/task_client.go +++ b/pkg/acceptance/helpers/task_client.go @@ -61,6 +61,14 @@ func (c *TaskClient) CreateWithRequest(t *testing.T, request *sdk.CreateTaskRequ return task, c.DropFunc(t, id) } +func (c *TaskClient) Alter(t *testing.T, req *sdk.AlterTaskRequest) { + t.Helper() + ctx := context.Background() + + err := c.client().Alter(ctx, req) + require.NoError(t, err) +} + func (c *TaskClient) DropFunc(t *testing.T, id sdk.SchemaObjectIdentifier) func() { t.Helper() ctx := context.Background() diff --git a/pkg/datasources/common.go b/pkg/datasources/common.go index 6f8ab20169..24c68fcb85 100644 --- a/pkg/datasources/common.go +++ b/pkg/datasources/common.go @@ -1,6 +1,8 @@ package datasources import ( + "fmt" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -12,6 +14,37 @@ var likeSchema = &schema.Schema{ Description: "Filters the output with **case-insensitive** pattern, with support for SQL wildcard characters (`%` and `_`).", } +var inSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "IN clause to filter the list of objects", + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account": { + Type: schema.TypeBool, + Optional: true, + Description: "Returns records for the entire account.", + ExactlyOneOf: []string{"in.0.account", "in.0.database", "in.0.schema"}, + }, + "database": { + Type: schema.TypeString, + Optional: true, + Description: "Returns records for the current database in use or for a specified database.", + ExactlyOneOf: []string{"in.0.account", "in.0.database", "in.0.schema"}, + ValidateDiagFunc: resources.IsValidIdentifier[sdk.AccountObjectIdentifier](), + }, + "schema": { + Type: schema.TypeString, + Optional: true, + Description: "Returns records for the current schema in use or a specified schema. 
Use fully qualified name.", + ExactlyOneOf: []string{"in.0.account", "in.0.database", "in.0.schema"}, + ValidateDiagFunc: resources.IsValidIdentifier[sdk.DatabaseObjectIdentifier](), + }, + }, + }, +} + var extendedInSchema = &schema.Schema{ Type: schema.TypeList, Optional: true, @@ -114,6 +147,31 @@ func handleLimitFrom(d *schema.ResourceData, setField **sdk.LimitFrom) { } } +func handleIn(d *schema.ResourceData, setField **sdk.In) error { + if v, ok := d.GetOk("in"); ok { + in := v.([]any)[0].(map[string]any) + accountValue, okAccount := in["account"] + databaseValue, okDatabase := in["database"] + schemaValue, okSchema := in["schema"] + + switch { + case okAccount && accountValue.(bool): + *setField = &sdk.In{Account: sdk.Bool(true)} + case okDatabase && databaseValue.(string) != "": + *setField = &sdk.In{Database: sdk.NewAccountObjectIdentifier(databaseValue.(string))} + case okSchema && schemaValue.(string) != "": + schemaId, err := sdk.ParseDatabaseObjectIdentifier(schemaValue.(string)) + if err != nil { + return err + } + *setField = &sdk.In{Schema: schemaId} + default: + return fmt.Errorf("the `in` filtering field was set, but none of the subfields (account, database, schema) was specified") + } + } + return nil +} + func handleExtendedIn(d *schema.ResourceData, setField **sdk.ExtendedIn) error { if v, ok := d.GetOk("in"); ok { in := v.([]any)[0].(map[string]any) diff --git a/pkg/datasources/tasks.go b/pkg/datasources/tasks.go index 5af820fa7c..a43cad365d 100644 --- a/pkg/datasources/tasks.go +++ b/pkg/datasources/tasks.go @@ -2,53 +2,53 @@ package datasources import ( "context" - "fmt" - "log" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) var tasksSchema = map[string]*schema.Schema{ - "database": { - Type: schema.TypeString, - Required: true, - Description: "The database from which to return the schemas from.", + "with_parameters": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: "Runs SHOW PARAMETERS FOR TASK for each task returned by SHOW TASK and saves the output to the parameters field as a map. By default this value is set to true.", }, - "schema": { - Type: schema.TypeString, - Required: true, - Description: "The schema from which to return the tasks from.", + "like": likeSchema, + "in": extendedInSchema, + "starts_with": startsWithSchema, + "root_only": { + Type: schema.TypeBool, + Optional: true, + Description: "Filters the command output to return only root tasks (tasks with no predecessors).", }, + "limit": limitFromSchema, "tasks": { Type: schema.TypeList, Computed: true, - Description: "The tasks in the schema", + Description: "Holds the aggregated output of all task details queries.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Computed: true, + resources.ShowOutputAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Holds the output of SHOW TASKS.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskSchema, + }, }, - "database": { - Type: schema.TypeString, - Computed: true, - }, - "schema": { - Type: schema.TypeString, - Computed: true, - }, - "comment": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "warehouse": { - Type: schema.TypeString, - Optional: true, - Computed: true, + resources.ParametersAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Holds the output of SHOW PARAMETERS FOR TASK.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskParametersSchema, + }, }, }, }, @@ -57,39 +57,54 @@ var tasksSchema = map[string]*schema.Schema{ func Tasks() 
*schema.Resource { return &schema.Resource{ - Read: ReadTasks, - Schema: tasksSchema, + ReadContext: ReadTasks, + Schema: tasksSchema, + Description: "Data source used to get details of filtered tasks. Filtering is aligned with the current possibilities for [SHOW TASKS](https://docs.snowflake.com/en/sql-reference/sql/show-tasks) query. The results of SHOW and SHOW PARAMETERS IN are encapsulated in one output collection `tasks`.", } } -func ReadTasks(d *schema.ResourceData, meta interface{}) error { +func ReadTasks(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + req := sdk.NewShowTaskRequest() - databaseName := d.Get("database").(string) - schemaName := d.Get("schema").(string) + handleLike(d, &req.Like) + if err := handleExtendedIn(d, &req.In); err != nil { + return diag.FromErr(err) + } + handleStartsWith(d, &req.StartsWith) + if v, ok := d.GetOk("root_only"); ok && v.(bool) { + req.WithRootOnly(true) + } + handleLimitFrom(d, &req.Limit) - extractedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest().WithIn(sdk.In{Schema: sdk.NewDatabaseObjectIdentifier(databaseName, schemaName)})) + tasks, err := client.Tasks.Show(ctx, req) if err != nil { - // If not found, mark resource to be removed from state file during apply or refresh - log.Printf("[DEBUG] tasks in schema (%s) not found", d.Id()) - d.SetId("") - return nil + return diag.FromErr(err) } + d.SetId("tasks_read") + + flattenedTasks := make([]map[string]any, len(tasks)) + for i, task := range tasks { + task := task - tasks := make([]map[string]any, 0, len(extractedTasks)) - for _, task := range extractedTasks { - taskMap := map[string]any{} + var taskParameters []map[string]any + if d.Get("with_parameters").(bool) { + parameters, err := client.Tasks.ShowParameters(ctx, task.ID()) + if err != nil { + return diag.FromErr(err) + } + taskParameters = 
[]map[string]any{schemas.TaskParametersToSchema(parameters)} + } - taskMap["name"] = task.Name - taskMap["database"] = task.DatabaseName - taskMap["schema"] = task.SchemaName - taskMap["comment"] = task.Comment - taskMap["warehouse"] = task.Warehouse + flattenedTasks[i] = map[string]any{ + resources.ShowOutputAttributeName: []map[string]any{schemas.TaskToSchema(&task)}, + resources.ParametersAttributeName: taskParameters, + } + } - tasks = append(tasks, taskMap) + if err := d.Set("tasks", flattenedTasks); err != nil { + return diag.FromErr(err) } - d.SetId(fmt.Sprintf(`%v|%v`, databaseName, schemaName)) - return d.Set("tasks", tasks) + return nil } diff --git a/pkg/datasources/tasks_acceptance_test.go b/pkg/datasources/tasks_acceptance_test.go index 83ef718791..7358268599 100644 --- a/pkg/datasources/tasks_acceptance_test.go +++ b/pkg/datasources/tasks_acceptance_test.go @@ -1,76 +1,270 @@ package datasources_test import ( + "bytes" "fmt" + "strconv" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceparametersassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/tfversion" ) -func TestAcc_Tasks(t *testing.T) { - databaseName := acc.TestClient().Ids.Alpha() - schemaName := acc.TestClient().Ids.Alpha() - taskName := acc.TestClient().Ids.Alpha() +func TestAcc_Tasks_Like_RootTask(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + // Created to show LIKE 
is working + _, standaloneTaskCleanup := acc.TestClient().Task.Create(t) + t.Cleanup(standaloneTaskCleanup) + + createRootReq := sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifier(), "SELECT 1"). + WithSchedule("1 MINUTE"). + WithComment("some comment"). + WithAllowOverlappingExecution(true). + WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(acc.TestClient().Ids.WarehouseId())) + rootTask, rootTaskCleanup := acc.TestClient().Task.CreateWithRequest(t, createRootReq) + t.Cleanup(rootTaskCleanup) + + childTask, childTaskCleanup := acc.TestClient().Task.CreateWithAfter(t, rootTask.ID()) + t.Cleanup(childTaskCleanup) + resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, PreCheck: func() { acc.TestAccPreCheck(t) }, TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - CheckDestroy: nil, Steps: []resource.TestStep{ { - Config: tasks(databaseName, schemaName, taskName), + Config: taskDatasourceLikeRootOnly(rootTask.ID().Name(), true), + Check: assert.AssertThat(t, + assert.Check(resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "1")), + resourceshowoutputassert.TaskDatasourceShowOutput(t, "snowflake_tasks.test"). + HasName(rootTask.Name). + HasSchemaName(rootTask.SchemaName). + HasDatabaseName(rootTask.DatabaseName). + HasCreatedOnNotEmpty(). + HasIdNotEmpty(). + HasOwnerNotEmpty(). + HasComment("some comment"). + HasWarehouse(acc.TestClient().Ids.WarehouseId()). + HasSchedule("1 MINUTE"). + HasPredecessors(). + HasDefinition("SELECT 1"). + HasCondition(""). + HasAllowOverlappingExecution(true). + HasErrorIntegrationEmpty(). + HasLastCommittedOn(""). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}). + HasLastSuspendedReason(""), + resourceparametersassert.TaskDatasourceParameters(t, "snowflake_tasks.test"). 
+ HasAllDefaults(), + ), + }, + { + Config: taskDatasourceLikeRootOnly(childTask.ID().Name(), true), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.snowflake_tasks.t", "database", databaseName), - resource.TestCheckResourceAttr("data.snowflake_tasks.t", "schema", schemaName), - resource.TestCheckResourceAttrSet("data.snowflake_tasks.t", "tasks.#"), - resource.TestCheckResourceAttr("data.snowflake_tasks.t", "tasks.#", "1"), - resource.TestCheckResourceAttr("data.snowflake_tasks.t", "tasks.0.name", taskName), + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "0"), ), }, }, }) } -func tasks(databaseName string, schemaName string, taskName string) string { - return fmt.Sprintf(` +func TestAcc_Tasks_In_StartsWith(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) - resource snowflake_database "test" { - name = "%v" - } + prefix := acc.TestClient().Ids.AlphaN(4) + + _, standaloneTaskCleanup := acc.TestClient().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifierWithPrefix(prefix), "SELECT 1")) + t.Cleanup(standaloneTaskCleanup) + + schema, schemaCleanup := acc.TestClient().Schema.CreateSchema(t) + t.Cleanup(schemaCleanup) + + standaloneTask2, standaloneTask2Cleanup := acc.TestClient().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifierInSchemaWithPrefix(prefix, schema.ID()), "SELECT 1")) + t.Cleanup(standaloneTask2Cleanup) + + _, standaloneTask3Cleanup := acc.TestClient().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifierInSchema(schema.ID()), "SELECT 1")) + t.Cleanup(standaloneTask3Cleanup) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + 
tfversion.RequireAbove(tfversion.Version1_5_0), + }, + Steps: []resource.TestStep{ + // On account with prefix + { + Config: taskDatasourceOnAccountStartsWith(prefix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "2"), + ), + }, + // On database with prefix + { + Config: taskDatasourceInDatabaseStartsWith(acc.TestClient().Ids.DatabaseId(), prefix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "2"), + ), + }, + // On schema with prefix + { + Config: taskDatasourceInSchemaStartsWith(schema.ID(), prefix), + Check: assert.AssertThat(t, + assert.Check(resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "1")), + resourceshowoutputassert.TaskDatasourceShowOutput(t, "snowflake_tasks.test"). + HasName(standaloneTask2.Name). + HasSchemaName(standaloneTask2.SchemaName). + HasDatabaseName(standaloneTask2.DatabaseName), + ), + }, + // On schema + { + Config: taskDatasourceInSchema(schema.ID()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "2"), + ), + }, + }, + }) +} + +func TestAcc_Tasks_Limit(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + prefix := acc.TestClient().Ids.AlphaN(4) + + _, standaloneTaskCleanup := acc.TestClient().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifierWithPrefix(prefix), "SELECT 1")) + t.Cleanup(standaloneTaskCleanup) - resource snowflake_schema "test"{ - name = "%v" - database = snowflake_database.test.name + _, standaloneTask2Cleanup := acc.TestClient().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(acc.TestClient().Ids.RandomSchemaObjectIdentifierWithPrefix(prefix), "SELECT 1")) + t.Cleanup(standaloneTask2Cleanup) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + 
PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + Steps: []resource.TestStep{ + // Limit with prefix + { + Config: taskDatasourceLimitWithPrefix(2, prefix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "2"), + ), + }, + // Only limit + { + Config: taskDatasourceLimit(1), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.snowflake_tasks.test", "tasks.#", "1"), + ), + }, + }, + }) +} + +func taskDatasourceLikeRootOnly(like string, rootOnly bool) string { + return taskDatasourceConfig(like, false, sdk.AccountObjectIdentifier{}, sdk.DatabaseObjectIdentifier{}, "", rootOnly, nil) +} + +func taskDatasourceOnAccountStartsWith(startsWith string) string { + return taskDatasourceConfig("", true, sdk.AccountObjectIdentifier{}, sdk.DatabaseObjectIdentifier{}, startsWith, false, nil) +} + +func taskDatasourceInDatabaseStartsWith(databaseId sdk.AccountObjectIdentifier, startsWith string) string { + return taskDatasourceConfig("", false, databaseId, sdk.DatabaseObjectIdentifier{}, startsWith, false, nil) +} + +func taskDatasourceInSchemaStartsWith(schemaId sdk.DatabaseObjectIdentifier, startsWith string) string { + return taskDatasourceConfig("", false, sdk.AccountObjectIdentifier{}, schemaId, startsWith, false, nil) +} + +func taskDatasourceInSchema(schemaId sdk.DatabaseObjectIdentifier) string { + return taskDatasourceConfig("", false, sdk.AccountObjectIdentifier{}, schemaId, "", false, nil) +} + +func taskDatasourceLimit(limit int) string { + return taskDatasourceConfig("", false, sdk.AccountObjectIdentifier{}, sdk.DatabaseObjectIdentifier{}, "", false, &sdk.LimitFrom{ + Rows: sdk.Int(limit), + }) +} + +func taskDatasourceLimitWithPrefix(limit int, prefix string) string { + return taskDatasourceConfig("", false, sdk.AccountObjectIdentifier{}, 
sdk.DatabaseObjectIdentifier{}, "", false, &sdk.LimitFrom{ + Rows: sdk.Int(limit), + From: sdk.String(prefix), + }) +} + +func taskDatasourceConfig(like string, onAccount bool, onDatabase sdk.AccountObjectIdentifier, onSchema sdk.DatabaseObjectIdentifier, startsWith string, rootOnly bool, limitFrom *sdk.LimitFrom) string { + var likeString string + if len(like) > 0 { + likeString = fmt.Sprintf("like = \"%s\"", like) } - resource snowflake_warehouse "test" { - name = snowflake_database.test.name - max_concurrency_level = 8 - statement_timeout_in_seconds = 172800 + var startsWithString string + if len(startsWith) > 0 { + startsWithString = fmt.Sprintf("starts_with = \"%s\"", startsWith) } - resource snowflake_task "test" { - name = "%v" - database = snowflake_database.test.name - schema = snowflake_schema.test.name - warehouse = snowflake_warehouse.test.name - sql_statement = "SHOW FUNCTIONS" - enabled = true - schedule = "15 MINUTES" - lifecycle { - ignore_changes = [session_parameters] + var inString string + if onAccount || (onDatabase != sdk.AccountObjectIdentifier{}) || (onSchema != sdk.DatabaseObjectIdentifier{}) { + inStringBuffer := new(bytes.Buffer) + inStringBuffer.WriteString("in {\n") + switch { + case onAccount: + inStringBuffer.WriteString("account = true\n") + case onDatabase != sdk.AccountObjectIdentifier{}: + inStringBuffer.WriteString(fmt.Sprintf("database = %s\n", strconv.Quote(onDatabase.FullyQualifiedName()))) + case onSchema != sdk.DatabaseObjectIdentifier{}: + inStringBuffer.WriteString(fmt.Sprintf("schema = %s\n", strconv.Quote(onSchema.FullyQualifiedName()))) } - } + inStringBuffer.WriteString("}\n") + inString = inStringBuffer.String() + } - data snowflake_tasks "t" { - database = snowflake_task.test.database - schema = snowflake_task.test.schema - depends_on = [snowflake_task.test] + var rootOnlyString string + if rootOnly { + rootOnlyString = fmt.Sprintf("root_only = %t", rootOnly) } - `, databaseName, schemaName, taskName) + + var 
limitFromString string + if limitFrom != nil { + inStringBuffer := new(bytes.Buffer) + inStringBuffer.WriteString("limit {\n") + inStringBuffer.WriteString(fmt.Sprintf("rows = %d\n", *limitFrom.Rows)) + if limitFrom.From != nil { + inStringBuffer.WriteString(fmt.Sprintf("from = \"%s\"\n", *limitFrom.From)) + } + inStringBuffer.WriteString("}\n") + limitFromString = inStringBuffer.String() + } + + return fmt.Sprintf(` + data "snowflake_tasks" "test" { + %[1]s + %[2]s + %[3]s + %[4]s + %[5]s + }`, likeString, inString, startsWithString, rootOnlyString, limitFromString) } diff --git a/pkg/internal/collections/collection_helpers.go b/pkg/internal/collections/collection_helpers.go index 80375cfba0..871b2f6dd6 100644 --- a/pkg/internal/collections/collection_helpers.go +++ b/pkg/internal/collections/collection_helpers.go @@ -23,6 +23,19 @@ func Map[T any, R any](collection []T, mapper func(T) R) []R { return result } +func MapErr[T any, R any](collection []T, mapper func(T) (R, error)) ([]R, error) { + result := make([]R, len(collection)) + errs := make([]error, 0) + for i, elem := range collection { + value, err := mapper(elem) + if err != nil { + errs = append(errs, err) + } + result[i] = value + } + return result, errors.Join(errs...) +} + // TODO(SNOW-1479870): Test // MergeMaps takes any number of maps (of the same type) and concatenates them. 
// In case of key collision, the value will be selected from the map that is provided diff --git a/pkg/internal/collections/collection_helpers_test.go b/pkg/internal/collections/collection_helpers_test.go index 87260e1113..c9ff5c8b86 100644 --- a/pkg/internal/collections/collection_helpers_test.go +++ b/pkg/internal/collections/collection_helpers_test.go @@ -1,9 +1,13 @@ package collections import ( + "errors" + "fmt" "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) @@ -58,3 +62,48 @@ func Test_Map(t *testing.T) { }) }) } + +func Test_MapErr(t *testing.T) { + t.Run("basic mapping", func(t *testing.T) { + stringSlice := []string{"1", "22", "333"} + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{1, 2, 3}) + }) + + t.Run("basic mapping - multiple errors", func(t *testing.T) { + stringSlice := []string{"1", "22", "333"} + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { + if s == "1" { + return -1, fmt.Errorf("error: 1") + } + if s == "22" { + return -1, fmt.Errorf("error: 22") + } + return len(s), nil + }) + assert.Equal(t, stringLenSlice, []int{-1, -1, 3}) + assert.ErrorContains(t, err, errors.Join(fmt.Errorf("error: 1"), fmt.Errorf("error: 22")).Error()) + }) + + t.Run("validation: empty slice", func(t *testing.T) { + stringSlice := make([]string, 0) + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{}) + }) + + t.Run("validation: nil slice", func(t *testing.T) { + var stringSlice []string = nil + stringLenSlice, err := MapErr(stringSlice, func(s string) (int, error) { return len(s), nil }) + assert.NoError(t, err) + assert.Equal(t, stringLenSlice, []int{}) + }) + + t.Run("validation: nil mapping function", func(t *testing.T) { + assert.PanicsWithError(t, "runtime error: 
invalid memory address or nil pointer dereference", func() { + stringSlice := []string{"1", "22", "333"} + _, _ = MapErr[string, int](stringSlice, nil) + }) + }) +} diff --git a/pkg/resources/grant_ownership_acceptance_test.go b/pkg/resources/grant_ownership_acceptance_test.go index e59975eb48..c7b3455596 100644 --- a/pkg/resources/grant_ownership_acceptance_test.go +++ b/pkg/resources/grant_ownership_acceptance_test.go @@ -1210,7 +1210,7 @@ func TestAcc_GrantOwnership_OnTask_Discussion2877(t *testing.T) { Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("snowflake_task.test", "name", taskId.Name()), resource.TestCheckResourceAttr("snowflake_task.child", "name", childId.Name()), - resource.TestCheckResourceAttr("snowflake_task.child", "after.0", taskId.Name()), + resource.TestCheckResourceAttr("snowflake_task.child", "after.0", taskId.FullyQualifiedName()), checkResourceOwnershipIsGranted(&sdk.ShowGrantOptions{ On: &sdk.ShowGrantsOn{ Object: &sdk.Object{ diff --git a/pkg/resources/resource_helpers_create.go b/pkg/resources/resource_helpers_create.go index 5ca92130bb..837fada163 100644 --- a/pkg/resources/resource_helpers_create.go +++ b/pkg/resources/resource_helpers_create.go @@ -62,6 +62,17 @@ func attributeDirectValueCreate[T any](d *schema.ResourceData, key string, creat return nil } +func attributeMappedValueCreate[T any](d *schema.ResourceData, key string, createField **T, mapper func(value any) (*T, error)) error { + if v, ok := d.GetOk(key); ok { + value, err := mapper(v) + if err != nil { + return err + } + *createField = value + } + return nil +} + func copyGrantsAttributeCreate(d *schema.ResourceData, isOrReplace bool, orReplaceField, copyGrantsField **bool) error { if isOrReplace { *orReplaceField = sdk.Bool(true) diff --git a/pkg/resources/resource_helpers_read.go b/pkg/resources/resource_helpers_read.go index b8f94a1e63..b3dcfcebf1 100644 --- a/pkg/resources/resource_helpers_read.go +++ b/pkg/resources/resource_helpers_read.go @@ 
-49,3 +49,17 @@ func setBooleanStringFromBoolProperty(d *schema.ResourceData, key string, proper } return nil } + +func attributeMappedValueReadOrDefault[T, R any](d *schema.ResourceData, key string, value *T, mapper func(*T) (R, error), defaultValue *R) error { + if value != nil { + mappedValue, err := mapper(value) + if err != nil { + return err + } + return d.Set(key, mappedValue) + } + if defaultValue != nil { + return d.Set(key, *defaultValue) + } + return d.Set(key, nil) +} diff --git a/pkg/resources/resource_helpers_update.go b/pkg/resources/resource_helpers_update.go index 602b2408b0..aca7856e66 100644 --- a/pkg/resources/resource_helpers_update.go +++ b/pkg/resources/resource_helpers_update.go @@ -104,3 +104,18 @@ func attributeDirectValueUpdate[T any](d *schema.ResourceData, key string, setFi } return nil } + +func attributeMappedValueUpdate[T, R any](d *schema.ResourceData, key string, setField **R, unsetField **bool, mapper func(T) (R, error)) error { + if d.HasChange(key) { + if v, ok := d.GetOk(key); ok { + mappedValue, err := mapper(v.(T)) + if err != nil { + return err + } + *setField = sdk.Pointer(mappedValue) + } else { + *unsetField = sdk.Bool(true) + } + } + return nil +} diff --git a/pkg/resources/resource_monitor.go b/pkg/resources/resource_monitor.go index 14e0a6557b..53c07c77e4 100644 --- a/pkg/resources/resource_monitor.go +++ b/pkg/resources/resource_monitor.go @@ -103,6 +103,7 @@ func ResourceMonitor() *schema.Resource { ReadContext: TrackingReadWrapper(resources.ResourceMonitor, ReadResourceMonitor(true)), UpdateContext: TrackingUpdateWrapper(resources.ResourceMonitor, UpdateResourceMonitor), DeleteContext: TrackingDeleteWrapper(resources.ResourceMonitor, DeleteResourceMonitor), + Description: "Resource used to manage resource monitor objects. 
For more information, check [resource monitor documentation](https://docs.snowflake.com/en/user-guide/resource-monitors).", Schema: resourceMonitorSchema, Importer: &schema.ResourceImporter{ diff --git a/pkg/resources/task.go b/pkg/resources/task.go index 325e6ee2a6..e7e6c940d3 100644 --- a/pkg/resources/task.go +++ b/pkg/resources/task.go @@ -2,12 +2,21 @@ package resources import ( "context" + "errors" "fmt" - "log" "slices" - "strconv" + "strings" "time" + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -15,698 +24,659 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) var taskSchema = map[string]*schema.Schema{ - "enabled": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "Specifies if the task should be started (enabled) after creation or should remain suspended (default).", - }, - "name": { - Type: schema.TypeString, - Required: true, - Description: "Specifies the identifier for the task; must be unique for the database and schema in which the task is created.", - ForceNew: true, - }, "database": { - Type: schema.TypeString, - Required: true, - Description: "The database in which to 
create the task.", - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("The database in which to create the task."), }, "schema": { - Type: schema.TypeString, - Required: true, - Description: "The schema in which to create the task.", - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("The schema in which to create the task."), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: suppressIdentifierQuoting, + Description: blocklistedCharactersFieldDescription("Specifies the identifier for the task; must be unique for the database and schema in which the task is created."), + }, + "started": { + Type: schema.TypeBool, + Required: true, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShowWithMapping("state", func(state any) any { + stateEnum, err := sdk.ToTaskState(state.(string)) + if err != nil { + return false + } + return stateEnum == sdk.TaskStateStarted + }), + Description: "Specifies if the task should be started or suspended.", }, "warehouse": { - Type: schema.TypeString, - Optional: true, - Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. (Conflicts with user_task_managed_initial_warehouse_size)", - ForceNew: false, - ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), + DiffSuppressFunc: suppressIdentifierQuoting, + Description: "The warehouse the task will use. Omit this parameter to use Snowflake-managed compute resources for runs of this task. Due to Snowflake limitations warehouse identifier can consist of only upper-cased letters. 
(Conflicts with user_task_managed_initial_warehouse_size)", + ConflictsWith: []string{"user_task_managed_initial_warehouse_size"}, }, "schedule": { - Type: schema.TypeString, + Type: schema.TypeList, Optional: true, - Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflict with after)", - ConflictsWith: []string{"after"}, + MaxItems: 1, + Description: "The schedule for periodically running the task. This can be a cron or interval in minutes. (Conflicts with finalize and after; when set, one of the sub-fields `minutes` or `using_cron` should be set)", + ConflictsWith: []string{"finalize", "after"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minutes": { + Type: schema.TypeInt, + Optional: true, + Description: "Specifies an interval (in minutes) of wait time inserted between runs of the task. Accepts positive integers only. (conflicts with `using_cron`)", + ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(1)), + ExactlyOneOf: []string{"schedule.0.minutes", "schedule.0.using_cron"}, + }, + "using_cron": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies a cron expression and time zone for periodically running the task. Supports a subset of standard cron utility syntax. (conflicts with `minutes`)", + DiffSuppressFunc: ignoreCaseSuppressFunc, + ExactlyOneOf: []string{"schedule.0.minutes", "schedule.0.using_cron"}, + }, + }, + }, }, - "session_parameters": { - Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Description: "Specifies session parameters to set for the session when the task runs. A task supports all session parameters.", + "config": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("config"), + Description: "Specifies a string representation of key value pairs that can be accessed by all tasks in the task graph. 
Must be in JSON format.", }, - "user_task_timeout_ms": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntBetween(0, 86400000), - Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds).", + "allow_overlapping_execution": { + Type: schema.TypeString, + Optional: true, + Default: BooleanDefault, + ValidateDiagFunc: validateBooleanString, + DiffSuppressFunc: IgnoreChangeToCurrentSnowflakeValueInShow("allow_overlapping_execution"), + Description: booleanStringFieldDescription("By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap."), }, - "suspend_task_after_num_failures": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validation.IntAtLeast(0), - Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. The default is 0 (no automatic suspension).", + "error_integration": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: IsValidIdentifier[sdk.AccountObjectIdentifier](), + DiffSuppressFunc: SuppressIfAny(suppressIdentifierQuoting, IgnoreChangeToCurrentSnowflakeValueInShow("error_integration")), + Description: blocklistedCharactersFieldDescription("Specifies the name of the notification integration used for error notifications."), }, "comment": { Type: schema.TypeString, Optional: true, Description: "Specifies a comment for the task.", }, + "finalize": { + Optional: true, + Type: schema.TypeString, + ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), + DiffSuppressFunc: SuppressIfAny( + suppressIdentifierQuoting, + IgnoreChangeToCurrentSnowflakeValueInShow("task_relations.0.finalized_root_task"), + ), + Description: blocklistedCharactersFieldDescription("Specifies the name of a root task that the finalizer task is associated with. 
Finalizer tasks run after all other tasks in the task graph run to completion. You can define the SQL of a finalizer task to handle notifications and the release and cleanup of resources that a task graph uses. For more information, see [Release and cleanup of task graphs](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-finalizer-task)."), + ConflictsWith: []string{"schedule", "after"}, + }, "after": { - Type: schema.TypeList, - Elem: &schema.Schema{Type: schema.TypeString}, - Optional: true, - Description: "Specifies one or more predecessor tasks for the current task. Use this option to create a DAG of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies.", - ConflictsWith: []string{"schedule"}, + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: IsValidIdentifier[sdk.SchemaObjectIdentifier](), + }, + DiffSuppressFunc: NormalizeAndCompareIdentifiersInSet("after"), + Description: blocklistedCharactersFieldDescription("Specifies one or more predecessor tasks for the current task. Use this option to [create a DAG](https://docs.snowflake.com/en/user-guide/tasks-graphs.html#label-task-dag) of tasks or add this task to an existing DAG. A DAG is a series of tasks that starts with a scheduled root task and is linked together by dependencies."), + ConflictsWith: []string{"schedule", "finalize"}, }, "when": { - Type: schema.TypeString, - Optional: true, - Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported.", + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("condition")), + Description: "Specifies a Boolean SQL expression; multiple conditions joined with AND/OR are supported. 
When a task is triggered (based on its SCHEDULE or AFTER setting), it validates the conditions of the expression to determine whether to execute. If the conditions of the expression are not met, then the task skips the current run. Any tasks that identify this task as a predecessor also don’t run.", }, "sql_statement": { Type: schema.TypeString, Required: true, + DiffSuppressFunc: SuppressIfAny(DiffSuppressStatement, IgnoreChangeToCurrentSnowflakeValueInShow("definition")), Description: "Any single SQL statement, or a call to a stored procedure, executed when the task runs.", - ForceNew: false, - DiffSuppressFunc: DiffSuppressStatement, }, - "user_task_managed_initial_warehouse_size": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "XSMALL", "X-SMALL", "SMALL", "MEDIUM", "LARGE", "XLARGE", "X-LARGE", "XXLARGE", "X2LARGE", "2X-LARGE", - }, true), - Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. 
(Conflicts with warehouse)", - ConflictsWith: []string{"warehouse"}, - }, - "error_integration": { - Type: schema.TypeString, - Optional: true, - Description: "Specifies the name of the notification integration used for error notifications.", + FullyQualifiedNameAttributeName: schemas.FullyQualifiedNameSchema, + ShowOutputAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Outputs the result of `SHOW TASKS` for the given task.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskSchema, + }, }, - "allow_overlapping_execution": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "By default, Snowflake ensures that only one instance of a particular DAG is allowed to run at a time, setting the parameter value to TRUE permits DAG runs to overlap.", + ParametersAttributeName: { + Type: schema.TypeList, + Computed: true, + Description: "Outputs the result of `SHOW PARAMETERS IN TASK` for the given task.", + Elem: &schema.Resource{ + Schema: schemas.ShowTaskParametersSchema, + }, }, - FullyQualifiedNameAttributeName: schemas.FullyQualifiedNameSchema, } -// difference find keys in 'a' but not in 'b'. -func difference(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k := range a { - if _, ok := b[k]; !ok { - diff[k] = a[k] - } - } - return diff -} - -// differentValue find keys present both in 'a' and 'b' but having different values. -func differentValue(a, b map[string]any) map[string]any { - diff := make(map[string]any) - for k, va := range a { - if vb, ok := b[k]; ok { - if vb != va { - diff[k] = vb - } - } - } - return diff -} - -// Task returns a pointer to the resource representing a task. 
func Task() *schema.Resource { return &schema.Resource{ - Create: CreateTask, - Read: ReadTask, - Update: UpdateTask, - Delete: DeleteTask, - CustomizeDiff: TrackingCustomDiffWrapper(resources.Task, customdiff.ForceNewIfChange("when", func(ctx context.Context, old, new, meta any) bool { - return old.(string) != "" && new.(string) == "" - })), - - Schema: taskSchema, + CreateContext: TrackingCreateWrapper(resources.Task, CreateTask), + UpdateContext: TrackingUpdateWrapper(resources.Task, UpdateTask), + ReadContext: TrackingReadWrapper(resources.Task, ReadTask(true)), + DeleteContext: TrackingDeleteWrapper(resources.Task, DeleteTask), + Description: "Resource used to manage task objects. For more information, check [task documentation](https://docs.snowflake.com/en/user-guide/tasks-intro).", + + Schema: collections.MergeMaps(taskSchema, taskParametersSchema), Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: TrackingImportWrapper(resources.Task, ImportTask), + }, + + CustomizeDiff: TrackingCustomDiffWrapper(resources.Task, customdiff.All( + ComputedIfAnyAttributeChanged(taskSchema, ShowOutputAttributeName, "name", "started", "warehouse", "user_task_managed_initial_warehouse_size", "schedule", "config", "allow_overlapping_execution", "error_integration", "comment", "finalize", "after", "when"), + ComputedIfAnyAttributeChanged(taskParametersSchema, ParametersAttributeName, collections.Map(sdk.AsStringList(sdk.AllTaskParameters), strings.ToLower)...), + ComputedIfAnyAttributeChanged(taskSchema, FullyQualifiedNameAttributeName, "name"), + taskParametersCustomDiff, + )), + + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Version: 0, + Type: cty.EmptyObject, + Upgrade: v098TaskStateUpgrader, + }, }, } } -// ReadTask implements schema.ReadFunc. 
-func ReadTask(d *schema.ResourceData, meta interface{}) error { +func ImportTask(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { + logging.DebugLogger.Printf("[DEBUG] Starting task import") client := meta.(*provider.Context).Client - ctx := context.Background() - - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - task, err := client.Tasks.ShowByID(ctx, taskId) + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) if err != nil { - // If not found, mark resource to be removed from state file during apply or refresh - log.Printf("[DEBUG] task (%s) not found", d.Id()) - d.SetId("") - return nil - } - if err := d.Set(FullyQualifiedNameAttributeName, taskId.FullyQualifiedName()); err != nil { - return err - } - - if err := d.Set("enabled", task.State == sdk.TaskStateStarted); err != nil { - return err - } - - if err := d.Set("name", task.Name); err != nil { - return err + return nil, err } - if err := d.Set("database", task.DatabaseName); err != nil { - return err - } - - if err := d.Set("schema", task.SchemaName); err != nil { - return err - } - - if err := d.Set("warehouse", task.Warehouse); err != nil { - return err - } - - if err := d.Set("schedule", task.Schedule); err != nil { - return err - } - - if err := d.Set("comment", task.Comment); err != nil { - return err - } - - if err := d.Set("allow_overlapping_execution", task.AllowOverlappingExecution); err != nil { - return err - } - - if err := d.Set("error_integration", task.ErrorIntegration); err != nil { - return err - } - - predecessors := make([]string, len(task.Predecessors)) - for i, p := range task.Predecessors { - predecessors[i] = p.Name() - } - if err := d.Set("after", predecessors); err != nil { - return err - } - - if err := d.Set("when", task.Condition); err != nil { - return err + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return nil, err } - if err := d.Set("sql_statement", task.Definition); err != nil { - return 
err + if _, err := ImportName[sdk.SchemaObjectIdentifier](context.Background(), d, nil); err != nil { + return nil, err } - opts := &sdk.ShowParametersOptions{In: &sdk.ParametersIn{Task: taskId}} - params, err := client.Parameters.ShowParameters(ctx, opts) - if err != nil { - return err + if err := d.Set("allow_overlapping_execution", booleanStringFromBool(task.AllowOverlappingExecution)); err != nil { + return nil, err } - if len(params) > 0 { - sessionParameters := make(map[string]any) - fieldParameters := map[string]interface{}{ - "user_task_managed_initial_warehouse_size": "", - } - - for _, param := range params { - if param.Level != "TASK" { - continue - } - switch param.Key { - case "USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE": - fieldParameters["user_task_managed_initial_warehouse_size"] = param.Value - case "USER_TASK_TIMEOUT_MS": - timeout, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return err - } - - fieldParameters["user_task_timeout_ms"] = timeout - case "SUSPEND_TASK_AFTER_NUM_FAILURES": - num, err := strconv.ParseInt(param.Value, 10, 64) - if err != nil { - return err - } - - fieldParameters["suspend_task_after_num_failures"] = num - default: - sessionParameters[param.Key] = param.Value - } - } - - if err := d.Set("session_parameters", sessionParameters); err != nil { - return err - } - - for key, value := range fieldParameters { - // lintignore:R001 - err = d.Set(key, value) - if err != nil { - return err - } - } - } - - return nil + return []*schema.ResourceData{d}, nil } -// CreateTask implements schema.CreateFunc. 
-func CreateTask(d *schema.ResourceData, meta interface{}) error { +func CreateTask(ctx context.Context, d *schema.ResourceData, meta any) (diags diag.Diagnostics) { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) name := d.Get("name").(string) + id := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) - sqlStatement := d.Get("sql_statement").(string) - - taskId := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) - createRequest := sdk.NewCreateTaskRequest(taskId, sqlStatement) + req := sdk.NewCreateTaskRequest(id, d.Get("sql_statement").(string)) + tasksToResume := make([]sdk.SchemaObjectIdentifier, 0) - // Set optionals - if v, ok := d.GetOk("warehouse"); ok { - warehouseId := sdk.NewAccountObjectIdentifier(v.(string)) - createRequest.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(warehouseId)) - } - - if v, ok := d.GetOk("user_task_managed_initial_warehouse_size"); ok { - size, err := sdk.ToWarehouseSize(v.(string)) + if errs := errors.Join( + attributeMappedValueCreate(d, "warehouse", &req.Warehouse, func(v any) (*sdk.CreateTaskWarehouseRequest, error) { + warehouseId, err := sdk.ParseAccountObjectIdentifier(v.(string)) + if err != nil { + return nil, err + } + return sdk.NewCreateTaskWarehouseRequest().WithWarehouse(warehouseId), nil + }), + attributeMappedValueCreate(d, "schedule", &req.Schedule, func(v any) (*string, error) { + if len(v.([]any)) > 0 { + if minutes, ok := d.GetOk("schedule.0.minutes"); ok { + return sdk.String(fmt.Sprintf("%d MINUTE", minutes)), nil + } + if cron, ok := d.GetOk("schedule.0.using_cron"); ok { + return sdk.String(fmt.Sprintf("USING CRON %s", cron)), nil + } + return nil, fmt.Errorf("when setting a schedule either minutes or using_cron field should be set") + } + return nil, nil + }), + stringAttributeCreate(d, "config", &req.Config), + booleanStringAttributeCreate(d, 
"allow_overlapping_execution", &req.AllowOverlappingExecution), + accountObjectIdentifierAttributeCreate(d, "error_integration", &req.ErrorIntegration), + stringAttributeCreate(d, "comment", &req.Comment), + stringAttributeCreate(d, "when", &req.When), + ); errs != nil { + return diag.FromErr(errs) + } + + if v, ok := d.GetOk("finalize"); ok { + rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) if err != nil { - return err + return diag.FromErr(err) } - createRequest.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) - } - - if v, ok := d.GetOk("schedule"); ok { - createRequest.WithSchedule(v.(string)) - } - if v, ok := d.GetOk("session_parameters"); ok { - sessionParameters, err := sdk.GetSessionParametersFrom(v.(map[string]any)) + rootTask, err := client.Tasks.ShowByID(ctx, rootTaskId) if err != nil { - return err + return diag.FromErr(err) } - createRequest.WithSessionParameters(*sessionParameters) - } - - if v, ok := d.GetOk("user_task_timeout_ms"); ok { - createRequest.WithUserTaskTimeoutMs(v.(int)) - } - - if v, ok := d.GetOk("suspend_task_after_num_failures"); ok { - createRequest.WithSuspendTaskAfterNumFailures(v.(int)) - } - - if v, ok := d.GetOk("comment"); ok { - createRequest.WithComment(v.(string)) - } - - if v, ok := d.GetOk("allow_overlapping_execution"); ok { - createRequest.WithAllowOverlappingExecution(v.(bool)) - } - if v, ok := d.GetOk("error_integration"); ok { - errorIntegrationId, err := sdk.ParseAccountObjectIdentifier(v.(string)) - if err != nil { - return err + if rootTask.IsStarted() { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTaskId).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + tasksToResume = append(tasksToResume, rootTaskId) } - createRequest.WithErrorNotificationIntegration(errorIntegrationId) + + req.WithFinalize(rootTaskId) } if v, ok := d.GetOk("after"); ok { - after := expandStringList(v.([]interface{})) + after 
:= expandStringList(v.(*schema.Set).List()) precedingTasks := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range after { - precedingTaskId := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, dep) - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, precedingTaskId, taskId) - defer func() { - if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) - } - }() + for _, parentTaskIdString := range after { + parentTaskId, err := sdk.ParseSchemaObjectIdentifier(parentTaskIdString) if err != nil { - return err + return diag.FromErr(err) } - - precedingTasks = append(precedingTasks, precedingTaskId) + resumeTasks, err := client.Tasks.SuspendRootTasks(ctx, parentTaskId, id) + tasksToResume = append(tasksToResume, resumeTasks...) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + precedingTasks = append(precedingTasks, parentTaskId) } - createRequest.WithAfter(precedingTasks) + req.WithAfter(precedingTasks) } - if v, ok := d.GetOk("when"); ok { - createRequest.WithWhen(v.(string)) + if parameterCreateDiags := handleTaskParametersCreate(d, req); len(parameterCreateDiags) > 0 { + return parameterCreateDiags } - if err := client.Tasks.Create(ctx, createRequest); err != nil { - return fmt.Errorf("error creating task %s err = %w", taskId.FullyQualifiedName(), err) + if err := client.Tasks.Create(ctx, req); err != nil { + return diag.FromErr(err) } - d.SetId(helpers.EncodeSnowflakeID(taskId)) + d.SetId(helpers.EncodeResourceIdentifier(id)) - enabled := d.Get("enabled").(bool) - if enabled { - if err := waitForTaskStart(ctx, client, taskId); err != nil { - log.Printf("[WARN] failed to resume task %s", name) + if d.Get("started").(bool) { + if err := waitForTaskStart(ctx, client, id); err != nil { + return diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "Failed to start the task", + Detail: fmt.Sprintf("Id: %s, err: %s", id.FullyQualifiedName(), err), + }, + } } + // 
Else case not handled, because tasks are created as suspended (https://docs.snowflake.com/en/sql-reference/sql/create-task; "important" section) } - return ReadTask(d, meta) + defer func() { + if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { + diags = append(diags, resumeTaskErrorDiag(id, "create", err)) + } + }() + + return append(diags, ReadTask(false)(ctx, d, meta)...) } -func waitForTaskStart(ctx context.Context, client *sdk.Client, id sdk.SchemaObjectIdentifier) error { - err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithResume(true)) +func UpdateTask(ctx context.Context, d *schema.ResourceData, meta any) (diags diag.Diagnostics) { + client := meta.(*provider.Context).Client + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) if err != nil { - return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err) + return diag.FromErr(err) } - return util.Retry(5, 5*time.Second, func() (error, bool) { - task, err := client.Tasks.ShowByID(ctx, id) - if err != nil { - return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err), false - } - if task.State != sdk.TaskStateStarted { - return nil, false - } - return nil, true - }) -} -// UpdateTask implements schema.UpdateFunc. 
-func UpdateTask(d *schema.ResourceData, meta interface{}) error { - client := meta.(*provider.Context).Client - ctx := context.Background() + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return diag.FromErr(err) + } - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) + tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, taskId, taskId) defer func() { if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) + diags = append(diags, resumeTaskErrorDiag(id, "create", err)) } }() + + if task.IsStarted() { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + + unset := sdk.NewTaskUnsetRequest() + set := sdk.NewTaskSetRequest() + + err = errors.Join( + attributeMappedValueUpdate(d, "user_task_managed_initial_warehouse_size", &set.UserTaskManagedInitialWarehouseSize, &unset.UserTaskManagedInitialWarehouseSize, sdk.ToWarehouseSize), + accountObjectIdentifierAttributeUpdate(d, "warehouse", &set.Warehouse, &unset.Warehouse), + stringAttributeUpdate(d, "config", &set.Config, &unset.Config), + booleanStringAttributeUpdate(d, "allow_overlapping_execution", &set.AllowOverlappingExecution, &unset.AllowOverlappingExecution), + accountObjectIdentifierAttributeUpdate(d, "error_integration", &set.ErrorIntegration, &unset.ErrorIntegration), + stringAttributeUpdate(d, "comment", &set.Comment, &unset.Comment), + ) if err != nil { - return err + return diag.FromErr(err) } - if d.HasChange("warehouse") { - newWarehouse := d.Get("warehouse") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newWarehouse == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithWarehouse(true)) + if d.HasChange("schedule") { + _, newSchedule := 
d.GetChange("schedule") + + if newSchedule != nil && len(newSchedule.([]any)) == 1 { + if _, newMinutes := d.GetChange("schedule.0.minutes"); newMinutes.(int) > 0 { + set.Schedule = sdk.String(fmt.Sprintf("%d MINUTE", newMinutes.(int))) + } + if _, newCron := d.GetChange("schedule.0.using_cron"); newCron.(string) != "" { + set.Schedule = sdk.String(fmt.Sprintf("USING CRON %s", newCron.(string))) + } } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithWarehouse(sdk.NewAccountObjectIdentifier(newWarehouse.(string)))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating warehouse on task %s err = %w", taskId.FullyQualifiedName(), err) + unset.Schedule = sdk.Bool(true) } } - if d.HasChange("user_task_managed_initial_warehouse_size") { - newSize := d.Get("user_task_managed_initial_warehouse_size") - warehouse := d.Get("warehouse") + if updateDiags := handleTaskParametersUpdate(d, set, unset); len(updateDiags) > 0 { + return updateDiags + } - if warehouse == "" && newSize != "" { - size, err := sdk.ToWarehouseSize(newSize.(string)) - if err != nil { - return err - } - alterRequest := sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithUserTaskManagedInitialWarehouseSize(size)) - err = client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating user_task_managed_initial_warehouse_size on task %s", taskId.FullyQualifiedName()) - } + if *unset != (sdk.TaskUnsetRequest{}) { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnset(*unset)); err != nil { + return diag.FromErr(err) } } - if d.HasChange("error_integration") { - newErrorIntegration := d.Get("error_integration") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newErrorIntegration == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithErrorIntegration(true)) + if d.HasChange("when") { + if v := d.Get("when"); v != "" { + if err := client.Tasks.Alter(ctx, 
sdk.NewAlterTaskRequest(id).WithModifyWhen(v.(string))); err != nil { + return diag.FromErr(err) + } } else { - newErrorIntegrationId, err := sdk.ParseAccountObjectIdentifier(newErrorIntegration.(string)) - if err != nil { - return err + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveWhen(true)); err != nil { + return diag.FromErr(err) } - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithErrorNotificationIntegration(newErrorIntegrationId)) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating error integration on task %s", taskId.FullyQualifiedName()) } } - if d.HasChange("after") { - // making changes to after require suspending the current task - // (the task will be brought up to the correct running state in the "enabled" check at the bottom of Update function). - err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)) - if err != nil { - return fmt.Errorf("error suspending task %s, err: %w", taskId.FullyQualifiedName(), err) + if d.HasChange("sql_statement") { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithModifyAs(d.Get("sql_statement").(string))); err != nil { + return diag.FromErr(err) } + } - o, n := d.GetChange("after") - oldAfter := expandStringList(o.([]interface{})) - newAfter := expandStringList(n.([]interface{})) + if d.HasChange("finalize") { + if v, ok := d.GetOk("finalize"); ok { + rootTaskId, err := sdk.ParseSchemaObjectIdentifier(v.(string)) + if err != nil { + return diag.FromErr(err) + } - if len(newAfter) > 0 { - // preemptively removing schedule because a task cannot have both after and schedule - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))); err != nil { - return fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName()) + rootTask, err := client.Tasks.ShowByID(ctx, rootTaskId) + if err != nil { + return diag.FromErr(err) } 
- } - // Remove old dependencies that are not in new dependencies - toRemove := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range oldAfter { - if !slices.Contains(newAfter, dep) { - toRemove = append(toRemove, sdk.NewSchemaObjectIdentifierInSchema(taskId.SchemaId(), dep)) + if rootTask.IsStarted() { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTaskId).WithSuspend(true)); err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } } - } - if len(toRemove) > 0 { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithRemoveAfter(toRemove)); err != nil { - return fmt.Errorf("error removing after dependencies from task %s", taskId.FullyQualifiedName()) + + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSetFinalize(rootTaskId)); err != nil { + return diag.FromErr(err) } - } - // Add new dependencies that are not in old dependencies - toAdd := make([]sdk.SchemaObjectIdentifier, 0) - for _, dep := range newAfter { - if !slices.Contains(oldAfter, dep) { - toAdd = append(toAdd, sdk.NewSchemaObjectIdentifierInSchema(taskId.SchemaId(), dep)) + if rootTask.IsStarted() && !slices.ContainsFunc(tasksToResume, func(identifier sdk.SchemaObjectIdentifier) bool { + return identifier.FullyQualifiedName() == rootTaskId.FullyQualifiedName() + }) { + tasksToResume = append(tasksToResume, rootTaskId) } - } - if len(toAdd) > 0 { - for _, depId := range toAdd { - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, depId, taskId) - defer func() { - if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) - } - }() - if err != nil { - return err + } else { + rootTask, err := client.Tasks.ShowByID(ctx, *task.TaskRelations.FinalizedRootTask) + if err != nil { + return diag.FromErr(err) + } + + if rootTask.IsStarted() { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(rootTask.ID()).WithSuspend(true)); err != nil { + return 
diag.FromErr(sdk.JoinErrors(err)) } } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithAddAfter(toAdd)); err != nil { - return fmt.Errorf("error adding after dependencies from task %s", taskId.FullyQualifiedName()) + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithUnsetFinalize(true)); err != nil { + return diag.FromErr(err) } - } - } - if d.HasChange("schedule") { - newSchedule := d.Get("schedule") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newSchedule == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithSchedule(newSchedule.(string))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating schedule on task %s", taskId.FullyQualifiedName()) + if rootTask.IsStarted() && !slices.ContainsFunc(tasksToResume, func(identifier sdk.SchemaObjectIdentifier) bool { + return identifier.FullyQualifiedName() == rootTask.ID().FullyQualifiedName() + }) { + tasksToResume = append(tasksToResume, rootTask.ID()) + } } } - if d.HasChange("user_task_timeout_ms") { - o, n := d.GetChange("user_task_timeout_ms") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if o.(int) > 0 && n.(int) == 0 { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithUserTaskTimeoutMs(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithUserTaskTimeoutMs(n.(int))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating user task timeout on task %s", taskId.FullyQualifiedName()) - } - } + if d.HasChange("after") { + oldAfter, newAfter := d.GetChange("after") + addedTasks, removedTasks := ListDiff( + expandStringList(oldAfter.(*schema.Set).List()), + expandStringList(newAfter.(*schema.Set).List()), + ) + + if len(addedTasks) > 0 { + addedTaskIds, err := collections.MapErr(addedTasks, sdk.ParseSchemaObjectIdentifier) + if err != nil { + return 
diag.FromErr(err) + } - if d.HasChange("suspend_task_after_num_failures") { - o, n := d.GetChange("suspend_task_after_num_failures") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if o.(int) > 0 && n.(int) == 0 { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithSuspendTaskAfterNumFailures(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithSuspendTaskAfterNumFailures(n.(int))) + for _, addedTaskId := range addedTaskIds { + addedTasksToResume, err := client.Tasks.SuspendRootTasks(ctx, addedTaskId, sdk.NewSchemaObjectIdentifier("", "", "")) + tasksToResume = append(tasksToResume, addedTasksToResume...) + if err != nil { + return diag.FromErr(sdk.JoinErrors(err)) + } + } + + err = client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithAddAfter(addedTaskIds)) + if err != nil { + return diag.FromErr(err) + } } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating suspend task after num failures on task %s", taskId.FullyQualifiedName()) + + if len(removedTasks) > 0 { + removedTaskIds, err := collections.MapErr(removedTasks, sdk.ParseSchemaObjectIdentifier) + if err != nil { + return diag.FromErr(err) + } + err = client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithRemoveAfter(removedTaskIds)) + if err != nil { + return diag.FromErr(err) + } } } - if d.HasChange("comment") { - newComment := d.Get("comment") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if newComment == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithComment(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithComment(newComment.(string))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating comment on task %s", taskId.FullyQualifiedName()) + if *set != (sdk.TaskSetRequest{}) { + if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithSet(*set)); err != nil { + return diag.FromErr(err) } } - if 
d.HasChange("allow_overlapping_execution") { - n := d.Get("allow_overlapping_execution") - alterRequest := sdk.NewAlterTaskRequest(taskId) - if n == "" { - alterRequest.WithUnset(*sdk.NewTaskUnsetRequest().WithAllowOverlappingExecution(true)) - } else { - alterRequest.WithSet(*sdk.NewTaskSetRequest().WithAllowOverlappingExecution(n.(bool))) - } - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating allow overlapping execution on task %s", taskId.FullyQualifiedName()) + if d.Get("started").(bool) { + if err := waitForTaskStart(ctx, client, id); err != nil { + return diag.FromErr(fmt.Errorf("failed to resume task %s, err = %w", id.FullyQualifiedName(), err)) } } + // We don't process the else case, because the task was already suspended at the beginning of the Update method. + tasksToResume = slices.DeleteFunc(tasksToResume, func(identifier sdk.SchemaObjectIdentifier) bool { + return identifier.FullyQualifiedName() == id.FullyQualifiedName() + }) - if d.HasChange("session_parameters") { - o, n := d.GetChange("session_parameters") + return append(diags, ReadTask(false)(ctx, d, meta)...) 
+} - if o == nil { - o = make(map[string]interface{}) - } - if n == nil { - n = make(map[string]interface{}) +func ReadTask(withExternalChangesMarking bool) schema.ReadContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + client := meta.(*provider.Context).Client + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return diag.FromErr(err) } - os := o.(map[string]any) - ns := n.(map[string]any) - remove := difference(os, ns) - add := difference(ns, os) - change := differentValue(os, ns) - - if len(remove) > 0 { - sessionParametersUnset, err := sdk.GetSessionParametersUnsetFrom(remove) - if err != nil { - return err - } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithUnset(*sdk.NewTaskUnsetRequest().WithSessionParametersUnset(*sessionParametersUnset))); err != nil { - return fmt.Errorf("error removing session_parameters on task %v err = %w", d.Id(), err) + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + if errors.Is(err, sdk.ErrObjectNotFound) { + d.SetId("") + return diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Failed to query task. 
Marking the resource as removed.", + Detail: fmt.Sprintf("task name: %s, Err: %s", id.FullyQualifiedName(), err), + }, + } } + return diag.FromErr(err) } - if len(add) > 0 { - sessionParameters, err := sdk.GetSessionParametersFrom(add) - if err != nil { - return err - } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return fmt.Errorf("error adding session_parameters to task %v err = %w", d.Id(), err) - } + taskParameters, err := client.Tasks.ShowParameters(ctx, id) + if err != nil { + return diag.FromErr(err) } - if len(change) > 0 { - sessionParameters, err := sdk.GetSessionParametersFrom(change) - if err != nil { - return err + if withExternalChangesMarking { + if err = handleExternalChangesToObjectInShow(d, + outputMapping{"allow_overlapping_execution", "allow_overlapping_execution", task.AllowOverlappingExecution, booleanStringFromBool(task.AllowOverlappingExecution), nil}, + ); err != nil { + return diag.FromErr(err) } - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSet(*sdk.NewTaskSetRequest().WithSessionParameters(*sessionParameters))); err != nil { - return fmt.Errorf("error updating session_parameters in task %v err = %w", d.Id(), err) + } else { + if err = setStateToValuesFromConfig(d, taskSchema, []string{ + "allow_overlapping_execution", + }); err != nil { + return diag.FromErr(err) } } - } - if d.HasChange("when") { - n := d.Get("when") - alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyWhen(n.(string)) - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating when condition on task %s", taskId.FullyQualifiedName()) - } - } - - if d.HasChange("sql_statement") { - n := d.Get("sql_statement") - alterRequest := sdk.NewAlterTaskRequest(taskId).WithModifyAs(n.(string)) - err := client.Tasks.Alter(ctx, alterRequest) - if err != nil { - return fmt.Errorf("error updating 
sql statement on task %s", taskId.FullyQualifiedName()) + if errs := errors.Join( + attributeMappedValueReadOrDefault(d, "finalize", task.TaskRelations.FinalizedRootTask, func(finalizedRootTask *sdk.SchemaObjectIdentifier) (string, error) { + return finalizedRootTask.FullyQualifiedName(), nil + }, nil), + attributeMappedValueReadOrDefault(d, "error_integration", task.ErrorIntegration, func(errorIntegration *sdk.AccountObjectIdentifier) (string, error) { + return errorIntegration.Name(), nil + }, nil), + attributeMappedValueReadOrDefault(d, "warehouse", task.Warehouse, func(warehouse *sdk.AccountObjectIdentifier) (string, error) { + return warehouse.Name(), nil + }, nil), + func() error { + if len(task.Schedule) > 0 { + taskSchedule, err := sdk.ParseTaskSchedule(task.Schedule) + if err != nil { + return err + } + switch { + case len(taskSchedule.Cron) > 0: + if err := d.Set("schedule", []any{map[string]any{ + "using_cron": taskSchedule.Cron, + }}); err != nil { + return err + } + case taskSchedule.Minutes > 0: + if err := d.Set("schedule", []any{map[string]any{ + "minutes": taskSchedule.Minutes, + }}); err != nil { + return err + } + } + return nil + } + return d.Set("schedule", nil) + }(), + d.Set("started", task.IsStarted()), + d.Set("when", task.Condition), + d.Set("config", task.Config), + d.Set("comment", task.Comment), + d.Set("sql_statement", task.Definition), + d.Set("after", collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName)), + handleTaskParameterRead(d, taskParameters), + d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()), + d.Set(ShowOutputAttributeName, []map[string]any{schemas.TaskToSchema(task)}), + d.Set(ParametersAttributeName, []map[string]any{schemas.TaskParametersToSchema(taskParameters)}), + ); errs != nil { + return diag.FromErr(errs) } - } - enabled := d.Get("enabled").(bool) - if enabled { - if waitForTaskStart(ctx, client, taskId) != nil { - log.Printf("[WARN] failed to resume task %s", 
taskId.FullyQualifiedName()) - } - } else { - if err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(taskId).WithSuspend(true)); err != nil { - return fmt.Errorf("failed to suspend task %s", taskId.FullyQualifiedName()) - } + return nil } - - return ReadTask(d, meta) } -// DeleteTask implements schema.DeleteFunc. -func DeleteTask(d *schema.ResourceData, meta interface{}) error { +func DeleteTask(ctx context.Context, d *schema.ResourceData, meta any) (diags diag.Diagnostics) { client := meta.(*provider.Context).Client - ctx := context.Background() - - taskId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) + id, err := sdk.ParseSchemaObjectIdentifier(d.Id()) + if err != nil { + return diag.FromErr(err) + } - tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, taskId, taskId) + tasksToResume, err := client.Tasks.SuspendRootTasks(ctx, id, id) defer func() { if err := client.Tasks.ResumeTasks(ctx, tasksToResume); err != nil { - log.Printf("[WARN] failed to resume tasks: %s", err) + diags = append(diags, resumeTaskErrorDiag(id, "delete", err)) } }() if err != nil { - return err + return diag.FromErr(sdk.JoinErrors(err)) } - dropRequest := sdk.NewDropTaskRequest(taskId) - err = client.Tasks.Drop(ctx, dropRequest) + err = client.Tasks.Drop(ctx, sdk.NewDropTaskRequest(id).WithIfExists(true)) if err != nil { - return fmt.Errorf("error deleting task %s err = %w", taskId.FullyQualifiedName(), err) + return diag.FromErr(fmt.Errorf("error deleting task %s err = %w", id.FullyQualifiedName(), err)) } d.SetId("") - return nil + return diags +} + +func resumeTaskErrorDiag(id sdk.SchemaObjectIdentifier, operation string, originalErr error) diag.Diagnostic { + return diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("Failed to resume tasks in %s operation (id=%s)", operation, id.FullyQualifiedName()), + Detail: fmt.Sprintf("Failed to resume some of the tasks with the following errors (tasks can be resumed by applying the same configuration 
again): %v", originalErr), + } +} + +func waitForTaskStart(ctx context.Context, client *sdk.Client, id sdk.SchemaObjectIdentifier) error { + err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(id).WithResume(true)) + if err != nil { + return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err) + } + return util.Retry(5, 5*time.Second, func() (error, bool) { + task, err := client.Tasks.ShowByID(ctx, id) + if err != nil { + return fmt.Errorf("error starting task %s err = %w", id.FullyQualifiedName(), err), false + } + if task.State != sdk.TaskStateStarted { + return nil, false + } + return nil, true + }) } diff --git a/pkg/resources/task_acceptance_test.go b/pkg/resources/task_acceptance_test.go index 05ffa51176..0a0545397f 100644 --- a/pkg/resources/task_acceptance_test.go +++ b/pkg/resources/task_acceptance_test.go @@ -3,562 +3,877 @@ package resources_test import ( "bytes" "fmt" + "regexp" + "strconv" "testing" - "text/template" - acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/objectparametersassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceparametersassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + configvariable "github.com/hashicorp/terraform-plugin-testing/config" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceshowoutputassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "github.com/hashicorp/terraform-plugin-testing/config" + r "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/plancheck" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-plugin-testing/tfversion" ) -type ( - AccTaskTestSettings struct { - DatabaseName string - WarehouseName string - RootTask *TaskSettings - ChildTask *TaskSettings - SoloTask *TaskSettings - } +// TODO(SNOW-1822118): Create more complicated tests for task - TaskSettings struct { - Name string - Enabled bool - Schema string - SQL string - Schedule string - Comment string - When string - SessionParams map[string]string - UserTaskTimeoutMs int64 - } -) +func TestAcc_Task_Basic(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) -var ( - rootname = acc.TestClient().Ids.AlphaContaining("_root_task") - rootId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, rootname) - childname = acc.TestClient().Ids.AlphaContaining("_child_task") - childId = sdk.NewSchemaObjectIdentifier(acc.TestDatabaseName, acc.TestSchemaName, childname) - soloname = acc.TestClient().Ids.AlphaContaining("_standalone_task") - - initialState = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - 
RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: true, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: false, - Comment: "initial state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT 1", - When: "TRUE", - Enabled: false, - }, - } + currentRole := acc.TestClient().Context.CurrentRole(t) - // Enables the Child and changes the SQL. - stepOne = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: true, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT *", - Enabled: true, - Comment: "secondary state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT *", - When: "TRUE", - Enabled: true, - SessionParams: map[string]string{ - string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", - }, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - }, - } + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModel := model.TaskWithId("test", id, false, statement) - // Changes Root Schedule and SQL. 
- stepTwo = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW TABLES", - Enabled: true, - Schedule: "15 MINUTE", - UserTaskTimeoutMs: 1800000, - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "1000", - string(sdk.SessionParameterStrictJSONOutput): "true", - }, - }, - - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: true, - Comment: "third state", - }, - - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT *", - When: "FALSE", - Enabled: true, - Schedule: "15 MINUTE", - UserTaskTimeoutMs: 900000, + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), }, - } + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(""). 
+ HasWarehouse(sdk.NewAccountObjectIdentifier("")). + HasNoSchedule(). + HasPredecessors(). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). + HasLastCommittedOn(""). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). + HasAllDefaults(), + ), + }, + { + ResourceName: configModel.ResourceReference(), + ImportState: true, + ImportStateCheck: assert.AssertThatImport(t, + resourceassert.ImportedTaskResource(t, helpers.EncodeResourceIdentifier(id)). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanFalse). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(""). 
+ HasSqlStatementString(statement), + ), + }, + }, + }) +} + +func TestAcc_Task_Complete(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + currentRole := acc.TestClient().Context.CurrentRole(t) - stepThree = &AccTaskTestSettings{ //nolint - WarehouseName: acc.TestWarehouseName, - DatabaseName: acc.TestDatabaseName, + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.CreateWithGcpPubSub(t) + t.Cleanup(errorNotificationIntegrationCleanup) - RootTask: &TaskSettings{ - Name: rootname, - Schema: acc.TestSchemaName, - SQL: "SHOW FUNCTIONS", - Enabled: false, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 1800000, - // Changes session params: one is updated, one is removed, one is added - SessionParams: map[string]string{ - string(sdk.SessionParameterLockTimeout): "2000", - string(sdk.SessionParameterMultiStatementCount): "5", + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + taskConfig := `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}` + comment := random.Comment() + condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` + configModel := model.TaskWithId("test", id, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(10). + WithConfigValue(configvariable.StringVariable(taskConfig)). + WithAllowOverlappingExecution(r.BooleanTrue). + WithErrorIntegration(errorNotificationIntegration.ID().Name()). + WithComment(comment). 
+ WithWhen(condition) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasScheduleMinutes(10). + HasConfigString(taskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(comment). + HasWarehouse(acc.TestClient().Ids.WarehouseId()). + HasScheduleMinutes(10). + HasPredecessors(). + HasState(sdk.TaskStateStarted). + HasDefinition(statement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(taskConfig). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). 
+ HasAllDefaults(), + ), + }, + { + ResourceName: configModel.ResourceReference(), + ImportState: true, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + ImportStateCheck: assert.AssertThatImport(t, + resourceassert.ImportedTaskResource(t, helpers.EncodeResourceIdentifier(id)). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasScheduleMinutes(10). + HasConfigString(taskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasNoAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + ), }, }, + }) +} - ChildTask: &TaskSettings{ - Name: childname, - SQL: "SELECT 1", - Enabled: false, - Comment: "reset", - }, +func TestAcc_Task_Updates(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + currentRole := acc.TestClient().Context.CurrentRole(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + newStatement := "SELECT 123" + basicConfigModel := model.TaskWithId("test", id, false, statement) + + // TODO(SNOW-1736173): New warehouse created, because the common one has lower-case letters that won't work + warehouse, warehouseCleanup := acc.TestClient().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) - SoloTask: &TaskSettings{ - Name: soloname, - Schema: acc.TestSchemaName, - SQL: "SELECT 1", - When: "TRUE", - Enabled: true, - SessionParams: map[string]string{ - string(sdk.SessionParameterTimestampInputFormat): "YYYY-MM-DD HH24", + errorNotificationIntegration, errorNotificationIntegrationCleanup := 
acc.TestClient().NotificationIntegration.CreateWithGcpPubSub(t) + t.Cleanup(errorNotificationIntegrationCleanup) + + taskConfig := `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}` + comment := random.Comment() + condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` + completeConfigModel := model.TaskWithId("test", id, true, newStatement). + WithWarehouse(warehouse.ID().Name()). + WithScheduleMinutes(5). + WithConfigValue(configvariable.StringVariable(taskConfig)). + WithAllowOverlappingExecution(r.BooleanTrue). + WithErrorIntegration(errorNotificationIntegration.ID().Name()). + WithComment(comment). + WithWhen(condition) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). + HasNoSchedule(). + HasPredecessors(). 
+ HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). + HasLastCommittedOn(""). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Set + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, completeConfigModel), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(completeConfigModel.ResourceReference(), plancheck.ResourceActionUpdate), + }, + }, + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, completeConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasWarehouseString(warehouse.ID().Name()). + HasScheduleMinutes(5). + HasConfigString(taskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasAfter(). + HasWhenString(condition). + HasSqlStatementString(newStatement), + resourceshowoutputassert.TaskShowOutput(t, completeConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasWarehouse(warehouse.ID()). + HasComment(comment). + HasScheduleMinutes(5). + HasPredecessors(). + HasState(sdk.TaskStateStarted). + HasDefinition(newStatement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). 
+ HasConfig(taskConfig). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Unset + { + Config: config.FromModel(t, basicConfigModel), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(basicConfigModel.ResourceReference(), plancheck.ResourceActionUpdate), + }, + }, + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). + HasNoSchedule(). + HasPredecessors(). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOnNotEmpty(). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). 
+ HasTaskRelations(sdk.TaskRelations{}), + ), }, - Schedule: "5 MINUTE", - UserTaskTimeoutMs: 0, }, - } -) + }) +} + +/* +DAG structure (the test proves child3 won't have any issues with updates in the following scenario): + + child1 + / \ + root child3 + \ / + child2 +*/ +func TestAcc_Task_UpdatesInComplexDAG(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootTask, rootTaskCleanup := acc.TestClient().Task.CreateWithSchedule(t) + t.Cleanup(rootTaskCleanup) + + child1, child1Cleanup := acc.TestClient().Task.CreateWithAfter(t, rootTask.ID()) + t.Cleanup(child1Cleanup) + + child2, child2Cleanup := acc.TestClient().Task.CreateWithAfter(t, rootTask.ID()) + t.Cleanup(child2Cleanup) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(child1.ID()).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(child2.ID()).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootTask.ID()).WithResume(true)) + t.Cleanup(func() { acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootTask.ID()).WithSuspend(true)) }) + + child3Id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + basicConfigModel := model.TaskWithId("test", child3Id, true, "SELECT 1"). + WithAfterValue(configvariable.SetVariable( + configvariable.StringVariable(child1.ID().FullyQualifiedName()), + configvariable.StringVariable(child2.ID().FullyQualifiedName()), + )) + + comment := random.Comment() + basicConfigModelAfterUpdate := model.TaskWithId("test", child3Id, true, "SELECT 123"). + WithAfterValue(configvariable.SetVariable( + configvariable.StringVariable(child1.ID().FullyQualifiedName()), + configvariable.StringVariable(child2.ID().FullyQualifiedName()), + )). 
+ WithComment(comment) -func TestAcc_Task(t *testing.T) { resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - PreCheck: func() { acc.TestAccPreCheck(t) }, CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ { - Config: taskConfig(initialState), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", initialState.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "after.0", rootname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), - 
resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", initialState.RootTask.UserTaskTimeoutMs), - resource.TestCheckNoResourceAttr("snowflake_task.solo_task", "user_task_timeout_ms"), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), - ), - }, - { - Config: taskConfig(stepOne), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepOne.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepOne.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepOne.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", 
"schedule", stepOne.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepOne.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepOne.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), - ), - }, - { - Config: taskConfig(stepTwo), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepTwo.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepTwo.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepTwo.ChildTask.Comment), - 
resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepTwo.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepTwo.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepTwo.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepTwo.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), - ), - }, - { - Config: taskConfig(stepThree), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", stepThree.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "sql_statement", stepThree.ChildTask.SQL), - 
resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", stepThree.ChildTask.Comment), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", stepThree.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", stepThree.ChildTask.Schedule), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepThree.RootTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", stepThree.SoloTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 2000), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT"), - checkInt64("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT", 5), - ), - }, - { - Config: taskConfig(initialState), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.root_task", "name", rootname), - resource.TestCheckResourceAttr("snowflake_task.root_task", "fully_qualified_name", rootId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.child_task", "name", childname), - resource.TestCheckResourceAttr("snowflake_task.child_task", "fully_qualified_name", childId.FullyQualifiedName()), - resource.TestCheckResourceAttr("snowflake_task.root_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.root_task", "sql_statement", initialState.RootTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", 
"sql_statement", initialState.ChildTask.SQL), - resource.TestCheckResourceAttr("snowflake_task.child_task", "comment", initialState.ChildTask.Comment), - checkInt64("snowflake_task.root_task", "user_task_timeout_ms", stepOne.RootTask.UserTaskTimeoutMs), - resource.TestCheckResourceAttr("snowflake_task.root_task", "schedule", initialState.RootTask.Schedule), - resource.TestCheckResourceAttr("snowflake_task.child_task", "schedule", initialState.ChildTask.Schedule), - // Terraform SDK is not able to differentiate if the - // attribute has deleted or set to zero value. - // ResourceData.GetChange returns the zero value of defined - // type in schema as new the value. Provider handles 0 for - // `user_task_timeout_ms` by unsetting the - // USER_TASK_TIMEOUT_MS session variable. - checkInt64("snowflake_task.solo_task", "user_task_timeout_ms", initialState.ChildTask.UserTaskTimeoutMs), - checkInt64("snowflake_task.root_task", "session_parameters.LOCK_TIMEOUT", 1000), - resource.TestCheckResourceAttr("snowflake_task.root_task", "session_parameters.STRICT_JSON_OUTPUT", "true"), - resource.TestCheckNoResourceAttr("snowflake_task.root_task", "session_parameters.MULTI_STATEMENT_COUNT"), + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). + HasFullyQualifiedNameString(child3Id.FullyQualifiedName()). + HasDatabaseString(child3Id.DatabaseName()). + HasSchemaString(child3Id.SchemaName()). + HasNameString(child3Id.Name()). + HasStartedString(r.BooleanTrue). + HasAfter(child1.ID(), child2.ID()). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(child3Id.Name()). + HasDatabaseName(child3Id.DatabaseName()). + HasSchemaName(child3Id.SchemaName()). + HasState(sdk.TaskStateStarted). 
+ HasDefinition("SELECT 1"), + ), + }, + // Update some fields in child3 + { + Config: config.FromModel(t, basicConfigModelAfterUpdate), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModelAfterUpdate.ResourceReference()). + HasFullyQualifiedNameString(child3Id.FullyQualifiedName()). + HasDatabaseString(child3Id.DatabaseName()). + HasSchemaString(child3Id.SchemaName()). + HasNameString(child3Id.Name()). + HasStartedString(r.BooleanTrue). + HasCommentString(comment). + HasAfter(child1.ID(), child2.ID()). + HasSqlStatementString("SELECT 123"), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModelAfterUpdate.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(child3Id.Name()). + HasDatabaseName(child3Id.DatabaseName()). + HasSchemaName(child3Id.SchemaName()). + HasState(sdk.TaskStateStarted). + HasComment(comment). + HasDefinition("SELECT 123"), ), }, }, }) } -func taskConfig(settings *AccTaskTestSettings) string { //nolint - config, err := template.New("task_acceptance_test_config").Parse(` -resource "snowflake_warehouse" "wh" { - name = "{{ .WarehouseName }}-{{ .RootTask.Name }}" -} -resource "snowflake_task" "root_task" { - name = "{{ .RootTask.Name }}" - database = "{{ .DatabaseName }}" - schema = "{{ .RootTask.Schema }}" - warehouse = "${snowflake_warehouse.wh.name}" - sql_statement = "{{ .RootTask.SQL }}" - enabled = {{ .RootTask.Enabled }} - schedule = "{{ .RootTask.Schedule }}" - {{ if .RootTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .RootTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .RootTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .RootTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} -resource "snowflake_task" "child_task" { - name = "{{ .ChildTask.Name }}" - database = snowflake_task.root_task.database - schema = snowflake_task.root_task.schema - warehouse = snowflake_task.root_task.warehouse - sql_statement = "{{ .ChildTask.SQL }}" - 
enabled = {{ .ChildTask.Enabled }} - after = [snowflake_task.root_task.name] - comment = "{{ .ChildTask.Comment }}" - {{ if .ChildTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .ChildTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .ChildTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .ChildTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} -resource "snowflake_task" "solo_task" { - name = "{{ .SoloTask.Name }}" - database = "{{ .DatabaseName }}" - schema = "{{ .SoloTask.Schema }}" - warehouse = "{{ .WarehouseName }}" - sql_statement = "{{ .SoloTask.SQL }}" - enabled = {{ .SoloTask.Enabled }} - when = "{{ .SoloTask.When }}" - {{ if .SoloTask.Schedule }} - schedule = "{{ .SoloTask.Schedule }}" - {{- end }} - - {{ if .SoloTask.UserTaskTimeoutMs }} - user_task_timeout_ms = {{ .SoloTask.UserTaskTimeoutMs }} - {{- end }} - - {{ if .SoloTask.SessionParams }} - session_parameters = { - {{ range $key, $value := .SoloTask.SessionParams}} - {{ $key }} = "{{ $value }}", - {{- end }} - } - {{- end }} -} - `) - if err != nil { - fmt.Println(err) - } +func TestAcc_Task_StatementSpaces(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + when := "1 > 2" + configModel := model.TaskWithId("test", id, false, statement).WithWhen(when) - var result bytes.Buffer - config.Execute(&result, settings) //nolint + statementWithSpaces := " SELECT 1 " + whenWithSpaces := " 1 > 2 " + configModelWithSpacesInStatements := model.TaskWithId("test", id, false, statementWithSpaces).WithWhen(whenWithSpaces) - return result.String() + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + 
CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasWhenString(when). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasName(id.Name()). + HasCondition(when). + HasDefinition(statement), + ), + }, + { + Config: config.FromModel(t, configModelWithSpacesInStatements), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasWhenString(when). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasName(id.Name()). + HasCondition(when). + HasDefinition(statement), + ), + }, + }, + }) } -/* -todo: this test is failing due to error message below. 
Need to figure out why this is happening -=== RUN TestAcc_Task_Managed +func TestAcc_Task_ExternalChanges(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + currentRole := acc.TestClient().Context.CurrentRole(t) - task_acceptance_test.go:371: Step 2/4 error: Error running apply: exit status 1 + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + basicConfigModel := model.TaskWithId("test", id, false, statement) - Error: error updating warehouse on task "terraform_test_database"."terraform_test_schema"."tst-terraform-DBMPMESYJB" err = 091083 (42601): Nonexistent warehouse terraform_test_warehouse-tst-terraform-DBMPMESYJB was specified. + // TODO(SNOW-1736173): New warehouse created, because the common one has lower-case letters that won't work + warehouse, warehouseCleanup := acc.TestClient().Warehouse.CreateWarehouse(t) + t.Cleanup(warehouseCleanup) - with snowflake_task.managed_task, - on terraform_plugin_test.tf line 7, in resource "snowflake_task" "managed_task": - 7: resource "snowflake_task" "managed_task" { + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.CreateWithGcpPubSub(t) + t.Cleanup(errorNotificationIntegrationCleanup) + taskConfig := `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}` + comment := random.Comment() + condition := `SYSTEM$STREAM_HAS_DATA('MYSTREAM')` + completeConfigModel := model.TaskWithId("test", id, true, statement). + WithWarehouse(warehouse.ID().Name()). + WithScheduleMinutes(5). + WithConfigValue(configvariable.StringVariable(taskConfig)). + WithAllowOverlappingExecution(r.BooleanTrue). + WithErrorIntegration(errorNotificationIntegration.ID().Name()). + WithComment(comment). 
+ WithWhen(condition) - func TestAcc_Task_Managed(t *testing.T) { - accName := acc.TestClient().Ids.Alpha() - resource.Test(t, resource.TestCase{ - ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, TerraformVersionChecks: []tfversion.TerraformVersionCheck{ tfversion.RequireAbove(tfversion.Version1_5_0), }, - PreCheck: func() { acc.TestAccPreCheck(t) }, - CheckDestroy: acc.CheckDestroy(t, resources.Task), - Steps: []resource.TestStep{ - { - Config: taskConfigManaged1(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", "XSMALL"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "user_task_managed_initial_warehouse_size", ""), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "session_parameters.TIMESTAMP_INPUT_FORMAT", "YYYY-MM-DD HH24"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + // Optionals set + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, completeConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, completeConfigModel.ResourceReference()). 
+ HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasWarehouseString(warehouse.ID().Name()). + HasScheduleMinutes(5). + HasConfigString(taskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, completeConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasWarehouse(warehouse.ID()). + HasComment(comment). + HasScheduleMinutes(5). + HasPredecessors(). + HasState(sdk.TaskStateStarted). + HasDefinition(statement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOn(""). + HasOwnerRoleType("ROLE"). + HasConfig(taskConfig). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // External change - unset all optional fields and expect no change + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithUnset(*sdk.NewTaskUnsetRequest(). + WithWarehouse(true). + WithConfig(true). + WithAllowOverlappingExecution(true). + WithErrorIntegration(true). + WithComment(true). 
+ WithSchedule(true), + )) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithRemoveWhen(true)) }, - { - Config: taskConfigManaged2(accName, acc.TestDatabaseName, acc.TestSchemaName, acc.TestWarehouseName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", ""), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", fmt.Sprintf("%s-%s", acc.TestWarehouseName, accName)), - ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(basicConfigModel.ResourceReference(), plancheck.ResourceActionUpdate), + }, }, - { - Config: taskConfigManaged1(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "session_parameters.TIMESTAMP_INPUT_FORMAT", "YYYY-MM-DD HH24"), - resource.TestCheckResourceAttr("snowflake_task.managed_task_no_init", "user_task_managed_initial_warehouse_size", ""), - 
resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, completeConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, completeConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasWarehouseString(warehouse.ID().Name()). + HasScheduleMinutes(5). + HasConfigString(taskConfig). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasErrorIntegrationString(errorNotificationIntegration.ID().Name()). + HasCommentString(comment). + HasFinalizeString(""). + HasAfter(). + HasWhenString(condition). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, completeConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasWarehouse(warehouse.ID()). + HasComment(comment). + HasScheduleMinutes(5). + HasPredecessors(). + HasState(sdk.TaskStateStarted). + HasDefinition(statement). + HasCondition(condition). + HasAllowOverlappingExecution(true). + HasErrorIntegration(errorNotificationIntegration.ID()). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOnNotEmpty(). + HasOwnerRoleType("ROLE"). + HasConfig(taskConfig). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Unset optional values + { + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). 
+ HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). + HasNoSchedule(). + HasPredecessors(). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOnNotEmpty(). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // External change - set all optional fields and expect no change + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest(). + WithWarehouse(warehouse.ID()). + WithConfig(taskConfig). + WithAllowOverlappingExecution(true). + WithErrorIntegration(errorNotificationIntegration.ID()). + WithComment(comment). 
+ WithSchedule("5 MINUTE"), + )) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithModifyWhen(condition)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithModifyAs("SELECT 123")) }, - { - Config: taskConfigManaged3(accName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - checkBool("snowflake_task.managed_task", "enabled", true), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "user_task_managed_initial_warehouse_size", "SMALL"), - resource.TestCheckResourceAttr("snowflake_task.managed_task", "warehouse", ""), - ), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(basicConfigModel.ResourceReference(), plancheck.ResourceActionUpdate), + }, }, + Config: config.FromModel(t, basicConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, basicConfigModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasWarehouseString(""). + HasNoScheduleSet(). + HasConfigString(""). + HasAllowOverlappingExecutionString(r.BooleanDefault). + HasErrorIntegrationString(""). + HasCommentString(""). + HasFinalizeString(""). + HasAfter(). + HasWhenString(""). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, basicConfigModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasName(id.Name()). + HasIdNotEmpty(). + HasDatabaseName(id.DatabaseName()). 
+ HasSchemaName(id.SchemaName()). + HasOwner(currentRole.Name()). + HasComment(""). + HasWarehouse(sdk.NewAccountObjectIdentifier("")). + HasNoSchedule(). + HasPredecessors(). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement). + HasCondition(""). + HasAllowOverlappingExecution(false). + HasErrorIntegration(sdk.NewAccountObjectIdentifier("")). + HasLastCommittedOnNotEmpty(). + HasLastSuspendedOnNotEmpty(). + HasOwnerRoleType("ROLE"). + HasConfig(""). + HasBudget(""). + HasTaskRelations(sdk.TaskRelations{}), + ), }, - }) - } -*/ -func taskConfigManaged1(name string, databaseName string, schemaName string) string { - s := ` -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - user_task_managed_initial_warehouse_size = "XSMALL" -} -resource "snowflake_task" "managed_task_no_init" { - name = "%s_no_init" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - session_parameters = { - TIMESTAMP_INPUT_FORMAT = "YYYY-MM-DD HH24", - } + }, + }) } -` - return fmt.Sprintf(s, name, databaseName, schemaName, name, databaseName, schemaName) -} +func TestAcc_Task_CallingProcedure(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) -func taskConfigManaged2(name, databaseName, schemaName, warehouseName string) string { - s := ` -resource "snowflake_warehouse" "wh" { - name = "%s-%s" -} + procedure := acc.TestClient().Procedure.Create(t, sdk.DataTypeNumber) -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - warehouse = snowflake_warehouse.wh.name -} -` - return fmt.Sprintf(s, warehouseName, name, name, databaseName, schemaName) -} + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := fmt.Sprintf("call %s(123)", procedure.Name) + 
configModel := model.TaskWithId("test", id, false, statement).WithUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall) -func taskConfigManaged3(name, databaseName, schemaName string) string { - s := ` -resource "snowflake_task" "managed_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - user_task_managed_initial_warehouse_size = "SMALL" -} -` - return fmt.Sprintf(s, name, databaseName, schemaName) + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall). + HasSqlStatementString(statement), + resourceshowoutputassert.TaskShowOutput(t, configModel.ResourceReference()). + HasCreatedOnNotEmpty(). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasName(id.Name()). + HasState(sdk.TaskStateSuspended). + HasDefinition(statement), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). 
+ HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall), + ), + }, + }, + }) } -func TestAcc_Task_SwitchScheduled(t *testing.T) { - accName := acc.TestClient().Ids.Alpha() - taskRootName := acc.TestClient().Ids.Alpha() +func TestAcc_Task_CronAndMinutes(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + minutes := 5 + cron := "*/5 * * * * UTC" + configModelWithoutSchedule := model.TaskWithId("test", id, false, "SELECT 1") + configModelWithMinutes := model.TaskWithId("test", id, true, "SELECT 1").WithScheduleMinutes(minutes) + configModelWithCron := model.TaskWithId("test", id, true, "SELECT 1").WithScheduleCron(cron) resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -568,147 +883,134 @@ func TestAcc_Task_SwitchScheduled(t *testing.T) { }, CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ + // create with minutes + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithMinutes), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithMinutes.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(minutes). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithMinutes.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasScheduleMinutes(minutes), + ), + }, + // Unset schedule (from minutes) { - Config: taskConfigManagedScheduled(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "1"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithoutSchedule), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithoutSchedule.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasNoScheduleSet(). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithoutSchedule.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasNoSchedule(), ), }, + // Create with cron { - Config: taskConfigManagedScheduled2(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", ""), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "2"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithCron), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithCron.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasScheduleCron(cron). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithCron.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasScheduleCron(cron), ), }, + // Change to minutes { - Config: taskConfigManagedScheduled(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", "5 MINUTE"), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "1"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithMinutes), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithMinutes.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(minutes). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithMinutes.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasScheduleMinutes(minutes), ), }, + // Change back to cron { - Config: taskConfigManagedScheduled3(accName, taskRootName, acc.TestDatabaseName, acc.TestSchemaName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "false"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "database", acc.TestDatabaseName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schema", acc.TestSchemaName), - resource.TestCheckResourceAttr("snowflake_task.test_task", "sql_statement", "SELECT 1"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "schedule", ""), - resource.TestCheckResourceAttr("snowflake_task.test_task_root", "suspend_task_after_num_failures", "0"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithCron), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithCron.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanTrue). + HasScheduleCron(cron). + HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithCron.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasScheduleCron(cron), + ), + }, + // Unset schedule (from cron) + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithoutSchedule), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithoutSchedule.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasNoScheduleSet(). 
+ HasSqlStatementString("SELECT 1"), + resourceshowoutputassert.TaskShowOutput(t, configModelWithoutSchedule.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasNoSchedule(), ), }, }, }) } -func taskConfigManagedScheduled(name string, taskRootName string, databaseName string, schemaName string) string { - return fmt.Sprintf(` -resource "snowflake_task" "test_task_root" { - name = "%[1]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - suspend_task_after_num_failures = 1 -} - -resource "snowflake_task" "test_task" { - depends_on = [snowflake_task.test_task_root] - name = "%[4]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} -`, taskRootName, databaseName, schemaName, name) -} - -func taskConfigManagedScheduled2(name string, taskRootName string, databaseName string, schemaName string) string { - return fmt.Sprintf(` -resource "snowflake_task" "test_task_root" { - name = "%[1]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - suspend_task_after_num_failures = 2 -} - -resource "snowflake_task" "test_task" { - name = "%[4]s" - database = "%[2]s" - schema = "%[3]s" - sql_statement = "SELECT 1" - enabled = true - after = [snowflake_task.test_task_root.name] -} -`, taskRootName, databaseName, schemaName, name) -} - -func taskConfigManagedScheduled3(name string, taskRootName string, databaseName string, schemaName string) string { - s := ` -resource "snowflake_task" "test_task_root" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = false - schedule = "5 MINUTE" -} - -resource "snowflake_task" "test_task" { - name = "%s" - database = "%s" - schema = "%s" - sql_statement = "SELECT 1" - enabled = false - after = [snowflake_task.test_task_root.name] -} -` - return fmt.Sprintf(s, 
taskRootName, databaseName, schemaName, name, databaseName, schemaName) -} - -func checkInt64(name, key string, value int64) func(*terraform.State) error { - return func(state *terraform.State) error { - return resource.TestCheckResourceAttr(name, key, fmt.Sprintf("%v", value))(state) - } -} +func TestAcc_Task_CronAndMinutes_ExternalChanges(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) -func TestAcc_Task_issue2207(t *testing.T) { - prefix := acc.TestClient().Ids.Alpha() - rootName := prefix + "_root_task" - childName := prefix + "_child_task" - - m := func() map[string]config.Variable { - return map[string]config.Variable{ - "root_name": config.StringVariable(rootName), - "database": config.StringVariable(acc.TestDatabaseName), - "schema": config.StringVariable(acc.TestSchemaName), - "warehouse": config.StringVariable(acc.TestWarehouseName), - "child_name": config.StringVariable(childName), - "comment": config.StringVariable("abc"), - } - } - m2 := m() - m2["comment"] = config.StringVariable("def") + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + minutes := 5 + cron := "*/5 * * * * UTC" + configModelWithoutSchedule := model.TaskWithId("test", id, false, "SELECT 1") + configModelWithMinutes := model.TaskWithId("test", id, false, "SELECT 1").WithScheduleMinutes(minutes) + configModelWithCron := model.TaskWithId("test", id, false, "SELECT 1").WithScheduleCron(cron) resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -718,43 +1020,142 @@ func TestAcc_Task_issue2207(t *testing.T) { }, CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ + // Create without a schedule { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", 
"true"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithoutSchedule), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithoutSchedule.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasNoScheduleSet(), + resourceshowoutputassert.TaskShowOutput(t, configModelWithoutSchedule.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasNoSchedule(), ), - ConfigPlanChecks: resource.ConfigPlanChecks{ - PostApplyPostRefresh: []plancheck.PlanCheck{ - plancheck.ExpectEmptyPlan(), - }, + }, + // External change - set minutes + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSchedule("5 MINUTES"))) }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithoutSchedule), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithoutSchedule.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasNoScheduleSet(), + resourceshowoutputassert.TaskShowOutput(t, configModelWithoutSchedule.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasNoSchedule(), + ), }, - // change comment + // External change - set cron + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithSet(*sdk.NewTaskSetRequest().WithSchedule(fmt.Sprintf("USING CRON %s", cron)))) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithoutSchedule), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithoutSchedule.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasNoScheduleSet(), + resourceshowoutputassert.TaskShowOutput(t, configModelWithoutSchedule.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasNoSchedule(), + ), + }, + // Set minutes schedule + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithMinutes), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithMinutes.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasScheduleMinutes(minutes), + resourceshowoutputassert.TaskShowOutput(t, configModelWithMinutes.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasScheduleMinutes(minutes), + ), + }, + // External change - unset schedule + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(id).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithMinutes), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithMinutes.ResourceReference()). + HasDatabaseString(id.DatabaseName()). 
+ HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasScheduleMinutes(minutes), + resourceshowoutputassert.TaskShowOutput(t, configModelWithMinutes.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasScheduleMinutes(minutes), + ), + }, + // Set cron schedule + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithCron), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithCron.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasScheduleCron(cron), + resourceshowoutputassert.TaskShowOutput(t, configModelWithCron.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). + HasScheduleCron(cron), + ), + }, + // External change - unset schedule { - ConfigDirectory: acc.ConfigurationSameAsStepN(1), - ConfigVariables: m2, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.root_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.child_task", "enabled", "true"), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithCron), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelWithCron.ResourceReference()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasScheduleCron(cron), + resourceshowoutputassert.TaskShowOutput(t, configModelWithCron.ResourceReference()). + HasName(id.Name()). + HasDatabaseName(id.DatabaseName()). + HasSchemaName(id.SchemaName()). 
+ HasScheduleCron(cron), ), }, }, }) } -func TestAcc_Task_issue2036(t *testing.T) { - name := acc.TestClient().Ids.Alpha() - - m := func() map[string]config.Variable { - return map[string]config.Variable{ - "name": config.StringVariable(name), - "database": config.StringVariable(acc.TestDatabaseName), - "schema": config.StringVariable(acc.TestSchemaName), - "warehouse": config.StringVariable(acc.TestWarehouseName), - } - } +func TestAcc_Task_ScheduleSchemaValidation(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() resource.Test(t, resource.TestCase{ ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, @@ -764,33 +1165,1471 @@ func TestAcc_Task_issue2036(t *testing.T) { }, CheckDestroy: acc.CheckDestroy(t, resources.Task), Steps: []resource.TestStep{ - // create without when { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), + Config: taskConfigInvalidScheduleSetMultipleOrEmpty(id, true), + ExpectError: regexp.MustCompile("\"schedule.0.minutes\": only one of `schedule.0.minutes,schedule.0.using_cron`"), + }, + { + Config: taskConfigInvalidScheduleSetMultipleOrEmpty(id, false), + ExpectError: regexp.MustCompile("\"schedule.0.minutes\": one of `schedule.0.minutes,schedule.0.using_cron`"), + }, + }, + }) +} + +func taskConfigInvalidScheduleSetMultipleOrEmpty(id sdk.SchemaObjectIdentifier, setMultiple bool) string { + var scheduleString string + scheduleBuffer := new(bytes.Buffer) + scheduleBuffer.WriteString("schedule {\n") + if setMultiple { + scheduleBuffer.WriteString("minutes = 10\n") + scheduleBuffer.WriteString("using_cron = \"*/5 * * * * UTC\"\n") + } + scheduleBuffer.WriteString("}\n") + scheduleString = scheduleBuffer.String() + + return fmt.Sprintf(` 
+resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + started = false + sql_statement = "SELECT 1" + + %[4]s +}`, id.DatabaseName(), id.SchemaName(), id.Name(), scheduleString) +} + +func TestAcc_Task_AllParameters(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModel := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(5) + configModelWithAllParametersSet := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(5). + WithSuspendTaskAfterNumFailures(15). + WithTaskAutoRetryAttempts(15). + WithUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall). + WithUserTaskMinimumTriggerIntervalInSeconds(30). + WithUserTaskTimeoutMs(1000). + WithAbortDetachedQuery(true). + WithAutocommit(false). + WithBinaryInputFormatEnum(sdk.BinaryInputFormatUTF8). + WithBinaryOutputFormatEnum(sdk.BinaryOutputFormatBase64). + WithClientMemoryLimit(1024). + WithClientMetadataRequestUseConnectionCtx(true). + WithClientPrefetchThreads(2). + WithClientResultChunkSize(48). + WithClientResultColumnCaseInsensitive(true). + WithClientSessionKeepAlive(true). + WithClientSessionKeepAliveHeartbeatFrequency(2400). + WithClientTimestampTypeMappingEnum(sdk.ClientTimestampTypeMappingNtz). + WithDateInputFormat("YYYY-MM-DD"). + WithDateOutputFormat("YY-MM-DD"). + WithEnableUnloadPhysicalTypeOptimization(false). + WithErrorOnNondeterministicMerge(false). + WithErrorOnNondeterministicUpdate(true). + WithGeographyOutputFormatEnum(sdk.GeographyOutputFormatWKB). + WithGeometryOutputFormatEnum(sdk.GeometryOutputFormatWKB). + WithJdbcUseSessionTimezone(false). + WithJsonIndent(4). + WithLockTimeout(21222). + WithLogLevelEnum(sdk.LogLevelError). + WithMultiStatementCount(0). + WithNoorderSequenceAsDefault(false). + WithOdbcTreatDecimalAsInt(true). + WithQueryTag("some_tag"). 
+ WithQuotedIdentifiersIgnoreCase(true). + WithRowsPerResultset(2). + WithS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + WithSearchPath("$public, $current"). + WithStatementQueuedTimeoutInSeconds(10). + WithStatementTimeoutInSeconds(10). + WithStrictJsonOutput(true). + WithTimestampDayIsAlways24h(true). + WithTimestampInputFormat("YYYY-MM-DD"). + WithTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimestampTypeMappingEnum(sdk.TimestampTypeMappingLtz). + WithTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + WithTimezone("Europe/Warsaw"). + WithTimeInputFormat("HH24:MI"). + WithTimeOutputFormat("HH24:MI"). + WithTraceLevelEnum(sdk.TraceLevelOnEvent). + WithTransactionAbortOnError(true). + WithTransactionDefaultIsolationLevelEnum(sdk.TransactionDefaultIsolationLevelReadCommitted). + WithTwoDigitCenturyStart(1980). + WithUnsupportedDdlActionEnum(sdk.UnsupportedDDLActionFail). + WithUseCachedResult(false). + WithWeekOfYearPolicy(1). + WithWeekStart(1) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + PreCheck: func() { acc.TestAccPreCheck(t) }, + CheckDestroy: acc.CheckDestroy(t, resources.User), + Steps: []resource.TestStep{ + // create with default values for all the parameters + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasAllDefaults(). + HasAllDefaultsExplicit(), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). 
+ HasAllDefaults(), ), }, - // add when + // import when no parameter set { - ConfigDirectory: config.TestStepDirectory(), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", "TRUE"), + ResourceName: configModel.ResourceReference(), + ImportState: true, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + ImportStateCheck: assert.AssertThatImport(t, + resourceparametersassert.ImportedTaskResourceParameters(t, helpers.EncodeResourceIdentifier(id)). + HasAllDefaults(), ), }, - // remove when + // set all parameters + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithAllParametersSet), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). + HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). + HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). 
+ HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). + HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). + HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). + HasWeekStart(1), + resourceparametersassert.TaskResourceParameters(t, configModelWithAllParametersSet.ResourceReference()). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). + HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). 
+ HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). + HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). + HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). + HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). 
+ HasWeekStart(1), + ), + }, + // import when all parameters set + { + ResourceName: configModelWithAllParametersSet.ResourceReference(), + ImportState: true, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelWithAllParametersSet), + ImportStateCheck: assert.AssertThatImport(t, + resourceparametersassert.ImportedTaskResourceParameters(t, helpers.EncodeResourceIdentifier(id)). + HasSuspendTaskAfterNumFailures(15). + HasTaskAutoRetryAttempts(15). + HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall). + HasUserTaskMinimumTriggerIntervalInSeconds(30). + HasUserTaskTimeoutMs(1000). + HasAbortDetachedQuery(true). + HasAutocommit(false). + HasBinaryInputFormat(sdk.BinaryInputFormatUTF8). + HasBinaryOutputFormat(sdk.BinaryOutputFormatBase64). + HasClientMemoryLimit(1024). + HasClientMetadataRequestUseConnectionCtx(true). + HasClientPrefetchThreads(2). + HasClientResultChunkSize(48). + HasClientResultColumnCaseInsensitive(true). + HasClientSessionKeepAlive(true). + HasClientSessionKeepAliveHeartbeatFrequency(2400). + HasClientTimestampTypeMapping(sdk.ClientTimestampTypeMappingNtz). + HasDateInputFormat("YYYY-MM-DD"). + HasDateOutputFormat("YY-MM-DD"). + HasEnableUnloadPhysicalTypeOptimization(false). + HasErrorOnNondeterministicMerge(false). + HasErrorOnNondeterministicUpdate(true). + HasGeographyOutputFormat(sdk.GeographyOutputFormatWKB). + HasGeometryOutputFormat(sdk.GeometryOutputFormatWKB). + HasJdbcUseSessionTimezone(false). + HasJsonIndent(4). + HasLockTimeout(21222). + HasLogLevel(sdk.LogLevelError). + HasMultiStatementCount(0). + HasNoorderSequenceAsDefault(false). + HasOdbcTreatDecimalAsInt(true). + HasQueryTag("some_tag"). + HasQuotedIdentifiersIgnoreCase(true). + HasRowsPerResultset(2). + HasS3StageVpceDnsName("vpce-id.s3.region.vpce.amazonaws.com"). + HasSearchPath("$public, $current"). + HasStatementQueuedTimeoutInSeconds(10). + HasStatementTimeoutInSeconds(10). 
+ HasStrictJsonOutput(true). + HasTimestampDayIsAlways24h(true). + HasTimestampInputFormat("YYYY-MM-DD"). + HasTimestampLtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampNtzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimestampTypeMapping(sdk.TimestampTypeMappingLtz). + HasTimestampTzOutputFormat("YYYY-MM-DD HH24:MI:SS"). + HasTimezone("Europe/Warsaw"). + HasTimeInputFormat("HH24:MI"). + HasTimeOutputFormat("HH24:MI"). + HasTraceLevel(sdk.TraceLevelOnEvent). + HasTransactionAbortOnError(true). + HasTransactionDefaultIsolationLevel(sdk.TransactionDefaultIsolationLevelReadCommitted). + HasTwoDigitCenturyStart(1980). + HasUnsupportedDdlAction(sdk.UnsupportedDDLActionFail). + HasUseCachedResult(false). + HasWeekOfYearPolicy(1). + HasWeekStart(1), + ), + }, + // unset all the parameters { - ConfigDirectory: acc.ConfigurationSameAsStepN(1), - ConfigVariables: m(), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("snowflake_task.test_task", "enabled", "true"), - resource.TestCheckResourceAttr("snowflake_task.test_task", "when", ""), + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + objectparametersassert.TaskParameters(t, id). + HasAllDefaults(). + HasAllDefaultsExplicit(), + resourceparametersassert.TaskResourceParameters(t, configModel.ResourceReference()). + HasAllDefaults(), ), }, }, }) } + +func TestAcc_Task_Enabled(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModelEnabled := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(5) + configModelDisabled := model.TaskWithId("test", id, false, statement). 
+ WithScheduleMinutes(5) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). + HasStartedString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). + HasState(sdk.TaskStateSuspended), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelEnabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelEnabled.ResourceReference()). + HasStartedString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, configModelEnabled.ResourceReference()). + HasState(sdk.TaskStateStarted), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModelDisabled.ResourceReference()). + HasStartedString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, configModelDisabled.ResourceReference()). 
+ HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) +} + +func TestAcc_Task_ConvertStandaloneTaskToSubtask(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + id2 := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + + firstTaskStandaloneModel := model.TaskWithId("root", id, true, statement). + WithScheduleMinutes(5). + WithSuspendTaskAfterNumFailures(1) + secondTaskStandaloneModel := model.TaskWithId("child", id2, true, statement). + WithScheduleMinutes(5) + + rootTaskModel := model.TaskWithId("root", id, true, statement). + WithScheduleMinutes(5). + WithSuspendTaskAfterNumFailures(2) + childTaskModel := model.TaskWithId("child", id2, true, statement). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(id.FullyQualifiedName()))) + childTaskModel.SetDependsOn(rootTaskModel.ResourceReference()) + + firstTaskStandaloneModelDisabled := model.TaskWithId("root", id, false, statement). + WithScheduleMinutes(5) + secondTaskStandaloneModelDisabled := model.TaskWithId("child", id2, false, statement). + WithScheduleMinutes(5) + secondTaskStandaloneModelDisabled.SetDependsOn(firstTaskStandaloneModelDisabled.ResourceReference()) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", firstTaskStandaloneModel, secondTaskStandaloneModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(5). 
+ HasStartedString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("1"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(5). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(5). + HasStartedString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(5). + HasState(sdk.TaskStateStarted), + ), + }, + // Change the second task to run after the first one (creating a DAG) + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskModel, childTaskModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). + HasScheduleMinutes(5). + HasStartedString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("2"), + resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). + HasScheduleMinutes(5). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, childTaskModel.ResourceReference()). + HasAfter(id). + HasStartedString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). + HasPredecessors(id). + HasState(sdk.TaskStateStarted), + ), + }, + // Change tasks in DAG to standalone tasks (disabled to check if resuming/suspending works correctly) + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", firstTaskStandaloneModelDisabled, secondTaskStandaloneModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(5). + HasStartedString(r.BooleanFalse). 
+ HasSuspendTaskAfterNumFailuresString("10"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(5). + HasState(sdk.TaskStateSuspended), + resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(5). + HasStartedString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(5). + HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) +} + +func TestAcc_Task_ConvertStandaloneTaskToFinalizer(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootTaskId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + finalizerTaskId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + firstTaskStandaloneModel := model.TaskWithId("root", rootTaskId, true, statement). + WithScheduleMinutes(schedule). + WithSuspendTaskAfterNumFailures(1) + secondTaskStandaloneModel := model.TaskWithId("child", finalizerTaskId, true, statement). + WithScheduleMinutes(schedule) + + rootTaskModel := model.TaskWithId("root", rootTaskId, true, statement). + WithScheduleMinutes(schedule). + WithSuspendTaskAfterNumFailures(2) + childTaskModel := model.TaskWithId("child", finalizerTaskId, true, statement). + WithFinalize(rootTaskId.FullyQualifiedName()) + childTaskModel.SetDependsOn(rootTaskModel.ResourceReference()) + + firstTaskStandaloneModelDisabled := model.TaskWithId("root", rootTaskId, false, statement). + WithScheduleMinutes(schedule) + secondTaskStandaloneModelDisabled := model.TaskWithId("child", finalizerTaskId, false, statement). 
+ WithScheduleMinutes(schedule) + secondTaskStandaloneModelDisabled.SetDependsOn(firstTaskStandaloneModelDisabled.ResourceReference()) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", firstTaskStandaloneModel, secondTaskStandaloneModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(schedule). + HasStartedString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("1"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(schedule). + HasTaskRelations(sdk.TaskRelations{}). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(schedule). + HasStartedString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModel.ResourceReference()). + HasScheduleMinutes(schedule). + HasTaskRelations(sdk.TaskRelations{}). + HasState(sdk.TaskStateStarted), + ), + }, + // Change the second task to run after the first one (creating a DAG) + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskModel, childTaskModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskModel.ResourceReference()). + HasScheduleMinutes(schedule). + HasStartedString(r.BooleanTrue). 
+ HasSuspendTaskAfterNumFailuresString("2"), + resourceshowoutputassert.TaskShowOutput(t, rootTaskModel.ResourceReference()). + HasScheduleMinutes(schedule). + // TODO(SNOW-1348116 - next pr): Create ticket and report; this field in task relations seems to have mixed chances of appearing (needs deeper digging, doesn't affect the resource; could be removed for now) + // HasTaskRelations(sdk.TaskRelations{FinalizerTask: &finalizerTaskId}). + HasState(sdk.TaskStateStarted), + resourceassert.TaskResource(t, childTaskModel.ResourceReference()). + HasStartedString(r.BooleanTrue), + resourceshowoutputassert.TaskShowOutput(t, childTaskModel.ResourceReference()). + HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootTaskId}). + HasState(sdk.TaskStateStarted), + ), + }, + // Change tasks in DAG to standalone tasks (disabled to check if resuming/suspending works correctly) + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", firstTaskStandaloneModelDisabled, secondTaskStandaloneModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(schedule). + HasStartedString(r.BooleanFalse). + HasSuspendTaskAfterNumFailuresString("10"), + resourceshowoutputassert.TaskShowOutput(t, firstTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(schedule). + HasTaskRelations(sdk.TaskRelations{}). + HasState(sdk.TaskStateSuspended), + resourceassert.TaskResource(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(schedule). + HasStartedString(r.BooleanFalse), + resourceshowoutputassert.TaskShowOutput(t, secondTaskStandaloneModelDisabled.ResourceReference()). + HasScheduleMinutes(schedule). + HasTaskRelations(sdk.TaskRelations{}). 
+ HasState(sdk.TaskStateSuspended), + ), + }, + }, + }) +} + +func TestAcc_Task_SwitchScheduledWithAfter(t *testing.T) { + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithScheduleMinutes(schedule). + WithSuspendTaskAfterNumFailures(1) + childTaskConfigModel := model.TaskWithId("child", childId, true, statement). + WithScheduleMinutes(schedule) + + rootTaskConfigModelAfterSuspendFailuresUpdate := model.TaskWithId("root", rootId, true, statement). + WithScheduleMinutes(schedule). + WithSuspendTaskAfterNumFailures(2) + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))) + childTaskConfigModelWithAfter.SetDependsOn(rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference()) + + rootTaskConfigModelDisabled := model.TaskWithId("root", rootId, false, statement). + WithScheduleMinutes(schedule) + childTaskConfigModelDisabled := model.TaskWithId("child", childId, false, statement). 
+ WithScheduleMinutes(schedule) + childTaskConfigModelDisabled.SetDependsOn(rootTaskConfigModelDisabled.ResourceReference()) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule). + HasSuspendTaskAfterNumFailuresString("1"), + resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule). + HasAfter(). + HasSuspendTaskAfterNumFailuresString("10"), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModelAfterSuspendFailuresUpdate, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModelAfterSuspendFailuresUpdate.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule). + HasSuspendTaskAfterNumFailuresString("2"), + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasNoScheduleSet(). + HasAfter(rootId). 
+ HasSuspendTaskAfterNumFailuresString("10"), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule). + HasSuspendTaskAfterNumFailuresString("1"), + resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule). + HasAfter(). + HasSuspendTaskAfterNumFailuresString("10"), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModelDisabled, childTaskConfigModelDisabled), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModelDisabled.ResourceReference()). + HasStartedString(r.BooleanFalse). + HasScheduleMinutes(schedule). + HasSuspendTaskAfterNumFailuresString("10"), + resourceassert.TaskResource(t, childTaskConfigModelDisabled.ResourceReference()). + HasStartedString(r.BooleanFalse). + HasScheduleMinutes(schedule). + HasAfter(). + HasSuspendTaskAfterNumFailuresString("10"), + ), + }, + }, + }) +} + +func TestAcc_Task_WithAfter(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). 
+ WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithSqlStatement(statement) + + childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(rootId), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). 
+ HasAfter(), + ), + }, + }, + }) +} + +func TestAcc_Task_WithFinalizer(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithFinalize(rootId.FullyQualifiedName()). + WithSqlStatement(statement) + + childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). 
+ HasFinalizeString(rootId.FullyQualifiedName()), + ), + }, + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(""), + ), + }, + }, + }) +} + +func TestAcc_Task_UpdateFinalizerExternally(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithoutFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithComment("abc"). + WithSqlStatement(statement) + + childTaskConfigModelWithFinalizer := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithFinalize(rootId.FullyQualifiedName()). + WithComment("abc"). 
+ WithSqlStatement(statement) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutFinalizer), + }, + // Set finalizer externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSetFinalize(rootId)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(""), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Set finalizer in config + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(rootId.FullyQualifiedName()), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootId}), + ), + }, + // Unset finalizer externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnsetFinalize(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSet(*sdk.NewTaskSetRequest().WithSchedule(fmt.Sprintf("%d minutes", schedule)))) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(rootId.FullyQualifiedName()), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{FinalizedRootTask: &rootId}), + ), + }, + // Unset finalizer in config + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutFinalizer), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasFinalizeString(""), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutFinalizer.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + }, + }) +} + +func TestAcc_Task_UpdateAfterExternally(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + childTaskConfigModelWithoutAfter := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithComment("abc"). + WithSqlStatement(statement) + + childTaskConfigModelWithAfter := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("abc"). 
+ WithSqlStatement(statement) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutAfter), + }, + // Set after externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithUnset(*sdk.NewTaskUnsetRequest().WithSchedule(true))) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithAddAfter([]sdk.SchemaObjectIdentifier{rootId})) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{}), + ), + }, + // Set after in config + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(rootId), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{Predecessors: []sdk.SchemaObjectIdentifier{rootId}}), + ), + }, + // Unset after externally + { + PreConfig: func() { + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithSuspend(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSuspend(true)) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithRemoveAfter([]sdk.SchemaObjectIdentifier{rootId})) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithSet(*sdk.NewTaskSetRequest().WithSchedule(fmt.Sprintf("%d MINUTES", schedule)))) + + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(childId).WithResume(true)) + acc.TestClient().Task.Alter(t, sdk.NewAlterTaskRequest(rootId).WithResume(true)) + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(rootId), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). 
+ HasTaskRelations(sdk.TaskRelations{Predecessors: []sdk.SchemaObjectIdentifier{rootId}}), + ), + }, + // Unset after in config + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithoutAfter), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(), + resourceshowoutputassert.TaskShowOutput(t, childTaskConfigModelWithoutAfter.ResourceReference()). + HasState(sdk.TaskStateStarted). + HasTaskRelations(sdk.TaskRelations{}), + ), + }, + }, + }) +} + +func TestAcc_Task_issue2207(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + childId := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + + rootTaskConfigModel := model.TaskWithId("root", rootId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + childTaskConfigModel := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("abc"). + WithSqlStatement(statement) + + childTaskConfigModelWithDifferentComment := model.TaskWithId("child", childId, true, statement). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootId.FullyQualifiedName()))). + WithComment("def"). 
+ WithSqlStatement(statement) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(rootId). + HasCommentString("abc"), + ), + }, + // change comment + { + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(childTaskConfigModelWithDifferentComment.ResourceReference(), plancheck.ResourceActionUpdate), + }, + }, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/with_task_dependency"), + ConfigVariables: config.ConfigVariablesFromModels(t, "tasks", rootTaskConfigModel, childTaskConfigModelWithDifferentComment), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, rootTaskConfigModel.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasScheduleMinutes(schedule), + resourceassert.TaskResource(t, childTaskConfigModelWithDifferentComment.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasAfter(rootId). 
+ HasCommentString("def"), + ), + }, + }, + }) +} + +func TestAcc_Task_issue2036(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + when := "TRUE" + + taskConfigModelWithoutWhen := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(schedule). + WithSqlStatement(statement) + + taskConfigModelWithWhen := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(schedule). + WithSqlStatement(statement). + WithWhen(when) + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + // create without when + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasWhenString(""), + ), + }, + // add when + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, taskConfigModelWithWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). + HasWhenString("TRUE"), + ), + }, + // remove when + { + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, taskConfigModelWithoutWhen), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, taskConfigModelWithoutWhen.ResourceReference()). + HasStartedString(r.BooleanTrue). 
+ HasWhenString(""), + ), + }, + }, + }) +} + +func TestAcc_Task_issue3113(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + errorNotificationIntegration, errorNotificationIntegrationCleanup := acc.TestClient().NotificationIntegration.CreateWithGcpPubSub(t) + t.Cleanup(errorNotificationIntegrationCleanup) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + schedule := 5 + configModel := model.TaskWithId("test", id, true, statement). + WithScheduleMinutes(schedule). + WithSqlStatement(statement). + WithErrorIntegration(errorNotificationIntegration.ID().Name()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + PreConfig: func() { acc.SetV097CompatibleConfigPathEnv(t) }, + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.97.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: taskConfigWithErrorIntegration(id, errorNotificationIntegration.ID()), + ExpectError: regexp.MustCompile("error_integration: '' expected type 'string', got unconvertible type 'sdk.AccountObjectIdentifier'"), + }, + { + PreConfig: func() { + acc.TestClient().Task.DropFunc(t, id)() + acc.UnsetConfigPathEnv(t) + }, + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). 
+ HasErrorIntegrationString(errorNotificationIntegration.ID().Name()), + ), + }, + }, + }) +} + +func TestAcc_Task_StateUpgrade_NoOptionalFields(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + configModel := model.TaskWithId("test", id, false, statement) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.98.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: taskNoOptionalFieldsConfigV0980(id), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_task.test", "enabled", "false"), + resource.TestCheckResourceAttr("snowflake_task.test", "allow_overlapping_execution", "false"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). 
+ HasAllowOverlappingExecutionString(r.BooleanDefault), + ), + }, + }, + }) +} + +func TestAcc_Task_StateUpgrade(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + condition := "2 < 1" + configModel := model.TaskWithId("test", id, false, statement). + WithScheduleMinutes(5). + WithAllowOverlappingExecution(r.BooleanTrue). + WithSuspendTaskAfterNumFailures(10). + WithWhen(condition). + WithUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.98.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: taskBasicConfigV0980(id, condition), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_task.test", "enabled", "false"), + resource.TestCheckResourceAttr("snowflake_task.test", "allow_overlapping_execution", "true"), + resource.TestCheckResourceAttr("snowflake_task.test", "schedule", "5 MINUTES"), + resource.TestCheckResourceAttr("snowflake_task.test", "suspend_task_after_num_failures", "10"), + resource.TestCheckResourceAttr("snowflake_task.test", "when", condition), + resource.TestCheckResourceAttr("snowflake_task.test", "user_task_managed_initial_warehouse_size", "XSMALL"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + ConfigDirectory: acc.ConfigurationDirectory("TestAcc_Task/basic"), + ConfigVariables: config.ConfigVariablesFromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). 
+ HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasScheduleMinutes(5). + HasAllowOverlappingExecutionString(r.BooleanTrue). + HasSuspendTaskAfterNumFailuresString("10"). + HasWhenString(condition). + HasUserTaskManagedInitialWarehouseSizeEnum(sdk.WarehouseSizeXSmall), + ), + }, + }, + }) +} + +func TestAcc_Task_StateUpgradeWithAfter(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + rootTask, rootTaskCleanup := acc.TestClient().Task.Create(t) + t.Cleanup(rootTaskCleanup) + + id := acc.TestClient().Ids.RandomSchemaObjectIdentifier() + statement := "SELECT 1" + comment := random.Comment() + configModel := model.TaskWithId("test", id, false, statement). + WithUserTaskTimeoutMs(50). + WithWarehouse(acc.TestClient().Ids.WarehouseId().Name()). + WithAfterValue(configvariable.SetVariable(configvariable.StringVariable(rootTask.ID().FullyQualifiedName()))). + WithComment(comment). + WithLogLevelEnum(sdk.LogLevelInfo). + WithAutocommit(false). 
+ WithJsonIndent(4) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acc.TestAccPreCheck(t) }, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + CheckDestroy: acc.CheckDestroy(t, resources.Task), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "snowflake": { + VersionConstraint: "=0.98.0", + Source: "Snowflake-Labs/snowflake", + }, + }, + Config: taskCompleteConfigV0980(id, rootTask.ID(), acc.TestClient().Ids.WarehouseId(), 50, comment), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("snowflake_task.test", "after.#", "1"), + resource.TestCheckResourceAttr("snowflake_task.test", "after.0", rootTask.ID().Name()), + resource.TestCheckResourceAttr("snowflake_task.test", "warehouse", acc.TestClient().Ids.WarehouseId().Name()), + resource.TestCheckResourceAttr("snowflake_task.test", "user_task_timeout_ms", "50"), + resource.TestCheckResourceAttr("snowflake_task.test", "comment", comment), + resource.TestCheckResourceAttr("snowflake_task.test", "session_parameters.LOG_LEVEL", "INFO"), + resource.TestCheckResourceAttr("snowflake_task.test", "session_parameters.AUTOCOMMIT", "false"), + resource.TestCheckResourceAttr("snowflake_task.test", "session_parameters.JSON_INDENT", "4"), + ), + }, + { + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + Config: config.FromModel(t, configModel), + Check: assert.AssertThat(t, + resourceassert.TaskResource(t, configModel.ResourceReference()). + HasFullyQualifiedNameString(id.FullyQualifiedName()). + HasDatabaseString(id.DatabaseName()). + HasSchemaString(id.SchemaName()). + HasNameString(id.Name()). + HasStartedString(r.BooleanFalse). + HasSqlStatementString(statement). + HasAfter(rootTask.ID()). + HasWarehouseString(acc.TestClient().Ids.WarehouseId().Name()). + HasUserTaskTimeoutMsString("50"). + HasLogLevelString(string(sdk.LogLevelInfo)). 
+ HasAutocommitString("false"). + HasJsonIndentString("4"). + HasCommentString(comment), + ), + }, + }, + }) +} + +func taskNoOptionalFieldsConfigV0980(id sdk.SchemaObjectIdentifier) string { + return fmt.Sprintf(` +resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + sql_statement = "SELECT 1" +} +`, id.DatabaseName(), id.SchemaName(), id.Name()) +} + +func taskConfigWithErrorIntegration(id sdk.SchemaObjectIdentifier, errorIntegrationId sdk.AccountObjectIdentifier) string { + return fmt.Sprintf(` +resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + schedule = "5 MINUTES" + sql_statement = "SELECT 1" + enabled = true + error_integration = "%[4]s" +} +`, id.DatabaseName(), id.SchemaName(), id.Name(), errorIntegrationId.Name()) +} + +func taskBasicConfigV0980(id sdk.SchemaObjectIdentifier, condition string) string { + return fmt.Sprintf(` +resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + enabled = false + sql_statement = "SELECT 1" + schedule = "5 MINUTES" + allow_overlapping_execution = true + suspend_task_after_num_failures = 10 + when = "%[4]s" + user_task_managed_initial_warehouse_size = "XSMALL" +} +`, id.DatabaseName(), id.SchemaName(), id.Name(), condition) +} + +func taskCompleteConfigV0980( + id sdk.SchemaObjectIdentifier, + rootTaskId sdk.SchemaObjectIdentifier, + warehouseId sdk.AccountObjectIdentifier, + userTaskTimeoutMs int, + comment string, +) string { + return fmt.Sprintf(` +resource "snowflake_task" "test" { + database = "%[1]s" + schema = "%[2]s" + name = "%[3]s" + enabled = false + sql_statement = "SELECT 1" + + after = [%[4]s] + warehouse = "%[5]s" + user_task_timeout_ms = %[6]d + comment = "%[7]s" + session_parameters = { + LOG_LEVEL = "INFO", + AUTOCOMMIT = false, + JSON_INDENT = 4, + } +} +`, id.DatabaseName(), id.SchemaName(), id.Name(), + strconv.Quote(rootTaskId.Name()), + warehouseId.Name(), + userTaskTimeoutMs, + 
comment, + ) +} diff --git a/pkg/resources/task_parameters.go b/pkg/resources/task_parameters.go new file mode 100644 index 0000000000..0c6f24d66f --- /dev/null +++ b/pkg/resources/task_parameters.go @@ -0,0 +1,412 @@ +package resources + +import ( + "context" + "strconv" + "strings" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +var ( + taskParametersSchema = make(map[string]*schema.Schema) + taskParametersCustomDiff = ParametersCustomDiff( + taskParametersProvider, + // task parameters + parameter[sdk.TaskParameter]{sdk.TaskParameterSuspendTaskAfterNumFailures, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTaskAutoRetryAttempts, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskManagedInitialWarehouseSize, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUserTaskTimeoutMs, valueTypeInt, sdk.ParameterTypeTask}, + // session parameters + parameter[sdk.TaskParameter]{sdk.TaskParameterAbortDetachedQuery, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterAutocommit, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterBinaryInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterBinaryOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientMemoryLimit, valueTypeInt, sdk.ParameterTypeTask}, + 
parameter[sdk.TaskParameter]{sdk.TaskParameterClientMetadataRequestUseConnectionCtx, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientPrefetchThreads, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientResultChunkSize, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientResultColumnCaseInsensitive, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientSessionKeepAlive, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterClientTimestampTypeMapping, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterDateInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterDateOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterErrorOnNondeterministicMerge, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterErrorOnNondeterministicUpdate, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterGeographyOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterGeometryOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJdbcUseSessionTimezone, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterJsonIndent, valueTypeInt, sdk.ParameterTypeTask}, + 
parameter[sdk.TaskParameter]{sdk.TaskParameterLockTimeout, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterLogLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterMultiStatementCount, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterNoorderSequenceAsDefault, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterOdbcTreatDecimalAsInt, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterQueryTag, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterQuotedIdentifiersIgnoreCase, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterRowsPerResultset, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterS3StageVpceDnsName, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterSearchPath, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStatementQueuedTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStatementTimeoutInSeconds, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterStrictJsonOutput, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampDayIsAlways24h, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampLtzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampNtzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + 
parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampTypeMapping, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimestampTzOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimezone, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimeInputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTimeOutputFormat, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTraceLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTransactionAbortOnError, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTransactionDefaultIsolationLevel, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterTwoDigitCenturyStart, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUnsupportedDdlAction, valueTypeString, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterUseCachedResult, valueTypeBool, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterWeekOfYearPolicy, valueTypeInt, sdk.ParameterTypeTask}, + parameter[sdk.TaskParameter]{sdk.TaskParameterWeekStart, valueTypeInt, sdk.ParameterTypeTask}, + ) +) + +func init() { + // TODO [SNOW-1645342]: move to the SDK + TaskParameterFields := []parameterDef[sdk.TaskParameter]{ + // task parameters + {Name: sdk.TaskParameterSuspendTaskAfterNumFailures, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of consecutive failed task runs after which the current task is suspended automatically. 
The default is 0 (no automatic suspension)."}, + {Name: sdk.TaskParameterTaskAutoRetryAttempts, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the number of automatic task graph retry attempts. If any task graphs complete in a FAILED state, Snowflake can automatically retry the task graphs from the last task in the graph that failed."}, + {Name: sdk.TaskParameterUserTaskManagedInitialWarehouseSize, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToWarehouseSize), DiffSuppress: NormalizeAndCompare(sdk.ToWarehouseSize), ConflictsWith: []string{"warehouse"}, Description: "Specifies the size of the compute resources to provision for the first run of the task, before a task history is available for Snowflake to determine an ideal size. Once a task has successfully completed a few runs, Snowflake ignores this parameter setting. Valid values are (case-insensitive): %s. (Conflicts with warehouse)"}, + {Name: sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Minimum amount of time between Triggered Task executions in seconds"}, + {Name: sdk.TaskParameterUserTaskTimeoutMs, Type: schema.TypeInt, ValidateDiag: validation.ToDiagFunc(validation.IntAtLeast(0)), Description: "Specifies the time limit on a single run of the task before it times out (in milliseconds)."}, + // session params + {Name: sdk.TaskParameterAbortDetachedQuery, Type: schema.TypeBool, Description: "Specifies the action that Snowflake performs for in-progress queries if connectivity is lost due to abrupt termination of a session (e.g. network outage, browser termination, service interruption)."}, + {Name: sdk.TaskParameterAutocommit, Type: schema.TypeBool, Description: "Specifies whether autocommit is enabled for the session. 
Autocommit determines whether a DML statement, when executed without an active transaction, is automatically committed after the statement successfully completes. For more information, see [Transactions](https://docs.snowflake.com/en/sql-reference/transactions)."}, + {Name: sdk.TaskParameterBinaryInputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryInputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryInputFormat), Description: "The format of VARCHAR values passed as input to VARCHAR-to-BINARY conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, + {Name: sdk.TaskParameterBinaryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToBinaryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToBinaryOutputFormat), Description: "The format for VARCHAR values returned as output by BINARY-to-VARCHAR conversion functions. For more information, see [Binary input and output](https://docs.snowflake.com/en/sql-reference/binary-input-output)."}, + {Name: sdk.TaskParameterClientMemoryLimit, Type: schema.TypeInt, Description: "Parameter that specifies the maximum amount of memory the JDBC driver or ODBC driver should use for the result set from queries (in MB)."}, + {Name: sdk.TaskParameterClientMetadataRequestUseConnectionCtx, Type: schema.TypeBool, Description: "For specific ODBC functions and JDBC methods, this parameter can change the default search scope from all databases/schemas to the current database/schema. The narrower search typically returns fewer rows and executes more quickly."}, + {Name: sdk.TaskParameterClientPrefetchThreads, Type: schema.TypeInt, Description: "Parameter that specifies the number of threads used by the client to pre-fetch large result sets. 
The driver will attempt to honor the parameter value, but defines the minimum and maximum values (depending on your system’s resources) to improve performance."}, + {Name: sdk.TaskParameterClientResultChunkSize, Type: schema.TypeInt, Description: "Parameter that specifies the maximum size of each set (or chunk) of query results to download (in MB). The JDBC driver downloads query results in chunks."}, + {Name: sdk.TaskParameterClientResultColumnCaseInsensitive, Type: schema.TypeBool, Description: "Parameter that indicates whether to match column name case-insensitively in ResultSet.get* methods in JDBC."}, + {Name: sdk.TaskParameterClientSessionKeepAlive, Type: schema.TypeBool, Description: "Parameter that indicates whether to force a user to log in again after a period of inactivity in the session."}, + {Name: sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, Type: schema.TypeInt, Description: "Number of seconds in-between client attempts to update the token for the session."}, + {Name: sdk.TaskParameterClientTimestampTypeMapping, Type: schema.TypeString, Description: "Specifies the [TIMESTAMP_* variation](https://docs.snowflake.com/en/sql-reference/data-types-datetime.html#label-datatypes-timestamp-variations) to use when binding timestamp variables for JDBC or ODBC applications that use the bind API to load data."}, + {Name: sdk.TaskParameterDateInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the DATE data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterDateOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the DATE data type. 
For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, Type: schema.TypeBool, Description: "Specifies whether to set the schema for unloaded Parquet files based on the logical column data types (i.e. the types in the unload SQL query or source table) or on the unloaded column values (i.e. the smallest data types and precision that support the values in the output columns of the unload SQL statement or source table)."}, + {Name: sdk.TaskParameterErrorOnNondeterministicMerge, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [MERGE](https://docs.snowflake.com/en/sql-reference/sql/merge) command is used to update or delete a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, + {Name: sdk.TaskParameterErrorOnNondeterministicUpdate, Type: schema.TypeBool, Description: "Specifies whether to return an error when the [UPDATE](https://docs.snowflake.com/en/sql-reference/sql/update) command is used to update a target row that joins multiple source rows and the system cannot determine the action to perform on the target row."}, + {Name: sdk.TaskParameterGeographyOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeographyOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeographyOutputFormat), Description: "Display format for [GEOGRAPHY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geography)."}, + {Name: sdk.TaskParameterGeometryOutputFormat, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToGeometryOutputFormat), DiffSuppress: NormalizeAndCompare(sdk.ToGeometryOutputFormat), Description: "Display format for [GEOMETRY values](https://docs.snowflake.com/en/sql-reference/data-types-geospatial.html#label-data-types-geometry)."}, + {Name: 
sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, Type: schema.TypeBool, Description: "Specifies how JDBC processes TIMESTAMP_NTZ values."}, + {Name: sdk.TaskParameterJdbcUseSessionTimezone, Type: schema.TypeBool, Description: "Specifies whether the JDBC Driver uses the time zone of the JVM or the time zone of the session (specified by the [TIMEZONE](https://docs.snowflake.com/en/sql-reference/parameters#label-timezone) parameter) for the getDate(), getTime(), and getTimestamp() methods of the ResultSet class."}, + {Name: sdk.TaskParameterJsonIndent, Type: schema.TypeInt, Description: "Specifies the number of blank spaces to indent each new element in JSON output in the session. Also specifies whether to insert newline characters after each element."}, + {Name: sdk.TaskParameterLockTimeout, Type: schema.TypeInt, Description: "Number of seconds to wait while trying to lock a resource, before timing out and aborting the statement."}, + {Name: sdk.TaskParameterLogLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToLogLevel), DiffSuppress: NormalizeAndCompare(sdk.ToLogLevel), Description: "Specifies the severity level of messages that should be ingested and made available in the active event table. Messages at the specified level (and at more severe levels) are ingested. For more information about log levels, see [Setting log level](https://docs.snowflake.com/en/developer-guide/logging-tracing/logging-log-level)."}, + {Name: sdk.TaskParameterMultiStatementCount, Type: schema.TypeInt, Description: "Number of statements to execute when using the multi-statement capability."}, + {Name: sdk.TaskParameterNoorderSequenceAsDefault, Type: schema.TypeBool, Description: "Specifies whether the ORDER or NOORDER property is set by default when you create a new sequence or add a new table column. 
The ORDER and NOORDER properties determine whether or not the values are generated for the sequence or auto-incremented column in [increasing or decreasing order](https://docs.snowflake.com/en/user-guide/querying-sequences.html#label-querying-sequences-increasing-values)."}, + {Name: sdk.TaskParameterOdbcTreatDecimalAsInt, Type: schema.TypeBool, Description: "Specifies how ODBC processes columns that have a scale of zero (0)."}, + {Name: sdk.TaskParameterQueryTag, Type: schema.TypeString, Description: "Optional string that can be used to tag queries and other SQL statements executed within a session. The tags are displayed in the output of the [QUERY_HISTORY, QUERY_HISTORY_BY_*](https://docs.snowflake.com/en/sql-reference/functions/query_history) functions."}, + {Name: sdk.TaskParameterQuotedIdentifiersIgnoreCase, Type: schema.TypeBool, Description: "Specifies whether letters in double-quoted object identifiers are stored and resolved as uppercase letters. By default, Snowflake preserves the case of alphabetic characters when storing and resolving double-quoted identifiers (see [Identifier resolution](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing)). You can use this parameter in situations in which [third-party applications always use double quotes around identifiers](https://docs.snowflake.com/en/sql-reference/identifiers-syntax.html#label-identifier-casing-parameter)."}, + {Name: sdk.TaskParameterRowsPerResultset, Type: schema.TypeInt, Description: "Specifies the maximum number of rows returned in a result set. A value of 0 specifies no maximum."}, + {Name: sdk.TaskParameterS3StageVpceDnsName, Type: schema.TypeString, Description: "Specifies the DNS name of an Amazon S3 interface endpoint. Requests sent to the internal stage of an account via [AWS PrivateLink for Amazon S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/privatelink-interface-endpoints.html) use this endpoint to connect. 
For more information, see [Accessing Internal stages with dedicated interface endpoints](https://docs.snowflake.com/en/user-guide/private-internal-stages-aws.html#label-aws-privatelink-internal-stage-network-isolation)."}, + {Name: sdk.TaskParameterSearchPath, Type: schema.TypeString, Description: "Specifies the path to search to resolve unqualified object names in queries. For more information, see [Name resolution in queries](https://docs.snowflake.com/en/sql-reference/name-resolution.html#label-object-name-resolution-search-path). Comma-separated list of identifiers. An identifier can be a fully or partially qualified schema name."}, + {Name: sdk.TaskParameterStatementQueuedTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, a SQL statement (query, DDL, DML, etc.) remains queued for a warehouse before it is canceled by the system. This parameter can be used in conjunction with the [MAX_CONCURRENCY_LEVEL](https://docs.snowflake.com/en/sql-reference/parameters#label-max-concurrency-level) parameter to ensure a warehouse is never backlogged."}, + {Name: sdk.TaskParameterStatementTimeoutInSeconds, Type: schema.TypeInt, Description: "Amount of time, in seconds, after which a running SQL statement (query, DDL, DML, etc.) is canceled by the system."}, + {Name: sdk.TaskParameterStrictJsonOutput, Type: schema.TypeBool, Description: "This parameter specifies whether JSON output in a session is compatible with the general standard (as described by [http://json.org](http://json.org)). By design, Snowflake allows JSON input that contains non-standard values; however, these non-standard values might result in Snowflake outputting JSON that is incompatible with other platforms and languages. 
This parameter, when enabled, ensures that Snowflake outputs valid/compatible JSON."}, + {Name: sdk.TaskParameterTimestampDayIsAlways24h, Type: schema.TypeBool, Description: "Specifies whether the [DATEADD](https://docs.snowflake.com/en/sql-reference/functions/dateadd) function (and its aliases) always consider a day to be exactly 24 hours for expressions that span multiple days."}, + {Name: sdk.TaskParameterTimestampInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIMESTAMP data type alias. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported timestamp format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of timestamps stored in the system during the session)."}, + {Name: sdk.TaskParameterTimestampLtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_LTZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimestampNtzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_NTZ data type."}, + {Name: sdk.TaskParameterTimestampOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP data type alias. 
For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimestampTypeMapping, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTimestampTypeMapping), DiffSuppress: NormalizeAndCompare(sdk.ToTimestampTypeMapping), Description: "Specifies the TIMESTAMP_* variation that the TIMESTAMP data type alias maps to."}, + {Name: sdk.TaskParameterTimestampTzOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIMESTAMP_TZ data type. If no format is specified, defaults to [TIMESTAMP_OUTPUT_FORMAT](https://docs.snowflake.com/en/sql-reference/parameters#label-timestamp-output-format). For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTimezone, Type: schema.TypeString, Description: "Specifies the time zone for the session. You can specify a [time zone name](https://data.iana.org/time-zones/tzdb-2021a/zone1970.tab) or a [link name](https://data.iana.org/time-zones/tzdb-2021a/backward) from release 2021a of the [IANA Time Zone Database](https://www.iana.org/time-zones) (e.g. America/Los_Angeles, Europe/London, UTC, Etc/GMT, etc.)."}, + {Name: sdk.TaskParameterTimeInputFormat, Type: schema.TypeString, Description: "Specifies the input format for the TIME data type. For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output). Any valid, supported time format or AUTO (AUTO specifies that Snowflake attempts to automatically detect the format of times stored in the system during the session)."}, + {Name: sdk.TaskParameterTimeOutputFormat, Type: schema.TypeString, Description: "Specifies the display format for the TIME data type. 
For more information, see [Date and time input and output formats](https://docs.snowflake.com/en/sql-reference/date-time-input-output)."}, + {Name: sdk.TaskParameterTraceLevel, Type: schema.TypeString, ValidateDiag: sdkValidation(sdk.ToTraceLevel), DiffSuppress: NormalizeAndCompare(sdk.ToTraceLevel), Description: "Controls how trace events are ingested into the event table. For more information about trace levels, see [Setting trace level](https://docs.snowflake.com/en/developer-guide/logging-tracing/tracing-trace-level)."}, + {Name: sdk.TaskParameterTransactionAbortOnError, Type: schema.TypeBool, Description: "Specifies the action to perform when a statement issued within a non-autocommit transaction returns with an error."}, + {Name: sdk.TaskParameterTransactionDefaultIsolationLevel, Type: schema.TypeString, Description: "Specifies the isolation level for transactions in the user session."}, + {Name: sdk.TaskParameterTwoDigitCenturyStart, Type: schema.TypeInt, Description: "Specifies the “century start” year for 2-digit years (i.e. the earliest year such dates can represent). This parameter prevents ambiguous dates when importing or converting data with the `YY` date format component (i.e. years represented as 2 digits)."}, + {Name: sdk.TaskParameterUnsupportedDdlAction, Type: schema.TypeString, Description: "Determines if an unsupported (i.e. non-default) value specified for a constraint property returns an error."}, + {Name: sdk.TaskParameterUseCachedResult, Type: schema.TypeBool, Description: "Specifies whether to reuse persisted query results, if available, when a matching query is submitted."}, + {Name: sdk.TaskParameterWeekOfYearPolicy, Type: schema.TypeInt, Description: "Specifies how the weeks in a given year are computed. `0`: The semantics used are equivalent to the ISO semantics, in which a week belongs to a given year if at least 4 days of that week are in that year. 
`1`: January 1 is included in the first week of the year and December 31 is included in the last week of the year."}, + {Name: sdk.TaskParameterWeekStart, Type: schema.TypeInt, Description: "Specifies the first day of the week (used by week-related date functions). `0`: Legacy Snowflake behavior is used (i.e. ISO-like semantics). `1` (Monday) to `7` (Sunday): All the week-related functions use weeks that start on the specified day of the week."}, + } + + // TODO [SNOW-1645342]: extract this method after moving to SDK + for _, field := range TaskParameterFields { + fieldName := strings.ToLower(string(field.Name)) + + taskParametersSchema[fieldName] = &schema.Schema{ + Type: field.Type, + Description: enrichWithReferenceToParameterDocs(field.Name, field.Description), + Computed: true, + Optional: true, + ValidateDiagFunc: field.ValidateDiag, + DiffSuppressFunc: field.DiffSuppress, + ConflictsWith: field.ConflictsWith, + } + } +} + +func taskParametersProvider(ctx context.Context, d ResourceIdProvider, meta any) ([]*sdk.Parameter, error) { + return parametersProvider(ctx, d, meta.(*provider.Context), taskParametersProviderFunc, sdk.ParseSchemaObjectIdentifier) +} + +func taskParametersProviderFunc(c *sdk.Client) showParametersFunc[sdk.SchemaObjectIdentifier] { + return c.Tasks.ShowParameters +} + +// TODO [SNOW-1645342]: make generic based on type definition +func handleTaskParameterRead(d *schema.ResourceData, taskParameters []*sdk.Parameter) error { + for _, p := range taskParameters { + switch p.Key { + case + string(sdk.TaskParameterSuspendTaskAfterNumFailures), + string(sdk.TaskParameterTaskAutoRetryAttempts), + string(sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds), + string(sdk.TaskParameterUserTaskTimeoutMs), + string(sdk.TaskParameterClientMemoryLimit), + string(sdk.TaskParameterClientPrefetchThreads), + string(sdk.TaskParameterClientResultChunkSize), + string(sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency), + 
string(sdk.TaskParameterJsonIndent), + string(sdk.TaskParameterLockTimeout), + string(sdk.TaskParameterMultiStatementCount), + string(sdk.TaskParameterRowsPerResultset), + string(sdk.TaskParameterStatementQueuedTimeoutInSeconds), + string(sdk.TaskParameterStatementTimeoutInSeconds), + string(sdk.TaskParameterTwoDigitCenturyStart), + string(sdk.TaskParameterWeekOfYearPolicy), + string(sdk.TaskParameterWeekStart): + value, err := strconv.Atoi(p.Value) + if err != nil { + return err + } + if err := d.Set(strings.ToLower(p.Key), value); err != nil { + return err + } + case + string(sdk.TaskParameterUserTaskManagedInitialWarehouseSize), + string(sdk.TaskParameterBinaryInputFormat), + string(sdk.TaskParameterBinaryOutputFormat), + string(sdk.TaskParameterClientTimestampTypeMapping), + string(sdk.TaskParameterDateInputFormat), + string(sdk.TaskParameterDateOutputFormat), + string(sdk.TaskParameterGeographyOutputFormat), + string(sdk.TaskParameterGeometryOutputFormat), + string(sdk.TaskParameterLogLevel), + string(sdk.TaskParameterQueryTag), + string(sdk.TaskParameterS3StageVpceDnsName), + string(sdk.TaskParameterSearchPath), + string(sdk.TaskParameterTimestampInputFormat), + string(sdk.TaskParameterTimestampLtzOutputFormat), + string(sdk.TaskParameterTimestampNtzOutputFormat), + string(sdk.TaskParameterTimestampOutputFormat), + string(sdk.TaskParameterTimestampTypeMapping), + string(sdk.TaskParameterTimestampTzOutputFormat), + string(sdk.TaskParameterTimezone), + string(sdk.TaskParameterTimeInputFormat), + string(sdk.TaskParameterTimeOutputFormat), + string(sdk.TaskParameterTraceLevel), + string(sdk.TaskParameterTransactionDefaultIsolationLevel), + string(sdk.TaskParameterUnsupportedDdlAction): + if err := d.Set(strings.ToLower(p.Key), p.Value); err != nil { + return err + } + case + string(sdk.TaskParameterAbortDetachedQuery), + string(sdk.TaskParameterAutocommit), + string(sdk.TaskParameterClientMetadataRequestUseConnectionCtx), + 
string(sdk.TaskParameterClientResultColumnCaseInsensitive), + string(sdk.TaskParameterClientSessionKeepAlive), + string(sdk.TaskParameterEnableUnloadPhysicalTypeOptimization), + string(sdk.TaskParameterErrorOnNondeterministicMerge), + string(sdk.TaskParameterErrorOnNondeterministicUpdate), + string(sdk.TaskParameterJdbcTreatTimestampNtzAsUtc), + string(sdk.TaskParameterJdbcUseSessionTimezone), + string(sdk.TaskParameterNoorderSequenceAsDefault), + string(sdk.TaskParameterOdbcTreatDecimalAsInt), + string(sdk.TaskParameterQuotedIdentifiersIgnoreCase), + string(sdk.TaskParameterStrictJsonOutput), + string(sdk.TaskParameterTimestampDayIsAlways24h), + string(sdk.TaskParameterTransactionAbortOnError), + string(sdk.TaskParameterUseCachedResult): + value, err := strconv.ParseBool(p.Value) + if err != nil { + return err + } + if err := d.Set(strings.ToLower(p.Key), value); err != nil { + return err + } + } + } + + return nil +} + +// TODO [SNOW-1348330]: consider using SessionParameters#setParam during parameters rework +// (because currently setParam already is able to set the right parameter based on the string value input, +// but GetConfigPropertyAsPointerAllowingZeroValue receives typed value, +// so this would be unnecessary running in circles) +// TODO [SNOW-1645342]: include mappers in the param definition (after moving it to the SDK: identity versus concrete) +func handleTaskParametersCreate(d *schema.ResourceData, createOpts *sdk.CreateTaskRequest) diag.Diagnostics { + createOpts.WithSessionParameters(sdk.SessionParameters{}) + if v, ok := d.GetOk("user_task_managed_initial_warehouse_size"); ok { + size, err := sdk.ToWarehouseSize(v.(string)) + if err != nil { + return diag.FromErr(err) + } + createOpts.WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithUserTaskManagedInitialWarehouseSize(size)) + } + diags := JoinDiags( + // task parameters + handleParameterCreate(d, sdk.TaskParameterUserTaskTimeoutMs, &createOpts.UserTaskTimeoutMs), + 
handleParameterCreate(d, sdk.TaskParameterSuspendTaskAfterNumFailures, &createOpts.SuspendTaskAfterNumFailures), + handleParameterCreate(d, sdk.TaskParameterTaskAutoRetryAttempts, &createOpts.TaskAutoRetryAttempts), + handleParameterCreate(d, sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, &createOpts.UserTaskMinimumTriggerIntervalInSeconds), + // session parameters + handleParameterCreate(d, sdk.TaskParameterAbortDetachedQuery, &createOpts.SessionParameters.AbortDetachedQuery), + handleParameterCreate(d, sdk.TaskParameterAutocommit, &createOpts.SessionParameters.Autocommit), + handleParameterCreateWithMapping(d, sdk.TaskParameterBinaryInputFormat, &createOpts.SessionParameters.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), + handleParameterCreateWithMapping(d, sdk.TaskParameterBinaryOutputFormat, &createOpts.SessionParameters.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), + handleParameterCreate(d, sdk.TaskParameterClientMemoryLimit, &createOpts.SessionParameters.ClientMemoryLimit), + handleParameterCreate(d, sdk.TaskParameterClientMetadataRequestUseConnectionCtx, &createOpts.SessionParameters.ClientMetadataRequestUseConnectionCtx), + handleParameterCreate(d, sdk.TaskParameterClientPrefetchThreads, &createOpts.SessionParameters.ClientPrefetchThreads), + handleParameterCreate(d, sdk.TaskParameterClientResultChunkSize, &createOpts.SessionParameters.ClientResultChunkSize), + handleParameterCreate(d, sdk.TaskParameterClientResultColumnCaseInsensitive, &createOpts.SessionParameters.ClientResultColumnCaseInsensitive), + handleParameterCreate(d, sdk.TaskParameterClientSessionKeepAlive, &createOpts.SessionParameters.ClientSessionKeepAlive), + handleParameterCreate(d, sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, &createOpts.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency), + handleParameterCreateWithMapping(d, sdk.TaskParameterClientTimestampTypeMapping, 
&createOpts.SessionParameters.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), + handleParameterCreate(d, sdk.TaskParameterDateInputFormat, &createOpts.SessionParameters.DateInputFormat), + handleParameterCreate(d, sdk.TaskParameterDateOutputFormat, &createOpts.SessionParameters.DateOutputFormat), + handleParameterCreate(d, sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, &createOpts.SessionParameters.EnableUnloadPhysicalTypeOptimization), + handleParameterCreate(d, sdk.TaskParameterErrorOnNondeterministicMerge, &createOpts.SessionParameters.ErrorOnNondeterministicMerge), + handleParameterCreate(d, sdk.TaskParameterErrorOnNondeterministicUpdate, &createOpts.SessionParameters.ErrorOnNondeterministicUpdate), + handleParameterCreateWithMapping(d, sdk.TaskParameterGeographyOutputFormat, &createOpts.SessionParameters.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), + handleParameterCreateWithMapping(d, sdk.TaskParameterGeometryOutputFormat, &createOpts.SessionParameters.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), + handleParameterCreate(d, sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, &createOpts.SessionParameters.JdbcTreatTimestampNtzAsUtc), + handleParameterCreate(d, sdk.TaskParameterJdbcUseSessionTimezone, &createOpts.SessionParameters.JdbcUseSessionTimezone), + handleParameterCreate(d, sdk.TaskParameterJsonIndent, &createOpts.SessionParameters.JSONIndent), + handleParameterCreate(d, sdk.TaskParameterLockTimeout, &createOpts.SessionParameters.LockTimeout), + handleParameterCreateWithMapping(d, sdk.TaskParameterLogLevel, &createOpts.SessionParameters.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), + handleParameterCreate(d, sdk.TaskParameterMultiStatementCount, &createOpts.SessionParameters.MultiStatementCount), + handleParameterCreate(d, sdk.TaskParameterNoorderSequenceAsDefault, &createOpts.SessionParameters.NoorderSequenceAsDefault), + 
handleParameterCreate(d, sdk.TaskParameterOdbcTreatDecimalAsInt, &createOpts.SessionParameters.OdbcTreatDecimalAsInt), + handleParameterCreate(d, sdk.TaskParameterQueryTag, &createOpts.SessionParameters.QueryTag), + handleParameterCreate(d, sdk.TaskParameterQuotedIdentifiersIgnoreCase, &createOpts.SessionParameters.QuotedIdentifiersIgnoreCase), + handleParameterCreate(d, sdk.TaskParameterRowsPerResultset, &createOpts.SessionParameters.RowsPerResultset), + handleParameterCreate(d, sdk.TaskParameterS3StageVpceDnsName, &createOpts.SessionParameters.S3StageVpceDnsName), + handleParameterCreate(d, sdk.TaskParameterSearchPath, &createOpts.SessionParameters.SearchPath), + handleParameterCreate(d, sdk.TaskParameterStatementQueuedTimeoutInSeconds, &createOpts.SessionParameters.StatementQueuedTimeoutInSeconds), + handleParameterCreate(d, sdk.TaskParameterStatementTimeoutInSeconds, &createOpts.SessionParameters.StatementTimeoutInSeconds), + handleParameterCreate(d, sdk.TaskParameterStrictJsonOutput, &createOpts.SessionParameters.StrictJSONOutput), + handleParameterCreate(d, sdk.TaskParameterTimestampDayIsAlways24h, &createOpts.SessionParameters.TimestampDayIsAlways24h), + handleParameterCreate(d, sdk.TaskParameterTimestampInputFormat, &createOpts.SessionParameters.TimestampInputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampLtzOutputFormat, &createOpts.SessionParameters.TimestampLTZOutputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampNtzOutputFormat, &createOpts.SessionParameters.TimestampNTZOutputFormat), + handleParameterCreate(d, sdk.TaskParameterTimestampOutputFormat, &createOpts.SessionParameters.TimestampOutputFormat), + handleParameterCreateWithMapping(d, sdk.TaskParameterTimestampTypeMapping, &createOpts.SessionParameters.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), + handleParameterCreate(d, sdk.TaskParameterTimestampTzOutputFormat, &createOpts.SessionParameters.TimestampTZOutputFormat), + 
handleParameterCreate(d, sdk.TaskParameterTimezone, &createOpts.SessionParameters.Timezone), + handleParameterCreate(d, sdk.TaskParameterTimeInputFormat, &createOpts.SessionParameters.TimeInputFormat), + handleParameterCreate(d, sdk.TaskParameterTimeOutputFormat, &createOpts.SessionParameters.TimeOutputFormat), + handleParameterCreateWithMapping(d, sdk.TaskParameterTraceLevel, &createOpts.SessionParameters.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), + handleParameterCreate(d, sdk.TaskParameterTransactionAbortOnError, &createOpts.SessionParameters.TransactionAbortOnError), + handleParameterCreateWithMapping(d, sdk.TaskParameterTransactionDefaultIsolationLevel, &createOpts.SessionParameters.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), + handleParameterCreate(d, sdk.TaskParameterTwoDigitCenturyStart, &createOpts.SessionParameters.TwoDigitCenturyStart), + handleParameterCreateWithMapping(d, sdk.TaskParameterUnsupportedDdlAction, &createOpts.SessionParameters.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), + handleParameterCreate(d, sdk.TaskParameterUseCachedResult, &createOpts.SessionParameters.UseCachedResult), + handleParameterCreate(d, sdk.TaskParameterWeekOfYearPolicy, &createOpts.SessionParameters.WeekOfYearPolicy), + handleParameterCreate(d, sdk.TaskParameterWeekStart, &createOpts.SessionParameters.WeekStart), + ) + if *createOpts.SessionParameters == (sdk.SessionParameters{}) { + createOpts.SessionParameters = nil + } + return diags +} + +func handleTaskParametersUpdate(d *schema.ResourceData, set *sdk.TaskSetRequest, unset *sdk.TaskUnsetRequest) diag.Diagnostics { + set.WithSessionParameters(sdk.SessionParameters{}) + unset.WithSessionParametersUnset(sdk.SessionParametersUnset{}) + diags := JoinDiags( + // task parameters + handleParameterUpdateWithMapping(d, sdk.TaskParameterUserTaskManagedInitialWarehouseSize, &set.UserTaskManagedInitialWarehouseSize, 
&unset.UserTaskManagedInitialWarehouseSize, stringToStringEnumProvider(sdk.ToWarehouseSize)), + handleParameterUpdate(d, sdk.TaskParameterUserTaskTimeoutMs, &set.UserTaskTimeoutMs, &unset.UserTaskTimeoutMs), + handleParameterUpdate(d, sdk.TaskParameterSuspendTaskAfterNumFailures, &set.SuspendTaskAfterNumFailures, &unset.SuspendTaskAfterNumFailures), + handleParameterUpdate(d, sdk.TaskParameterTaskAutoRetryAttempts, &set.TaskAutoRetryAttempts, &unset.TaskAutoRetryAttempts), + handleParameterUpdate(d, sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, &set.UserTaskMinimumTriggerIntervalInSeconds, &unset.UserTaskMinimumTriggerIntervalInSeconds), + // session parameters + handleParameterUpdate(d, sdk.TaskParameterAbortDetachedQuery, &set.SessionParameters.AbortDetachedQuery, &unset.SessionParametersUnset.AbortDetachedQuery), + handleParameterUpdate(d, sdk.TaskParameterAutocommit, &set.SessionParameters.Autocommit, &unset.SessionParametersUnset.Autocommit), + handleParameterUpdateWithMapping(d, sdk.TaskParameterBinaryInputFormat, &set.SessionParameters.BinaryInputFormat, &unset.SessionParametersUnset.BinaryInputFormat, stringToStringEnumProvider(sdk.ToBinaryInputFormat)), + handleParameterUpdateWithMapping(d, sdk.TaskParameterBinaryOutputFormat, &set.SessionParameters.BinaryOutputFormat, &unset.SessionParametersUnset.BinaryOutputFormat, stringToStringEnumProvider(sdk.ToBinaryOutputFormat)), + handleParameterUpdate(d, sdk.TaskParameterClientMemoryLimit, &set.SessionParameters.ClientMemoryLimit, &unset.SessionParametersUnset.ClientMemoryLimit), + handleParameterUpdate(d, sdk.TaskParameterClientMetadataRequestUseConnectionCtx, &set.SessionParameters.ClientMetadataRequestUseConnectionCtx, &unset.SessionParametersUnset.ClientMetadataRequestUseConnectionCtx), + handleParameterUpdate(d, sdk.TaskParameterClientPrefetchThreads, &set.SessionParameters.ClientPrefetchThreads, &unset.SessionParametersUnset.ClientPrefetchThreads), + handleParameterUpdate(d, 
sdk.TaskParameterClientResultChunkSize, &set.SessionParameters.ClientResultChunkSize, &unset.SessionParametersUnset.ClientResultChunkSize), + handleParameterUpdate(d, sdk.TaskParameterClientResultColumnCaseInsensitive, &set.SessionParameters.ClientResultColumnCaseInsensitive, &unset.SessionParametersUnset.ClientResultColumnCaseInsensitive), + handleParameterUpdate(d, sdk.TaskParameterClientSessionKeepAlive, &set.SessionParameters.ClientSessionKeepAlive, &unset.SessionParametersUnset.ClientSessionKeepAlive), + handleParameterUpdate(d, sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, &set.SessionParameters.ClientSessionKeepAliveHeartbeatFrequency, &unset.SessionParametersUnset.ClientSessionKeepAliveHeartbeatFrequency), + handleParameterUpdateWithMapping(d, sdk.TaskParameterClientTimestampTypeMapping, &set.SessionParameters.ClientTimestampTypeMapping, &unset.SessionParametersUnset.ClientTimestampTypeMapping, stringToStringEnumProvider(sdk.ToClientTimestampTypeMapping)), + handleParameterUpdate(d, sdk.TaskParameterDateInputFormat, &set.SessionParameters.DateInputFormat, &unset.SessionParametersUnset.DateInputFormat), + handleParameterUpdate(d, sdk.TaskParameterDateOutputFormat, &set.SessionParameters.DateOutputFormat, &unset.SessionParametersUnset.DateOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, &set.SessionParameters.EnableUnloadPhysicalTypeOptimization, &unset.SessionParametersUnset.EnableUnloadPhysicalTypeOptimization), + handleParameterUpdate(d, sdk.TaskParameterErrorOnNondeterministicMerge, &set.SessionParameters.ErrorOnNondeterministicMerge, &unset.SessionParametersUnset.ErrorOnNondeterministicMerge), + handleParameterUpdate(d, sdk.TaskParameterErrorOnNondeterministicUpdate, &set.SessionParameters.ErrorOnNondeterministicUpdate, &unset.SessionParametersUnset.ErrorOnNondeterministicUpdate), + handleParameterUpdateWithMapping(d, sdk.TaskParameterGeographyOutputFormat, 
&set.SessionParameters.GeographyOutputFormat, &unset.SessionParametersUnset.GeographyOutputFormat, stringToStringEnumProvider(sdk.ToGeographyOutputFormat)), + handleParameterUpdateWithMapping(d, sdk.TaskParameterGeometryOutputFormat, &set.SessionParameters.GeometryOutputFormat, &unset.SessionParametersUnset.GeometryOutputFormat, stringToStringEnumProvider(sdk.ToGeometryOutputFormat)), + handleParameterUpdate(d, sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, &set.SessionParameters.JdbcTreatTimestampNtzAsUtc, &unset.SessionParametersUnset.JdbcTreatTimestampNtzAsUtc), + handleParameterUpdate(d, sdk.TaskParameterJdbcUseSessionTimezone, &set.SessionParameters.JdbcUseSessionTimezone, &unset.SessionParametersUnset.JdbcUseSessionTimezone), + handleParameterUpdate(d, sdk.TaskParameterJsonIndent, &set.SessionParameters.JSONIndent, &unset.SessionParametersUnset.JSONIndent), + handleParameterUpdate(d, sdk.TaskParameterLockTimeout, &set.SessionParameters.LockTimeout, &unset.SessionParametersUnset.LockTimeout), + handleParameterUpdateWithMapping(d, sdk.TaskParameterLogLevel, &set.SessionParameters.LogLevel, &unset.SessionParametersUnset.LogLevel, stringToStringEnumProvider(sdk.ToLogLevel)), + handleParameterUpdate(d, sdk.TaskParameterMultiStatementCount, &set.SessionParameters.MultiStatementCount, &unset.SessionParametersUnset.MultiStatementCount), + handleParameterUpdate(d, sdk.TaskParameterNoorderSequenceAsDefault, &set.SessionParameters.NoorderSequenceAsDefault, &unset.SessionParametersUnset.NoorderSequenceAsDefault), + handleParameterUpdate(d, sdk.TaskParameterOdbcTreatDecimalAsInt, &set.SessionParameters.OdbcTreatDecimalAsInt, &unset.SessionParametersUnset.OdbcTreatDecimalAsInt), + handleParameterUpdate(d, sdk.TaskParameterQueryTag, &set.SessionParameters.QueryTag, &unset.SessionParametersUnset.QueryTag), + handleParameterUpdate(d, sdk.TaskParameterQuotedIdentifiersIgnoreCase, &set.SessionParameters.QuotedIdentifiersIgnoreCase, 
&unset.SessionParametersUnset.QuotedIdentifiersIgnoreCase), + handleParameterUpdate(d, sdk.TaskParameterRowsPerResultset, &set.SessionParameters.RowsPerResultset, &unset.SessionParametersUnset.RowsPerResultset), + handleParameterUpdate(d, sdk.TaskParameterS3StageVpceDnsName, &set.SessionParameters.S3StageVpceDnsName, &unset.SessionParametersUnset.S3StageVpceDnsName), + handleParameterUpdate(d, sdk.TaskParameterSearchPath, &set.SessionParameters.SearchPath, &unset.SessionParametersUnset.SearchPath), + handleParameterUpdate(d, sdk.TaskParameterStatementQueuedTimeoutInSeconds, &set.SessionParameters.StatementQueuedTimeoutInSeconds, &unset.SessionParametersUnset.StatementQueuedTimeoutInSeconds), + handleParameterUpdate(d, sdk.TaskParameterStatementTimeoutInSeconds, &set.SessionParameters.StatementTimeoutInSeconds, &unset.SessionParametersUnset.StatementTimeoutInSeconds), + handleParameterUpdate(d, sdk.TaskParameterStrictJsonOutput, &set.SessionParameters.StrictJSONOutput, &unset.SessionParametersUnset.StrictJSONOutput), + handleParameterUpdate(d, sdk.TaskParameterTimestampDayIsAlways24h, &set.SessionParameters.TimestampDayIsAlways24h, &unset.SessionParametersUnset.TimestampDayIsAlways24h), + handleParameterUpdate(d, sdk.TaskParameterTimestampInputFormat, &set.SessionParameters.TimestampInputFormat, &unset.SessionParametersUnset.TimestampInputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampLtzOutputFormat, &set.SessionParameters.TimestampLTZOutputFormat, &unset.SessionParametersUnset.TimestampLTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampNtzOutputFormat, &set.SessionParameters.TimestampNTZOutputFormat, &unset.SessionParametersUnset.TimestampNTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimestampOutputFormat, &set.SessionParameters.TimestampOutputFormat, &unset.SessionParametersUnset.TimestampOutputFormat), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTimestampTypeMapping, 
&set.SessionParameters.TimestampTypeMapping, &unset.SessionParametersUnset.TimestampTypeMapping, stringToStringEnumProvider(sdk.ToTimestampTypeMapping)), + handleParameterUpdate(d, sdk.TaskParameterTimestampTzOutputFormat, &set.SessionParameters.TimestampTZOutputFormat, &unset.SessionParametersUnset.TimestampTZOutputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimezone, &set.SessionParameters.Timezone, &unset.SessionParametersUnset.Timezone), + handleParameterUpdate(d, sdk.TaskParameterTimeInputFormat, &set.SessionParameters.TimeInputFormat, &unset.SessionParametersUnset.TimeInputFormat), + handleParameterUpdate(d, sdk.TaskParameterTimeOutputFormat, &set.SessionParameters.TimeOutputFormat, &unset.SessionParametersUnset.TimeOutputFormat), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTraceLevel, &set.SessionParameters.TraceLevel, &unset.SessionParametersUnset.TraceLevel, stringToStringEnumProvider(sdk.ToTraceLevel)), + handleParameterUpdate(d, sdk.TaskParameterTransactionAbortOnError, &set.SessionParameters.TransactionAbortOnError, &unset.SessionParametersUnset.TransactionAbortOnError), + handleParameterUpdateWithMapping(d, sdk.TaskParameterTransactionDefaultIsolationLevel, &set.SessionParameters.TransactionDefaultIsolationLevel, &unset.SessionParametersUnset.TransactionDefaultIsolationLevel, stringToStringEnumProvider(sdk.ToTransactionDefaultIsolationLevel)), + handleParameterUpdate(d, sdk.TaskParameterTwoDigitCenturyStart, &set.SessionParameters.TwoDigitCenturyStart, &unset.SessionParametersUnset.TwoDigitCenturyStart), + handleParameterUpdateWithMapping(d, sdk.TaskParameterUnsupportedDdlAction, &set.SessionParameters.UnsupportedDDLAction, &unset.SessionParametersUnset.UnsupportedDDLAction, stringToStringEnumProvider(sdk.ToUnsupportedDDLAction)), + handleParameterUpdate(d, sdk.TaskParameterUseCachedResult, &set.SessionParameters.UseCachedResult, &unset.SessionParametersUnset.UseCachedResult), + handleParameterUpdate(d, 
sdk.TaskParameterWeekOfYearPolicy, &set.SessionParameters.WeekOfYearPolicy, &unset.SessionParametersUnset.WeekOfYearPolicy), + handleParameterUpdate(d, sdk.TaskParameterWeekStart, &set.SessionParameters.WeekStart, &unset.SessionParametersUnset.WeekStart), + ) + if *set.SessionParameters == (sdk.SessionParameters{}) { + set.SessionParameters = nil + } + if *unset.SessionParametersUnset == (sdk.SessionParametersUnset{}) { + unset.SessionParametersUnset = nil + } + return diags +} diff --git a/pkg/resources/task_state_upgraders.go b/pkg/resources/task_state_upgraders.go new file mode 100644 index 0000000000..1a77858405 --- /dev/null +++ b/pkg/resources/task_state_upgraders.go @@ -0,0 +1,53 @@ +package resources + +import ( + "context" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" +) + +func v098TaskStateUpgrader(ctx context.Context, rawState map[string]any, meta any) (map[string]any, error) { + if rawState == nil { + return rawState, nil + } + + rawState["condition"] = rawState["when"] + rawState["started"] = rawState["enabled"].(bool) + rawState["allow_overlapping_execution"] = booleanStringFromBool(rawState["allow_overlapping_execution"].(bool)) + if rawState["after"] != nil { + if afterSlice, okType := rawState["after"].([]any); okType { + newAfter := make([]string, len(afterSlice)) + for i, name := range afterSlice { + newAfter[i] = sdk.NewSchemaObjectIdentifier(rawState["database"].(string), rawState["schema"].(string), name.(string)).FullyQualifiedName() + } + rawState["after"] = newAfter + } + } + if rawState["session_parameters"] != nil { + if sessionParamsMap, okType := rawState["session_parameters"].(map[string]any); okType { + for k, v := range sessionParamsMap { + rawState[k] = v + } + } + } + delete(rawState, "session_parameters") + + if rawState["schedule"] != nil && len(rawState["schedule"].(string)) > 0 { + taskSchedule, err := sdk.ParseTaskSchedule(rawState["schedule"].(string)) + scheduleMap := make(map[string]any) + if err != 
nil { + return nil, err + } + switch { + case len(taskSchedule.Cron) > 0: + scheduleMap["using_cron"] = taskSchedule.Cron + case taskSchedule.Minutes > 0: + scheduleMap["minutes"] = taskSchedule.Minutes + } + rawState["schedule"] = []any{scheduleMap} + } else { + delete(rawState, "schedule") + } + + return migratePipeSeparatedObjectIdentifierResourceIdToFullyQualifiedName(ctx, rawState, meta) +} diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf index f4c901edaf..cc7d4094a5 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnAllTasks/test.tf @@ -6,6 +6,7 @@ resource "snowflake_task" "test" { database = var.database schema = var.schema name = var.task + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } @@ -13,6 +14,7 @@ resource "snowflake_task" "second_test" { database = var.database schema = var.schema name = var.second_task + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf index f7b80a6d9a..df66234f14 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask/test.tf @@ -7,6 +7,7 @@ resource "snowflake_task" "test" { schema = var.schema name = var.task warehouse = var.warehouse + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf index f7b80a6d9a..df66234f14 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/1/test.tf @@ -7,6 +7,7 @@ resource "snowflake_task" "test" { 
schema = var.schema name = var.task warehouse = var.warehouse + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf index 5aa8c57b5f..9dab06be4e 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/2/test.tf @@ -7,6 +7,7 @@ resource "snowflake_task" "test" { schema = var.schema name = var.task warehouse = var.warehouse + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } @@ -15,7 +16,8 @@ resource "snowflake_task" "child" { schema = var.schema name = var.child warehouse = var.warehouse - after = [snowflake_task.test.name] + after = [snowflake_task.test.fully_qualified_name] + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf index c8ef0f9c56..6acde0d353 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/3/test.tf @@ -7,5 +7,6 @@ resource "snowflake_task" "test" { schema = var.schema name = var.task warehouse = var.warehouse + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf index d57869ed64..f653336ba3 100644 --- a/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf +++ b/pkg/resources/testdata/TestAcc_GrantOwnership/OnTask_Discussion2877/4/test.tf @@ -7,6 +7,7 @@ resource "snowflake_task" "test" { schema = var.schema name = var.task warehouse = var.warehouse + started = 
false sql_statement = "SELECT CURRENT_TIMESTAMP" } @@ -15,7 +16,8 @@ resource "snowflake_task" "child" { schema = var.schema name = var.child warehouse = var.warehouse - after = [snowflake_task.test.name] + after = [snowflake_task.test.fully_qualified_name] + started = false sql_statement = "SELECT CURRENT_TIMESTAMP" } diff --git a/pkg/resources/testdata/TestAcc_Task/basic/test.tf b/pkg/resources/testdata/TestAcc_Task/basic/test.tf new file mode 100644 index 0000000000..78e5a97811 --- /dev/null +++ b/pkg/resources/testdata/TestAcc_Task/basic/test.tf @@ -0,0 +1,82 @@ +resource "snowflake_task" "test" { + name = var.name + database = var.database + schema = var.schema + started = var.started + sql_statement = var.sql_statement + + # Optionals + warehouse = var.warehouse + config = var.config + allow_overlapping_execution = var.allow_overlapping_execution + error_integration = var.error_integration + when = var.when + comment = var.comment + + dynamic "schedule" { + for_each = [for element in [var.schedule] : element if element != null] + content { + minutes = lookup(var.schedule, "minutes", null) + using_cron = lookup(var.schedule, "cron", null) + } + } + + # Parameters + suspend_task_after_num_failures = var.suspend_task_after_num_failures + task_auto_retry_attempts = var.task_auto_retry_attempts + user_task_managed_initial_warehouse_size = var.user_task_managed_initial_warehouse_size + user_task_minimum_trigger_interval_in_seconds = var.user_task_minimum_trigger_interval_in_seconds + user_task_timeout_ms = var.user_task_timeout_ms + abort_detached_query = var.abort_detached_query + autocommit = var.autocommit + binary_input_format = var.binary_input_format + binary_output_format = var.binary_output_format + client_memory_limit = var.client_memory_limit + client_metadata_request_use_connection_ctx = var.client_metadata_request_use_connection_ctx + client_prefetch_threads = var.client_prefetch_threads + client_result_chunk_size = var.client_result_chunk_size + 
client_result_column_case_insensitive = var.client_result_column_case_insensitive + client_session_keep_alive = var.client_session_keep_alive + client_session_keep_alive_heartbeat_frequency = var.client_session_keep_alive_heartbeat_frequency + client_timestamp_type_mapping = var.client_timestamp_type_mapping + date_input_format = var.date_input_format + date_output_format = var.date_output_format + enable_unload_physical_type_optimization = var.enable_unload_physical_type_optimization + error_on_nondeterministic_merge = var.error_on_nondeterministic_merge + error_on_nondeterministic_update = var.error_on_nondeterministic_update + geography_output_format = var.geography_output_format + geometry_output_format = var.geometry_output_format + jdbc_use_session_timezone = var.jdbc_use_session_timezone + json_indent = var.json_indent + lock_timeout = var.lock_timeout + log_level = var.log_level + multi_statement_count = var.multi_statement_count + noorder_sequence_as_default = var.noorder_sequence_as_default + odbc_treat_decimal_as_int = var.odbc_treat_decimal_as_int + query_tag = var.query_tag + quoted_identifiers_ignore_case = var.quoted_identifiers_ignore_case + rows_per_resultset = var.rows_per_resultset + s3_stage_vpce_dns_name = var.s3_stage_vpce_dns_name + search_path = var.search_path + statement_queued_timeout_in_seconds = var.statement_queued_timeout_in_seconds + statement_timeout_in_seconds = var.statement_timeout_in_seconds + strict_json_output = var.strict_json_output + timestamp_day_is_always_24h = var.timestamp_day_is_always_24h + timestamp_input_format = var.timestamp_input_format + timestamp_ltz_output_format = var.timestamp_ltz_output_format + timestamp_ntz_output_format = var.timestamp_ntz_output_format + timestamp_output_format = var.timestamp_output_format + timestamp_type_mapping = var.timestamp_type_mapping + timestamp_tz_output_format = var.timestamp_tz_output_format + timezone = var.timezone + time_input_format = var.time_input_format + 
time_output_format = var.time_output_format + trace_level = var.trace_level + transaction_abort_on_error = var.transaction_abort_on_error + transaction_default_isolation_level = var.transaction_default_isolation_level + two_digit_century_start = var.two_digit_century_start + unsupported_ddl_action = var.unsupported_ddl_action + use_cached_result = var.use_cached_result + week_of_year_policy = var.week_of_year_policy + week_start = var.week_start +} diff --git a/pkg/resources/testdata/TestAcc_Task/basic/variables.tf b/pkg/resources/testdata/TestAcc_Task/basic/variables.tf new file mode 100644 index 0000000000..70b21e25b2 --- /dev/null +++ b/pkg/resources/testdata/TestAcc_Task/basic/variables.tf @@ -0,0 +1,341 @@ +variable "database" { + type = string +} + +variable "schema" { + type = string +} + +variable "name" { + type = string +} + +variable "started" { + type = bool +} + +variable "sql_statement" { + type = string +} + +# Optionals +variable "comment" { + type = string + default = null +} + +variable "warehouse" { + type = string + default = null +} + +variable "config" { + type = string + default = null +} + +variable "allow_overlapping_execution" { + type = string + default = null +} + +variable "error_integration" { + type = string + default = null +} + +variable "when" { + type = string + default = null +} + +variable "schedule" { + default = null + type = map(string) +} + +# Parameters +variable "suspend_task_after_num_failures" { + default = null + type = number +} + +variable "task_auto_retry_attempts" { + default = null + type = number +} + +variable "user_task_managed_initial_warehouse_size" { + default = null + type = string +} + +variable "user_task_minimum_trigger_interval_in_seconds" { + default = null + type = number +} + +variable "user_task_timeout_ms" { + default = null + type = number +} + +variable "abort_detached_query" { + default = null + type = bool +} + +variable "autocommit" { + default = null + type = bool +} + +variable 
"binary_input_format" { + default = null + type = string +} + +variable "binary_output_format" { + default = null + type = string +} + +variable "client_memory_limit" { + default = null + type = number +} + +variable "client_metadata_request_use_connection_ctx" { + default = null + type = bool +} + +variable "client_prefetch_threads" { + default = null + type = number +} + +variable "client_result_chunk_size" { + default = null + type = number +} + +variable "client_result_column_case_insensitive" { + default = null + type = bool +} + +variable "client_session_keep_alive" { + default = null + type = bool +} + +variable "client_session_keep_alive_heartbeat_frequency" { + default = null + type = number +} + +variable "client_timestamp_type_mapping" { + default = null + type = string +} + +variable "date_input_format" { + default = null + type = string +} + +variable "date_output_format" { + default = null + type = string +} + +variable "enable_unload_physical_type_optimization" { + default = null + type = bool +} + +variable "error_on_nondeterministic_merge" { + default = null + type = bool +} + +variable "error_on_nondeterministic_update" { + default = null + type = bool +} + +variable "geography_output_format" { + default = null + type = string +} + +variable "geometry_output_format" { + default = null + type = string +} + +variable "jdbc_use_session_timezone" { + default = null + type = bool +} + +variable "json_indent" { + default = null + type = number +} + +variable "lock_timeout" { + default = null + type = number +} + +variable "log_level" { + default = null + type = string +} + +variable "multi_statement_count" { + default = null + type = number +} + +variable "noorder_sequence_as_default" { + default = null + type = bool +} + +variable "odbc_treat_decimal_as_int" { + default = null + type = bool +} + +variable "query_tag" { + default = null + type = string +} + +variable "quoted_identifiers_ignore_case" { + default = null + type = bool +} + +variable 
"rows_per_resultset" { + default = null + type = number +} + +variable "s3_stage_vpce_dns_name" { + default = null + type = string +} + +variable "search_path" { + default = null + type = string +} + +variable "statement_queued_timeout_in_seconds" { + default = null + type = number +} + +variable "statement_timeout_in_seconds" { + default = null + type = number +} + +variable "strict_json_output" { + default = null + type = bool +} + +variable "timestamp_day_is_always_24h" { + default = null + type = bool +} + +variable "timestamp_input_format" { + default = null + type = string +} + +variable "timestamp_ltz_output_format" { + default = null + type = string +} + +variable "timestamp_ntz_output_format" { + default = null + type = string +} + +variable "timestamp_output_format" { + default = null + type = string +} + +variable "timestamp_type_mapping" { + default = null + type = string +} + +variable "timestamp_tz_output_format" { + default = null + type = string +} + +variable "timezone" { + default = null + type = string +} + +variable "time_input_format" { + default = null + type = string +} + +variable "time_output_format" { + default = null + type = string +} + +variable "trace_level" { + default = null + type = string +} + +variable "transaction_abort_on_error" { + default = null + type = bool +} + +variable "transaction_default_isolation_level" { + default = null + type = string +} + +variable "two_digit_century_start" { + default = null + type = number +} + +variable "unsupported_ddl_action" { + default = null + type = string +} + +variable "use_cached_result" { + default = null + type = bool +} + +variable "week_of_year_policy" { + default = null + type = number +} + +variable "week_start" { + default = null + type = number +} diff --git a/pkg/resources/testdata/TestAcc_Task/with_task_dependency/test.tf b/pkg/resources/testdata/TestAcc_Task/with_task_dependency/test.tf new file mode 100644 index 0000000000..d8cb747aef --- /dev/null +++ 
b/pkg/resources/testdata/TestAcc_Task/with_task_dependency/test.tf @@ -0,0 +1,49 @@ +resource "snowflake_task" "root" { + name = var.tasks[0].name + database = var.tasks[0].database + schema = var.tasks[0].schema + started = var.tasks[0].started + sql_statement = var.tasks[0].sql_statement + + # Optionals + dynamic "schedule" { + for_each = [for element in [var.tasks[0].schedule] : element if element != null] + content { + minutes = lookup(var.tasks[0].schedule, "minutes", null) + using_cron = lookup(var.tasks[0].schedule, "cron", null) + } + } + + comment = var.tasks[0].comment + after = var.tasks[0].after + finalize = var.tasks[0].finalize + + # Parameters + suspend_task_after_num_failures = var.tasks[0].suspend_task_after_num_failures +} + +resource "snowflake_task" "child" { + depends_on = [snowflake_task.root] + + name = var.tasks[1].name + database = var.tasks[1].database + schema = var.tasks[1].schema + started = var.tasks[1].started + sql_statement = var.tasks[1].sql_statement + + # Optionals + dynamic "schedule" { + for_each = [for element in [var.tasks[1].schedule] : element if element != null] + content { + minutes = lookup(var.tasks[1].schedule, "minutes", null) + using_cron = lookup(var.tasks[1].schedule, "cron", null) + } + } + + comment = var.tasks[1].comment + after = var.tasks[1].after + finalize = var.tasks[1].finalize + + # Parameters + suspend_task_after_num_failures = var.tasks[1].suspend_task_after_num_failures +} diff --git a/pkg/resources/testdata/TestAcc_Task/with_task_dependency/variables.tf b/pkg/resources/testdata/TestAcc_Task/with_task_dependency/variables.tf new file mode 100644 index 0000000000..f08bfd135f --- /dev/null +++ b/pkg/resources/testdata/TestAcc_Task/with_task_dependency/variables.tf @@ -0,0 +1,18 @@ +variable "tasks" { + type = list(object({ + database = string + schema = string + name = string + started = bool + sql_statement = string + + # Optionals + comment = optional(string) + schedule = optional(map(string)) + after 
= optional(set(string)) + finalize = optional(string) + + # Parameters + suspend_task_after_num_failures = optional(number) + })) +} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf deleted file mode 100644 index d095e18684..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/1/test.tf +++ /dev/null @@ -1,9 +0,0 @@ -resource "snowflake_task" "test_task" { - name = var.name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} \ No newline at end of file diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf deleted file mode 100644 index 01e8e1a797..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/1/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - type = string -} - -variable "name" { - type = string -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf deleted file mode 100644 index 4c6e9d5521..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/2/test.tf +++ /dev/null @@ -1,10 +0,0 @@ -resource "snowflake_task" "test_task" { - name = var.name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" - when = "TRUE" -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf deleted file mode 100644 index 01e8e1a797..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2036/2/variables.tf +++ /dev/null @@ -1,15 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - 
type = string -} - -variable "name" { - type = string -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf b/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf deleted file mode 100644 index 0d57ab1811..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2207/1/test.tf +++ /dev/null @@ -1,20 +0,0 @@ -resource "snowflake_task" "root_task" { - name = var.root_name - database = var.database - schema = var.schema - warehouse = var.warehouse - sql_statement = "SELECT 1" - enabled = true - schedule = "5 MINUTE" -} - -resource "snowflake_task" "child_task" { - name = var.child_name - database = snowflake_task.root_task.database - schema = snowflake_task.root_task.schema - warehouse = snowflake_task.root_task.warehouse - sql_statement = "SELECT 1" - enabled = true - after = [snowflake_task.root_task.name] - comment = var.comment -} diff --git a/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf b/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf deleted file mode 100644 index fe59da5d99..0000000000 --- a/pkg/resources/testdata/TestAcc_Task_issue2207/1/variables.tf +++ /dev/null @@ -1,23 +0,0 @@ -variable "database" { - type = string -} - -variable "schema" { - type = string -} - -variable "warehouse" { - type = string -} - -variable "root_name" { - type = string -} - -variable "child_name" { - type = string -} - -variable "comment" { - type = string -} diff --git a/pkg/resources/user_parameters.go b/pkg/resources/user_parameters.go index 968f2c7c3c..05ecc15e0c 100644 --- a/pkg/resources/user_parameters.go +++ b/pkg/resources/user_parameters.go @@ -77,11 +77,12 @@ var ( ) type parameterDef[T ~string] struct { - Name T - Type schema.ValueType - Description string - DiffSuppress schema.SchemaDiffSuppressFunc - ValidateDiag schema.SchemaValidateDiagFunc + Name T + Type schema.ValueType + Description string + DiffSuppress schema.SchemaDiffSuppressFunc + ValidateDiag schema.SchemaValidateDiagFunc + ConflictsWith []string } func 
init() { @@ -159,6 +160,7 @@ func init() { Optional: true, ValidateDiagFunc: field.ValidateDiag, DiffSuppressFunc: field.DiffSuppress, + ConflictsWith: field.ConflictsWith, } } } diff --git a/pkg/schemas/gen/README.md b/pkg/schemas/gen/README.md index 78420ff9e2..3476f6d06f 100644 --- a/pkg/schemas/gen/README.md +++ b/pkg/schemas/gen/README.md @@ -83,6 +83,7 @@ If you change the show output struct in the SDK: Functional improvements: - handle the missing types (TODOs in [schema_field_mapper.go](./schema_field_mapper.go)) + - handle nested structs with identifiers / slices of identifiers - parametrize the generation, e.g.: - (optional) parametrize the output directory - currently, it's always written to `schemas` package - discover a change and generate as part of a `make pre-push` diff --git a/pkg/schemas/task_gen.go b/pkg/schemas/task_gen.go index a9daac5198..dec4850868 100644 --- a/pkg/schemas/task_gen.go +++ b/pkg/schemas/task_gen.go @@ -1,8 +1,7 @@ -// Code generated by sdk-to-schema generator; DO NOT EDIT. 
- package schemas import ( + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -46,7 +45,8 @@ var ShowTaskSchema = map[string]*schema.Schema{ Computed: true, }, "predecessors": { - Type: schema.TypeInvalid, + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, }, "state": { @@ -89,6 +89,31 @@ var ShowTaskSchema = map[string]*schema.Schema{ Type: schema.TypeString, Computed: true, }, + "task_relations": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "predecessors": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "finalizer": { + Type: schema.TypeString, + Computed: true, + }, + "finalized_root_task": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "last_suspended_reason": { + Type: schema.TypeString, + Computed: true, + }, } var _ = ShowTaskSchema @@ -102,19 +127,40 @@ func TaskToSchema(task *sdk.Task) map[string]any { taskSchema["schema_name"] = task.SchemaName taskSchema["owner"] = task.Owner taskSchema["comment"] = task.Comment - taskSchema["warehouse"] = task.Warehouse + if task.Warehouse != nil { + taskSchema["warehouse"] = task.Warehouse.Name() + } taskSchema["schedule"] = task.Schedule - taskSchema["predecessors"] = task.Predecessors + taskSchema["predecessors"] = collections.Map(task.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName) taskSchema["state"] = string(task.State) taskSchema["definition"] = task.Definition taskSchema["condition"] = task.Condition taskSchema["allow_overlapping_execution"] = task.AllowOverlappingExecution - taskSchema["error_integration"] = task.ErrorIntegration + if task.ErrorIntegration != nil { + taskSchema["error_integration"] = task.ErrorIntegration.Name() + } 
taskSchema["last_committed_on"] = task.LastCommittedOn taskSchema["last_suspended_on"] = task.LastSuspendedOn taskSchema["owner_role_type"] = task.OwnerRoleType taskSchema["config"] = task.Config taskSchema["budget"] = task.Budget + taskSchema["last_suspended_reason"] = task.LastSuspendedReason + // This is manually edited, please don't re-generate this file + finalizer := "" + if task.TaskRelations.FinalizerTask != nil { + finalizer = task.TaskRelations.FinalizerTask.FullyQualifiedName() + } + finalizedRootTask := "" + if task.TaskRelations.FinalizedRootTask != nil { + finalizedRootTask = task.TaskRelations.FinalizedRootTask.FullyQualifiedName() + } + taskSchema["task_relations"] = []any{ + map[string]any{ + "predecessors": collections.Map(task.TaskRelations.Predecessors, sdk.SchemaObjectIdentifier.FullyQualifiedName), + "finalizer": finalizer, + "finalized_root_task": finalizedRootTask, + }, + } return taskSchema } diff --git a/pkg/schemas/task_parameters.go b/pkg/schemas/task_parameters.go new file mode 100644 index 0000000000..7ab3b47882 --- /dev/null +++ b/pkg/schemas/task_parameters.go @@ -0,0 +1,91 @@ +package schemas + +import ( + "slices" + "strings" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +var ( + ShowTaskParametersSchema = make(map[string]*schema.Schema) + taskParameters = []sdk.TaskParameter{ + // task parameters + sdk.TaskParameterSuspendTaskAfterNumFailures, + sdk.TaskParameterTaskAutoRetryAttempts, + sdk.TaskParameterUserTaskManagedInitialWarehouseSize, + sdk.TaskParameterUserTaskMinimumTriggerIntervalInSeconds, + sdk.TaskParameterUserTaskTimeoutMs, + // session parameters + sdk.TaskParameterAbortDetachedQuery, + sdk.TaskParameterAutocommit, + sdk.TaskParameterBinaryInputFormat, + sdk.TaskParameterBinaryOutputFormat, + sdk.TaskParameterClientMemoryLimit, + sdk.TaskParameterClientMetadataRequestUseConnectionCtx, + sdk.TaskParameterClientPrefetchThreads, + 
sdk.TaskParameterClientResultChunkSize, + sdk.TaskParameterClientResultColumnCaseInsensitive, + sdk.TaskParameterClientSessionKeepAlive, + sdk.TaskParameterClientSessionKeepAliveHeartbeatFrequency, + sdk.TaskParameterClientTimestampTypeMapping, + sdk.TaskParameterDateInputFormat, + sdk.TaskParameterDateOutputFormat, + sdk.TaskParameterEnableUnloadPhysicalTypeOptimization, + sdk.TaskParameterErrorOnNondeterministicMerge, + sdk.TaskParameterErrorOnNondeterministicUpdate, + sdk.TaskParameterGeographyOutputFormat, + sdk.TaskParameterGeometryOutputFormat, + sdk.TaskParameterJdbcTreatTimestampNtzAsUtc, + sdk.TaskParameterJdbcUseSessionTimezone, + sdk.TaskParameterJsonIndent, + sdk.TaskParameterLockTimeout, + sdk.TaskParameterLogLevel, + sdk.TaskParameterMultiStatementCount, + sdk.TaskParameterNoorderSequenceAsDefault, + sdk.TaskParameterOdbcTreatDecimalAsInt, + sdk.TaskParameterQueryTag, + sdk.TaskParameterQuotedIdentifiersIgnoreCase, + sdk.TaskParameterRowsPerResultset, + sdk.TaskParameterS3StageVpceDnsName, + sdk.TaskParameterSearchPath, + sdk.TaskParameterStatementQueuedTimeoutInSeconds, + sdk.TaskParameterStatementTimeoutInSeconds, + sdk.TaskParameterStrictJsonOutput, + sdk.TaskParameterTimestampDayIsAlways24h, + sdk.TaskParameterTimestampInputFormat, + sdk.TaskParameterTimestampLtzOutputFormat, + sdk.TaskParameterTimestampNtzOutputFormat, + sdk.TaskParameterTimestampOutputFormat, + sdk.TaskParameterTimestampTypeMapping, + sdk.TaskParameterTimestampTzOutputFormat, + sdk.TaskParameterTimezone, + sdk.TaskParameterTimeInputFormat, + sdk.TaskParameterTimeOutputFormat, + sdk.TaskParameterTraceLevel, + sdk.TaskParameterTransactionAbortOnError, + sdk.TaskParameterTransactionDefaultIsolationLevel, + sdk.TaskParameterTwoDigitCenturyStart, + sdk.TaskParameterUnsupportedDdlAction, + sdk.TaskParameterUseCachedResult, + sdk.TaskParameterWeekOfYearPolicy, + sdk.TaskParameterWeekStart, + } +) + +func init() { + for _, param := range taskParameters { + 
ShowTaskParametersSchema[strings.ToLower(string(param))] = ParameterListSchema + } +} + +func TaskParametersToSchema(parameters []*sdk.Parameter) map[string]any { + taskParametersValue := make(map[string]any) + for _, param := range parameters { + if slices.Contains(taskParameters, sdk.TaskParameter(param.Key)) { + taskParametersValue[strings.ToLower(param.Key)] = []map[string]any{ParameterToSchema(param)} + } + } + return taskParametersValue +} diff --git a/pkg/sdk/grants_impl.go b/pkg/sdk/grants_impl.go index d0399d605b..082ec61020 100644 --- a/pkg/sdk/grants_impl.go +++ b/pkg/sdk/grants_impl.go @@ -403,11 +403,15 @@ func (v *grants) grantOwnershipOnTask(ctx context.Context, taskId SchemaObjectId return err } + if currentTask.Warehouse == nil { + return fmt.Errorf("no warehouse found to be attached to the task: %s", taskId.FullyQualifiedName()) + } + currentGrantsOnTaskWarehouse, err := v.client.Grants.Show(ctx, &ShowGrantOptions{ On: &ShowGrantsOn{ Object: &Object{ ObjectType: ObjectTypeWarehouse, - Name: NewAccountObjectIdentifier(currentTask.Warehouse), + Name: *currentTask.Warehouse, }, }, }) @@ -442,7 +446,7 @@ func (v *grants) grantOwnershipOnTask(ctx context.Context, taskId SchemaObjectId return err } - if currentTask.State == TaskStateStarted && !slices.ContainsFunc(tasksToResume, func(id SchemaObjectIdentifier) bool { + if currentTask.IsStarted() && !slices.ContainsFunc(tasksToResume, func(id SchemaObjectIdentifier) bool { return id.FullyQualifiedName() == currentTask.ID().FullyQualifiedName() }) { tasksToResume = append(tasksToResume, currentTask.ID()) @@ -515,7 +519,7 @@ func (v *grants) runOnAllTasks(ctx context.Context, inDatabase *AccountObjectIde } } - tasks, err := v.client.Tasks.Show(ctx, NewShowTaskRequest().WithIn(in)) + tasks, err := v.client.Tasks.Show(ctx, NewShowTaskRequest().WithIn(ExtendedIn{In: in})) if err != nil { return err } diff --git a/pkg/sdk/parameters.go b/pkg/sdk/parameters.go index 8f275752e1..f341ed503a 100644 --- 
a/pkg/sdk/parameters.go +++ b/pkg/sdk/parameters.go @@ -721,6 +721,70 @@ const ( TaskParameterWeekStart TaskParameter = "WEEK_START" ) +var AllTaskParameters = []TaskParameter{ + // Task Parameters + TaskParameterSuspendTaskAfterNumFailures, + TaskParameterTaskAutoRetryAttempts, + TaskParameterUserTaskManagedInitialWarehouseSize, + TaskParameterUserTaskMinimumTriggerIntervalInSeconds, + TaskParameterUserTaskTimeoutMs, + + // Session Parameters (inherited) + TaskParameterAbortDetachedQuery, + TaskParameterAutocommit, + TaskParameterBinaryInputFormat, + TaskParameterBinaryOutputFormat, + TaskParameterClientMemoryLimit, + TaskParameterClientMetadataRequestUseConnectionCtx, + TaskParameterClientPrefetchThreads, + TaskParameterClientResultChunkSize, + TaskParameterClientResultColumnCaseInsensitive, + TaskParameterClientSessionKeepAlive, + TaskParameterClientSessionKeepAliveHeartbeatFrequency, + TaskParameterClientTimestampTypeMapping, + TaskParameterDateInputFormat, + TaskParameterDateOutputFormat, + TaskParameterEnableUnloadPhysicalTypeOptimization, + TaskParameterErrorOnNondeterministicMerge, + TaskParameterErrorOnNondeterministicUpdate, + TaskParameterGeographyOutputFormat, + TaskParameterGeometryOutputFormat, + TaskParameterJdbcTreatTimestampNtzAsUtc, + TaskParameterJdbcUseSessionTimezone, + TaskParameterJsonIndent, + TaskParameterLockTimeout, + TaskParameterLogLevel, + TaskParameterMultiStatementCount, + TaskParameterNoorderSequenceAsDefault, + TaskParameterOdbcTreatDecimalAsInt, + TaskParameterQueryTag, + TaskParameterQuotedIdentifiersIgnoreCase, + TaskParameterRowsPerResultset, + TaskParameterS3StageVpceDnsName, + TaskParameterSearchPath, + TaskParameterStatementQueuedTimeoutInSeconds, + TaskParameterStatementTimeoutInSeconds, + TaskParameterStrictJsonOutput, + TaskParameterTimestampDayIsAlways24h, + TaskParameterTimestampInputFormat, + TaskParameterTimestampLtzOutputFormat, + TaskParameterTimestampNtzOutputFormat, + TaskParameterTimestampOutputFormat, + 
TaskParameterTimestampTypeMapping, + TaskParameterTimestampTzOutputFormat, + TaskParameterTimezone, + TaskParameterTimeInputFormat, + TaskParameterTimeOutputFormat, + TaskParameterTraceLevel, + TaskParameterTransactionAbortOnError, + TaskParameterTransactionDefaultIsolationLevel, + TaskParameterTwoDigitCenturyStart, + TaskParameterUnsupportedDdlAction, + TaskParameterUseCachedResult, + TaskParameterWeekOfYearPolicy, + TaskParameterWeekStart, +} + type WarehouseParameter string const ( diff --git a/pkg/sdk/tasks_def.go b/pkg/sdk/tasks_def.go index 26214a521d..83ff671492 100644 --- a/pkg/sdk/tasks_def.go +++ b/pkg/sdk/tasks_def.go @@ -27,8 +27,9 @@ func ToTaskState(s string) (TaskState, error) { } type TaskRelationsRepresentation struct { - Predecessors []string `json:"Predecessors"` - FinalizerTask string `json:"FinalizerTask"` + Predecessors []string `json:"Predecessors"` + FinalizerTask string `json:"FinalizerTask"` + FinalizedRootTask string `json:"FinalizedRootTask"` } func (r *TaskRelationsRepresentation) ToTaskRelations() (TaskRelations, error) { @@ -53,12 +54,21 @@ func (r *TaskRelationsRepresentation) ToTaskRelations() (TaskRelations, error) { taskRelations.FinalizerTask = &finalizerTask } + if len(r.FinalizedRootTask) > 0 { + finalizedRootTask, err := ParseSchemaObjectIdentifier(r.FinalizedRootTask) + if err != nil { + return TaskRelations{}, err + } + taskRelations.FinalizedRootTask = &finalizedRootTask + } + return taskRelations, nil } type TaskRelations struct { - Predecessors []SchemaObjectIdentifier - FinalizerTask *SchemaObjectIdentifier + Predecessors []SchemaObjectIdentifier + FinalizerTask *SchemaObjectIdentifier + FinalizedRootTask *SchemaObjectIdentifier } func ToTaskRelations(s string) (TaskRelations, error) { @@ -146,7 +156,7 @@ var TasksDef = g.NewInterface( OptionalSessionParameters(). OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). 
- OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalIdentifier("Finalize", g.KindOfT[SchemaObjectIdentifier](), g.IdentifierOptions().Equals().SQL("FINALIZE")). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", g.ParameterOptions()). @@ -157,7 +167,7 @@ var TasksDef = g.NewInterface( SQL("AS"). Text("sql", g.KeywordOptions().NoQuotes().Required()). WithValidation(g.ValidIdentifier, "name"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"). + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"). WithValidation(g.ConflictingFields, "OrReplace", "IfNotExists"), taskCreateWarehouse, ). @@ -175,7 +185,7 @@ var TasksDef = g.NewInterface( OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalSessionParameters(). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). - OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalIdentifier("Finalize", g.KindOfT[SchemaObjectIdentifier](), g.IdentifierOptions().Equals().SQL("FINALIZE")). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", g.ParameterOptions()). @@ -184,7 +194,7 @@ var TasksDef = g.NewInterface( SQL("AS"). Text("sql", g.KeywordOptions().NoQuotes().Required()). WithValidation(g.ValidIdentifier, "name"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"), + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"), ). 
CustomOperation( "Clone", @@ -221,20 +231,21 @@ var TasksDef = g.NewInterface( OptionalBooleanAssignment("ALLOW_OVERLAPPING_EXECUTION", nil). OptionalNumberAssignment("USER_TASK_TIMEOUT_MS", nil). OptionalNumberAssignment("SUSPEND_TASK_AFTER_NUM_FAILURES", nil). - OptionalIdentifier("ErrorNotificationIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). + OptionalIdentifier("ErrorIntegration", g.KindOfT[AccountObjectIdentifier](), g.IdentifierOptions().Equals().SQL("ERROR_INTEGRATION")). OptionalTextAssignment("COMMENT", g.ParameterOptions().SingleQuotes()). OptionalSessionParameters(). OptionalNumberAssignment("TASK_AUTO_RETRY_ATTEMPTS", nil). OptionalNumberAssignment("USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS", nil). WithValidation(g.AtLeastOneValueSet, "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParameters", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds"). WithValidation(g.ConflictingFields, "Warehouse", "UserTaskManagedInitialWarehouseSize"). - WithValidation(g.ValidIdentifierIfSet, "ErrorNotificationIntegration"), + WithValidation(g.ValidIdentifierIfSet, "ErrorIntegration"), g.ListOptions().SQL("SET"), ). OptionalQueryStructField( "Unset", g.NewQueryStruct("TaskUnset"). OptionalSQL("WAREHOUSE"). + OptionalSQL("USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE"). OptionalSQL("SCHEDULE"). OptionalSQL("CONFIG"). OptionalSQL("ALLOW_OVERLAPPING_EXECUTION"). @@ -245,7 +256,7 @@ var TasksDef = g.NewInterface( OptionalSQL("TASK_AUTO_RETRY_ATTEMPTS"). OptionalSQL("USER_TASK_MINIMUM_TRIGGER_INTERVAL_IN_SECONDS"). OptionalSessionParametersUnset(). 
- WithValidation(g.AtLeastOneValueSet, "Warehouse", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds"), + WithValidation(g.AtLeastOneValueSet, "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds"), g.ListOptions().SQL("UNSET"), ). OptionalSetTags(). @@ -276,7 +287,7 @@ var TasksDef = g.NewInterface( Terse(). SQL("TASKS"). OptionalLike(). - OptionalIn(). + OptionalExtendedIn(). OptionalStartsWith(). OptionalSQL("ROOT ONLY"). OptionalLimit(), diff --git a/pkg/sdk/tasks_dto_builders_gen.go b/pkg/sdk/tasks_dto_builders_gen.go index 7a0397bca2..7e3f7b90f7 100644 --- a/pkg/sdk/tasks_dto_builders_gen.go +++ b/pkg/sdk/tasks_dto_builders_gen.go @@ -59,8 +59,8 @@ func (s *CreateTaskRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAfterNumF return s } -func (s *CreateTaskRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *CreateTaskRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *CreateTaskRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *CreateTaskRequest { + s.ErrorIntegration = &ErrorIntegration return s } @@ -158,8 +158,8 @@ func (s *CreateOrAlterTaskRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAf return s } -func (s *CreateOrAlterTaskRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *CreateOrAlterTaskRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *CreateOrAlterTaskRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *CreateOrAlterTaskRequest { + s.ErrorIntegration 
= &ErrorIntegration return s } @@ -325,8 +325,8 @@ func (s *TaskSetRequest) WithSuspendTaskAfterNumFailures(SuspendTaskAfterNumFail return s } -func (s *TaskSetRequest) WithErrorNotificationIntegration(ErrorNotificationIntegration AccountObjectIdentifier) *TaskSetRequest { - s.ErrorNotificationIntegration = &ErrorNotificationIntegration +func (s *TaskSetRequest) WithErrorIntegration(ErrorIntegration AccountObjectIdentifier) *TaskSetRequest { + s.ErrorIntegration = &ErrorIntegration return s } @@ -359,6 +359,11 @@ func (s *TaskUnsetRequest) WithWarehouse(Warehouse bool) *TaskUnsetRequest { return s } +func (s *TaskUnsetRequest) WithUserTaskManagedInitialWarehouseSize(UserTaskManagedInitialWarehouseSize bool) *TaskUnsetRequest { + s.UserTaskManagedInitialWarehouseSize = &UserTaskManagedInitialWarehouseSize + return s +} + func (s *TaskUnsetRequest) WithSchedule(Schedule bool) *TaskUnsetRequest { s.Schedule = &Schedule return s @@ -436,7 +441,7 @@ func (s *ShowTaskRequest) WithLike(Like Like) *ShowTaskRequest { return s } -func (s *ShowTaskRequest) WithIn(In In) *ShowTaskRequest { +func (s *ShowTaskRequest) WithIn(In ExtendedIn) *ShowTaskRequest { s.In = &In return s } diff --git a/pkg/sdk/tasks_dto_gen.go b/pkg/sdk/tasks_dto_gen.go index e6a2726b4e..e8dcc23d20 100644 --- a/pkg/sdk/tasks_dto_gen.go +++ b/pkg/sdk/tasks_dto_gen.go @@ -24,7 +24,7 @@ type CreateTaskRequest struct { SessionParameters *SessionParameters UserTaskTimeoutMs *int SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier + ErrorIntegration *AccountObjectIdentifier Comment *string Finalize *SchemaObjectIdentifier TaskAutoRetryAttempts *int @@ -45,21 +45,21 @@ func (r *CreateTaskRequest) GetName() SchemaObjectIdentifier { } type CreateOrAlterTaskRequest struct { - name SchemaObjectIdentifier // required - Warehouse *CreateTaskWarehouseRequest - Schedule *string - Config *string - AllowOverlappingExecution *bool - UserTaskTimeoutMs *int - SessionParameters 
*SessionParameters - SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier - Comment *string - Finalize *SchemaObjectIdentifier - TaskAutoRetryAttempts *int - After []SchemaObjectIdentifier - When *string - sql string // required + name SchemaObjectIdentifier // required + Warehouse *CreateTaskWarehouseRequest + Schedule *string + Config *string + AllowOverlappingExecution *bool + UserTaskTimeoutMs *int + SessionParameters *SessionParameters + SuspendTaskAfterNumFailures *int + ErrorIntegration *AccountObjectIdentifier + Comment *string + Finalize *SchemaObjectIdentifier + TaskAutoRetryAttempts *int + After []SchemaObjectIdentifier + When *string + sql string // required } func (r *CreateOrAlterTaskRequest) GetName() SchemaObjectIdentifier { @@ -103,7 +103,7 @@ type TaskSetRequest struct { AllowOverlappingExecution *bool UserTaskTimeoutMs *int SuspendTaskAfterNumFailures *int - ErrorNotificationIntegration *AccountObjectIdentifier + ErrorIntegration *AccountObjectIdentifier Comment *string SessionParameters *SessionParameters TaskAutoRetryAttempts *int @@ -112,6 +112,7 @@ type TaskSetRequest struct { type TaskUnsetRequest struct { Warehouse *bool + UserTaskManagedInitialWarehouseSize *bool Schedule *bool Config *bool AllowOverlappingExecution *bool @@ -132,7 +133,7 @@ type DropTaskRequest struct { type ShowTaskRequest struct { Terse *bool Like *Like - In *In + In *ExtendedIn StartsWith *string RootOnly *bool Limit *LimitFrom diff --git a/pkg/sdk/tasks_gen.go b/pkg/sdk/tasks_gen.go index 56123086ff..3d6bf112bb 100644 --- a/pkg/sdk/tasks_gen.go +++ b/pkg/sdk/tasks_gen.go @@ -3,6 +3,9 @@ package sdk import ( "context" "database/sql" + "fmt" + "strconv" + "strings" ) type Tasks interface { @@ -13,6 +16,7 @@ type Tasks interface { Drop(ctx context.Context, request *DropTaskRequest) error Show(ctx context.Context, request *ShowTaskRequest) ([]Task, error) ShowByID(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) + 
ShowParameters(ctx context.Context, id SchemaObjectIdentifier) ([]*Parameter, error) Describe(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) Execute(ctx context.Context, request *ExecuteTaskRequest) error SuspendRootTasks(ctx context.Context, taskId SchemaObjectIdentifier, id SchemaObjectIdentifier) ([]SchemaObjectIdentifier, error) @@ -33,7 +37,7 @@ type CreateTaskOptions struct { SessionParameters *SessionParameters `ddl:"list,no_parentheses"` UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` @@ -52,24 +56,24 @@ type CreateTaskWarehouse struct { // CreateOrAlterTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/create-task#create-or-alter-task. 
type CreateOrAlterTaskOptions struct { - createOrAlter bool `ddl:"static" sql:"CREATE OR ALTER"` - task bool `ddl:"static" sql:"TASK"` - name SchemaObjectIdentifier `ddl:"identifier"` - Warehouse *CreateTaskWarehouse `ddl:"keyword"` - Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` - Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` - AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` - UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` - SessionParameters *SessionParameters `ddl:"list,no_parentheses"` - SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` - Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` - Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` - TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` - After []SchemaObjectIdentifier `ddl:"parameter,no_equals" sql:"AFTER"` - When *string `ddl:"parameter,no_quotes,no_equals" sql:"WHEN"` - as bool `ddl:"static" sql:"AS"` - sql string `ddl:"keyword,no_quotes"` + createOrAlter bool `ddl:"static" sql:"CREATE OR ALTER"` + task bool `ddl:"static" sql:"TASK"` + name SchemaObjectIdentifier `ddl:"identifier"` + Warehouse *CreateTaskWarehouse `ddl:"keyword"` + Schedule *string `ddl:"parameter,single_quotes" sql:"SCHEDULE"` + Config *string `ddl:"parameter,no_quotes" sql:"CONFIG"` + AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` + UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` + SessionParameters *SessionParameters `ddl:"list,no_parentheses"` + SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` + 
Finalize *SchemaObjectIdentifier `ddl:"identifier,equals" sql:"FINALIZE"` + TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` + After []SchemaObjectIdentifier `ddl:"parameter,no_equals" sql:"AFTER"` + When *string `ddl:"parameter,no_quotes,no_equals" sql:"WHEN"` + as bool `ddl:"static" sql:"AS"` + sql string `ddl:"keyword,no_quotes"` } // CloneTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/create-task#create-task-clone. @@ -112,7 +116,7 @@ type TaskSet struct { AllowOverlappingExecution *bool `ddl:"parameter" sql:"ALLOW_OVERLAPPING_EXECUTION"` UserTaskTimeoutMs *int `ddl:"parameter" sql:"USER_TASK_TIMEOUT_MS"` SuspendTaskAfterNumFailures *int `ddl:"parameter" sql:"SUSPEND_TASK_AFTER_NUM_FAILURES"` - ErrorNotificationIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` + ErrorIntegration *AccountObjectIdentifier `ddl:"identifier,equals" sql:"ERROR_INTEGRATION"` Comment *string `ddl:"parameter,single_quotes" sql:"COMMENT"` SessionParameters *SessionParameters `ddl:"list,no_parentheses"` TaskAutoRetryAttempts *int `ddl:"parameter" sql:"TASK_AUTO_RETRY_ATTEMPTS"` @@ -121,6 +125,7 @@ type TaskSet struct { type TaskUnset struct { Warehouse *bool `ddl:"keyword" sql:"WAREHOUSE"` + UserTaskManagedInitialWarehouseSize *bool `ddl:"keyword" sql:"USER_TASK_MANAGED_INITIAL_WAREHOUSE_SIZE"` Schedule *bool `ddl:"keyword" sql:"SCHEDULE"` Config *bool `ddl:"keyword" sql:"CONFIG"` AllowOverlappingExecution *bool `ddl:"keyword" sql:"ALLOW_OVERLAPPING_EXECUTION"` @@ -143,14 +148,14 @@ type DropTaskOptions struct { // ShowTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/show-tasks. 
type ShowTaskOptions struct { - show bool `ddl:"static" sql:"SHOW"` - Terse *bool `ddl:"keyword" sql:"TERSE"` - tasks bool `ddl:"static" sql:"TASKS"` - Like *Like `ddl:"keyword" sql:"LIKE"` - In *In `ddl:"keyword" sql:"IN"` - StartsWith *string `ddl:"parameter,single_quotes,no_equals" sql:"STARTS WITH"` - RootOnly *bool `ddl:"keyword" sql:"ROOT ONLY"` - Limit *LimitFrom `ddl:"keyword" sql:"LIMIT"` + show bool `ddl:"static" sql:"SHOW"` + Terse *bool `ddl:"keyword" sql:"TERSE"` + tasks bool `ddl:"static" sql:"TASKS"` + Like *Like `ddl:"keyword" sql:"LIKE"` + In *ExtendedIn `ddl:"keyword" sql:"IN"` + StartsWith *string `ddl:"parameter,single_quotes,no_equals" sql:"STARTS WITH"` + RootOnly *bool `ddl:"keyword" sql:"ROOT ONLY"` + Limit *LimitFrom `ddl:"keyword" sql:"LIMIT"` } type taskDBRow struct { @@ -186,7 +191,7 @@ type Task struct { SchemaName string Owner string Comment string - Warehouse string + Warehouse *AccountObjectIdentifier Schedule string Predecessors []SchemaObjectIdentifier State TaskState @@ -207,6 +212,38 @@ func (v *Task) ID() SchemaObjectIdentifier { return NewSchemaObjectIdentifier(v.DatabaseName, v.SchemaName, v.Name) } +func (v *Task) IsStarted() bool { + return v.State == TaskStateStarted +} + +type TaskSchedule struct { + Minutes int + Cron string +} + +func ParseTaskSchedule(schedule string) (*TaskSchedule, error) { + upperSchedule := strings.ToUpper(schedule) + switch { + case strings.Contains(upperSchedule, "USING CRON"): + // We have to do it this way because we want to get rid of the prefix and leave the casing as is (mostly because timezones like America/Los_Angeles are case-sensitive). + // That's why the prefix trimming has to be done by slicing rather than using strings.TrimPrefix. 
+ cron := schedule[len("USING CRON "):] + return &TaskSchedule{Cron: cron}, nil + case strings.HasSuffix(upperSchedule, "M") || + strings.HasSuffix(upperSchedule, "MINUTE") || + strings.HasSuffix(upperSchedule, "MINUTES"): + minuteParts := strings.Split(upperSchedule, " ") + minutes, err := strconv.Atoi(minuteParts[0]) + if err != nil { + return nil, err + } + + return &TaskSchedule{Minutes: minutes}, nil + default: + return nil, fmt.Errorf("invalid schedule format: %s", schedule) + } +} + // DescribeTaskOptions is based on https://docs.snowflake.com/en/sql-reference/sql/desc-task. type DescribeTaskOptions struct { describe bool `ddl:"static" sql:"DESCRIBE"` diff --git a/pkg/sdk/tasks_gen_test.go b/pkg/sdk/tasks_gen_test.go index 53aa7d8ea4..9422d73824 100644 --- a/pkg/sdk/tasks_gen_test.go +++ b/pkg/sdk/tasks_gen_test.go @@ -2,6 +2,8 @@ package sdk import ( "testing" + + "github.com/stretchr/testify/assert" ) func TestTasks_Create(t *testing.T) { @@ -81,7 +83,7 @@ func TestTasks_Create(t *testing.T) { } opts.UserTaskTimeoutMs = Int(5) opts.SuspendTaskAfterNumFailures = Int(6) - opts.ErrorNotificationIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) + opts.ErrorIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) opts.Comment = String("some comment") opts.Finalize = &finalizerId opts.TaskAutoRetryAttempts = Int(10) @@ -157,7 +159,7 @@ func TestTasks_CreateOrAlter(t *testing.T) { LockTimeout: Int(5), } opts.SuspendTaskAfterNumFailures = Int(6) - opts.ErrorNotificationIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) + opts.ErrorIntegration = Pointer(NewAccountObjectIdentifier("some_error_integration")) opts.Comment = String("some comment") opts.Finalize = &finalizerId opts.TaskAutoRetryAttempts = Int(10) @@ -271,7 +273,7 @@ func TestTasks_Alter(t *testing.T) { t.Run("validation: at least one of the fields [opts.Unset.Warehouse opts.Unset.Schedule opts.Unset.Config 
opts.Unset.AllowOverlappingExecution opts.Unset.UserTaskTimeoutMs opts.Unset.SuspendTaskAfterNumFailures opts.Unset.ErrorIntegration opts.Unset.Comment opts.Unset.SessionParametersUnset] should be set", func(t *testing.T) { opts := defaultOpts() opts.Unset = &TaskUnset{} - assertOptsInvalidJoinedErrors(t, opts, errAtLeastOneOf("AlterTaskOptions.Unset", "Warehouse", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) + assertOptsInvalidJoinedErrors(t, opts, errAtLeastOneOf("AlterTaskOptions.Unset", "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) }) t.Run("validation: opts.Unset.SessionParametersUnset.SessionParametersUnset should be valid", func(t *testing.T) { @@ -457,14 +459,34 @@ func TestTasks_Show(t *testing.T) { assertOptsValidAndSQLEquals(t, opts, "SHOW TASKS") }) + t.Run("in application", func(t *testing.T) { + opts := defaultOpts() + id := randomAccountObjectIdentifier() + opts.In = &ExtendedIn{ + Application: id, + } + assertOptsValidAndSQLEquals(t, opts, "SHOW TASKS IN APPLICATION %s", id.FullyQualifiedName()) + }) + + t.Run("in application package", func(t *testing.T) { + opts := defaultOpts() + id := randomAccountObjectIdentifier() + opts.In = &ExtendedIn{ + ApplicationPackage: id, + } + assertOptsValidAndSQLEquals(t, opts, "SHOW TASKS IN APPLICATION PACKAGE %s", id.FullyQualifiedName()) + }) + t.Run("all options", func(t *testing.T) { opts := defaultOpts() opts.Terse = Bool(true) opts.Like = &Like{ Pattern: String("myaccount"), } - opts.In = &In{ - Account: Bool(true), + opts.In = &ExtendedIn{ + In: In{ + Account: Bool(true), + }, } opts.StartsWith = 
String("abc") opts.RootOnly = Bool(true) @@ -532,3 +554,64 @@ func TestTasks_Execute(t *testing.T) { assertOptsValidAndSQLEquals(t, opts, "EXECUTE TASK %s RETRY LAST", id.FullyQualifiedName()) }) } + +func TestParseTaskSchedule(t *testing.T) { + testCases := map[string]struct { + Schedule string + ExpectedTaskSchedule *TaskSchedule + Error string + }{ + "valid schedule: m minutes": { + Schedule: "5 m", + ExpectedTaskSchedule: &TaskSchedule{Minutes: 5}, + }, + "valid schedule: M minutes": { + Schedule: "5 M", + ExpectedTaskSchedule: &TaskSchedule{Minutes: 5}, + }, + "valid schedule: MINUTE minutes": { + Schedule: "5 MINUTE", + ExpectedTaskSchedule: &TaskSchedule{Minutes: 5}, + }, + "valid schedule: MINUTES minutes": { + Schedule: "5 MINUTES", + ExpectedTaskSchedule: &TaskSchedule{Minutes: 5}, + }, + "valid schedule: cron": { + Schedule: "USING CRON * * * * * UTC", + ExpectedTaskSchedule: &TaskSchedule{Cron: "* * * * * UTC"}, + }, + "valid schedule: cron with case sensitive location": { + Schedule: "USING CRON * * * * * America/Loc_Angeles", + ExpectedTaskSchedule: &TaskSchedule{Cron: "* * * * * America/Loc_Angeles"}, + }, + "invalid schedule: wrong schedule format": { + Schedule: "SOME SCHEDULE", + ExpectedTaskSchedule: nil, + Error: "invalid schedule format", + }, + "invalid schedule: wrong minutes format": { + Schedule: "a5 MINUTE", + ExpectedTaskSchedule: nil, + Error: `strconv.Atoi: parsing "A5": invalid syntax`, + }, + // currently, cron expressions are not validated (they are on Snowflake level) + "invalid schedule: wrong cron format": { + Schedule: "USING CRON some_cron", + ExpectedTaskSchedule: &TaskSchedule{Cron: "some_cron"}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + taskSchedule, err := ParseTaskSchedule(tc.Schedule) + if tc.Error != "" { + assert.Nil(t, taskSchedule) + assert.ErrorContains(t, err, tc.Error) + } else { + assert.EqualValues(t, tc.ExpectedTaskSchedule, taskSchedule) + assert.NoError(t, err) + } + }) 
+ } +} diff --git a/pkg/sdk/tasks_impl_gen.go b/pkg/sdk/tasks_impl_gen.go index 9f00526c25..3d4b530194 100644 --- a/pkg/sdk/tasks_impl_gen.go +++ b/pkg/sdk/tasks_impl_gen.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "log" "slices" "strings" @@ -53,8 +54,10 @@ func (v *tasks) Show(ctx context.Context, request *ShowTaskRequest) ([]Task, err } func (v *tasks) ShowByID(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) { - tasks, err := v.Show(ctx, NewShowTaskRequest().WithIn(In{ - Schema: id.SchemaId(), + tasks, err := v.Show(ctx, NewShowTaskRequest().WithIn(ExtendedIn{ + In: In{ + Schema: id.SchemaId(), + }, }).WithLike(Like{ Pattern: String(id.Name()), })) @@ -64,6 +67,14 @@ func (v *tasks) ShowByID(ctx context.Context, id SchemaObjectIdentifier) (*Task, return collections.FindFirst(tasks, func(r Task) bool { return r.Name == id.Name() }) } +func (v *tasks) ShowParameters(ctx context.Context, id SchemaObjectIdentifier) ([]*Parameter, error) { + return v.client.Parameters.ShowParameters(ctx, &ShowParametersOptions{ + In: &ParametersIn{ + Task: id, + }, + }) +} + func (v *tasks) Describe(ctx context.Context, id SchemaObjectIdentifier) (*Task, error) { opts := &DescribeTaskOptions{ name: id, @@ -92,7 +103,7 @@ func (v *tasks) SuspendRootTasks(ctx context.Context, taskId SchemaObjectIdentif for _, rootTask := range rootTasks { // If a root task is started, then it needs to be suspended before the child tasks can be created - if rootTask.State == TaskStateStarted { + if rootTask.IsStarted() { err := v.client.Tasks.Alter(ctx, NewAlterTaskRequest(rootTask.ID()).WithSuspend(true)) if err != nil { log.Printf("[WARN] failed to suspend task %s", rootTask.ID().FullyQualifiedName()) @@ -145,6 +156,12 @@ func GetRootTasks(v Tasks, ctx context.Context, id SchemaObjectIdentifier) ([]Ta return nil, err } + if task.TaskRelations.FinalizedRootTask != nil { + tasksToExamine.Push(*task.TaskRelations.FinalizedRootTask) + alreadyExaminedTasksNames = 
append(alreadyExaminedTasksNames, current.Name()) + continue + } + predecessors := task.Predecessors if len(predecessors) == 0 { rootTasks = append(rootTasks, *task) @@ -165,12 +182,11 @@ func (r *CreateTaskRequest) toOpts() *CreateTaskOptions { IfNotExists: r.IfNotExists, name: r.name, Schedule: r.Schedule, - Config: r.Config, AllowOverlappingExecution: r.AllowOverlappingExecution, SessionParameters: r.SessionParameters, UserTaskTimeoutMs: r.UserTaskTimeoutMs, SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.ErrorNotificationIntegration, + ErrorIntegration: r.ErrorIntegration, Comment: r.Comment, Finalize: r.Finalize, TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, @@ -186,25 +202,27 @@ func (r *CreateTaskRequest) toOpts() *CreateTaskOptions { UserTaskManagedInitialWarehouseSize: r.Warehouse.UserTaskManagedInitialWarehouseSize, } } + if r.Config != nil { + opts.Config = String(fmt.Sprintf("$$%s$$", *r.Config)) + } return opts } func (r *CreateOrAlterTaskRequest) toOpts() *CreateOrAlterTaskOptions { opts := &CreateOrAlterTaskOptions{ - name: r.name, - Schedule: r.Schedule, - Config: r.Config, - AllowOverlappingExecution: r.AllowOverlappingExecution, - UserTaskTimeoutMs: r.UserTaskTimeoutMs, - SessionParameters: r.SessionParameters, - SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.ErrorNotificationIntegration, - Comment: r.Comment, - Finalize: r.Finalize, - TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, - After: r.After, - When: r.When, - sql: r.sql, + name: r.name, + Schedule: r.Schedule, + AllowOverlappingExecution: r.AllowOverlappingExecution, + UserTaskTimeoutMs: r.UserTaskTimeoutMs, + SessionParameters: r.SessionParameters, + SuspendTaskAfterNumFailures: r.SuspendTaskAfterNumFailures, + ErrorIntegration: r.ErrorIntegration, + Comment: r.Comment, + Finalize: r.Finalize, + TaskAutoRetryAttempts: r.TaskAutoRetryAttempts, + After: r.After, + When: r.When, + sql: r.sql, } if 
r.Warehouse != nil { opts.Warehouse = &CreateTaskWarehouse{ @@ -212,6 +230,9 @@ func (r *CreateOrAlterTaskRequest) toOpts() *CreateOrAlterTaskOptions { UserTaskManagedInitialWarehouseSize: r.Warehouse.UserTaskManagedInitialWarehouseSize, } } + if r.Config != nil { + opts.Config = String(fmt.Sprintf("$$%s$$", *r.Config)) + } return opts } @@ -247,20 +268,23 @@ func (r *AlterTaskRequest) toOpts() *AlterTaskOptions { Warehouse: r.Set.Warehouse, UserTaskManagedInitialWarehouseSize: r.Set.UserTaskManagedInitialWarehouseSize, Schedule: r.Set.Schedule, - Config: r.Set.Config, AllowOverlappingExecution: r.Set.AllowOverlappingExecution, UserTaskTimeoutMs: r.Set.UserTaskTimeoutMs, SuspendTaskAfterNumFailures: r.Set.SuspendTaskAfterNumFailures, - ErrorNotificationIntegration: r.Set.ErrorNotificationIntegration, + ErrorIntegration: r.Set.ErrorIntegration, Comment: r.Set.Comment, SessionParameters: r.Set.SessionParameters, TaskAutoRetryAttempts: r.Set.TaskAutoRetryAttempts, UserTaskMinimumTriggerIntervalInSeconds: r.Set.UserTaskMinimumTriggerIntervalInSeconds, } + if r.Set.Config != nil { + opts.Set.Config = String(fmt.Sprintf("$$%s$$", *r.Set.Config)) + } } if r.Unset != nil { opts.Unset = &TaskUnset{ Warehouse: r.Unset.Warehouse, + UserTaskManagedInitialWarehouseSize: r.Unset.UserTaskManagedInitialWarehouseSize, Schedule: r.Unset.Schedule, Config: r.Unset.Config, AllowOverlappingExecution: r.Unset.AllowOverlappingExecution, @@ -317,8 +341,13 @@ func (r taskDBRow) convert() *Task { if r.Comment.Valid { task.Comment = r.Comment.String } - if r.Warehouse.Valid { - task.Warehouse = r.Warehouse.String + if r.Warehouse.Valid && r.Warehouse.String != "null" { + id, err := ParseAccountObjectIdentifier(r.Warehouse.String) + if err != nil { + log.Printf("[DEBUG] failed to parse warehouse: %v", err) + } else { + task.Warehouse = &id + } } if r.Schedule.Valid { task.Schedule = r.Schedule.String @@ -332,6 +361,8 @@ func (r taskDBRow) convert() *Task { } } task.Predecessors = ids + } else 
{ + task.Predecessors = make([]SchemaObjectIdentifier, 0) } if len(r.State) > 0 { taskState, err := ToTaskState(r.State) diff --git a/pkg/sdk/tasks_validations_gen.go b/pkg/sdk/tasks_validations_gen.go index 6a5392457b..d5e30aa4cd 100644 --- a/pkg/sdk/tasks_validations_gen.go +++ b/pkg/sdk/tasks_validations_gen.go @@ -32,8 +32,8 @@ func (opts *CreateTaskOptions) validate() error { if everyValueSet(opts.OrReplace, opts.IfNotExists) { errs = append(errs, errOneOf("CreateTaskOptions", "OrReplace", "IfNotExists")) } - if opts.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("CreateTaskOptions", "ErrorNotificationIntegration")) + if opts.ErrorIntegration != nil && !ValidObjectIdentifier(opts.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("CreateTaskOptions", "ErrorIntegration")) } return JoinErrors(errs...) } @@ -56,8 +56,8 @@ func (opts *CreateOrAlterTaskOptions) validate() error { if !ValidObjectIdentifier(opts.name) { errs = append(errs, ErrInvalidObjectIdentifier) } - if opts.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("CreateOrAlterTaskOptions", "ErrorNotificationIntegration")) + if opts.ErrorIntegration != nil && !ValidObjectIdentifier(opts.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("CreateOrAlterTaskOptions", "ErrorIntegration")) } return JoinErrors(errs...) 
} @@ -93,19 +93,19 @@ func (opts *AlterTaskOptions) validate() error { errs = append(errs, err) } } - if !anyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize, opts.Set.Schedule, opts.Set.Config, opts.Set.AllowOverlappingExecution, opts.Set.UserTaskTimeoutMs, opts.Set.SuspendTaskAfterNumFailures, opts.Set.ErrorNotificationIntegration, opts.Set.Comment, opts.Set.SessionParameters, opts.Set.TaskAutoRetryAttempts, opts.Set.UserTaskMinimumTriggerIntervalInSeconds) { + if !anyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize, opts.Set.Schedule, opts.Set.Config, opts.Set.AllowOverlappingExecution, opts.Set.UserTaskTimeoutMs, opts.Set.SuspendTaskAfterNumFailures, opts.Set.ErrorIntegration, opts.Set.Comment, opts.Set.SessionParameters, opts.Set.TaskAutoRetryAttempts, opts.Set.UserTaskMinimumTriggerIntervalInSeconds) { errs = append(errs, errAtLeastOneOf("AlterTaskOptions.Set", "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParameters", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) } if everyValueSet(opts.Set.Warehouse, opts.Set.UserTaskManagedInitialWarehouseSize) { errs = append(errs, errOneOf("AlterTaskOptions.Set", "Warehouse", "UserTaskManagedInitialWarehouseSize")) } - if opts.Set.ErrorNotificationIntegration != nil && !ValidObjectIdentifier(opts.Set.ErrorNotificationIntegration) { - errs = append(errs, errInvalidIdentifier("AlterTaskOptions.Set", "ErrorNotificationIntegration")) + if opts.Set.ErrorIntegration != nil && !ValidObjectIdentifier(opts.Set.ErrorIntegration) { + errs = append(errs, errInvalidIdentifier("AlterTaskOptions.Set", "ErrorIntegration")) } } if valueSet(opts.Unset) { - if !anyValueSet(opts.Unset.Warehouse, opts.Unset.Schedule, opts.Unset.Config, opts.Unset.AllowOverlappingExecution, opts.Unset.UserTaskTimeoutMs, 
opts.Unset.SuspendTaskAfterNumFailures, opts.Unset.ErrorIntegration, opts.Unset.Comment, opts.Unset.SessionParametersUnset, opts.Unset.TaskAutoRetryAttempts, opts.Unset.UserTaskMinimumTriggerIntervalInSeconds) { - errs = append(errs, errAtLeastOneOf("AlterTaskOptions.Unset", "Warehouse", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) + if !anyValueSet(opts.Unset.Warehouse, opts.Unset.UserTaskManagedInitialWarehouseSize, opts.Unset.Schedule, opts.Unset.Config, opts.Unset.AllowOverlappingExecution, opts.Unset.UserTaskTimeoutMs, opts.Unset.SuspendTaskAfterNumFailures, opts.Unset.ErrorIntegration, opts.Unset.Comment, opts.Unset.SessionParametersUnset, opts.Unset.TaskAutoRetryAttempts, opts.Unset.UserTaskMinimumTriggerIntervalInSeconds) { + errs = append(errs, errAtLeastOneOf("AlterTaskOptions.Unset", "Warehouse", "UserTaskManagedInitialWarehouseSize", "Schedule", "Config", "AllowOverlappingExecution", "UserTaskTimeoutMs", "SuspendTaskAfterNumFailures", "ErrorIntegration", "Comment", "SessionParametersUnset", "TaskAutoRetryAttempts", "UserTaskMinimumTriggerIntervalInSeconds")) } if valueSet(opts.Unset.SessionParametersUnset) { if err := opts.Unset.SessionParametersUnset.validate(); err != nil { diff --git a/pkg/sdk/testint/tasks_gen_integration_test.go b/pkg/sdk/testint/tasks_gen_integration_test.go index 4f61362a27..b47c7c9139 100644 --- a/pkg/sdk/testint/tasks_gen_integration_test.go +++ b/pkg/sdk/testint/tasks_gen_integration_test.go @@ -2,6 +2,7 @@ package testint import ( "fmt" + "strings" "testing" "time" @@ -21,21 +22,10 @@ func TestInt_Tasks(t *testing.T) { ctx := testContext(t) sql := "SELECT CURRENT_TIMESTAMP" - // TODO [SNOW-1017580]: replace with real value - const gcpPubsubSubscriptionName = "projects/project-1234/subscriptions/sub2" - errorIntegrationId := 
testClientHelper().Ids.RandomAccountObjectIdentifier() - err := client.NotificationIntegrations.Create(ctx, - sdk.NewCreateNotificationIntegrationRequest(errorIntegrationId, true). - WithAutomatedDataLoadsParams(sdk.NewAutomatedDataLoadsParamsRequest(). - WithGoogleAutoParams(sdk.NewGoogleAutoParamsRequest(gcpPubsubSubscriptionName)), - ), - ) - require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, client.NotificationIntegrations.Drop(ctx, sdk.NewDropNotificationIntegrationRequest(errorIntegrationId).WithIfExists(sdk.Bool(true)))) - }) + errorIntegration, ErrorIntegrationCleanup := testClientHelper().NotificationIntegration.CreateWithGcpPubSub(t) + t.Cleanup(ErrorIntegrationCleanup) - assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseName string) { + assertTask := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, warehouseId *sdk.AccountObjectIdentifier) { t.Helper() assertions.AssertThat(t, objectassert.TaskFromObject(t, task). HasNotEmptyCreatedOn(). @@ -45,9 +35,9 @@ func TestInt_Tasks(t *testing.T) { HasSchemaName(testClientHelper().Ids.SchemaId().Name()). HasOwner("ACCOUNTADMIN"). HasComment(""). - HasWarehouse(warehouseName). + HasWarehouse(warehouseId). HasSchedule(""). - HasPredecessors(). + HasPredecessorsInAnyOrder(). HasState(sdk.TaskStateStarted). HasDefinition(sql). HasCondition(""). 
@@ -63,7 +53,7 @@ func TestInt_Tasks(t *testing.T) { ) } - assertTaskWithOptions := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, comment string, warehouse string, schedule string, condition string, allowOverlappingExecution bool, config string, predecessor *sdk.SchemaObjectIdentifier, errorIntegrationName *sdk.AccountObjectIdentifier) { + assertTaskWithOptions := func(t *testing.T, task *sdk.Task, id sdk.SchemaObjectIdentifier, comment string, warehouse *sdk.AccountObjectIdentifier, schedule string, condition string, allowOverlappingExecution bool, config string, predecessor *sdk.SchemaObjectIdentifier, errorIntegrationName *sdk.AccountObjectIdentifier) { t.Helper() asserts := objectassert.TaskFromObject(t, task). @@ -89,12 +79,12 @@ func TestInt_Tasks(t *testing.T) { HasLastSuspendedOn("") if predecessor != nil { - asserts.HasPredecessors(*predecessor) + asserts.HasPredecessorsInAnyOrder(*predecessor) asserts.HasTaskRelations(sdk.TaskRelations{ Predecessors: []sdk.SchemaObjectIdentifier{*predecessor}, }) } else { - asserts.HasPredecessors() + asserts.HasPredecessorsInAnyOrder() asserts.HasTaskRelations(sdk.TaskRelations{}) } @@ -113,8 +103,8 @@ func TestInt_Tasks(t *testing.T) { HasId(""). HasOwner(""). HasComment(""). - HasWarehouse(""). - HasPredecessors(). + HasWarehouse(nil). + HasPredecessorsInAnyOrder(). HasState(""). HasDefinition(""). HasCondition(""). 
@@ -253,7 +243,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTask(t, task, id, "") + assertTask(t, task, id, nil) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id).HasAllDefaults()) }) @@ -272,18 +262,18 @@ func TestInt_Tasks(t *testing.T) { HasUserTaskManagedInitialWarehouseSize(sdk.WarehouseSizeXSmall), ) - assertTask(t, task, id, "") + assertTask(t, task, id, nil) }) t.Run("create task: complete case", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() - err = testClient(t).Tasks.Create(ctx, sdk.NewCreateTaskRequest(id, sql). + err := testClient(t).Tasks.Create(ctx, sdk.NewCreateTaskRequest(id, sql). WithOrReplace(true). WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(testClientHelper().Ids.WarehouseId())). - WithErrorNotificationIntegration(errorIntegrationId). + WithErrorIntegration(errorIntegration.ID()). WithSchedule("10 MINUTE"). - WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). + WithConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). WithAllowOverlappingExecution(true). WithSessionParameters(sdk.SessionParameters{ JSONIndent: sdk.Int(4), @@ -298,7 +288,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "some comment", testClientHelper().Ids.WarehouseId().Name(), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, &errorIntegrationId) + assertTaskWithOptions(t, task, id, "some comment", sdk.Pointer(testClientHelper().Ids.WarehouseId()), "10 MINUTE", `SYSTEM$STREAM_HAS_DATA('MYSTREAM')`, true, `{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`, nil, sdk.Pointer(errorIntegration.ID())) assertions.AssertThat(t, objectparametersassert.TaskParameters(t, id). HasJsonIndent(4). 
HasUserTaskTimeoutMs(500). @@ -321,7 +311,7 @@ func TestInt_Tasks(t *testing.T) { task, err := testClientHelper().Task.Show(t, id) require.NoError(t, err) - assertTaskWithOptions(t, task, id, "", "", "", "", false, "", &rootTaskId, nil) + assertTaskWithOptions(t, task, id, "", nil, "", "", false, "", &rootTaskId, nil) }) t.Run("create task: with after and finalizer", func(t *testing.T) { @@ -347,6 +337,13 @@ func TestInt_Tasks(t *testing.T) { FinalizerTask: &finalizerId, }), ) + + assertions.AssertThat(t, objectassert.Task(t, finalizerId). + HasTaskRelations(sdk.TaskRelations{ + Predecessors: []sdk.SchemaObjectIdentifier{}, + FinalizedRootTask: &rootTaskId, + }), + ) }) // Tested graph @@ -538,7 +535,7 @@ func TestInt_Tasks(t *testing.T) { err := client.Tasks.CreateOrAlter(ctx, sdk.NewCreateOrAlterTaskRequest(id, sql). WithWarehouse(*sdk.NewCreateTaskWarehouseRequest().WithWarehouse(testClientHelper().Ids.WarehouseId())). WithSchedule("10 MINUTES"). - WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). + WithConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). WithAllowOverlappingExecution(true). WithUserTaskTimeoutMs(10). WithSessionParameters(sessionParametersSet). @@ -555,7 +552,7 @@ func TestInt_Tasks(t *testing.T) { createdOn := task.CreatedOn assertions.AssertThat(t, objectassert.TaskFromObject(t, task). - HasWarehouse(testClientHelper().Ids.WarehouseId().Name()). + HasWarehouse(sdk.Pointer(testClientHelper().Ids.WarehouseId())). HasSchedule("10 MINUTES"). HasConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). HasAllowOverlappingExecution(true). @@ -576,7 +573,7 @@ func TestInt_Tasks(t *testing.T) { require.NoError(t, err) assertions.AssertThat(t, objectassert.TaskFromObject(t, alteredTask). - HasWarehouse(""). + HasWarehouse(nil). HasSchedule(""). HasConfig(""). HasAllowOverlappingExecution(false). 
@@ -618,10 +615,10 @@ func TestInt_Tasks(t *testing.T) { err := client.Tasks.Alter(ctx, sdk.NewAlterTaskRequest(task.ID()).WithSet(*sdk.NewTaskSetRequest(). // TODO(SNOW-1348116): Cannot set warehouse due to Snowflake error // WithWarehouse(testClientHelper().Ids.WarehouseId()). - WithErrorNotificationIntegration(errorIntegrationId). + WithErrorIntegration(errorIntegration.ID()). WithSessionParameters(sessionParametersSet). WithSchedule("10 MINUTE"). - WithConfig(`$${"output_dir": "/temp/test_directory/", "learning_rate": 0.1}$$`). + WithConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). WithAllowOverlappingExecution(true). WithUserTaskTimeoutMs(1000). WithSuspendTaskAfterNumFailures(100). @@ -633,7 +630,7 @@ func TestInt_Tasks(t *testing.T) { assertions.AssertThat(t, objectassert.Task(t, task.ID()). // HasWarehouse(testClientHelper().Ids.WarehouseId().Name()). - HasErrorIntegration(sdk.Pointer(errorIntegrationId)). + HasErrorIntegration(sdk.Pointer(errorIntegration.ID())). HasSchedule("10 MINUTE"). HasConfig(`{"output_dir": "/temp/test_directory/", "learning_rate": 0.1}`). HasAllowOverlappingExecution(true). 
@@ -837,7 +834,7 @@ func TestInt_Tasks(t *testing.T) { task2, task2Cleanup := testClientHelper().Task.Create(t) t.Cleanup(task2Cleanup) - returnedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest().WithIn(sdk.In{Schema: testClientHelper().Ids.SchemaId()})) + returnedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest().WithIn(sdk.ExtendedIn{In: sdk.In{Schema: testClientHelper().Ids.SchemaId()}})) require.NoError(t, err) require.Len(t, returnedTasks, 2) @@ -850,7 +847,7 @@ func TestInt_Tasks(t *testing.T) { task, taskCleanup := testClientHelper().Task.CreateWithRequest(t, sdk.NewCreateTaskRequest(id, sql).WithSchedule("10 MINUTE")) t.Cleanup(taskCleanup) - returnedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest().WithIn(sdk.In{Schema: testClientHelper().Ids.SchemaId()}).WithTerse(true)) + returnedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest().WithIn(sdk.ExtendedIn{In: sdk.In{Schema: testClientHelper().Ids.SchemaId()}}).WithTerse(true)) require.NoError(t, err) require.Len(t, returnedTasks, 1) @@ -866,7 +863,7 @@ func TestInt_Tasks(t *testing.T) { returnedTasks, err := client.Tasks.Show(ctx, sdk.NewShowTaskRequest(). WithLike(sdk.Like{Pattern: &task1.Name}). - WithIn(sdk.In{Schema: testClientHelper().Ids.SchemaId()}). + WithIn(sdk.ExtendedIn{In: sdk.In{Schema: testClientHelper().Ids.SchemaId()}}). 
WithLimit(sdk.LimitFrom{Rows: sdk.Int(5)})) require.NoError(t, err) @@ -882,7 +879,7 @@ func TestInt_Tasks(t *testing.T) { returnedTask, err := client.Tasks.Describe(ctx, task.ID()) require.NoError(t, err) - assertTask(t, returnedTask, task.ID(), testClientHelper().Ids.WarehouseId().Name()) + assertTask(t, returnedTask, task.ID(), sdk.Pointer(testClientHelper().Ids.WarehouseId())) }) t.Run("execute task: default", func(t *testing.T) { @@ -903,8 +900,11 @@ func TestInt_Tasks(t *testing.T) { err := client.Tasks.Execute(ctx, sdk.NewExecuteTaskRequest(task.ID())) require.NoError(t, err) - err = client.Tasks.Execute(ctx, sdk.NewExecuteTaskRequest(task.ID()).WithRetryLast(true)) - require.ErrorContains(t, err, fmt.Sprintf("Cannot perform retry: no suitable run of graph with root task %s to retry.", task.ID().Name())) + require.Eventually(t, func() bool { + err = client.Tasks.Execute(ctx, sdk.NewExecuteTaskRequest(task.ID()).WithRetryLast(true)) + return strings.Contains(err.Error(), fmt.Sprintf("Cannot perform retry: no suitable run of graph with root task %s to retry.", task.ID().Name())) || + strings.Contains(err.Error(), fmt.Sprintf("graph with root task %s had no failures.", task.ID().Name())) + }, 2*time.Second, time.Millisecond*300) }) t.Run("execute task: retry last after failed last task", func(t *testing.T) { diff --git a/templates/data-sources/tasks.md.tmpl b/templates/data-sources/tasks.md.tmpl new file mode 100644 index 0000000000..9173876ceb --- /dev/null +++ b/templates/data-sources/tasks.md.tmpl @@ -0,0 +1,24 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ if gt (len (split .Description "")) 1 -}} +{{ index (split .Description "") 1 | plainmarkdown | trimspace | prefixlines " " }} +{{- else -}} +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +{{- end }} +--- + +!> **V1 release candidate** This data source was reworked and is a release candidate for the V1. 
We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the data source if needed. Any errors reported will be resolved with a higher priority. We encourage checking this data source out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. + +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile (printf "examples/data-sources/%s/data-source.tf" .Name)}} +{{- end }} + +{{ .SchemaMarkdown | trimspace }} diff --git a/templates/resources/task.md.tmpl b/templates/resources/task.md.tmpl new file mode 100644 index 0000000000..7a876a0017 --- /dev/null +++ b/templates/resources/task.md.tmpl @@ -0,0 +1,35 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ if gt (len (split .Description "")) 1 -}} +{{ index (split .Description "") 1 | plainmarkdown | trimspace | prefixlines " " }} +{{- else -}} +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +{{- end }} +--- + +!> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0980--v0990) to use it. 
+ +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile (printf "examples/resources/%s/resource.tf" .Name)}} +-> **Note** Instead of using fully_qualified_name, you can reference objects managed outside Terraform by constructing a correct ID, consult [identifiers guide](https://registry.terraform.io/providers/Snowflake-Labs/snowflake/latest/docs/guides/identifiers#new-computed-fully-qualified-name-field-in-resources). + + +{{- end }} + +{{ .SchemaMarkdown | trimspace }} +{{- if .HasImport }} + +## Import + +Import is supported using the following syntax: + +{{ codefile "shell" (printf "examples/resources/%s/import.sh" .Name)}} +{{- end }} diff --git a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD index 28c250976a..96c1eea1d7 100644 --- a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD +++ b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD @@ -33,7 +33,7 @@ newer provider versions. We will address these while working on the given object | STREAMLIT | 🚀 | - | | TABLE | 👨‍💻 | [#2997](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2997), [#2844](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2844), [#2839](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2839), [#2735](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2735), [#2733](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2733), [#2683](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2683), [#2676](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2676), [#2674](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2674), [#2629](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2629), [#2418](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2418), 
[#2415](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2415), [#2406](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2406), [#2236](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2236), [#2035](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2035), [#1823](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1823), [#1799](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1799), [#1764](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1764), [#1600](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1600), [#1387](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1387), [#1272](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1272), [#1271](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1271), [#1248](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1248), [#1241](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1241), [#1146](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1146), [#1032](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1032), [#420](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/420) | | TAG | 👨‍💻 | [#2943](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2902), [#2598](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2598), [#1910](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1910), [#1909](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1909), [#1862](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1862), [#1806](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1806), [#1657](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1657), 
[#1496](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1496), [#1443](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1443), [#1394](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1394), [#1372](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1372), [#1074](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1074) | -| TASK | 👨‍💻 | [#1419](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1419), [#1250](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1250), [#1194](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1194), [#1088](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1088) | +| TASK | 👨‍💻 | [#3136](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3136), [#1419](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1419), [#1250](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1250), [#1194](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1194), [#1088](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1088) | | VIEW | 🚀 | issues in the older versions: [resources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Aresource%3Aview+) and [datasources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Adata_source%3Aviews+) | | snowflake_unsafe_execute | 👨‍💻 | [#2934](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2934) | From 231f65323611f110564117a325062355e7ed7cf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Tue, 26 Nov 2024 14:20:27 +0100 Subject: [PATCH 06/10] chore: Add usage tracking for the rest of the resources and fix views (#3223) ## Changes - Add usage tracking to the remaining resources (had to change their method signature to the new one containing context) 
- Fix views (trimmed the tracking comment from the text column that contains the command run to create the view) - Applied the same fix for materialized view and dynamic table that also contain self reference to create command --- pkg/internal/tracking/query.go | 5 + pkg/internal/tracking/query_test.go | 37 ++++ pkg/provider/resources/resources.go | 13 ++ pkg/resources/account.go | 51 +++--- ...ccount_authentication_policy_attachment.go | 27 +-- pkg/resources/account_parameter.go | 38 ++-- .../account_password_policy_attachment.go | 27 +-- pkg/resources/alert.go | 2 +- pkg/resources/api_integration.go | 89 +++++----- pkg/resources/database_old.go | 62 ++++--- pkg/resources/dynamic_table.go | 92 +++++----- .../email_notification_integration.go | 53 +++--- pkg/resources/external_table.go | 45 +++-- pkg/resources/failover_group.go | 89 +++++----- pkg/resources/file_format.go | 162 +++++++++--------- pkg/resources/grant_account_role.go | 37 ++-- pkg/resources/grant_database_role.go | 51 +++--- pkg/resources/managed_account.go | 46 ++--- pkg/resources/materialized_view.go | 64 +++---- pkg/resources/network_policy_attachment.go | 106 ++++++------ pkg/resources/notification_integration.go | 96 ++++++----- pkg/resources/oauth_integration.go | 66 +++---- pkg/resources/oauth_integration_test.go | 13 +- pkg/resources/object_parameter.go | 51 +++--- pkg/resources/password_policy.go | 98 +++++------ pkg/resources/pipe.go | 58 +++---- pkg/resources/saml_integration.go | 92 +++++----- pkg/resources/saml_integration_test.go | 13 +- pkg/resources/sequence.go | 61 +++---- pkg/resources/session_parameter.go | 48 +++--- pkg/resources/share.go | 50 +++--- pkg/resources/stream.go | 81 +++++---- pkg/resources/table.go | 76 ++++---- ...table_column_masking_policy_application.go | 34 ++-- pkg/resources/table_constraint.go | 53 +++--- .../user_authentication_policy_attachment.go | 40 ++--- .../user_password_policy_attachment.go | 40 ++--- pkg/resources/user_public_keys.go | 39 +++-- 
pkg/sdk/dynamic_table.go | 4 +- pkg/sdk/materialized_views_impl_gen.go | 3 +- .../testint/dynamic_table_integration_test.go | 21 +++ ...materialized_views_gen_integration_test.go | 15 ++ pkg/sdk/testint/views_gen_integration_test.go | 16 ++ pkg/sdk/views_impl_gen.go | 4 +- 44 files changed, 1150 insertions(+), 1018 deletions(-) diff --git a/pkg/internal/tracking/query.go b/pkg/internal/tracking/query.go index e49421b1a9..6a829bf9b3 100644 --- a/pkg/internal/tracking/query.go +++ b/pkg/internal/tracking/query.go @@ -6,6 +6,11 @@ import ( "strings" ) +func TrimMetadata(sql string) string { + queryParts := strings.Split(sql, fmt.Sprintf(" --%s", MetadataPrefix)) + return queryParts[0] +} + func AppendMetadata(sql string, metadata Metadata) (string, error) { bytes, err := json.Marshal(metadata) if err != nil { diff --git a/pkg/internal/tracking/query_test.go b/pkg/internal/tracking/query_test.go index 6d46162186..0261d77684 100644 --- a/pkg/internal/tracking/query_test.go +++ b/pkg/internal/tracking/query_test.go @@ -5,10 +5,47 @@ import ( "fmt" "testing" + "github.com/stretchr/testify/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/stretchr/testify/require" ) +func TestTrimMetadata(t *testing.T) { + testCases := []struct { + Input string + Expected string + }{ + { + Input: "select 1", + Expected: "select 1", + }, + { + Input: "select 1; --some comment", + Expected: "select 1; --some comment", + }, + { + Input: fmt.Sprintf("select 1; --%s", MetadataPrefix), + Expected: "select 1;", + }, + { + Input: fmt.Sprintf("select 1; --%s ", MetadataPrefix), + Expected: "select 1;", + }, + { + Input: fmt.Sprintf("select 1; --%s some text after", MetadataPrefix), + Expected: "select 1;", + }, + } + + for _, tc := range testCases { + t.Run("TrimMetadata: "+tc.Input, func(t *testing.T) { + trimmedInput := TrimMetadata(tc.Input) + assert.Equal(t, tc.Expected, trimmedInput) + }) + } +} + func TestAppendMetadata(t *testing.T) { 
metadata := NewMetadata("123", resources.Account, CreateOperation) sql := "SELECT 1" diff --git a/pkg/provider/resources/resources.go b/pkg/provider/resources/resources.go index dc4de69296..6991cbabe2 100644 --- a/pkg/provider/resources/resources.go +++ b/pkg/provider/resources/resources.go @@ -4,6 +4,9 @@ type resource string const ( Account resource = "snowflake_account" + AccountAuthenticationPolicyAttachment resource = "snowflake_account_authentication_policy_attachment" + AccountParameter resource = "snowflake_account_parameter" + AccountPasswordPolicyAttachment resource = "snowflake_account_password_policy_attachment" AccountRole resource = "snowflake_account_role" Alert resource = "snowflake_alert" ApiAuthenticationIntegrationWithAuthorizationCodeGrant resource = "snowflake_api_authentication_integration_with_authorization_code_grant" @@ -36,10 +39,13 @@ const ( MaskingPolicy resource = "snowflake_masking_policy" MaterializedView resource = "snowflake_materialized_view" NetworkPolicy resource = "snowflake_network_policy" + NetworkPolicyAttachment resource = "snowflake_network_policy_attachment" NetworkRule resource = "snowflake_network_rule" NotificationIntegration resource = "snowflake_notification_integration" + OauthIntegration resource = "snowflake_oauth_integration" OauthIntegrationForCustomClients resource = "snowflake_oauth_integration_for_custom_clients" OauthIntegrationForPartnerApplications resource = "snowflake_oauth_integration_for_partner_applications" + ObjectParameter resource = "snowflake_object_parameter" PasswordPolicy resource = "snowflake_password_policy" Pipe resource = "snowflake_pipe" PrimaryConnection resource = "snowflake_primary_connection" @@ -47,6 +53,7 @@ const ( ResourceMonitor resource = "snowflake_resource_monitor" Role resource = "snowflake_role" RowAccessPolicy resource = "snowflake_row_access_policy" + SamlSecurityIntegration resource = "snowflake_saml_integration" Saml2SecurityIntegration resource = 
"snowflake_saml2_integration" Schema resource = "snowflake_schema" ScimSecurityIntegration resource = "snowflake_scim_integration" @@ -56,6 +63,7 @@ const ( SecretWithBasicAuthentication resource = "snowflake_secret_with_basic_authentication" SecretWithClientCredentials resource = "snowflake_secret_with_client_credentials" SecretWithGenericString resource = "snowflake_secret_with_generic_string" + SessionParameter resource = "snowflake_session_parameter" Sequence resource = "snowflake_sequence" ServiceUser resource = "snowflake_service_user" Share resource = "snowflake_share" @@ -69,12 +77,17 @@ const ( StreamOnView resource = "snowflake_stream_on_view" Streamlit resource = "snowflake_streamlit" Table resource = "snowflake_table" + TableColumnMaskingPolicyApplication resource = "snowflake_table_column_masking_policy_application" + TableConstraint resource = "snowflake_table_constraint" Tag resource = "snowflake_tag" TagAssociation resource = "snowflake_tag_association" TagMaskingPolicyAssociation resource = "snowflake_tag_masking_policy_association" Task resource = "snowflake_task" UnsafeExecute resource = "snowflake_unsafe_execute" User resource = "snowflake_user" + UserAuthenticationPolicyAttachment resource = "snowflake_user_authentication_policy_attachment" + UserPasswordPolicyAttachment resource = "snowflake_user_password_policy_attachment" + UserPublicKeys resource = "snowflake_user_public_keys" View resource = "snowflake_view" Warehouse resource = "snowflake_warehouse" ) diff --git a/pkg/resources/account.go b/pkg/resources/account.go index dd58b80c5c..8b687c7cd8 100644 --- a/pkg/resources/account.go +++ b/pkg/resources/account.go @@ -7,6 +7,8 @@ import ( "strings" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -212,11 +214,11 @@ var accountSchema = map[string]*schema.Schema{ func Account() 
*schema.Resource { return &schema.Resource{ - Description: "The account resource allows you to create and manage Snowflake accounts.", - Create: CreateAccount, - Read: ReadAccount, - Update: UpdateAccount, - Delete: DeleteAccount, + Description: "The account resource allows you to create and manage Snowflake accounts.", + CreateContext: TrackingCreateWrapper(resources.Account, CreateAccount), + ReadContext: TrackingReadWrapper(resources.Account, ReadAccount), + UpdateContext: TrackingUpdateWrapper(resources.Account, UpdateAccount), + DeleteContext: TrackingDeleteWrapper(resources.Account, DeleteAccount), CustomizeDiff: TrackingCustomDiffWrapper(resources.Account, customdiff.All( ComputedIfAnyAttributeChanged(accountSchema, FullyQualifiedNameAttributeName, "name"), @@ -230,9 +232,8 @@ func Account() *schema.Resource { } // CreateAccount implements schema.CreateFunc. -func CreateAccount(d *schema.ResourceData, meta interface{}) error { +func CreateAccount(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) objectIdentifier := sdk.NewAccountObjectIdentifier(name) @@ -267,7 +268,7 @@ func CreateAccount(d *schema.ResourceData, meta interface{}) error { // For organizations that have accounts in multiple region groups, returns . so we need to split on "." currentRegion, err := client.ContextFunctions.CurrentRegion(ctx) if err != nil { - return err + return diag.FromErr(err) } regionParts := strings.Split(currentRegion, ".") if len(regionParts) == 2 { @@ -280,7 +281,7 @@ func CreateAccount(d *schema.ResourceData, meta interface{}) error { // For organizations that have accounts in multiple region groups, returns . so we need to split on "." 
currentRegion, err := client.ContextFunctions.CurrentRegion(ctx) if err != nil { - return err + return diag.FromErr(err) } regionParts := strings.Split(currentRegion, ".") if len(regionParts) == 2 { @@ -295,7 +296,7 @@ func CreateAccount(d *schema.ResourceData, meta interface{}) error { err := client.Accounts.Create(ctx, objectIdentifier, createOptions) if err != nil { - return err + return diag.FromErr(err) } var account *sdk.Account @@ -308,17 +309,16 @@ func CreateAccount(d *schema.ResourceData, meta interface{}) error { return nil, true }) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(account.AccountLocator)) - return ReadAccount(d, meta) + return ReadAccount(ctx, d, meta) } // ReadAccount implements schema.ReadFunc. -func ReadAccount(d *schema.ResourceData, meta interface{}) error { +func ReadAccount(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) @@ -333,42 +333,42 @@ func ReadAccount(d *schema.ResourceData, meta interface{}) error { return nil, true }) if err != nil { - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err = d.Set("name", acc.AccountName); err != nil { - return fmt.Errorf("error setting name: %w", err) + return diag.FromErr(fmt.Errorf("error setting name: %w", err)) } if err = d.Set("edition", acc.Edition); err != nil { - return fmt.Errorf("error setting edition: %w", err) + return diag.FromErr(fmt.Errorf("error setting edition: %w", err)) } if err = d.Set("region_group", acc.RegionGroup); err != nil { - return fmt.Errorf("error setting region_group: %w", err) + return diag.FromErr(fmt.Errorf("error setting region_group: %w", err)) } if err = d.Set("region", acc.SnowflakeRegion); err != nil { - return 
fmt.Errorf("error setting region: %w", err) + return diag.FromErr(fmt.Errorf("error setting region: %w", err)) } if err = d.Set("comment", acc.Comment); err != nil { - return fmt.Errorf("error setting comment: %w", err) + return diag.FromErr(fmt.Errorf("error setting comment: %w", err)) } if err = d.Set("is_org_admin", acc.IsOrgAdmin); err != nil { - return fmt.Errorf("error setting is_org_admin: %w", err) + return diag.FromErr(fmt.Errorf("error setting is_org_admin: %w", err)) } return nil } // UpdateAccount implements schema.UpdateFunc. -func UpdateAccount(d *schema.ResourceData, meta interface{}) error { +func UpdateAccount(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { /* todo: comments may eventually work again for accounts, so this can be uncommented when that happens client := meta.(*provider.Context).Client @@ -394,12 +394,11 @@ func UpdateAccount(d *schema.ResourceData, meta interface{}) error { } // DeleteAccount implements schema.DeleteFunc. 
-func DeleteAccount(d *schema.ResourceData, meta interface{}) error { +func DeleteAccount(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() gracePeriodInDays := d.Get("grace_period_in_days").(int) err := client.Accounts.Drop(ctx, helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier), gracePeriodInDays, &sdk.DropAccountOptions{ IfExists: sdk.Bool(true), }) - return err + return diag.FromErr(err) } diff --git a/pkg/resources/account_authentication_policy_attachment.go b/pkg/resources/account_authentication_policy_attachment.go index 628cee492c..bb61f3d215 100644 --- a/pkg/resources/account_authentication_policy_attachment.go +++ b/pkg/resources/account_authentication_policy_attachment.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -26,9 +29,9 @@ func AccountAuthenticationPolicyAttachment() *schema.Resource { return &schema.Resource{ Description: "Specifies the authentication policy to use for the current account. 
To set the authentication policy of a different account, use a provider alias.", - Create: CreateAccountAuthenticationPolicyAttachment, - Read: ReadAccountAuthenticationPolicyAttachment, - Delete: DeleteAccountAuthenticationPolicyAttachment, + CreateContext: TrackingCreateWrapper(resources.AccountAuthenticationPolicyAttachment, CreateAccountAuthenticationPolicyAttachment), + ReadContext: TrackingReadWrapper(resources.AccountAuthenticationPolicyAttachment, ReadAccountAuthenticationPolicyAttachment), + DeleteContext: TrackingDeleteWrapper(resources.AccountAuthenticationPolicyAttachment, DeleteAccountAuthenticationPolicyAttachment), Schema: accountAuthenticationPolicyAttachmentSchema, Importer: &schema.ResourceImporter{ @@ -38,13 +41,12 @@ func AccountAuthenticationPolicyAttachment() *schema.Resource { } // CreateAccountAuthenticationPolicyAttachment implements schema.CreateFunc. -func CreateAccountAuthenticationPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func CreateAccountAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() authenticationPolicy, ok := sdk.NewObjectIdentifierFromFullyQualifiedName(d.Get("authentication_policy").(string)).(sdk.SchemaObjectIdentifier) if !ok { - return fmt.Errorf("authentication_policy %s is not a valid authentication policy qualified name, expected format: `\"db\".\"schema\".\"policy\"`", d.Get("authentication_policy")) + return diag.FromErr(fmt.Errorf("authentication_policy %s is not a valid authentication policy qualified name, expected format: `\"db\".\"schema\".\"policy\"`", d.Get("authentication_policy"))) } err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{ @@ -53,27 +55,26 @@ func CreateAccountAuthenticationPolicyAttachment(d *schema.ResourceData, meta in }, }) if err != nil { - return err + return diag.FromErr(err) } 
d.SetId(helpers.EncodeSnowflakeID(authenticationPolicy)) - return ReadAccountAuthenticationPolicyAttachment(d, meta) + return ReadAccountAuthenticationPolicyAttachment(ctx, d, meta) } -func ReadAccountAuthenticationPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func ReadAccountAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { authenticationPolicy := helpers.DecodeSnowflakeID(d.Id()) if err := d.Set("authentication_policy", authenticationPolicy.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } return nil } // DeleteAccountAuthenticationPolicyAttachment implements schema.DeleteFunc. -func DeleteAccountAuthenticationPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func DeleteAccountAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{ Unset: &sdk.AccountUnset{ @@ -81,7 +82,7 @@ func DeleteAccountAuthenticationPolicyAttachment(d *schema.ResourceData, meta in }, }) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/pkg/resources/account_parameter.go b/pkg/resources/account_parameter.go index ccff34a089..95cea28579 100644 --- a/pkg/resources/account_parameter.go +++ b/pkg/resources/account_parameter.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -26,10 +29,10 @@ var accountParameterSchema = map[string]*schema.Schema{ func AccountParameter() *schema.Resource { return &schema.Resource{ - Create: CreateAccountParameter, - Read: ReadAccountParameter, - 
Update: UpdateAccountParameter, - Delete: DeleteAccountParameter, + CreateContext: TrackingCreateWrapper(resources.AccountParameter, CreateAccountParameter), + ReadContext: TrackingReadWrapper(resources.AccountParameter, ReadAccountParameter), + UpdateContext: TrackingUpdateWrapper(resources.AccountParameter, UpdateAccountParameter), + DeleteContext: TrackingDeleteWrapper(resources.AccountParameter, DeleteAccountParameter), Schema: accountParameterSchema, Importer: &schema.ResourceImporter{ @@ -39,59 +42,56 @@ func AccountParameter() *schema.Resource { } // CreateAccountParameter implements schema.CreateFunc. -func CreateAccountParameter(d *schema.ResourceData, meta interface{}) error { +func CreateAccountParameter(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client key := d.Get("key").(string) value := d.Get("value").(string) - ctx := context.Background() parameter := sdk.AccountParameter(key) err := client.Parameters.SetAccountParameter(ctx, parameter, value) if err != nil { - return err + return diag.FromErr(err) } d.SetId(key) - return ReadAccountParameter(d, meta) + return ReadAccountParameter(ctx, d, meta) } // ReadAccountParameter implements schema.ReadFunc. 
-func ReadAccountParameter(d *schema.ResourceData, meta interface{}) error { +func ReadAccountParameter(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() parameterName := d.Id() parameter, err := client.Parameters.ShowAccountParameter(ctx, sdk.AccountParameter(parameterName)) if err != nil { - return fmt.Errorf("error reading account parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error reading account parameter err = %w", err)) } err = d.Set("value", parameter.Value) if err != nil { - return fmt.Errorf("error setting account parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error setting account parameter err = %w", err)) } err = d.Set("key", parameter.Key) if err != nil { - return fmt.Errorf("error setting account parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error setting account parameter err = %w", err)) } return nil } // UpdateAccountParameter implements schema.UpdateFunc. -func UpdateAccountParameter(d *schema.ResourceData, meta interface{}) error { - return CreateAccountParameter(d, meta) +func UpdateAccountParameter(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return CreateAccountParameter(ctx, d, meta) } // DeleteAccountParameter implements schema.DeleteFunc. 
-func DeleteAccountParameter(d *schema.ResourceData, meta interface{}) error { +func DeleteAccountParameter(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client key := d.Get("key").(string) - ctx := context.Background() parameter := sdk.AccountParameter(key) defaultParameter, err := client.Parameters.ShowAccountParameter(ctx, sdk.AccountParameter(key)) if err != nil { - return err + return diag.FromErr(err) } defaultValue := defaultParameter.Default err = client.Parameters.SetAccountParameter(ctx, parameter, defaultValue) if err != nil { - return fmt.Errorf("error resetting account parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error resetting account parameter err = %w", err)) } d.SetId("") diff --git a/pkg/resources/account_password_policy_attachment.go b/pkg/resources/account_password_policy_attachment.go index 245b2d33c2..03375b0c75 100644 --- a/pkg/resources/account_password_policy_attachment.go +++ b/pkg/resources/account_password_policy_attachment.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -26,9 +29,9 @@ func AccountPasswordPolicyAttachment() *schema.Resource { return &schema.Resource{ Description: "Specifies the password policy to use for the current account. 
To set the password policy of a different account, use a provider alias.", - Create: CreateAccountPasswordPolicyAttachment, - Read: ReadAccountPasswordPolicyAttachment, - Delete: DeleteAccountPasswordPolicyAttachment, + CreateContext: TrackingCreateWrapper(resources.AccountPasswordPolicyAttachment, CreateAccountPasswordPolicyAttachment), + ReadContext: TrackingReadWrapper(resources.AccountPasswordPolicyAttachment, ReadAccountPasswordPolicyAttachment), + DeleteContext: TrackingDeleteWrapper(resources.AccountPasswordPolicyAttachment, DeleteAccountPasswordPolicyAttachment), Schema: accountPasswordPolicyAttachmentSchema, Importer: &schema.ResourceImporter{ @@ -38,13 +41,12 @@ func AccountPasswordPolicyAttachment() *schema.Resource { } // CreateAccountPasswordPolicyAttachment implements schema.CreateFunc. -func CreateAccountPasswordPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func CreateAccountPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() passwordPolicy, ok := sdk.NewObjectIdentifierFromFullyQualifiedName(d.Get("password_policy").(string)).(sdk.SchemaObjectIdentifier) if !ok { - return fmt.Errorf("password_policy %s is not a valid password policy qualified name, expected format: `\"db\".\"schema\".\"policy\"`", d.Get("password_policy")) + return diag.FromErr(fmt.Errorf("password_policy %s is not a valid password policy qualified name, expected format: `\"db\".\"schema\".\"policy\"`", d.Get("password_policy"))) } err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{ @@ -53,27 +55,26 @@ func CreateAccountPasswordPolicyAttachment(d *schema.ResourceData, meta interfac }, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(passwordPolicy)) - return ReadAccountPasswordPolicyAttachment(d, meta) + return ReadAccountPasswordPolicyAttachment(ctx, d, meta) } -func 
ReadAccountPasswordPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func ReadAccountPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { passwordPolicy := helpers.DecodeSnowflakeID(d.Id()) if err := d.Set("password_policy", passwordPolicy.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } return nil } // DeleteAccountPasswordPolicyAttachment implements schema.DeleteFunc. -func DeleteAccountPasswordPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func DeleteAccountPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{ Unset: &sdk.AccountUnset{ @@ -81,7 +82,7 @@ func DeleteAccountPasswordPolicyAttachment(d *schema.ResourceData, meta interfac }, }) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/pkg/resources/alert.go b/pkg/resources/alert.go index 2c16b7c8c7..6ca90697db 100644 --- a/pkg/resources/alert.go +++ b/pkg/resources/alert.go @@ -119,7 +119,7 @@ func Alert() *schema.Resource { Schema: alertSchema, Importer: &schema.ResourceImporter{ - StateContext: TrackingImportWrapper(resources.Alert, schema.ImportStatePassthroughContext), + StateContext: schema.ImportStatePassthroughContext, }, } } diff --git a/pkg/resources/api_integration.go b/pkg/resources/api_integration.go index c6d8ffc8d7..fee0a6312c 100644 --- a/pkg/resources/api_integration.go +++ b/pkg/resources/api_integration.go @@ -6,6 +6,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -121,10 +124,10 @@ var 
apiIntegrationSchema = map[string]*schema.Schema{ // APIIntegration returns a pointer to the resource representing an api integration. func APIIntegration() *schema.Resource { return &schema.Resource{ - Create: CreateAPIIntegration, - Read: ReadAPIIntegration, - Update: UpdateAPIIntegration, - Delete: DeleteAPIIntegration, + CreateContext: TrackingCreateWrapper(resources.ApiIntegration, CreateAPIIntegration), + ReadContext: TrackingReadWrapper(resources.ApiIntegration, ReadAPIIntegration), + UpdateContext: TrackingUpdateWrapper(resources.ApiIntegration, UpdateAPIIntegration), + DeleteContext: TrackingDeleteWrapper(resources.ApiIntegration, DeleteAPIIntegration), Schema: apiIntegrationSchema, Importer: &schema.ResourceImporter{ @@ -142,9 +145,8 @@ func toApiIntegrationEndpointPrefix(paths []string) []sdk.ApiIntegrationEndpoint } // CreateAPIIntegration implements schema.CreateFunc. -func CreateAPIIntegration(d *schema.ResourceData, meta interface{}) error { +func CreateAPIIntegration(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -169,7 +171,7 @@ func CreateAPIIntegration(d *schema.ResourceData, meta interface{}) error { case "aws_api_gateway", "aws_private_api_gateway", "aws_gov_api_gateway", "aws_gov_private_api_gateway": roleArn, ok := d.GetOk("api_aws_role_arn") if !ok { - return fmt.Errorf("if you use AWS api provider you must specify an api_aws_role_arn") + return diag.FromErr(fmt.Errorf("if you use AWS api provider you must specify an api_aws_role_arn")) } awsParams := sdk.NewAwsApiParamsRequest(sdk.ApiIntegrationAwsApiProviderType(apiProvider), roleArn.(string)) if v, ok := d.GetOk("api_key"); ok { @@ -179,11 +181,11 @@ func CreateAPIIntegration(d *schema.ResourceData, meta interface{}) error { case "azure_api_management": tenantId, ok := d.GetOk("azure_tenant_id") if !ok { - 
return fmt.Errorf("if you use the Azure api provider you must specify an azure_tenant_id") + return diag.FromErr(fmt.Errorf("if you use the Azure api provider you must specify an azure_tenant_id")) } applicationId, ok := d.GetOk("azure_ad_application_id") if !ok { - return fmt.Errorf("if you use the Azure api provider you must specify an azure_ad_application_id") + return diag.FromErr(fmt.Errorf("if you use the Azure api provider you must specify an azure_ad_application_id")) } azureParams := sdk.NewAzureApiParamsRequest(tenantId.(string), applicationId.(string)) if v, ok := d.GetOk("api_key"); ok { @@ -193,66 +195,65 @@ func CreateAPIIntegration(d *schema.ResourceData, meta interface{}) error { case "google_api_gateway": audience, ok := d.GetOk("google_audience") if !ok { - return fmt.Errorf("if you use GCP api provider you must specify a google_audience") + return diag.FromErr(fmt.Errorf("if you use GCP api provider you must specify a google_audience")) } googleParams := sdk.NewGoogleApiParamsRequest(audience.(string)) createRequest.WithGoogleApiProviderParams(googleParams) default: - return fmt.Errorf("unexpected provider %v", apiProvider) + return diag.FromErr(fmt.Errorf("unexpected provider %v", apiProvider)) } err := client.ApiIntegrations.Create(ctx, createRequest) if err != nil { - return fmt.Errorf("error creating api integration: %w", err) + return diag.FromErr(fmt.Errorf("error creating api integration: %w", err)) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadAPIIntegration(d, meta) + return ReadAPIIntegration(ctx, d, meta) } // ReadAPIIntegration implements schema.ReadFunc. 
-func ReadAPIIntegration(d *schema.ResourceData, meta interface{}) error { +func ReadAPIIntegration(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) integration, err := client.ApiIntegrations.ShowByID(ctx, id) if err != nil { log.Printf("[DEBUG] api integration (%s) not found", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } // Note: category must be API or something is broken if c := integration.Category; c != "API" { - return fmt.Errorf("expected %v to be an api integration, got %v", id, c) + return diag.FromErr(fmt.Errorf("expected %v to be an api integration, got %v", id, c)) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", integration.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", integration.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", integration.CreatedOn.String()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enabled", integration.Enabled); err != nil { - return err + return diag.FromErr(err) } // Some properties come from the DESCRIBE INTEGRATION call integrationProperties, err := client.ApiIntegrations.Describe(ctx, id) if err != nil { - return fmt.Errorf("could not describe api integration: %w", err) + return diag.FromErr(fmt.Errorf("could not describe api integration: %w", err)) } for _, property := range integrationProperties { @@ -263,66 +264,65 @@ func ReadAPIIntegration(d *schema.ResourceData, meta interface{}) error { // We set this using the SHOW INTEGRATION call so let's ignore it here case "API_ALLOWED_PREFIXES": if err := d.Set("api_allowed_prefixes", strings.Split(value, ",")); err != nil { - return err + return diag.FromErr(err) } 
case "API_BLOCKED_PREFIXES": if val := value; val != "" { if err := d.Set("api_blocked_prefixes", strings.Split(val, ",")); err != nil { - return err + return diag.FromErr(err) } } case "API_AWS_IAM_USER_ARN": if err := d.Set("api_aws_iam_user_arn", value); err != nil { - return err + return diag.FromErr(err) } case "API_AWS_ROLE_ARN": if err := d.Set("api_aws_role_arn", value); err != nil { - return err + return diag.FromErr(err) } case "API_AWS_EXTERNAL_ID": if err := d.Set("api_aws_external_id", value); err != nil { - return err + return diag.FromErr(err) } case "AZURE_CONSENT_URL": if err := d.Set("azure_consent_url", value); err != nil { - return err + return diag.FromErr(err) } case "AZURE_MULTI_TENANT_APP_NAME": if err := d.Set("azure_multi_tenant_app_name", value); err != nil { - return err + return diag.FromErr(err) } case "AZURE_TENANT_ID": if err := d.Set("azure_tenant_id", value); err != nil { - return err + return diag.FromErr(err) } case "AZURE_AD_APPLICATION_ID": if err := d.Set("azure_ad_application_id", value); err != nil { - return err + return diag.FromErr(err) } case "GOOGLE_AUDIENCE": if err := d.Set("google_audience", value); err != nil { - return err + return diag.FromErr(err) } case "API_GCP_SERVICE_ACCOUNT": if err := d.Set("api_gcp_service_account", value); err != nil { - return err + return diag.FromErr(err) } case "API_PROVIDER": if err := d.Set("api_provider", strings.ToLower(value)); err != nil { - return err + return diag.FromErr(err) } default: log.Printf("[WARN] unexpected api integration property %v returned from Snowflake", name) } } - return err + return diag.FromErr(err) } // UpdateAPIIntegration implements schema.UpdateFunc. 
-func UpdateAPIIntegration(d *schema.ResourceData, meta interface{}) error { +func UpdateAPIIntegration(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) var runSetStatement bool @@ -348,7 +348,7 @@ func UpdateAPIIntegration(d *schema.ResourceData, meta interface{}) error { if len(v) == 0 { err := client.ApiIntegrations.Alter(ctx, sdk.NewAlterApiIntegrationRequest(id).WithUnset(sdk.NewApiIntegrationUnsetRequest().WithApiBlockedPrefixes(sdk.Bool(true)))) if err != nil { - return fmt.Errorf("error unsetting api_blocked_prefixes: %w", err) + return diag.FromErr(fmt.Errorf("error unsetting api_blocked_prefixes: %w", err)) } } else { runSetStatement = true @@ -392,28 +392,27 @@ func UpdateAPIIntegration(d *schema.ResourceData, meta interface{}) error { setRequest.WithGoogleParams(googleParams) } default: - return fmt.Errorf("unexpected provider %v", apiProvider) + return diag.FromErr(fmt.Errorf("unexpected provider %v", apiProvider)) } if runSetStatement { err := client.ApiIntegrations.Alter(ctx, sdk.NewAlterApiIntegrationRequest(id).WithSet(setRequest)) if err != nil { - return fmt.Errorf("error updating api integration: %w", err) + return diag.FromErr(fmt.Errorf("error updating api integration: %w", err)) } } - return ReadAPIIntegration(d, meta) + return ReadAPIIntegration(ctx, d, meta) } // DeleteAPIIntegration implements schema.DeleteFunc. 
-func DeleteAPIIntegration(d *schema.ResourceData, meta interface{}) error { +func DeleteAPIIntegration(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) err := client.ApiIntegrations.Drop(ctx, sdk.NewDropApiIntegrationRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/database_old.go b/pkg/resources/database_old.go index 15d4fca440..6c6b6c3641 100644 --- a/pkg/resources/database_old.go +++ b/pkg/resources/database_old.go @@ -7,6 +7,8 @@ import ( "slices" "strconv" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -90,10 +92,10 @@ var databaseOldSchema = map[string]*schema.Schema{ // Database returns a pointer to the resource representing a database. func DatabaseOld() *schema.Resource { return &schema.Resource{ - Create: CreateDatabaseOld, - Read: ReadDatabaseOld, - Delete: DeleteDatabaseOld, - Update: UpdateDatabaseOld, + CreateContext: TrackingCreateWrapper(resources.DatabaseOld, CreateDatabaseOld), + ReadContext: TrackingReadWrapper(resources.DatabaseOld, ReadDatabaseOld), + DeleteContext: TrackingDeleteWrapper(resources.DatabaseOld, DeleteDatabaseOld), + UpdateContext: TrackingUpdateWrapper(resources.DatabaseOld, UpdateDatabaseOld), DeprecationMessage: "This resource is deprecated and will be removed in a future major version release. Please use snowflake_database or snowflake_shared_database or snowflake_secondary_database instead.", Schema: databaseOldSchema, @@ -104,9 +106,8 @@ func DatabaseOld() *schema.Resource { } // CreateDatabase implements schema.CreateFunc. 
-func CreateDatabaseOld(d *schema.ResourceData, meta interface{}) error { +func CreateDatabaseOld(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -121,10 +122,10 @@ func CreateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.CreateShared(ctx, id, shareID, opts) if err != nil { - return fmt.Errorf("error creating database %v: %w", name, err) + return diag.FromErr(fmt.Errorf("error creating database %v: %w", name, err)) } d.SetId(name) - return ReadDatabaseOld(d, meta) + return ReadDatabaseOld(ctx, d, meta) } // Is it a Secondary Database? if primaryName, ok := d.GetOk("from_replica"); ok { @@ -135,11 +136,11 @@ func CreateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.CreateSecondary(ctx, id, primaryID, opts) if err != nil { - return fmt.Errorf("error creating database %v: %w", name, err) + return diag.FromErr(fmt.Errorf("error creating database %v: %w", name, err)) } d.SetId(name) // todo: add failover_configuration block - return ReadDatabaseOld(d, meta) + return ReadDatabaseOld(ctx, d, meta) } // Otherwise it is a Standard Database @@ -164,7 +165,7 @@ func CreateDatabaseOld(d *schema.ResourceData, meta interface{}) error { err := client.Databases.Create(ctx, id, &opts) if err != nil { - return fmt.Errorf("error creating database %v: %w", name, err) + return diag.FromErr(fmt.Errorf("error creating database %v: %w", name, err)) } d.SetId(name) @@ -185,16 +186,15 @@ func CreateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.AlterReplication(ctx, id, opts) if err != nil { - return fmt.Errorf("error enabling replication for database %v: %w", name, err) + return diag.FromErr(fmt.Errorf("error enabling replication for database %v: %w", name, err)) } } - return 
ReadDatabaseOld(d, meta) + return ReadDatabaseOld(ctx, d, meta) } -func ReadDatabaseOld(d *schema.ResourceData, meta interface{}) error { +func ReadDatabaseOld(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) database, err := client.Databases.ShowByID(ctx, id) @@ -205,35 +205,34 @@ func ReadDatabaseOld(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("comment", database.Comment); err != nil { - return err + return diag.FromErr(err) } dataRetention, err := client.Parameters.ShowAccountParameter(ctx, sdk.AccountParameterDataRetentionTimeInDays) if err != nil { - return err + return diag.FromErr(err) } paramDataRetention, err := strconv.Atoi(dataRetention.Value) if err != nil { - return err + return diag.FromErr(err) } if dataRetentionDays := d.Get("data_retention_time_in_days"); dataRetentionDays.(int) != IntDefault || database.RetentionTime != paramDataRetention { if err := d.Set("data_retention_time_in_days", database.RetentionTime); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set("is_transient", database.Transient); err != nil { - return err + return diag.FromErr(err) } return nil } -func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { +func UpdateDatabaseOld(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) client := meta.(*provider.Context).Client - ctx := context.Background() if d.HasChange("name") { newName := d.Get("name").(string) @@ -243,7 +242,7 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.Alter(ctx, id, opts) if err != nil { - return fmt.Errorf("error updating database name on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error updating database name 
on %v err = %w", d.Id(), err)) } d.SetId(helpers.EncodeSnowflakeID(newId)) id = newId @@ -261,7 +260,7 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.Alter(ctx, id, opts) if err != nil { - return fmt.Errorf("error updating database comment on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error updating database comment on %v err = %w", d.Id(), err)) } } @@ -273,7 +272,7 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return fmt.Errorf("error when setting database data retention time on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error when setting database data retention time on %v err = %w", d.Id(), err)) } } else { err := client.Databases.Alter(ctx, id, &sdk.AlterDatabaseOptions{ @@ -282,7 +281,7 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return fmt.Errorf("error when usetting database data retention time on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error when unsetting database data retention time on %v err = %w", d.Id(), err)) } } } @@ -335,7 +334,7 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.AlterReplication(ctx, id, opts) if err != nil { - return fmt.Errorf("error enabling replication configuration on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error enabling replication configuration on %v err = %w", d.Id(), err)) } } @@ -347,23 +346,22 @@ func UpdateDatabaseOld(d *schema.ResourceData, meta interface{}) error { } err := client.Databases.AlterReplication(ctx, id, opts) if err != nil { - return fmt.Errorf("error disabling replication configuration on %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error disabling replication configuration on %v err = %w", d.Id(), err)) } } } - return ReadDatabaseOld(d, meta) + return ReadDatabaseOld(ctx, d, meta) } -func
DeleteDatabaseOld(d *schema.ResourceData, meta interface{}) error { +func DeleteDatabaseOld(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) err := client.Databases.Drop(ctx, id, &sdk.DropDatabaseOptions{ IfExists: sdk.Bool(true), }) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") return nil diff --git a/pkg/resources/dynamic_table.go b/pkg/resources/dynamic_table.go index e8b64b64e4..72446d5b91 100644 --- a/pkg/resources/dynamic_table.go +++ b/pkg/resources/dynamic_table.go @@ -7,6 +7,9 @@ import ( "strings" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -163,10 +166,10 @@ var dynamicTableSchema = map[string]*schema.Schema{ // DynamicTable returns a pointer to the resource representing a dynamic table. func DynamicTable() *schema.Resource { return &schema.Resource{ - Create: CreateDynamicTable, - Read: ReadDynamicTable, - Update: UpdateDynamicTable, - Delete: DeleteDynamicTable, + CreateContext: TrackingCreateWrapper(resources.DynamicTable, CreateDynamicTable), + ReadContext: TrackingReadWrapper(resources.DynamicTable, ReadDynamicTable), + UpdateContext: TrackingUpdateWrapper(resources.DynamicTable, UpdateDynamicTable), + DeleteContext: TrackingDeleteWrapper(resources.DynamicTable, DeleteDynamicTable), Schema: dynamicTableSchema, Importer: &schema.ResourceImporter{ @@ -176,93 +179,93 @@ func DynamicTable() *schema.Resource { } // ReadDynamicTable implements schema.ReadFunc. 
-func ReadDynamicTable(d *schema.ResourceData, meta interface{}) error { +func ReadDynamicTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - dynamicTable, err := client.DynamicTables.ShowByID(context.Background(), id) + dynamicTable, err := client.DynamicTables.ShowByID(ctx, id) if err != nil { log.Printf("[DEBUG] dynamic table (%s) not found", d.Id()) d.SetId("") return nil } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", dynamicTable.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", dynamicTable.DatabaseName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", dynamicTable.SchemaName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("warehouse", dynamicTable.Warehouse); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", dynamicTable.Comment); err != nil { - return err + return diag.FromErr(err) } tl := map[string]interface{}{} if dynamicTable.TargetLag == "DOWNSTREAM" { tl["downstream"] = true if err := d.Set("target_lag", []interface{}{tl}); err != nil { - return err + return diag.FromErr(err) } } else { tl["maximum_duration"] = dynamicTable.TargetLag if err := d.Set("target_lag", []interface{}{tl}); err != nil { - return err + return diag.FromErr(err) } } if strings.Contains(dynamicTable.Text, "OR REPLACE") { if err := d.Set("or_replace", true); err != nil { - return err + return diag.FromErr(err) } } else { if err := d.Set("or_replace", false); err != nil { - return err + return diag.FromErr(err) } } if strings.Contains(dynamicTable.Text, "initialize = 'ON_CREATE'") { if err := d.Set("initialize", "ON_CREATE"); err != nil { - return err + return diag.FromErr(err) } } else if 
strings.Contains(dynamicTable.Text, "initialize = 'ON_SCHEDULE'") { if err := d.Set("initialize", "ON_SCHEDULE"); err != nil { - return err + return diag.FromErr(err) } } m := refreshModePattern.FindStringSubmatch(dynamicTable.Text) if len(m) > 1 { if err := d.Set("refresh_mode", m[1]); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set("created_on", dynamicTable.CreatedOn.Format(time.RFC3339)); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("cluster_by", dynamicTable.ClusterBy); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("rows", dynamicTable.Rows); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("bytes", dynamicTable.Bytes); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("owner", dynamicTable.Owner); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("refresh_mode_reason", dynamicTable.RefreshModeReason); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("automatic_clustering", dynamicTable.AutomaticClustering); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("scheduling_state", string(dynamicTable.SchedulingState)); err != nil { - return err + return diag.FromErr(err) } /* guides on time formatting @@ -271,25 +274,25 @@ func ReadDynamicTable(d *schema.ResourceData, meta interface{}) error { note: format may depend on what the account parameter for TIMESTAMP_OUTPUT_FORMAT is set to. Perhaps we should return this as a string rather than a time.Time? 
*/ if err := d.Set("last_suspended_on", dynamicTable.LastSuspendedOn.Format("2006-01-02T16:04:05.000 -0700")); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("is_clone", dynamicTable.IsClone); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("is_replica", dynamicTable.IsReplica); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("data_timestamp", dynamicTable.DataTimestamp.Format("2006-01-02T16:04:05.000 -0700")); err != nil { - return err + return diag.FromErr(err) } extractor := snowflake.NewViewSelectStatementExtractor(dynamicTable.Text) query, err := extractor.ExtractDynamicTable() if err != nil { - return err + return diag.FromErr(err) } if err := d.Set("query", query); err != nil { - return err + return diag.FromErr(err) } return nil @@ -309,7 +312,7 @@ func parseTargetLag(v interface{}) sdk.TargetLag { } // CreateDynamicTable implements schema.CreateFunc. -func CreateDynamicTable(d *schema.ResourceData, meta interface{}) error { +func CreateDynamicTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client databaseName := d.Get("database").(string) @@ -334,18 +337,17 @@ func CreateDynamicTable(d *schema.ResourceData, meta interface{}) error { if v, ok := d.GetOk("initialize"); ok { request.WithInitialize(sdk.DynamicTableInitialize(v.(string))) } - if err := client.DynamicTables.Create(context.Background(), request); err != nil { - return err + if err := client.DynamicTables.Create(ctx, request); err != nil { + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadDynamicTable(d, meta) + return ReadDynamicTable(ctx, d, meta) } // UpdateDynamicTable implements schema.UpdateFunc. 
-func UpdateDynamicTable(d *schema.ResourceData, meta interface{}) error { +func UpdateDynamicTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) request := sdk.NewAlterDynamicTableRequest(id) @@ -366,7 +368,7 @@ func UpdateDynamicTable(d *schema.ResourceData, meta interface{}) error { if runSet { request.WithSet(set) if err := client.DynamicTables.Alter(ctx, request); err != nil { - return err + return diag.FromErr(err) } } @@ -377,19 +379,19 @@ func UpdateDynamicTable(d *schema.ResourceData, meta interface{}) error { Value: sdk.String(d.Get("comment").(string)), }) if err != nil { - return err + return diag.FromErr(err) } } - return ReadDynamicTable(d, meta) + return ReadDynamicTable(ctx, d, meta) } // DeleteDynamicTable implements schema.DeleteFunc. -func DeleteDynamicTable(d *schema.ResourceData, meta interface{}) error { +func DeleteDynamicTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - if err := client.DynamicTables.Drop(context.Background(), sdk.NewDropDynamicTableRequest(id)); err != nil { - return err + if err := client.DynamicTables.Drop(ctx, sdk.NewDropDynamicTableRequest(id)); err != nil { + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/email_notification_integration.go b/pkg/resources/email_notification_integration.go index 586adfc72d..bd0a38c64e 100644 --- a/pkg/resources/email_notification_integration.go +++ b/pkg/resources/email_notification_integration.go @@ -6,6 +6,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -42,10 +45,10 @@ var emailNotificationIntegrationSchema = map[string]*schema.Schema{ // EmailNotificationIntegration returns a pointer to the resource representing a notification integration. func EmailNotificationIntegration() *schema.Resource { return &schema.Resource{ - Create: CreateEmailNotificationIntegration, - Read: ReadEmailNotificationIntegration, - Update: UpdateEmailNotificationIntegration, - Delete: DeleteEmailNotificationIntegration, + CreateContext: TrackingCreateWrapper(resources.EmailNotificationIntegration, CreateEmailNotificationIntegration), + ReadContext: TrackingReadWrapper(resources.EmailNotificationIntegration, ReadEmailNotificationIntegration), + UpdateContext: TrackingUpdateWrapper(resources.EmailNotificationIntegration, UpdateEmailNotificationIntegration), + DeleteContext: TrackingDeleteWrapper(resources.EmailNotificationIntegration, DeleteEmailNotificationIntegration), Schema: emailNotificationIntegrationSchema, Importer: &schema.ResourceImporter{ @@ -63,9 +66,8 @@ func toAllowedRecipients(emails []string) []sdk.NotificationIntegrationAllowedRe } // CreateEmailNotificationIntegration implements schema.CreateFunc. 
-func CreateEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func CreateEmailNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -85,47 +87,46 @@ func CreateEmailNotificationIntegration(d *schema.ResourceData, meta interface{} err := client.NotificationIntegrations.Create(ctx, createRequest) if err != nil { - return fmt.Errorf("error creating notification integration: %w", err) + return diag.FromErr(fmt.Errorf("error creating notification integration: %w", err)) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadEmailNotificationIntegration(d, meta) + return ReadEmailNotificationIntegration(ctx, d, meta) } // ReadEmailNotificationIntegration implements schema.ReadFunc. -func ReadEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func ReadEmailNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) integration, err := client.NotificationIntegrations.ShowByID(ctx, id) if err != nil { log.Printf("[DEBUG] notification integration (%s) not found", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", integration.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enabled", integration.Enabled); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", integration.Comment); err != nil { - return err + return diag.FromErr(err) } // Some properties come from the DESCRIBE INTEGRATION call integrationProperties, err := 
client.NotificationIntegrations.Describe(ctx, id) if err != nil { - return fmt.Errorf("could not describe notification integration: %w", err) + return diag.FromErr(fmt.Errorf("could not describe notification integration: %w", err)) } for _, property := range integrationProperties { name := property.Name @@ -135,11 +136,11 @@ func ReadEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) case "ALLOWED_RECIPIENTS": if value == "" { if err := d.Set("allowed_recipients", make([]string, 0)); err != nil { - return err + return diag.FromErr(err) } } else { if err := d.Set("allowed_recipients", strings.Split(value, ",")); err != nil { - return err + return diag.FromErr(err) } } default: @@ -147,13 +148,12 @@ func ReadEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) } } - return err + return diag.FromErr(err) } // UpdateEmailNotificationIntegration implements schema.UpdateFunc. -func UpdateEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func UpdateEmailNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) var runSetStatement bool @@ -190,29 +190,28 @@ func UpdateEmailNotificationIntegration(d *schema.ResourceData, meta interface{} if runSetStatement { err := client.NotificationIntegrations.Alter(ctx, sdk.NewAlterNotificationIntegrationRequest(id).WithSet(setRequest)) if err != nil { - return fmt.Errorf("error updating notification integration: %w", err) + return diag.FromErr(fmt.Errorf("error updating notification integration: %w", err)) } } if runUnsetStatement { err := client.NotificationIntegrations.Alter(ctx, sdk.NewAlterNotificationIntegrationRequest(id).WithUnsetEmailParams(unsetRequest)) if err != nil { - return fmt.Errorf("error updating notification integration: %w", err) + return diag.FromErr(fmt.Errorf("error 
updating notification integration: %w", err)) } } - return ReadEmailNotificationIntegration(d, meta) + return ReadEmailNotificationIntegration(ctx, d, meta) } // DeleteEmailNotificationIntegration implements schema.DeleteFunc. -func DeleteEmailNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func DeleteEmailNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) err := client.NotificationIntegrations.Drop(ctx, sdk.NewDropNotificationIntegrationRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/external_table.go b/pkg/resources/external_table.go index 94cd921cf1..833b8c4dd8 100644 --- a/pkg/resources/external_table.go +++ b/pkg/resources/external_table.go @@ -5,6 +5,9 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -140,10 +143,10 @@ var externalTableSchema = map[string]*schema.Schema{ func ExternalTable() *schema.Resource { return &schema.Resource{ - Create: CreateExternalTable, - Read: ReadExternalTable, - Update: UpdateExternalTable, - Delete: DeleteExternalTable, + CreateContext: TrackingCreateWrapper(resources.ExternalTable, CreateExternalTable), + ReadContext: TrackingReadWrapper(resources.ExternalTable, ReadExternalTable), + UpdateContext: TrackingUpdateWrapper(resources.ExternalTable, UpdateExternalTable), + DeleteContext: TrackingDeleteWrapper(resources.ExternalTable, DeleteExternalTable), Schema: externalTableSchema, Importer: &schema.ResourceImporter{ @@ -153,9 +156,8 @@ func ExternalTable() *schema.Resource { } // 
CreateExternalTable implements schema.CreateFunc. -func CreateExternalTable(d *schema.ResourceData, meta any) error { +func CreateExternalTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() database := d.Get("database").(string) schema := d.Get("schema").(string) @@ -214,7 +216,7 @@ func CreateExternalTable(d *schema.ResourceData, meta any) error { } err := client.ExternalTables.CreateDeltaLake(ctx, req) if err != nil { - return err + return diag.FromErr(err) } default: req := sdk.NewCreateExternalTableRequest(id, location). @@ -236,47 +238,45 @@ func CreateExternalTable(d *schema.ResourceData, meta any) error { } err := client.ExternalTables.Create(ctx, req) if err != nil { - return err + return diag.FromErr(err) } } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadExternalTable(d, meta) + return ReadExternalTable(ctx, d, meta) } // ReadExternalTable implements schema.ReadFunc. -func ReadExternalTable(d *schema.ResourceData, meta any) error { +func ReadExternalTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) externalTable, err := client.ExternalTables.ShowByID(ctx, id) if err != nil { log.Printf("[DEBUG] external table (%s) not found", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", externalTable.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("owner", externalTable.Owner); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateExternalTable implements schema.UpdateFunc. 
-func UpdateExternalTable(d *schema.ResourceData, meta any) error { +func UpdateExternalTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) if d.HasChange("tag") { @@ -285,7 +285,7 @@ func UpdateExternalTable(d *schema.ResourceData, meta any) error { if len(unsetTags) > 0 { err := client.ExternalTables.Alter(ctx, sdk.NewAlterExternalTableRequest(id).WithUnsetTag(unsetTags)) if err != nil { - return fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err)) } } @@ -296,23 +296,22 @@ func UpdateExternalTable(d *schema.ResourceData, meta any) error { } err := client.ExternalTables.Alter(ctx, sdk.NewAlterExternalTableRequest(id).WithSetTag(tagAssociationRequests)) if err != nil { - return fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err)) } } } - return ReadExternalTable(d, meta) + return ReadExternalTable(ctx, d, meta) } // DeleteExternalTable implements schema.DeleteFunc. 
-func DeleteExternalTable(d *schema.ResourceData, meta any) error { +func DeleteExternalTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.ExternalTables.Drop(ctx, sdk.NewDropExternalTableRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/failover_group.go b/pkg/resources/failover_group.go index e873436b58..4a8ab0e3fe 100644 --- a/pkg/resources/failover_group.go +++ b/pkg/resources/failover_group.go @@ -8,6 +8,9 @@ import ( "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -138,10 +141,10 @@ var failoverGroupSchema = map[string]*schema.Schema{ // FailoverGroup returns a pointer to the resource representing a failover group. func FailoverGroup() *schema.Resource { return &schema.Resource{ - Create: CreateFailoverGroup, - Read: ReadFailoverGroup, - Update: UpdateFailoverGroup, - Delete: DeleteFailoverGroup, + CreateContext: TrackingCreateWrapper(resources.FailoverGroup, CreateFailoverGroup), + ReadContext: TrackingReadWrapper(resources.FailoverGroup, ReadFailoverGroup), + UpdateContext: TrackingUpdateWrapper(resources.FailoverGroup, UpdateFailoverGroup), + DeleteContext: TrackingDeleteWrapper(resources.FailoverGroup, DeleteFailoverGroup), Schema: failoverGroupSchema, Importer: &schema.ResourceImporter{ @@ -151,9 +154,8 @@ func FailoverGroup() *schema.Resource { } // CreateFailoverGroup implements schema.CreateFunc. 
-func CreateFailoverGroup(d *schema.ResourceData, meta interface{}) error { +func CreateFailoverGroup(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() // getting required attributes name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -168,15 +170,15 @@ func CreateFailoverGroup(d *schema.ResourceData, meta interface{}) error { primaryFailoverGroupID := sdk.NewExternalObjectIdentifier(sdk.NewAccountIdentifier(organizationName, sourceAccountName), sdk.NewAccountObjectIdentifier(sourceFailoverGroupName)) err := client.FailoverGroups.CreateSecondaryReplicationGroup(ctx, id, primaryFailoverGroupID, nil) if err != nil { - return err + return diag.FromErr(err) } d.SetId(name) - return ReadFailoverGroup(d, meta) + return ReadFailoverGroup(ctx, d, meta) } // these two are required attributes if from_replica is not set if _, ok := d.GetOk("object_types"); !ok { - return errors.New("object_types is required when not creating from a replica") + return diag.FromErr(errors.New("object_types is required when not creating from a replica")) } objectTypesList := expandStringList(d.Get("object_types").(*schema.Set).List()) objectTypes := make([]sdk.PluralObjectType, len(objectTypesList)) @@ -185,7 +187,7 @@ func CreateFailoverGroup(d *schema.ResourceData, meta interface{}) error { } if _, ok := d.GetOk("allowed_accounts"); !ok { - return errors.New("allowed_accounts is required when not creating from a replica") + return diag.FromErr(errors.New("allowed_accounts is required when not creating from a replica")) } aaList := expandStringList(d.Get("allowed_accounts").(*schema.Set).List()) allowedAccounts := make([]sdk.AccountIdentifier, len(aaList)) @@ -193,7 +195,7 @@ func CreateFailoverGroup(d *schema.ResourceData, meta interface{}) error { // validation since we cannot do that in the ValidateFunc parts := strings.Split(v, ".") if len(parts) != 2 { - return 
fmt.Errorf("allowed_account %s cannot be an account locator and must be of the format .", allowedAccounts[i]) + return diag.FromErr(fmt.Errorf("allowed_account %s cannot be an account locator and must be of the format .", allowedAccounts[i])) } organizationName := parts[0] accountName := parts[1] @@ -257,29 +259,28 @@ func CreateFailoverGroup(d *schema.ResourceData, meta interface{}) error { err := client.FailoverGroups.Create(ctx, id, objectTypes, allowedAccounts, &opts) if err != nil { - return err + return diag.FromErr(err) } d.SetId(name) - return ReadFailoverGroup(d, meta) + return ReadFailoverGroup(ctx, d, meta) } // ReadFailoverGroup implements schema.ReadFunc. -func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { +func ReadFailoverGroup(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) failoverGroup, err := client.FailoverGroups.ShowByID(ctx, id) if err != nil { - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", failoverGroup.Name); err != nil { - return err + return diag.FromErr(err) } // if the failover group is created from a replica, then we do not want to get the other values if _, ok := d.GetOk("from_replica"); ok { @@ -291,7 +292,7 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { if strings.Contains(replicationSchedule, "MINUTE") { interval, err := strconv.Atoi(strings.TrimSuffix(replicationSchedule, " MINUTE")) if err != nil { - return err + return diag.FromErr(err) } err = d.Set("replication_schedule", []interface{}{ map[string]interface{}{ @@ -299,7 +300,7 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return err + return diag.FromErr(err) } } 
else { repScheduleParts := strings.Split(replicationSchedule, " ") @@ -316,7 +317,7 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return err + return diag.FromErr(err) } } } @@ -327,7 +328,7 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { } objectTypesSet := schema.NewSet(schema.HashString, objectTypes) if err := d.Set("object_types", objectTypesSet); err != nil { - return err + return diag.FromErr(err) } // integration types @@ -338,7 +339,7 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { allowedIntegrationsTypesSet := schema.NewSet(schema.HashString, allowedIntegrationTypes) if err := d.Set("allowed_integration_types", allowedIntegrationsTypesSet); err != nil { - return err + return diag.FromErr(err) } // allowed accounts @@ -348,13 +349,13 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { } allowedAccountsSet := schema.NewSet(schema.HashString, allowedAccounts) if err := d.Set("allowed_accounts", allowedAccountsSet); err != nil { - return err + return diag.FromErr(err) } // allowed databases databases, err := client.FailoverGroups.ShowDatabases(ctx, id) if err != nil { - return err + return diag.FromErr(err) } allowedDatabases := make([]interface{}, len(databases)) for i, database := range databases { @@ -363,18 +364,18 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { allowedDatabasesSet := schema.NewSet(schema.HashString, allowedDatabases) if len(allowedDatabases) > 0 { if err := d.Set("allowed_databases", allowedDatabasesSet); err != nil { - return err + return diag.FromErr(err) } } else { if err := d.Set("allowed_databases", nil); err != nil { - return err + return diag.FromErr(err) } } // allowed shares shares, err := client.FailoverGroups.ShowShares(ctx, id) if err != nil { - return err + return diag.FromErr(err) } allowedShares := make([]interface{}, len(shares)) for i, share := range shares { @@ 
-383,11 +384,11 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { allowedSharesSet := schema.NewSet(schema.HashString, allowedShares) if len(allowedShares) > 0 { if err := d.Set("allowed_shares", allowedSharesSet); err != nil { - return err + return diag.FromErr(err) } } else { if err := d.Set("allowed_shares", nil); err != nil { - return err + return diag.FromErr(err) } } @@ -395,9 +396,8 @@ func ReadFailoverGroup(d *schema.ResourceData, meta interface{}) error { } // UpdateFailoverGroup implements schema.UpdateFunc. -func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { +func UpdateFailoverGroup(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) // alter failover group set ... @@ -437,7 +437,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { } if runSet { if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return err + return diag.FromErr(err) } } @@ -467,7 +467,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return err + return diag.FromErr(err) } } else { err := client.FailoverGroups.AlterSource(ctx, id, &sdk.AlterSourceFailoverGroupOptions{ @@ -476,7 +476,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return err + return diag.FromErr(err) } } } @@ -507,7 +507,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed databases for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed databases for failover group %v err = %w", id.Name(), err)) } } @@ -525,7 +525,7 @@ func UpdateFailoverGroup(d 
*schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed databases for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed databases for failover group %v err = %w", id.Name(), err)) } } } @@ -556,7 +556,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed shares for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed shares for failover group %v err = %w", id.Name(), err)) } } @@ -574,7 +574,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed shares for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed shares for failover group %v err = %w", id.Name(), err)) } } } @@ -613,7 +613,7 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed accounts for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed accounts for failover group %v err = %w", id.Name(), err)) } } @@ -631,22 +631,21 @@ func UpdateFailoverGroup(d *schema.ResourceData, meta interface{}) error { }, } if err := client.FailoverGroups.AlterSource(ctx, id, opts); err != nil { - return fmt.Errorf("error removing allowed accounts for failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error removing allowed accounts for failover group %v err = %w", id.Name(), err)) } } } - return ReadFailoverGroup(d, meta) + return ReadFailoverGroup(ctx, 
d, meta) } // DeleteFailoverGroup implements schema.DeleteFunc. -func DeleteFailoverGroup(d *schema.ResourceData, meta interface{}) error { +func DeleteFailoverGroup(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) - ctx := context.Background() err := client.FailoverGroups.Drop(ctx, id, &sdk.DropFailoverGroupOptions{IfExists: sdk.Bool(true)}) if err != nil { - return fmt.Errorf("error deleting failover group %v err = %w", id.Name(), err) + return diag.FromErr(fmt.Errorf("error deleting failover group %v err = %w", id.Name(), err)) } d.SetId("") diff --git a/pkg/resources/file_format.go b/pkg/resources/file_format.go index 561212e487..8ff7cec53f 100644 --- a/pkg/resources/file_format.go +++ b/pkg/resources/file_format.go @@ -7,6 +7,8 @@ import ( "fmt" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -315,10 +317,10 @@ func (ffi *fileFormatID) String() (string, error) { // FileFormat returns a pointer to the resource representing a file format. 
func FileFormat() *schema.Resource { return &schema.Resource{ - Create: CreateFileFormat, - Read: ReadFileFormat, - Update: UpdateFileFormat, - Delete: DeleteFileFormat, + CreateContext: TrackingCreateWrapper(resources.FileFormat, CreateFileFormat), + ReadContext: TrackingReadWrapper(resources.FileFormat, ReadFileFormat), + UpdateContext: TrackingUpdateWrapper(resources.FileFormat, UpdateFileFormat), + DeleteContext: TrackingDeleteWrapper(resources.FileFormat, DeleteFileFormat), CustomizeDiff: TrackingCustomDiffWrapper(resources.FileFormat, customdiff.All( ComputedIfAnyAttributeChanged(fileFormatSchema, FullyQualifiedNameAttributeName, "name"), @@ -332,9 +334,8 @@ func FileFormat() *schema.Resource { } // CreateFileFormat implements schema.CreateFunc. -func CreateFileFormat(d *schema.ResourceData, meta interface{}) error { +func CreateFileFormat(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() dbName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -520,7 +521,7 @@ func CreateFileFormat(d *schema.ResourceData, meta interface{}) error { err := client.FileFormats.Create(ctx, id, &opts) if err != nil { - return err + return diag.FromErr(err) } fileFormatID := &fileFormatID{ @@ -530,252 +531,250 @@ func CreateFileFormat(d *schema.ResourceData, meta interface{}) error { } dataIDInput, err := fileFormatID.String() if err != nil { - return err + return diag.FromErr(err) } d.SetId(dataIDInput) - return ReadFileFormat(d, meta) + return ReadFileFormat(ctx, d, meta) } // ReadFileFormat implements schema.ReadFunc. 
-func ReadFileFormat(d *schema.ResourceData, meta interface{}) error { +func ReadFileFormat(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() fileFormatID, err := fileFormatIDFromString(d.Id()) if err != nil { - return err + return diag.FromErr(err) } id := sdk.NewSchemaObjectIdentifier(fileFormatID.DatabaseName, fileFormatID.SchemaName, fileFormatID.FileFormatName) fileFormat, err := client.FileFormats.ShowByID(ctx, id) if err != nil { - return fmt.Errorf("cannot read file format: %w", err) + return diag.FromErr(fmt.Errorf("cannot read file format: %w", err)) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", fileFormat.Name.Name()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", fileFormat.Name.DatabaseName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", fileFormat.Name.SchemaName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("format_type", fileFormat.Type); err != nil { - return err + return diag.FromErr(err) } switch fileFormat.Type { case sdk.FileFormatTypeCSV: if err := d.Set("compression", fileFormat.Options.CSVCompression); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("record_delimiter", fileFormat.Options.CSVRecordDelimiter); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("field_delimiter", fileFormat.Options.CSVFieldDelimiter); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("file_extension", fileFormat.Options.CSVFileExtension); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("parse_header", fileFormat.Options.CSVParseHeader); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("skip_header", fileFormat.Options.CSVSkipHeader); err != nil { - 
return err + return diag.FromErr(err) } if err := d.Set("skip_blank_lines", fileFormat.Options.CSVSkipBlankLines); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("date_format", fileFormat.Options.CSVDateFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("time_format", fileFormat.Options.CSVTimeFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("timestamp_format", fileFormat.Options.CSVTimestampFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("binary_format", fileFormat.Options.CSVBinaryFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("escape", fileFormat.Options.CSVEscape); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("escape_unenclosed_field", fileFormat.Options.CSVEscapeUnenclosedField); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("trim_space", fileFormat.Options.CSVTrimSpace); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("field_optionally_enclosed_by", fileFormat.Options.CSVFieldOptionallyEnclosedBy); err != nil { - return err + return diag.FromErr(err) } nullIf := []string{} for _, s := range *fileFormat.Options.CSVNullIf { nullIf = append(nullIf, s.S) } if err := d.Set("null_if", nullIf); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("error_on_column_count_mismatch", fileFormat.Options.CSVErrorOnColumnCountMismatch); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("replace_invalid_characters", fileFormat.Options.CSVReplaceInvalidCharacters); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("empty_field_as_null", fileFormat.Options.CSVEmptyFieldAsNull); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("skip_byte_order_mark", fileFormat.Options.CSVSkipByteOrderMark); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("encoding", 
fileFormat.Options.CSVEncoding); err != nil { - return err + return diag.FromErr(err) } case sdk.FileFormatTypeJSON: if err := d.Set("compression", fileFormat.Options.JSONCompression); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("date_format", fileFormat.Options.JSONDateFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("time_format", fileFormat.Options.JSONTimeFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("timestamp_format", fileFormat.Options.JSONTimestampFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("binary_format", fileFormat.Options.JSONBinaryFormat); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("trim_space", fileFormat.Options.JSONTrimSpace); err != nil { - return err + return diag.FromErr(err) } nullIf := []string{} for _, s := range fileFormat.Options.JSONNullIf { nullIf = append(nullIf, s.S) } if err := d.Set("null_if", nullIf); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("file_extension", fileFormat.Options.JSONFileExtension); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enable_octal", fileFormat.Options.JSONEnableOctal); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("allow_duplicate", fileFormat.Options.JSONAllowDuplicate); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("strip_outer_array", fileFormat.Options.JSONStripOuterArray); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("strip_null_values", fileFormat.Options.JSONStripNullValues); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("replace_invalid_characters", fileFormat.Options.JSONReplaceInvalidCharacters); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("ignore_utf8_errors", fileFormat.Options.JSONIgnoreUTF8Errors); err != nil { - return err + return diag.FromErr(err) } if err := 
d.Set("skip_byte_order_mark", fileFormat.Options.JSONSkipByteOrderMark); err != nil { - return err + return diag.FromErr(err) } case sdk.FileFormatTypeAvro: if err := d.Set("compression", fileFormat.Options.AvroCompression); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("trim_space", fileFormat.Options.AvroTrimSpace); err != nil { - return err + return diag.FromErr(err) } nullIf := []string{} for _, s := range *fileFormat.Options.AvroNullIf { nullIf = append(nullIf, s.S) } if err := d.Set("null_if", nullIf); err != nil { - return err + return diag.FromErr(err) } case sdk.FileFormatTypeORC: if err := d.Set("trim_space", fileFormat.Options.ORCTrimSpace); err != nil { - return err + return diag.FromErr(err) } nullIf := []string{} for _, s := range *fileFormat.Options.ORCNullIf { nullIf = append(nullIf, s.S) } if err := d.Set("null_if", nullIf); err != nil { - return err + return diag.FromErr(err) } case sdk.FileFormatTypeParquet: if err := d.Set("compression", fileFormat.Options.ParquetCompression); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("binary_as_text", fileFormat.Options.ParquetBinaryAsText); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("trim_space", fileFormat.Options.ParquetTrimSpace); err != nil { - return err + return diag.FromErr(err) } nullIf := []string{} for _, s := range *fileFormat.Options.ParquetNullIf { nullIf = append(nullIf, s.S) } if err := d.Set("null_if", nullIf); err != nil { - return err + return diag.FromErr(err) } case sdk.FileFormatTypeXML: if err := d.Set("compression", fileFormat.Options.XMLCompression); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("ignore_utf8_errors", fileFormat.Options.XMLIgnoreUTF8Errors); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("preserve_space", fileFormat.Options.XMLPreserveSpace); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("strip_outer_element", 
fileFormat.Options.XMLStripOuterElement); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("disable_snowflake_data", fileFormat.Options.XMLDisableSnowflakeData); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("disable_auto_convert", fileFormat.Options.XMLDisableAutoConvert); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("skip_byte_order_mark", fileFormat.Options.XMLSkipByteOrderMark); err != nil { - return err + return diag.FromErr(err) } // Terraform doesn't like it when computed fields aren't set. if err := d.Set("null_if", []string{}); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set("comment", fileFormat.Comment); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateFileFormat implements schema.UpdateFunc. -func UpdateFileFormat(d *schema.ResourceData, meta interface{}) error { +func UpdateFileFormat(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() fileFormatID, err := fileFormatIDFromString(d.Id()) if err != nil { - return err + return diag.FromErr(err) } id := sdk.NewSchemaObjectIdentifier(fileFormatID.DatabaseName, fileFormatID.SchemaName, fileFormatID.FileFormatName) @@ -788,7 +787,7 @@ func UpdateFileFormat(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return fmt.Errorf("error renaming file format: %w", err) + return diag.FromErr(fmt.Errorf("error renaming file format: %w", err)) } d.SetId(helpers.EncodeSnowflakeID(newId)) @@ -1116,27 +1115,26 @@ func UpdateFileFormat(d *schema.ResourceData, meta interface{}) error { if runSet { err = client.FileFormats.Alter(ctx, id, &opts) if err != nil { - return err + return diag.FromErr(err) } } - return ReadFileFormat(d, meta) + return ReadFileFormat(ctx, d, meta) } // DeleteFileFormat implements schema.DeleteFunc. 
-func DeleteFileFormat(d *schema.ResourceData, meta interface{}) error { +func DeleteFileFormat(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() fileFormatID, err := fileFormatIDFromString(d.Id()) if err != nil { - return err + return diag.FromErr(err) } id := sdk.NewSchemaObjectIdentifier(fileFormatID.DatabaseName, fileFormatID.SchemaName, fileFormatID.FileFormatName) err = client.FileFormats.Drop(ctx, id, nil) if err != nil { - return fmt.Errorf("error while deleting file format: %w", err) + return diag.FromErr(fmt.Errorf("error while deleting file format: %w", err)) } d.SetId("") diff --git a/pkg/resources/grant_account_role.go b/pkg/resources/grant_account_role.go index 78e6b35f2c..ce02dc6f9c 100644 --- a/pkg/resources/grant_account_role.go +++ b/pkg/resources/grant_account_role.go @@ -6,6 +6,8 @@ import ( "log" "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -49,10 +51,10 @@ var grantAccountRoleSchema = map[string]*schema.Schema{ func GrantAccountRole() *schema.Resource { return &schema.Resource{ - Create: CreateGrantAccountRole, - Read: ReadGrantAccountRole, - Delete: DeleteGrantAccountRole, - Schema: grantAccountRoleSchema, + CreateContext: TrackingCreateWrapper(resources.GrantAccountRole, CreateGrantAccountRole), + ReadContext: TrackingReadWrapper(resources.GrantAccountRole, ReadGrantAccountRole), + DeleteContext: TrackingDeleteWrapper(resources.GrantAccountRole, DeleteGrantAccountRole), + Schema: grantAccountRoleSchema, Importer: &schema.ResourceImporter{ StateContext: TrackingImportWrapper(resources.GrantAccountRole, func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { parts := strings.Split(d.Id(), helpers.IDDelimiter) @@ -82,9 +84,8 @@ 
func GrantAccountRole() *schema.Resource { } // CreateGrantAccountRole implements schema.CreateFunc. -func CreateGrantAccountRole(d *schema.ResourceData, meta interface{}) error { +func CreateGrantAccountRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() roleName := d.Get("role_name").(string) roleIdentifier := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(roleName) // format of snowflakeResourceID is || @@ -96,7 +97,7 @@ func CreateGrantAccountRole(d *schema.ResourceData, meta interface{}) error { Role: &parentRoleIdentifier, }) if err := client.Roles.Grant(ctx, req); err != nil { - return err + return diag.FromErr(err) } } else if userName, ok := d.GetOk("user_name"); ok && userName.(string) != "" { userIdentifier := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(userName.(string)) @@ -105,26 +106,25 @@ func CreateGrantAccountRole(d *schema.ResourceData, meta interface{}) error { User: &userIdentifier, }) if err := client.Roles.Grant(ctx, req); err != nil { - return err + return diag.FromErr(err) } } else { - return fmt.Errorf("invalid role grant specified: %v", d) + return diag.FromErr(fmt.Errorf("invalid role grant specified: %v", d)) } d.SetId(snowflakeResourceID) - return ReadGrantAccountRole(d, meta) + return ReadGrantAccountRole(ctx, d, meta) } -func ReadGrantAccountRole(d *schema.ResourceData, meta interface{}) error { +func ReadGrantAccountRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client parts := strings.Split(d.Id(), helpers.IDDelimiter) if len(parts) != 3 { - return fmt.Errorf("invalid ID specified: %v, expected ||", d.Id()) + return diag.FromErr(fmt.Errorf("invalid ID specified: %v, expected ||", d.Id())) } roleName := parts[0] roleIdentifier := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(roleName) objectType := parts[1] targetIdentifier := 
parts[2] - ctx := context.Background() grants, err := client.Grants.Show(ctx, &sdk.ShowGrantOptions{ Of: &sdk.ShowGrantsOf{ Role: roleIdentifier, @@ -153,28 +153,27 @@ func ReadGrantAccountRole(d *schema.ResourceData, meta interface{}) error { return nil } -func DeleteGrantAccountRole(d *schema.ResourceData, meta interface{}) error { +func DeleteGrantAccountRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client parts := strings.Split(d.Id(), helpers.IDDelimiter) if len(parts) != 3 { - return fmt.Errorf("invalid ID specified: %v, expected ||", d.Id()) + return diag.FromErr(fmt.Errorf("invalid ID specified: %v, expected ||", d.Id())) } id := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(parts[0]) objectType := parts[1] granteeName := parts[2] - ctx := context.Background() granteeIdentifier := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(granteeName) switch objectType { case "ROLE": if err := client.Roles.Revoke(ctx, sdk.NewRevokeRoleRequest(id, sdk.RevokeRole{Role: &granteeIdentifier})); err != nil { - return err + return diag.FromErr(err) } case "USER": if err := client.Roles.Revoke(ctx, sdk.NewRevokeRoleRequest(id, sdk.RevokeRole{User: &granteeIdentifier})); err != nil { - return err + return diag.FromErr(err) } default: - return fmt.Errorf("invalid object type specified: %v, expected ROLE or USER", objectType) + return diag.FromErr(fmt.Errorf("invalid object type specified: %v, expected ROLE or USER", objectType)) } d.SetId("") return nil diff --git a/pkg/resources/grant_database_role.go b/pkg/resources/grant_database_role.go index 1c845200af..e67946fc5d 100644 --- a/pkg/resources/grant_database_role.go +++ b/pkg/resources/grant_database_role.go @@ -5,6 +5,8 @@ import ( "fmt" "log" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -66,10 +68,10 @@ var grantDatabaseRoleSchema = map[string]*schema.Schema{ func GrantDatabaseRole() *schema.Resource { return &schema.Resource{ - Create: CreateGrantDatabaseRole, - Read: ReadGrantDatabaseRole, - Delete: DeleteGrantDatabaseRole, - Schema: grantDatabaseRoleSchema, + CreateContext: TrackingCreateWrapper(resources.GrantDatabaseRole, CreateGrantDatabaseRole), + ReadContext: TrackingReadWrapper(resources.GrantDatabaseRole, ReadGrantDatabaseRole), + DeleteContext: TrackingDeleteWrapper(resources.GrantDatabaseRole, DeleteGrantDatabaseRole), + Schema: grantDatabaseRoleSchema, Importer: &schema.ResourceImporter{ StateContext: TrackingImportWrapper(resources.GrantDatabaseRole, func(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { parts := helpers.ParseResourceIdentifier(d.Id()) @@ -121,63 +123,61 @@ func GrantDatabaseRole() *schema.Resource { } // CreateGrantDatabaseRole implements schema.CreateFunc. 
-func CreateGrantDatabaseRole(d *schema.ResourceData, meta interface{}) error { +func CreateGrantDatabaseRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseRoleName := d.Get("database_role_name").(string) databaseRoleIdentifier, err := sdk.ParseDatabaseObjectIdentifier(databaseRoleName) if err != nil { - return err + return diag.FromErr(err) } // format of snowflakeResourceID is || var snowflakeResourceID string if parentRoleName, ok := d.GetOk("parent_role_name"); ok && parentRoleName.(string) != "" { parentRoleIdentifier, err := sdk.ParseAccountObjectIdentifier(parentRoleName.(string)) if err != nil { - return err + return diag.FromErr(err) } snowflakeResourceID = helpers.EncodeResourceIdentifier(databaseRoleIdentifier.FullyQualifiedName(), sdk.ObjectTypeRole.String(), parentRoleIdentifier.FullyQualifiedName()) req := sdk.NewGrantDatabaseRoleRequest(databaseRoleIdentifier).WithAccountRole(parentRoleIdentifier) if err := client.DatabaseRoles.Grant(ctx, req); err != nil { - return err + return diag.FromErr(err) } } else if parentDatabaseRoleName, ok := d.GetOk("parent_database_role_name"); ok && parentDatabaseRoleName.(string) != "" { parentRoleIdentifier, err := sdk.ParseDatabaseObjectIdentifier(parentDatabaseRoleName.(string)) if err != nil { - return err + return diag.FromErr(err) } snowflakeResourceID = helpers.EncodeResourceIdentifier(databaseRoleIdentifier.FullyQualifiedName(), sdk.ObjectTypeDatabaseRole.String(), parentRoleIdentifier.FullyQualifiedName()) req := sdk.NewGrantDatabaseRoleRequest(databaseRoleIdentifier).WithDatabaseRole(parentRoleIdentifier) if err := client.DatabaseRoles.Grant(ctx, req); err != nil { - return err + return diag.FromErr(err) } } else if shareName, ok := d.GetOk("share_name"); ok && shareName.(string) != "" { shareIdentifier, err := sdk.ParseAccountObjectIdentifier(shareName.(string)) if err != nil { - return err + 
return diag.FromErr(err) } snowflakeResourceID = helpers.EncodeResourceIdentifier(databaseRoleIdentifier.FullyQualifiedName(), sdk.ObjectTypeShare.String(), shareIdentifier.FullyQualifiedName()) req := sdk.NewGrantDatabaseRoleToShareRequest(databaseRoleIdentifier, shareIdentifier) if err := client.DatabaseRoles.GrantToShare(ctx, req); err != nil { - return err + return diag.FromErr(err) } } d.SetId(snowflakeResourceID) - return ReadGrantDatabaseRole(d, meta) + return ReadGrantDatabaseRole(ctx, d, meta) } // ReadGrantDatabaseRole implements schema.ReadFunc. -func ReadGrantDatabaseRole(d *schema.ResourceData, meta interface{}) error { +func ReadGrantDatabaseRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client parts := helpers.ParseResourceIdentifier(d.Id()) databaseRoleName := parts[0] databaseRoleIdentifier, err := sdk.ParseDatabaseObjectIdentifier(databaseRoleName) if err != nil { - return err + return diag.FromErr(err) } objectType := parts[1] targetIdentifier := parts[2] - ctx := context.Background() grants, err := client.Grants.Show(ctx, &sdk.ShowGrantOptions{ Of: &sdk.ShowGrantsOf{ DatabaseRole: databaseRoleIdentifier, @@ -206,41 +206,40 @@ func ReadGrantDatabaseRole(d *schema.ResourceData, meta interface{}) error { } // DeleteGrantDatabaseRole implements schema.DeleteFunc. 
-func DeleteGrantDatabaseRole(d *schema.ResourceData, meta interface{}) error { +func DeleteGrantDatabaseRole(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { client := meta.(*provider.Context).Client parts := helpers.ParseResourceIdentifier(d.Id()) id, err := sdk.ParseDatabaseObjectIdentifier(parts[0]) if err != nil { - return err + return diag.FromErr(err) } objectType := parts[1] granteeName := parts[2] - ctx := context.Background() switch objectType { case "ROLE": accountRoleId, err := sdk.ParseAccountObjectIdentifier(granteeName) if err != nil { - return err + return diag.FromErr(err) } if err := client.DatabaseRoles.Revoke(ctx, sdk.NewRevokeDatabaseRoleRequest(id).WithAccountRole(accountRoleId)); err != nil { - return err + return diag.FromErr(err) } case "DATABASE ROLE": databaseRoleId, err := sdk.ParseDatabaseObjectIdentifier(granteeName) if err != nil { - return err + return diag.FromErr(err) } if err := client.DatabaseRoles.Revoke(ctx, sdk.NewRevokeDatabaseRoleRequest(id).WithDatabaseRole(databaseRoleId)); err != nil { - return err + return diag.FromErr(err) } case "SHARE": sharedId, err := sdk.ParseAccountObjectIdentifier(granteeName) if err != nil { - return err + return diag.FromErr(err) } if err := client.DatabaseRoles.RevokeFromShare(ctx, sdk.NewRevokeDatabaseRoleFromShareRequest(id, sharedId)); err != nil { - return err + return diag.FromErr(err) } } d.SetId("") diff --git a/pkg/resources/managed_account.go b/pkg/resources/managed_account.go index 9a01a6cf67..510d7af633 100644 --- a/pkg/resources/managed_account.go +++ b/pkg/resources/managed_account.go @@ -6,6 +6,9 @@ import ( "log" "time" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/util" @@ -84,9 +87,9 @@ var managedAccountSchema = map[string]*schema.Schema{ // ManagedAccount returns a pointer to the resource representing a managed account. func ManagedAccount() *schema.Resource { return &schema.Resource{ - Create: CreateManagedAccount, - Read: ReadManagedAccount, - Delete: DeleteManagedAccount, + CreateContext: TrackingCreateWrapper(resources.ManagedAccount, CreateManagedAccount), + ReadContext: TrackingReadWrapper(resources.ManagedAccount, ReadManagedAccount), + DeleteContext: TrackingDeleteWrapper(resources.ManagedAccount, DeleteManagedAccount), Schema: managedAccountSchema, Importer: &schema.ResourceImporter{ @@ -96,9 +99,8 @@ func ManagedAccount() *schema.Resource { } // CreateManagedAccount implements schema.CreateFunc. -func CreateManagedAccount(d *schema.ResourceData, meta interface{}) error { +func CreateManagedAccount(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -115,19 +117,18 @@ func CreateManagedAccount(d *schema.ResourceData, meta interface{}) error { err := client.ManagedAccounts.Create(ctx, createRequest) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadManagedAccount(d, meta) + return ReadManagedAccount(ctx, d, meta) } // ReadManagedAccount implements schema.ReadFunc. -func ReadManagedAccount(d *schema.ResourceData, meta interface{}) error { +func ReadManagedAccount(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) // We have to wait during the first read, since the locator takes some time to appear. 
@@ -144,60 +145,59 @@ func ReadManagedAccount(d *schema.ResourceData, meta interface{}) error { return nil, true }) if err != nil { - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", managedAccount.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("cloud", managedAccount.Cloud); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("region", managedAccount.Region); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("locator", managedAccount.Locator); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", managedAccount.CreatedOn); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("url", managedAccount.URL); err != nil { - return err + return diag.FromErr(err) } if managedAccount.IsReader { if err := d.Set("type", "READER"); err != nil { - return err + return diag.FromErr(err) } } else { - return fmt.Errorf("unable to determine the account type") + return diag.FromErr(fmt.Errorf("unable to determine the account type")) } if err := d.Set("comment", managedAccount.Comment); err != nil { - return err + return diag.FromErr(err) } return nil } // DeleteManagedAccount implements schema.DeleteFunc. 
-func DeleteManagedAccount(d *schema.ResourceData, meta interface{}) error { +func DeleteManagedAccount(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) err := client.ManagedAccounts.Drop(ctx, sdk.NewDropManagedAccountRequest(objectIdentifier)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/materialized_view.go b/pkg/resources/materialized_view.go index 5151daf88a..2dacd668e6 100644 --- a/pkg/resources/materialized_view.go +++ b/pkg/resources/materialized_view.go @@ -6,6 +6,8 @@ import ( "log" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -13,7 +15,6 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -72,10 +73,10 @@ var materializedViewSchema = map[string]*schema.Schema{ // MaterializedView returns a pointer to the resource representing a view. 
func MaterializedView() *schema.Resource { return &schema.Resource{ - Create: CreateMaterializedView, - Read: ReadMaterializedView, - Update: UpdateMaterializedView, - Delete: DeleteMaterializedView, + CreateContext: TrackingCreateWrapper(resources.MaterializedView, CreateMaterializedView), + ReadContext: TrackingReadWrapper(resources.MaterializedView, ReadMaterializedView), + UpdateContext: TrackingUpdateWrapper(resources.MaterializedView, UpdateMaterializedView), + DeleteContext: TrackingDeleteWrapper(resources.MaterializedView, DeleteMaterializedView), CustomizeDiff: TrackingCustomDiffWrapper(resources.MaterializedView, customdiff.All( ComputedIfAnyAttributeChanged(materializedViewSchema, FullyQualifiedNameAttributeName, "name"), @@ -89,9 +90,8 @@ func MaterializedView() *schema.Resource { } // CreateMaterializedView implements schema.CreateFunc. -func CreateMaterializedView(d *schema.ResourceData, meta interface{}) error { +func CreateMaterializedView(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -117,12 +117,12 @@ func CreateMaterializedView(d *schema.ResourceData, meta interface{}) error { // TODO [SNOW-1348355]: this was the old implementation, it's left for now, we will address this with resources rework discussions err := client.Sessions.UseWarehouse(ctx, sdk.NewAccountObjectIdentifier(warehouseName)) if err != nil { - return fmt.Errorf("error setting warehouse %s while creating materialized view %v err = %w", warehouseName, name, err) + return diag.FromErr(fmt.Errorf("error setting warehouse %s while creating materialized view %v err = %w", warehouseName, name, err)) } err = client.MaterializedViews.Create(ctx, createRequest) if err != nil { - return fmt.Errorf("error creating materialized view %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating 
materialized view %v err = %w", name, err)) } // TODO [SNOW-1348355]: we have to set tags after creation because existing materialized view extractor is not aware of TAG during CREATE @@ -130,19 +130,19 @@ func CreateMaterializedView(d *schema.ResourceData, meta interface{}) error { if _, ok := d.GetOk("tag"); ok { err := client.Views.Alter(ctx, sdk.NewAlterViewRequest(id).WithSetTags(getPropertyTags(d, "tag"))) if err != nil { - return fmt.Errorf("error setting tags on materialized view %v, err = %w", id, err) + return diag.FromErr(fmt.Errorf("error setting tags on materialized view %v, err = %w", id, err)) } } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadMaterializedView(d, meta) + return ReadMaterializedView(ctx, d, meta) } // ReadMaterializedView implements schema.ReadFunc. -func ReadMaterializedView(d *schema.ResourceData, meta interface{}) error { +func ReadMaterializedView(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) materializedView, err := client.MaterializedViews.ShowByID(ctx, id) @@ -152,46 +152,46 @@ func ReadMaterializedView(d *schema.ResourceData, meta interface{}) error { return nil } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", materializedView.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("is_secure", materializedView.IsSecure); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", materializedView.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", materializedView.SchemaName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", materializedView.DatabaseName); err != nil { - return err + return diag.FromErr(err) } // Want to only capture 
the SELECT part of the query because before that is the CREATE part of the view. extractor := snowflake.NewViewSelectStatementExtractor(materializedView.Text) substringOfQuery, err := extractor.ExtractMaterializedView() if err != nil { - return err + return diag.FromErr(err) } if err := d.Set("statement", substringOfQuery); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateMaterializedView implements schema.UpdateFunc. -func UpdateMaterializedView(d *schema.ResourceData, meta interface{}) error { +func UpdateMaterializedView(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) if d.HasChange("name") { @@ -199,7 +199,7 @@ func UpdateMaterializedView(d *schema.ResourceData, meta interface{}) error { err := client.MaterializedViews.Alter(ctx, sdk.NewAlterMaterializedViewRequest(id).WithRenameTo(&newId)) if err != nil { - return fmt.Errorf("error renaming materialized view %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error renaming materialized view %v err = %w", d.Id(), err)) } d.SetId(helpers.EncodeSnowflakeID(newId)) @@ -234,14 +234,14 @@ func UpdateMaterializedView(d *schema.ResourceData, meta interface{}) error { if runSetStatement { err := client.MaterializedViews.Alter(ctx, sdk.NewAlterMaterializedViewRequest(id).WithSet(setRequest)) if err != nil { - return fmt.Errorf("error updating materialized view: %w", err) + return diag.FromErr(fmt.Errorf("error updating materialized view: %w", err)) } } if runUnsetStatement { err := client.MaterializedViews.Alter(ctx, sdk.NewAlterMaterializedViewRequest(id).WithUnset(unsetRequest)) if err != nil { - return fmt.Errorf("error updating materialized view: %w", err) + return diag.FromErr(fmt.Errorf("error updating materialized view: %w", err)) } } @@ -252,7 +252,7 @@ func UpdateMaterializedView(d *schema.ResourceData, meta 
interface{}) error { // TODO [SNOW-1022645]: view is used on purpose here; change after we have an agreement on situations like this in the SDK err := client.Views.Alter(ctx, sdk.NewAlterViewRequest(id).WithUnsetTags(unsetTags)) if err != nil { - return fmt.Errorf("error unsetting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error unsetting tags on %v, err = %w", d.Id(), err)) } } @@ -260,23 +260,23 @@ func UpdateMaterializedView(d *schema.ResourceData, meta interface{}) error { // TODO [SNOW-1022645]: view is used on purpose here; change after we have an agreement on situations like this in the SDK err := client.Views.Alter(ctx, sdk.NewAlterViewRequest(id).WithSetTags(setTags)) if err != nil { - return fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err)) } } } - return ReadMaterializedView(d, meta) + return ReadMaterializedView(ctx, d, meta) } // DeleteMaterializedView implements schema.DeleteFunc. 
-func DeleteMaterializedView(d *schema.ResourceData, meta interface{}) error { +func DeleteMaterializedView(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.MaterializedViews.Drop(ctx, sdk.NewDropMaterializedViewRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/network_policy_attachment.go b/pkg/resources/network_policy_attachment.go index e1d64590b1..c68ab02f85 100644 --- a/pkg/resources/network_policy_attachment.go +++ b/pkg/resources/network_policy_attachment.go @@ -6,6 +6,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -36,10 +39,10 @@ var networkPolicyAttachmentSchema = map[string]*schema.Schema{ // NetworkPolicyAttachment returns a pointer to the resource representing a network policy attachment. 
func NetworkPolicyAttachment() *schema.Resource { return &schema.Resource{ - Create: CreateNetworkPolicyAttachment, - Read: ReadNetworkPolicyAttachment, - Update: UpdateNetworkPolicyAttachment, - Delete: DeleteNetworkPolicyAttachment, + CreateContext: TrackingCreateWrapper(resources.NetworkPolicyAttachment, CreateNetworkPolicyAttachment), + ReadContext: TrackingReadWrapper(resources.NetworkPolicyAttachment, ReadNetworkPolicyAttachment), + UpdateContext: TrackingUpdateWrapper(resources.NetworkPolicyAttachment, UpdateNetworkPolicyAttachment), + DeleteContext: TrackingDeleteWrapper(resources.NetworkPolicyAttachment, DeleteNetworkPolicyAttachment), Schema: networkPolicyAttachmentSchema, Importer: &schema.ResourceImporter{ @@ -49,40 +52,40 @@ func NetworkPolicyAttachment() *schema.Resource { } // CreateNetworkPolicyAttachment implements schema.CreateFunc. -func CreateNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func CreateNetworkPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { policyName := d.Get("network_policy_name").(string) d.SetId(policyName + "_attachment") if d.Get("set_for_account").(bool) { - if err := setOnAccount(d, meta); err != nil { - return fmt.Errorf("error creating attachment for network policy %v err = %w", policyName, err) + if err := setOnAccount(ctx, d, meta); err != nil { + return diag.FromErr(fmt.Errorf("error creating attachment for network policy %v err = %w", policyName, err)) } } if u, ok := d.GetOk("users"); ok { users := expandStringList(u.(*schema.Set).List()) - if err := ensureUserAlterPrivileges(users, meta); err != nil { - return err + if err := ensureUserAlterPrivileges(ctx, users, meta); err != nil { + return diag.FromErr(err) } - if err := setOnUsers(users, d, meta); err != nil { - return fmt.Errorf("error creating attachment for network policy %v err = %w", policyName, err) + if err := setOnUsers(ctx, users, d, meta); err != nil { + return 
diag.FromErr(fmt.Errorf("error creating attachment for network policy %v err = %w", policyName, err)) } } - return ReadNetworkPolicyAttachment(d, meta) + return ReadNetworkPolicyAttachment(ctx, d, meta) } // ReadNetworkPolicyAttachment implements schema.ReadFunc. -func ReadNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func ReadNetworkPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + policyName := strings.Replace(d.Id(), "_attachment", "", 1) var currentUsers []string if err := d.Set("network_policy_name", policyName); err != nil { - return err + return diag.FromErr(err) } if u, ok := d.GetOk("users"); ok { @@ -100,7 +103,7 @@ func ReadNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error } if err := d.Set("users", currentUsers); err != nil { - return err + return diag.FromErr(err) } } @@ -117,22 +120,22 @@ func ReadNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error } if err := d.Set("set_for_account", isSetOnAccount); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateNetworkPolicyAttachment implements schema.UpdateFunc. 
-func UpdateNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func UpdateNetworkPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { if d.HasChange("set_for_account") { oldAcctFlag, newAcctFlag := d.GetChange("set_for_account") if newAcctFlag.(bool) { - if err := setOnAccount(d, meta); err != nil { - return err + if err := setOnAccount(ctx, d, meta); err != nil { + return diag.FromErr(err) } } else if !newAcctFlag.(bool) && oldAcctFlag == true { - if err := unsetOnAccount(d, meta); err != nil { - return err + if err := unsetOnAccount(ctx, d, meta); err != nil { + return diag.FromErr(err) } } } @@ -145,50 +148,50 @@ func UpdateNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) err removedUsers := expandStringList(oldUsersSet.Difference(newUsersSet).List()) addedUsers := expandStringList(newUsersSet.Difference(oldUsersSet).List()) - if err := ensureUserAlterPrivileges(removedUsers, meta); err != nil { - return err + if err := ensureUserAlterPrivileges(ctx, removedUsers, meta); err != nil { + return diag.FromErr(err) } - if err := ensureUserAlterPrivileges(addedUsers, meta); err != nil { - return err + if err := ensureUserAlterPrivileges(ctx, addedUsers, meta); err != nil { + return diag.FromErr(err) } for _, user := range removedUsers { - if err := unsetOnUser(user, d, meta); err != nil { - return err + if err := unsetOnUser(ctx, user, d, meta); err != nil { + return diag.FromErr(err) } } for _, user := range addedUsers { - if err := setOnUser(user, d, meta); err != nil { - return err + if err := setOnUser(ctx, user, d, meta); err != nil { + return diag.FromErr(err) } } } - return ReadNetworkPolicyAttachment(d, meta) + return ReadNetworkPolicyAttachment(ctx, d, meta) } // DeleteNetworkPolicyAttachment implements schema.DeleteFunc. 
-func DeleteNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) error { +func DeleteNetworkPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { policyName := d.Get("network_policy_name").(string) d.SetId(policyName + "_attachment") if d.Get("set_for_account").(bool) { - if err := unsetOnAccount(d, meta); err != nil { - return fmt.Errorf("error deleting attachment for network policy %v err = %w", policyName, err) + if err := unsetOnAccount(ctx, d, meta); err != nil { + return diag.FromErr(fmt.Errorf("error deleting attachment for network policy %v err = %w", policyName, err)) } } if u, ok := d.GetOk("users"); ok { users := expandStringList(u.(*schema.Set).List()) - if err := ensureUserAlterPrivileges(users, meta); err != nil { - return err + if err := ensureUserAlterPrivileges(ctx, users, meta); err != nil { + return diag.FromErr(err) } - if err := unsetOnUsers(users, d, meta); err != nil { - return fmt.Errorf("error deleting attachment for network policy %v err = %w", policyName, err) + if err := unsetOnUsers(ctx, users, d, meta); err != nil { + return diag.FromErr(fmt.Errorf("error deleting attachment for network policy %v err = %w", policyName, err)) } } @@ -197,9 +200,9 @@ func DeleteNetworkPolicyAttachment(d *schema.ResourceData, meta interface{}) err // setOnAccount sets the network policy globally for the Snowflake account // Note: the ip address of the session executing this SQL must be allowed by the network policy being set. 
-func setOnAccount(d *schema.ResourceData, meta interface{}) error { +func setOnAccount(ctx context.Context, d *schema.ResourceData, meta any) error { client := meta.(*provider.Context).Client - ctx := context.Background() + policyName := d.Get("network_policy_name").(string) err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{Set: &sdk.AccountSet{Parameters: &sdk.AccountLevelParameters{ObjectParameters: &sdk.ObjectParameters{NetworkPolicy: sdk.String(policyName)}}}}) @@ -211,9 +214,9 @@ func setOnAccount(d *schema.ResourceData, meta interface{}) error { } // setOnAccount unsets the network policy globally for the Snowflake account. -func unsetOnAccount(d *schema.ResourceData, meta interface{}) error { +func unsetOnAccount(ctx context.Context, d *schema.ResourceData, meta any) error { client := meta.(*provider.Context).Client - ctx := context.Background() + policyName := d.Get("network_policy_name").(string) err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{Unset: &sdk.AccountUnset{Parameters: &sdk.AccountLevelParametersUnset{ObjectParameters: &sdk.ObjectParametersUnset{NetworkPolicy: sdk.Bool(true)}}}}) @@ -225,10 +228,10 @@ func unsetOnAccount(d *schema.ResourceData, meta interface{}) error { } // setOnUsers sets the network policy for list of users. -func setOnUsers(users []string, data *schema.ResourceData, meta interface{}) error { +func setOnUsers(ctx context.Context, users []string, data *schema.ResourceData, meta interface{}) error { policyName := data.Get("network_policy_name").(string) for _, user := range users { - if err := setOnUser(user, data, meta); err != nil { + if err := setOnUser(ctx, user, data, meta); err != nil { return fmt.Errorf("error setting network policy %v on user %v err = %w", policyName, user, err) } } @@ -237,9 +240,9 @@ func setOnUsers(users []string, data *schema.ResourceData, meta interface{}) err } // setOnUser sets the network policy for a given user. 
-func setOnUser(user string, data *schema.ResourceData, meta interface{}) error { +func setOnUser(ctx context.Context, user string, data *schema.ResourceData, meta interface{}) error { client := meta.(*provider.Context).Client - ctx := context.Background() + policyName := data.Get("network_policy_name").(string) err := client.Users.Alter(ctx, sdk.NewAccountObjectIdentifier(user), &sdk.AlterUserOptions{Set: &sdk.UserSet{ObjectParameters: &sdk.UserObjectParameters{NetworkPolicy: sdk.Pointer(sdk.NewAccountObjectIdentifier(policyName))}}}) @@ -251,10 +254,10 @@ func setOnUser(user string, data *schema.ResourceData, meta interface{}) error { } // unsetOnUsers unsets the network policy for list of users. -func unsetOnUsers(users []string, data *schema.ResourceData, meta interface{}) error { +func unsetOnUsers(ctx context.Context, users []string, data *schema.ResourceData, meta interface{}) error { policyName := data.Get("network_policy_name").(string) for _, user := range users { - if err := unsetOnUser(user, data, meta); err != nil { + if err := unsetOnUser(ctx, user, data, meta); err != nil { return fmt.Errorf("error unsetting network policy %v on user %v err = %w", policyName, user, err) } } @@ -263,9 +266,9 @@ func unsetOnUsers(users []string, data *schema.ResourceData, meta interface{}) e } // unsetOnUser sets the network policy for a given user. 
-func unsetOnUser(user string, data *schema.ResourceData, meta interface{}) error { +func unsetOnUser(ctx context.Context, user string, data *schema.ResourceData, meta interface{}) error { client := meta.(*provider.Context).Client - ctx := context.Background() + policyName := data.Get("network_policy_name").(string) err := client.Users.Alter(ctx, sdk.NewAccountObjectIdentifier(user), &sdk.AlterUserOptions{Unset: &sdk.UserUnset{ObjectParameters: &sdk.UserObjectParametersUnset{NetworkPolicy: sdk.Bool(true)}}}) @@ -277,9 +280,8 @@ func unsetOnUser(user string, data *schema.ResourceData, meta interface{}) error } // ensureUserAlterPrivileges ensures the executing Snowflake user can alter each user in the set of users. -func ensureUserAlterPrivileges(users []string, meta interface{}) error { +func ensureUserAlterPrivileges(ctx context.Context, users []string, meta interface{}) error { client := meta.(*provider.Context).Client - ctx := context.Background() for _, user := range users { _, err := client.Users.Describe(ctx, sdk.NewAccountObjectIdentifier(user)) diff --git a/pkg/resources/notification_integration.go b/pkg/resources/notification_integration.go index a69e474f33..109a46e0ba 100644 --- a/pkg/resources/notification_integration.go +++ b/pkg/resources/notification_integration.go @@ -6,6 +6,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -151,10 +154,10 @@ var notificationIntegrationSchema = map[string]*schema.Schema{ // NotificationIntegration returns a pointer to the resource representing a notification integration. 
func NotificationIntegration() *schema.Resource { return &schema.Resource{ - Create: CreateNotificationIntegration, - Read: ReadNotificationIntegration, - Update: UpdateNotificationIntegration, - Delete: DeleteNotificationIntegration, + CreateContext: TrackingCreateWrapper(resources.NotificationIntegration, CreateNotificationIntegration), + ReadContext: TrackingReadWrapper(resources.NotificationIntegration, ReadNotificationIntegration), + UpdateContext: TrackingUpdateWrapper(resources.NotificationIntegration, UpdateNotificationIntegration), + DeleteContext: TrackingDeleteWrapper(resources.NotificationIntegration, DeleteNotificationIntegration), Schema: notificationIntegrationSchema, Importer: &schema.ResourceImporter{ @@ -164,9 +167,8 @@ func NotificationIntegration() *schema.Resource { } // CreateNotificationIntegration implements schema.CreateFunc. -func CreateNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func CreateNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) @@ -183,11 +185,11 @@ func CreateNotificationIntegration(d *schema.ResourceData, meta interface{}) err case "AWS_SNS": topic, ok := d.GetOk("aws_sns_topic_arn") if !ok { - return fmt.Errorf("if you use AWS_SNS provider you must specify an aws_sns_topic_arn") + return diag.FromErr(fmt.Errorf("if you use AWS_SNS provider you must specify an aws_sns_topic_arn")) } role, ok := d.GetOk("aws_sns_role_arn") if !ok { - return fmt.Errorf("if you use AWS_SNS provider you must specify an aws_sns_role_arn") + return diag.FromErr(fmt.Errorf("if you use AWS_SNS provider you must specify an aws_sns_role_arn")) } createRequest.WithPushNotificationParams( sdk.NewPushNotificationParamsRequest().WithAmazonPushParams(sdk.NewAmazonPushParamsRequest(topic.(string), role.(string))), @@ -206,63 +208,63 @@ 
func CreateNotificationIntegration(d *schema.ResourceData, meta interface{}) err case "AZURE_STORAGE_QUEUE": uri, ok := d.GetOk("azure_storage_queue_primary_uri") if !ok { - return fmt.Errorf("if you use AZURE_STORAGE_QUEUE provider you must specify an azure_storage_queue_primary_uri") + return diag.FromErr(fmt.Errorf("if you use AZURE_STORAGE_QUEUE provider you must specify an azure_storage_queue_primary_uri")) } tenantId, ok := d.GetOk("azure_tenant_id") if !ok { - return fmt.Errorf("if you use AZURE_STORAGE_QUEUE provider you must specify an azure_tenant_id") + return diag.FromErr(fmt.Errorf("if you use AZURE_STORAGE_QUEUE provider you must specify an azure_tenant_id")) } createRequest.WithAutomatedDataLoadsParams( sdk.NewAutomatedDataLoadsParamsRequest().WithAzureAutoParams(sdk.NewAzureAutoParamsRequest(uri.(string), tenantId.(string))), ) default: - return fmt.Errorf("unexpected provider %v", notificationProvider) + return diag.FromErr(fmt.Errorf("unexpected provider %v", notificationProvider)) } err := client.NotificationIntegrations.Create(ctx, createRequest) if err != nil { - return fmt.Errorf("error creating notification integration: %w", err) + return diag.FromErr(fmt.Errorf("error creating notification integration: %w", err)) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadNotificationIntegration(d, meta) + return ReadNotificationIntegration(ctx, d, meta) } // ReadNotificationIntegration implements schema.ReadFunc. 
-func ReadNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func ReadNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) integration, err := client.NotificationIntegrations.ShowByID(ctx, id) if err != nil { log.Printf("[DEBUG] notification integration (%s) not found", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } // Note: category must be NOTIFICATION or something is broken if c := integration.Category; c != "NOTIFICATION" { - return fmt.Errorf("expected %v to be a NOTIFICATION integration, got %v", id, c) + return diag.FromErr(fmt.Errorf("expected %v to be a NOTIFICATION integration, got %v", id, c)) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", integration.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", integration.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", integration.CreatedOn.String()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enabled", integration.Enabled); err != nil { - return err + return diag.FromErr(err) } // Snowflake returns "QUEUE - AZURE_STORAGE_QUEUE" instead of simple "QUEUE" as a type @@ -270,13 +272,13 @@ func ReadNotificationIntegration(d *schema.ResourceData, meta interface{}) error typeParts := strings.Split(integration.NotificationType, "-") parsedType := strings.TrimSpace(typeParts[0]) if err := d.Set("type", parsedType); err != nil { - return err + return diag.FromErr(err) } // Some properties come from the DESCRIBE INTEGRATION call integrationProperties, err := client.NotificationIntegrations.Describe(ctx, id) if err != nil { - return fmt.Errorf("could not describe notification 
integration: %w", err) + return diag.FromErr(fmt.Errorf("could not describe notification integration: %w", err)) } for _, property := range integrationProperties { name := property.Name @@ -286,72 +288,72 @@ func ReadNotificationIntegration(d *schema.ResourceData, meta interface{}) error // We set this using the SHOW INTEGRATION call so let's ignore it here case "DIRECTION": if err := d.Set("direction", value); err != nil { - return err + return diag.FromErr(err) } case "NOTIFICATION_PROVIDER": if err := d.Set("notification_provider", value); err != nil { - return err + return diag.FromErr(err) } case "AZURE_STORAGE_QUEUE_PRIMARY_URI": if err := d.Set("azure_storage_queue_primary_uri", value); err != nil { - return err + return diag.FromErr(err) } // NOTIFICATION_PROVIDER is not returned for azure automated data load, so we set it manually in such a case if err := d.Set("notification_provider", "AZURE_STORAGE_QUEUE"); err != nil { - return err + return diag.FromErr(err) } case "AZURE_TENANT_ID": if err := d.Set("azure_tenant_id", value); err != nil { - return err + return diag.FromErr(err) } case "AWS_SNS_TOPIC_ARN": if err := d.Set("aws_sns_topic_arn", value); err != nil { - return err + return diag.FromErr(err) } case "AWS_SNS_ROLE_ARN": if err := d.Set("aws_sns_role_arn", value); err != nil { - return err + return diag.FromErr(err) } case "SF_AWS_EXTERNAL_ID": if err := d.Set("aws_sns_external_id", value); err != nil { - return err + return diag.FromErr(err) } case "SF_AWS_IAM_USER_ARN": if err := d.Set("aws_sns_iam_user_arn", value); err != nil { - return err + return diag.FromErr(err) } case "GCP_PUBSUB_SUBSCRIPTION_NAME": if err := d.Set("gcp_pubsub_subscription_name", value); err != nil { - return err + return diag.FromErr(err) } // NOTIFICATION_PROVIDER is not returned for gcp, so we set it manually in such a case if err := d.Set("notification_provider", "GCP_PUBSUB"); err != nil { - return err + return diag.FromErr(err) } case "GCP_PUBSUB_TOPIC_NAME": if 
err := d.Set("gcp_pubsub_topic_name", value); err != nil { - return err + return diag.FromErr(err) } // NOTIFICATION_PROVIDER is not returned for gcp, so we set it manually in such a case if err := d.Set("notification_provider", "GCP_PUBSUB"); err != nil { - return err + return diag.FromErr(err) } case "GCP_PUBSUB_SERVICE_ACCOUNT": if err := d.Set("gcp_pubsub_service_account", value); err != nil { - return err + return diag.FromErr(err) } default: log.Printf("[WARN] unexpected property %v returned from Snowflake", name) } } - return err + return diag.FromErr(err) } // UpdateNotificationIntegration implements schema.UpdateFunc. -func UpdateNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func UpdateNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) var runSetStatement bool @@ -379,28 +381,28 @@ func UpdateNotificationIntegration(d *schema.ResourceData, meta interface{}) err case "AZURE_STORAGE_QUEUE": log.Printf("[WARN] all AZURE_STORAGE_QUEUE properties should recreate the resource") default: - return fmt.Errorf("unexpected provider %v", notificationProvider) + return diag.FromErr(fmt.Errorf("unexpected provider %v", notificationProvider)) } if runSetStatement { err := client.NotificationIntegrations.Alter(ctx, sdk.NewAlterNotificationIntegrationRequest(id).WithSet(setRequest)) if err != nil { - return fmt.Errorf("error updating notification integration: %w", err) + return diag.FromErr(fmt.Errorf("error updating notification integration: %w", err)) } } - return ReadNotificationIntegration(d, meta) + return ReadNotificationIntegration(ctx, d, meta) } // DeleteNotificationIntegration implements schema.DeleteFunc. 
-func DeleteNotificationIntegration(d *schema.ResourceData, meta interface{}) error { +func DeleteNotificationIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) err := client.NotificationIntegrations.Drop(ctx, sdk.NewDropNotificationIntegrationRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/oauth_integration.go b/pkg/resources/oauth_integration.go index 2d7e4152ae..22b812ed91 100644 --- a/pkg/resources/oauth_integration.go +++ b/pkg/resources/oauth_integration.go @@ -1,11 +1,15 @@ package resources import ( + "context" "fmt" "log" "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -86,10 +90,10 @@ var oauthIntegrationSchema = map[string]*schema.Schema{ // OAuthIntegration returns a pointer to the resource representing an OAuth integration. func OAuthIntegration() *schema.Resource { return &schema.Resource{ - Create: CreateOAuthIntegration, - Read: ReadOAuthIntegration, - Update: UpdateOAuthIntegration, - Delete: DeleteOAuthIntegration, + CreateContext: TrackingCreateWrapper(resources.OauthIntegration, CreateOAuthIntegration), + ReadContext: TrackingReadWrapper(resources.OauthIntegration, ReadOAuthIntegration), + UpdateContext: TrackingUpdateWrapper(resources.OauthIntegration, UpdateOAuthIntegration), + DeleteContext: TrackingDeleteWrapper(resources.OauthIntegration, DeleteOAuthIntegration), DeprecationMessage: "This resource is deprecated and will be removed in a future major version release. 
Please use snowflake_oauth_integration_for_custom_clients or snowflake_oauth_integration_for_partner_applications instead.", Schema: oauthIntegrationSchema, @@ -100,7 +104,7 @@ func OAuthIntegration() *schema.Resource { } // CreateOAuthIntegration implements schema.CreateFunc. -func CreateOAuthIntegration(d *schema.ResourceData, meta interface{}) error { +func CreateOAuthIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB name := d.Get("name").(string) @@ -137,16 +141,16 @@ func CreateOAuthIntegration(d *schema.ResourceData, meta interface{}) error { } if err := snowflake.Exec(db, stmt.Statement()); err != nil { - return fmt.Errorf("error creating security integration err = %w", err) + return diag.FromErr(fmt.Errorf("error creating security integration err = %w", err)) } d.SetId(name) - return ReadOAuthIntegration(d, meta) + return ReadOAuthIntegration(ctx, d, meta) } // ReadOAuthIntegration implements schema.ReadFunc. 
-func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { +func ReadOAuthIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB id := d.Id() @@ -158,32 +162,32 @@ func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { s, err := snowflake.ScanOAuthIntegration(row) if err != nil { - return fmt.Errorf("could not show security integration err = %w", err) + return diag.FromErr(fmt.Errorf("could not show security integration err = %w", err)) } // Note: category must be Security or something is broken if c := s.Category.String; c != "SECURITY" { - return fmt.Errorf("expected %v to be an Security integration, got %v err = %w", id, c, err) + return diag.FromErr(fmt.Errorf("expected %v to be an Security integration, got %v err = %w", id, c, err)) } if err := d.Set("oauth_client", strings.TrimPrefix(s.IntegrationType.String, "OAUTH - ")); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", s.Name.String); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enabled", s.Enabled.Bool); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", s.Comment.String); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", s.CreatedOn.String); err != nil { - return err + return diag.FromErr(err) } // Some properties come from the DESCRIBE INTEGRATION call @@ -193,12 +197,12 @@ func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { stmt = snowflake.NewOAuthIntegrationBuilder(id).Describe() rows, err := db.Query(stmt) if err != nil { - return fmt.Errorf("could not describe security integration err = %w", err) + return diag.FromErr(fmt.Errorf("could not describe security integration err = %w", err)) } defer rows.Close() for rows.Next() { if err := rows.Scan(&k, &pType, &v, &unused); err != nil { - return fmt.Errorf("unable to parse 
security integration rows err = %w", err) + return diag.FromErr(fmt.Errorf("unable to parse security integration rows err = %w", err)) } switch k { case "ENABLED": @@ -208,22 +212,22 @@ func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { case "OAUTH_ISSUE_REFRESH_TOKENS": b, err := strconv.ParseBool(v.(string)) if err != nil { - return fmt.Errorf("returned OAuth issue refresh tokens that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned OAuth issue refresh tokens that is not boolean err = %w", err)) } if err := d.Set("oauth_issue_refresh_tokens", b); err != nil { - return fmt.Errorf("unable to set OAuth issue refresh tokens for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set OAuth issue refresh tokens for security integration err = %w", err)) } case "OAUTH_REFRESH_TOKEN_VALIDITY": i, err := strconv.Atoi(v.(string)) if err != nil { - return fmt.Errorf("returned OAuth refresh token validity that is not integer err = %w", err) + return diag.FromErr(fmt.Errorf("returned OAuth refresh token validity that is not integer err = %w", err)) } if err := d.Set("oauth_refresh_token_validity", i); err != nil { - return fmt.Errorf("unable to set OAuth refresh token validity for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set OAuth refresh token validity for security integration err = %w", err)) } case "OAUTH_USE_SECONDARY_ROLES": if err := d.Set("oauth_use_secondary_roles", v.(string)); err != nil { - return fmt.Errorf("unable to set OAuth use secondary roles for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set OAuth use secondary roles for security integration err = %w", err)) } case "BLOCKED_ROLES_LIST": blockedRolesAll := strings.Split(v.(string), ",") @@ -238,18 +242,18 @@ func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("blocked_roles_list", blockedRolesCustom); err != nil { 
- return fmt.Errorf("unable to set blocked roles list for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set blocked roles list for security integration err = %w", err)) } case "OAUTH_REDIRECT_URI": if err := d.Set("oauth_redirect_uri", v.(string)); err != nil { - return fmt.Errorf("unable to set OAuth redirect URI for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set OAuth redirect URI for security integration err = %w", err)) } case "OAUTH_CLIENT_TYPE": isTableau := strings.HasSuffix(s.IntegrationType.String, "TABLEAU_DESKTOP") || strings.HasSuffix(s.IntegrationType.String, "TABLEAU_SERVER") if !isTableau { if err = d.Set("oauth_client_type", v.(string)); err != nil { - return fmt.Errorf("unable to set OAuth client type for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set OAuth client type for security integration err = %w", err)) } } case "OAUTH_ENFORCE_PKCE": @@ -270,11 +274,11 @@ func ReadOAuthIntegration(d *schema.ResourceData, meta interface{}) error { } } - return err + return diag.FromErr(err) } // UpdateOAuthIntegration implements schema.UpdateFunc. -func UpdateOAuthIntegration(d *schema.ResourceData, meta interface{}) error { +func UpdateOAuthIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB id := d.Id() @@ -330,14 +334,14 @@ func UpdateOAuthIntegration(d *schema.ResourceData, meta interface{}) error { if runSetStatement { if err := snowflake.Exec(db, stmt.Statement()); err != nil { - return fmt.Errorf("error updating security integration err = %w", err) + return diag.FromErr(fmt.Errorf("error updating security integration err = %w", err)) } } - return ReadOAuthIntegration(d, meta) + return ReadOAuthIntegration(ctx, d, meta) } // DeleteOAuthIntegration implements schema.DeleteFunc. 
-func DeleteOAuthIntegration(d *schema.ResourceData, meta interface{}) error { - return DeleteResource("", snowflake.NewOAuthIntegrationBuilder)(d, meta) +func DeleteOAuthIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + return diag.FromErr(DeleteResource("", snowflake.NewOAuthIntegrationBuilder)(d, meta)) } diff --git a/pkg/resources/oauth_integration_test.go b/pkg/resources/oauth_integration_test.go index c8e2d9a126..7a353dd8f2 100644 --- a/pkg/resources/oauth_integration_test.go +++ b/pkg/resources/oauth_integration_test.go @@ -1,6 +1,7 @@ package resources_test import ( + "context" "database/sql" "testing" @@ -37,10 +38,10 @@ func TestOAuthIntegrationCreate(t *testing.T) { ).WillReturnResult(sqlmock.NewResult(1, 1)) expectReadOAuthIntegration(mock) - err := resources.CreateOAuthIntegration(d, &internalprovider.Context{ + diags := resources.CreateOAuthIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } @@ -52,10 +53,10 @@ func TestOAuthIntegrationRead(t *testing.T) { WithMockDb(t, func(db *sql.DB, mock sqlmock.Sqlmock) { expectReadOAuthIntegration(mock) - err := resources.ReadOAuthIntegration(d, &internalprovider.Context{ + diags := resources.ReadOAuthIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } @@ -66,10 +67,10 @@ func TestOAuthIntegrationDelete(t *testing.T) { WithMockDb(t, func(db *sql.DB, mock sqlmock.Sqlmock) { mock.ExpectExec(`DROP SECURITY INTEGRATION "drop_it"`).WillReturnResult(sqlmock.NewResult(1, 1)) - err := resources.DeleteOAuthIntegration(d, &internalprovider.Context{ + diags := resources.DeleteOAuthIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } diff --git a/pkg/resources/object_parameter.go b/pkg/resources/object_parameter.go index 
ab5dbc0ecc..7d5b01ae08 100644 --- a/pkg/resources/object_parameter.go +++ b/pkg/resources/object_parameter.go @@ -5,6 +5,9 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -69,10 +72,10 @@ var objectParameterSchema = map[string]*schema.Schema{ func ObjectParameter() *schema.Resource { return &schema.Resource{ - Create: CreateObjectParameter, - Read: ReadObjectParameter, - Update: UpdateObjectParameter, - Delete: DeleteObjectParameter, + CreateContext: TrackingCreateWrapper(resources.ObjectParameter, CreateObjectParameter), + ReadContext: TrackingReadWrapper(resources.ObjectParameter, ReadObjectParameter), + UpdateContext: TrackingUpdateWrapper(resources.ObjectParameter, UpdateObjectParameter), + DeleteContext: TrackingDeleteWrapper(resources.ObjectParameter, DeleteObjectParameter), Schema: objectParameterSchema, Importer: &schema.ResourceImporter{ @@ -82,11 +85,11 @@ func ObjectParameter() *schema.Resource { } // CreateObjectParameter implements schema.CreateFunc. 
-func CreateObjectParameter(d *schema.ResourceData, meta interface{}) error { +func CreateObjectParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client key := d.Get("key").(string) value := d.Get("value").(string) - ctx := context.Background() + parameter := sdk.ObjectParameter(key) o := sdk.Object{} @@ -102,12 +105,12 @@ func CreateObjectParameter(d *schema.ResourceData, meta interface{}) error { if onAccount { err := client.Parameters.SetObjectParameterOnAccount(ctx, parameter, value) if err != nil { - return fmt.Errorf("error creating object parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error creating object parameter err = %w", err)) } } else { err := client.Parameters.SetObjectParameterOnObject(ctx, o, parameter, value) if err != nil { - return fmt.Errorf("error setting object parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error setting object parameter err = %w", err)) } } @@ -127,26 +130,26 @@ func CreateObjectParameter(d *schema.ResourceData, meta interface{}) error { p, err = client.Parameters.ShowObjectParameter(ctx, sdk.ObjectParameter(key), o) } if err != nil { - return fmt.Errorf("error reading object parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error reading object parameter err = %w", err)) } err = d.Set("value", p.Value) if err != nil { - return err + return diag.FromErr(err) } - return ReadObjectParameter(d, meta) + return ReadObjectParameter(ctx, d, meta) } // ReadObjectParameter implements schema.ReadFunc. 
-func ReadObjectParameter(d *schema.ResourceData, meta interface{}) error { +func ReadObjectParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := d.Id() parts := strings.Split(id, "|") if len(parts) != 3 { parts = strings.Split(id, "❄️") // for backwards compatibility } if len(parts) != 3 { - return fmt.Errorf("unexpected format of ID (%v), expected key|object_type|object_identifier", id) + return diag.FromErr(fmt.Errorf("unexpected format of ID (%v), expected key|object_type|object_identifier", id)) } key := parts[0] var p *sdk.Parameter @@ -163,35 +166,35 @@ func ReadObjectParameter(d *schema.ResourceData, meta interface{}) error { }) } if err != nil { - return fmt.Errorf("error reading object parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error reading object parameter err = %w", err)) } if err := d.Set("value", p.Value); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateObjectParameter implements schema.UpdateFunc. -func UpdateObjectParameter(d *schema.ResourceData, meta interface{}) error { - return CreateObjectParameter(d, meta) +func UpdateObjectParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + return CreateObjectParameter(ctx, d, meta) } // DeleteObjectParameter implements schema.DeleteFunc. 
-func DeleteObjectParameter(d *schema.ResourceData, meta interface{}) error { +func DeleteObjectParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + key := d.Get("key").(string) onAccount := d.Get("on_account").(bool) if onAccount { defaultParameter, err := client.Parameters.ShowAccountParameter(ctx, sdk.AccountParameter(key)) if err != nil { - return err + return diag.FromErr(err) } defaultValue := defaultParameter.Default err = client.Parameters.SetAccountParameter(ctx, sdk.AccountParameter(key), defaultValue) if err != nil { - return fmt.Errorf("error resetting account parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error resetting account parameter err = %w", err)) } } else { v := d.Get("object_identifier") @@ -205,12 +208,12 @@ func DeleteObjectParameter(d *schema.ResourceData, meta interface{}) error { objectParameter := sdk.ObjectParameter(key) defaultParameter, err := client.Parameters.ShowObjectParameter(ctx, objectParameter, o) if err != nil { - return err + return diag.FromErr(err) } defaultValue := defaultParameter.Default err = client.Parameters.SetObjectParameterOnObject(ctx, o, objectParameter, defaultValue) if err != nil { - return fmt.Errorf("error resetting object parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error resetting object parameter err = %w", err)) } } d.SetId("") diff --git a/pkg/resources/password_policy.go b/pkg/resources/password_policy.go index 0fba86bf06..da3cf85038 100644 --- a/pkg/resources/password_policy.go +++ b/pkg/resources/password_policy.go @@ -4,13 +4,14 @@ import ( "context" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -140,11 +141,11 @@ var passwordPolicySchema = map[string]*schema.Schema{ func PasswordPolicy() *schema.Resource { return &schema.Resource{ - Description: "A password policy specifies the requirements that must be met to create and reset a password to authenticate to Snowflake.", - Create: CreatePasswordPolicy, - Read: ReadPasswordPolicy, - Update: UpdatePasswordPolicy, - Delete: DeletePasswordPolicy, + Description: "A password policy specifies the requirements that must be met to create and reset a password to authenticate to Snowflake.", + CreateContext: TrackingCreateWrapper(resources.PasswordPolicy, CreatePasswordPolicy), + ReadContext: TrackingReadWrapper(resources.PasswordPolicy, ReadPasswordPolicy), + UpdateContext: TrackingUpdateWrapper(resources.PasswordPolicy, UpdatePasswordPolicy), + DeleteContext: TrackingDeleteWrapper(resources.PasswordPolicy, DeletePasswordPolicy), CustomizeDiff: TrackingCustomDiffWrapper(resources.PasswordPolicy, customdiff.All( ComputedIfAnyAttributeChanged(passwordPolicySchema, FullyQualifiedNameAttributeName, "name"), @@ -158,9 +159,9 @@ func PasswordPolicy() *schema.Resource { } // CreatePasswordPolicy implements schema.CreateFunc. 
-func CreatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { +func CreatePasswordPolicy(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + name := d.Get("name").(string) database := d.Get("database").(string) schema := d.Get("schema").(string) @@ -188,84 +189,83 @@ func CreatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { err := client.PasswordPolicies.Create(ctx, objectIdentifier, createOptions) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(objectIdentifier)) - return ReadPasswordPolicy(d, meta) + return ReadPasswordPolicy(ctx, d, meta) } // ReadPasswordPolicy implements schema.ReadFunc. -func ReadPasswordPolicy(d *schema.ResourceData, meta interface{}) error { +func ReadPasswordPolicy(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) passwordPolicy, err := client.PasswordPolicies.ShowByID(ctx, id) if err != nil { - return err + return diag.FromErr(err) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", passwordPolicy.DatabaseName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", passwordPolicy.SchemaName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", passwordPolicy.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", passwordPolicy.Comment); err != nil { - return err + return diag.FromErr(err) } passwordPolicyDetails, err := client.PasswordPolicies.Describe(ctx, id) if err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_length", passwordPolicyDetails.PasswordMinLength); err != 
nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "max_length", passwordPolicyDetails.PasswordMaxLength); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_upper_case_chars", passwordPolicyDetails.PasswordMinUpperCaseChars); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_lower_case_chars", passwordPolicyDetails.PasswordMinLowerCaseChars); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_numeric_chars", passwordPolicyDetails.PasswordMinNumericChars); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_special_chars", passwordPolicyDetails.PasswordMinSpecialChars); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "min_age_days", passwordPolicyDetails.PasswordMinAgeDays); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "max_age_days", passwordPolicyDetails.PasswordMaxAgeDays); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "max_retries", passwordPolicyDetails.PasswordMaxRetries); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "lockout_time_mins", passwordPolicyDetails.PasswordLockoutTimeMins); err != nil { - return err + return diag.FromErr(err) } if err := setFromIntProperty(d, "history", passwordPolicyDetails.PasswordHistory); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdatePasswordPolicy implements schema.UpdateFunc. 
-func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { +func UpdatePasswordPolicy(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) @@ -276,7 +276,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { NewName: &newId, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(newId)) @@ -291,7 +291,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } if d.HasChange("max_length") { @@ -302,7 +302,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } if d.HasChange("min_upper_case_chars") { @@ -313,7 +313,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } if d.HasChange("min_lower_case_chars") { @@ -324,7 +324,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -336,7 +336,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -348,7 +348,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if 
err != nil { - return err + return diag.FromErr(err) } } @@ -360,7 +360,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -372,7 +372,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -384,7 +384,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -396,7 +396,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -408,7 +408,7 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } @@ -425,21 +425,21 @@ func UpdatePasswordPolicy(d *schema.ResourceData, meta interface{}) error { } err := client.PasswordPolicies.Alter(ctx, objectIdentifier, alterOptions) if err != nil { - return err + return diag.FromErr(err) } } - return ReadPasswordPolicy(d, meta) + return ReadPasswordPolicy(ctx, d, meta) } // DeletePasswordPolicy implements schema.DeleteFunc. 
-func DeletePasswordPolicy(d *schema.ResourceData, meta interface{}) error { +func DeletePasswordPolicy(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.PasswordPolicies.Drop(ctx, objectIdentifier, nil) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/pkg/resources/pipe.go b/pkg/resources/pipe.go index 208d08e13a..c15140621f 100644 --- a/pkg/resources/pipe.go +++ b/pkg/resources/pipe.go @@ -6,6 +6,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -83,10 +86,10 @@ var pipeSchema = map[string]*schema.Schema{ func Pipe() *schema.Resource { return &schema.Resource{ - Create: CreatePipe, - Read: ReadPipe, - Update: UpdatePipe, - Delete: DeletePipe, + CreateContext: TrackingCreateWrapper(resources.Pipe, CreatePipe), + ReadContext: TrackingReadWrapper(resources.Pipe, ReadPipe), + UpdateContext: TrackingUpdateWrapper(resources.Pipe, UpdatePipe), + DeleteContext: TrackingDeleteWrapper(resources.Pipe, DeletePipe), Schema: pipeSchema, Importer: &schema.ResourceImporter{ @@ -105,14 +108,13 @@ func pipeCopyStatementDiffSuppress(_, o, n string, _ *schema.ResourceData) bool } // CreatePipe implements schema.CreateFunc. 
-func CreatePipe(d *schema.ResourceData, meta interface{}) error { +func CreatePipe(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) name := d.Get("name").(string) - ctx := context.Background() objectIdentifier := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) opts := &sdk.CreatePipeOptions{} @@ -142,20 +144,19 @@ func CreatePipe(d *schema.ResourceData, meta interface{}) error { err := client.Pipes.Create(ctx, objectIdentifier, copyStatement, opts) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(objectIdentifier)) - return ReadPipe(d, meta) + return ReadPipe(ctx, d, meta) } // ReadPipe implements schema.ReadFunc. -func ReadPipe(d *schema.ResourceData, meta interface{}) error { +func ReadPipe(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - ctx := context.Background() pipe, err := client.Pipes.ShowByID(ctx, id) if err != nil { // If not found, mark resource to be removed from state file during apply or refresh @@ -165,59 +166,58 @@ func ReadPipe(d *schema.ResourceData, meta interface{}) error { } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", pipe.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", pipe.DatabaseName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", pipe.SchemaName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("copy_statement", pipe.Definition); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("owner", pipe.Owner); err != nil { - return err + return diag.FromErr(err) } if err := 
d.Set("comment", pipe.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("notification_channel", pipe.NotificationChannel); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("auto_ingest", pipe.NotificationChannel != ""); err != nil { - return err + return diag.FromErr(err) } if strings.Contains(pipe.NotificationChannel, "arn:aws:sns:") { if err := d.Set("aws_sns_topic_arn", pipe.NotificationChannel); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set("error_integration", pipe.ErrorIntegration); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdatePipe implements schema.UpdateFunc. -func UpdatePipe(d *schema.ResourceData, meta interface{}) error { +func UpdatePipe(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) - ctx := context.Background() pipeSet := &sdk.PipeSet{} pipeUnset := &sdk.PipeUnset{} @@ -248,7 +248,7 @@ func UpdatePipe(d *schema.ResourceData, meta interface{}) error { options := &sdk.AlterPipeOptions{Set: pipeSet} err := client.Pipes.Alter(ctx, objectIdentifier, options) if err != nil { - return fmt.Errorf("error updating pipe %v: %w", objectIdentifier.Name(), err) + return diag.FromErr(fmt.Errorf("error updating pipe %v: %w", objectIdentifier.Name(), err)) } } @@ -256,22 +256,22 @@ func UpdatePipe(d *schema.ResourceData, meta interface{}) error { options := &sdk.AlterPipeOptions{Unset: pipeUnset} err := client.Pipes.Alter(ctx, objectIdentifier, options) if err != nil { - return fmt.Errorf("error updating pipe %v: %w", objectIdentifier.Name(), err) + return diag.FromErr(fmt.Errorf("error updating pipe %v: %w", objectIdentifier.Name(), err)) } } - return ReadPipe(d, meta) + return ReadPipe(ctx, d, meta) } // DeletePipe implements schema.DeleteFunc. 
-func DeletePipe(d *schema.ResourceData, meta interface{}) error { +func DeletePipe(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.Pipes.Drop(ctx, objectIdentifier, &sdk.DropPipeOptions{IfExists: sdk.Bool(true)}) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/saml_integration.go b/pkg/resources/saml_integration.go index 8062a9e40d..3cd6a6a18d 100644 --- a/pkg/resources/saml_integration.go +++ b/pkg/resources/saml_integration.go @@ -1,11 +1,15 @@ package resources import ( + "context" "fmt" "log" "strconv" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -141,10 +145,10 @@ var samlIntegrationSchema = map[string]*schema.Schema{ // SAMLIntegration returns a pointer to the resource representing a SAML2 security integration. func SAMLIntegration() *schema.Resource { return &schema.Resource{ - Create: CreateSAMLIntegration, - Read: ReadSAMLIntegration, - Update: UpdateSAMLIntegration, - Delete: DeleteSAMLIntegration, + CreateContext: TrackingCreateWrapper(resources.SamlSecurityIntegration, CreateSAMLIntegration), + ReadContext: TrackingReadWrapper(resources.SamlSecurityIntegration, ReadSAMLIntegration), + UpdateContext: TrackingUpdateWrapper(resources.SamlSecurityIntegration, UpdateSAMLIntegration), + DeleteContext: TrackingDeleteWrapper(resources.SamlSecurityIntegration, DeleteSAMLIntegration), DeprecationMessage: "This resource is deprecated and will be removed in a future major version release. 
Please use snowflake_saml2_integration instead.", Schema: samlIntegrationSchema, @@ -155,7 +159,7 @@ func SAMLIntegration() *schema.Resource { } // CreateSAMLIntegration implements schema.CreateFunc. -func CreateSAMLIntegration(d *schema.ResourceData, meta interface{}) error { +func CreateSAMLIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB name := d.Get("name").(string) @@ -212,16 +216,16 @@ func CreateSAMLIntegration(d *schema.ResourceData, meta interface{}) error { err := snowflake.Exec(db, stmt.Statement()) if err != nil { - return fmt.Errorf("error creating security integration err = %w", err) + return diag.FromErr(fmt.Errorf("error creating security integration err = %w", err)) } d.SetId(name) - return ReadSAMLIntegration(d, meta) + return ReadSAMLIntegration(ctx, d, meta) } // ReadSAMLIntegration implements schema.ReadFunc. -func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { +func ReadSAMLIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB id := d.Id() @@ -233,29 +237,29 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { s, err := snowflake.ScanSamlIntegration(row) if err != nil { - return fmt.Errorf("could not show security integration err = %w", err) + return diag.FromErr(fmt.Errorf("could not show security integration err = %w", err)) } // Note: category must be Security or something is broken if c := s.Category.String; c != "SECURITY" { - return fmt.Errorf("expected %v to be an Security integration, got %v", id, c) + return diag.FromErr(fmt.Errorf("expected %v to be an Security integration, got %v", id, c)) } // Note: type must be SAML2 or something is broken if c := s.IntegrationType.String; c != "SAML2" { - return fmt.Errorf("expected %v to be a SAML2 integration type, got %v", id, c) + return 
diag.FromErr(fmt.Errorf("expected %v to be a SAML2 integration type, got %v", id, c)) } if err := d.Set("name", s.Name.String); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", s.CreatedOn.String); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("enabled", s.Enabled.Bool); err != nil { - return err + return diag.FromErr(err) } // Some properties come from the DESCRIBE INTEGRATION call @@ -265,35 +269,35 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { stmt = snowflake.NewSamlIntegrationBuilder(id).Describe() rows, err := db.Query(stmt) if err != nil { - return fmt.Errorf("could not describe security integration err = %w", err) + return diag.FromErr(fmt.Errorf("could not describe security integration err = %w", err)) } defer rows.Close() for rows.Next() { if err := rows.Scan(&k, &pType, &v, &unused); err != nil { - return fmt.Errorf("unable to parse security integration rows err = %w", err) + return diag.FromErr(fmt.Errorf("unable to parse security integration rows err = %w", err)) } switch k { case "ENABLED": // set using the SHOW INTEGRATION, ignoring here case "SAML2_ISSUER": if err := d.Set("saml2_issuer", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_issuer for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_issuer for security integration err = %w", err)) } case "SAML2_SSO_URL": if err := d.Set("saml2_sso_url", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_sso_url for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_sso_url for security integration err = %w", err)) } case "SAML2_PROVIDER": if err := d.Set("saml2_provider", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_provider for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_provider for security integration err = %w", err)) } case 
"SAML2_X509_CERT": if err := d.Set("saml2_x509_cert", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_x509_cert for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_x509_cert for security integration err = %w", err)) } case "SAML2_SP_INITIATED_LOGIN_PAGE_LABEL": if err := d.Set("saml2_sp_initiated_login_page_label", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_sp_initiated_login_page_label for security integration") + return diag.FromErr(fmt.Errorf("unable to set saml2_sp_initiated_login_page_label for security integration")) } case "SAML2_ENABLE_SP_INITIATED": var b bool @@ -303,17 +307,17 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { case string: b, err = strconv.ParseBool(v.(string)) if err != nil { - return fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err)) } default: - return fmt.Errorf("returned saml2_force_authn that is not boolean") + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean")) } if err := d.Set("saml2_enable_sp_initiated", b); err != nil { - return fmt.Errorf("unable to set saml2_enable_sp_initiated for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_enable_sp_initiated for security integration err = %w", err)) } case "SAML2_SNOWFLAKE_X509_CERT": if err := d.Set("saml2_snowflake_x509_cert", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_snowflake_x509_cert for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_snowflake_x509_cert for security integration err = %w", err)) } case "SAML2_SIGN_REQUEST": var b bool @@ -323,21 +327,21 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { case string: b, err = strconv.ParseBool(v.(string)) if err != nil { - return 
fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err)) } default: - return fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err)) } if err := d.Set("saml2_sign_request", b); err != nil { - return fmt.Errorf("unable to set saml2_sign_request for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_sign_request for security integration err = %w", err)) } case "SAML2_REQUESTED_NAMEID_FORMAT": if err := d.Set("saml2_requested_nameid_format", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_requested_nameid_format for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_requested_nameid_format for security integration err = %w", err)) } case "SAML2_POST_LOGOUT_REDIRECT_URL": if err := d.Set("saml2_post_logout_redirect_url", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_post_logout_redirect_url for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_post_logout_redirect_url for security integration err = %w", err)) } case "SAML2_FORCE_AUTHN": var b bool @@ -347,33 +351,33 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { case string: b, err = strconv.ParseBool(v.(string)) if err != nil { - return fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err)) } default: - return fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err) + return diag.FromErr(fmt.Errorf("returned saml2_force_authn that is not boolean err = %w", err)) } if err := d.Set("saml2_force_authn", b); err != nil { - return fmt.Errorf("unable to set 
saml2_force_authn for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_force_authn for security integration err = %w", err)) } case "SAML2_SNOWFLAKE_ISSUER_URL": if err := d.Set("saml2_snowflake_issuer_url", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_snowflake_issuer_url for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_snowflake_issuer_url for security integration err = %w", err)) } case "SAML2_SNOWFLAKE_ACS_URL": if err := d.Set("saml2_snowflake_acs_url", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_snowflake_acs_url for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_snowflake_acs_url for security integration err = %w", err)) } case "SAML2_SNOWFLAKE_METADATA": if err := d.Set("saml2_snowflake_metadata", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_snowflake_metadata for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_snowflake_metadata for security integration err = %w", err)) } case "SAML2_DIGEST_METHODS_USED": if err := d.Set("saml2_digest_methods_used", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_digest_methods_used for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_digest_methods_used for security integration err = %w", err)) } case "SAML2_SIGNATURE_METHODS_USED": if err := d.Set("saml2_signature_methods_used", v.(string)); err != nil { - return fmt.Errorf("unable to set saml2_signature_methods_used for security integration err = %w", err) + return diag.FromErr(fmt.Errorf("unable to set saml2_signature_methods_used for security integration err = %w", err)) } case "COMMENT": // COMMENT cannot be set according to snowflake docs, so ignoring @@ -382,11 +386,11 @@ func ReadSAMLIntegration(d *schema.ResourceData, meta interface{}) error { } } - return err + 
return diag.FromErr(err) } // UpdateSAMLIntegration implements schema.UpdateFunc. -func UpdateSAMLIntegration(d *schema.ResourceData, meta interface{}) error { +func UpdateSAMLIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB id := d.Id() @@ -467,14 +471,14 @@ func UpdateSAMLIntegration(d *schema.ResourceData, meta interface{}) error { if runSetStatement { if err := snowflake.Exec(db, stmt.Statement()); err != nil { - return fmt.Errorf("error updating security integration err = %w", err) + return diag.FromErr(fmt.Errorf("error updating security integration err = %w", err)) } } - return ReadSAMLIntegration(d, meta) + return ReadSAMLIntegration(ctx, d, meta) } // DeleteSAMLIntegration implements schema.DeleteFunc. -func DeleteSAMLIntegration(d *schema.ResourceData, meta interface{}) error { - return DeleteResource("", snowflake.NewSamlIntegrationBuilder)(d, meta) +func DeleteSAMLIntegration(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + return diag.FromErr(DeleteResource("", snowflake.NewSamlIntegrationBuilder)(d, meta)) } diff --git a/pkg/resources/saml_integration_test.go b/pkg/resources/saml_integration_test.go index d326c12212..36327ec029 100644 --- a/pkg/resources/saml_integration_test.go +++ b/pkg/resources/saml_integration_test.go @@ -1,6 +1,7 @@ package resources_test import ( + "context" "database/sql" "testing" @@ -41,10 +42,10 @@ func TestSAMLIntegrationCreate(t *testing.T) { ).WillReturnResult(sqlmock.NewResult(1, 1)) expectReadSAMLIntegration(mock) - err := resources.CreateSAMLIntegration(d, &internalprovider.Context{ + diags := resources.CreateSAMLIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } @@ -56,10 +57,10 @@ func TestSAMLIntegrationRead(t *testing.T) { WithMockDb(t, func(db *sql.DB, mock sqlmock.Sqlmock) { 
expectReadSAMLIntegration(mock) - err := resources.ReadSAMLIntegration(d, &internalprovider.Context{ + diags := resources.ReadSAMLIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } @@ -70,10 +71,10 @@ func TestSAMLIntegrationDelete(t *testing.T) { WithMockDb(t, func(db *sql.DB, mock sqlmock.Sqlmock) { mock.ExpectExec(`DROP SECURITY INTEGRATION "drop_it"`).WillReturnResult(sqlmock.NewResult(1, 1)) - err := resources.DeleteSAMLIntegration(d, &internalprovider.Context{ + diags := resources.DeleteSAMLIntegration(context.Background(), d, &internalprovider.Context{ Client: sdk.NewClientFromDB(db), }) - r.NoError(err) + r.Empty(diags) }) } diff --git a/pkg/resources/sequence.go b/pkg/resources/sequence.go index 207b78d909..449dc56694 100644 --- a/pkg/resources/sequence.go +++ b/pkg/resources/sequence.go @@ -3,6 +3,9 @@ package resources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -64,10 +67,10 @@ var sequenceSchema = map[string]*schema.Schema{ func Sequence() *schema.Resource { return &schema.Resource{ - Create: CreateSequence, - Read: ReadSequence, - Delete: DeleteSequence, - Update: UpdateSequence, + CreateContext: TrackingCreateWrapper(resources.Sequence, CreateSequence), + ReadContext: TrackingReadWrapper(resources.Sequence, ReadSequence), + DeleteContext: TrackingDeleteWrapper(resources.Sequence, DeleteSequence), + UpdateContext: TrackingUpdateWrapper(resources.Sequence, UpdateSequence), Schema: sequenceSchema, Importer: &schema.ResourceImporter{ @@ -76,9 +79,9 @@ func Sequence() *schema.Resource { } } -func CreateSequence(d *schema.ResourceData, meta interface{}) error { +func CreateSequence(ctx context.Context, d 
*schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + database := d.Get("database").(string) schema := d.Get("schema").(string) name := d.Get("name").(string) @@ -97,72 +100,72 @@ func CreateSequence(d *schema.ResourceData, meta interface{}) error { } err := client.Sequences.Create(ctx, req) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeSnowflakeID(database, schema, name)) - return ReadSequence(d, meta) + return ReadSequence(ctx, d, meta) } -func ReadSequence(d *schema.ResourceData, meta interface{}) error { +func ReadSequence(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) seq, err := client.Sequences.ShowByID(ctx, id) if err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", seq.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", seq.SchemaName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", seq.DatabaseName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", seq.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("increment", seq.Interval); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("next_value", seq.NextValue); err != nil { - return err + return diag.FromErr(err) } if seq.Ordered { if err := d.Set("ordering", "ORDER"); err != nil { - return err + return diag.FromErr(err) } } else { if err := d.Set("ordering", "NOORDER"); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } return nil } -func UpdateSequence(d *schema.ResourceData, meta interface{}) error { +func 
UpdateSequence(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) if d.HasChange("comment") { req := sdk.NewAlterSequenceRequest(id) req.WithSet(sdk.NewSequenceSetRequest().WithComment(sdk.String(d.Get("comment").(string)))) if err := client.Sequences.Alter(ctx, req); err != nil { - return err + return diag.FromErr(err) } } @@ -170,7 +173,7 @@ func UpdateSequence(d *schema.ResourceData, meta interface{}) error { req := sdk.NewAlterSequenceRequest(id) req.WithSetIncrement(sdk.Int(d.Get("increment").(int))) if err := client.Sequences.Alter(ctx, req); err != nil { - return err + return diag.FromErr(err) } } @@ -178,20 +181,20 @@ func UpdateSequence(d *schema.ResourceData, meta interface{}) error { req := sdk.NewAlterSequenceRequest(id) req.WithSet(sdk.NewSequenceSetRequest().WithValuesBehavior(sdk.ValuesBehaviorPointer(sdk.ValuesBehavior(d.Get("ordering").(string))))) if err := client.Sequences.Alter(ctx, req); err != nil { - return err + return diag.FromErr(err) } } - return ReadSequence(d, meta) + return ReadSequence(ctx, d, meta) } -func DeleteSequence(d *schema.ResourceData, meta interface{}) error { +func DeleteSequence(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.Sequences.Drop(ctx, sdk.NewDropSequenceRequest(id).WithIfExists(sdk.Bool(true))) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") return nil diff --git a/pkg/resources/session_parameter.go b/pkg/resources/session_parameter.go index 9c4d13e09d..1efbc5a40e 100644 --- a/pkg/resources/session_parameter.go +++ b/pkg/resources/session_parameter.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -37,10 +40,10 @@ var sessionParameterSchema = map[string]*schema.Schema{ func SessionParameter() *schema.Resource { return &schema.Resource{ - Create: CreateSessionParameter, - Read: ReadSessionParameter, - Update: UpdateSessionParameter, - Delete: DeleteSessionParameter, + CreateContext: TrackingCreateWrapper(resources.SessionParameter, CreateSessionParameter), + ReadContext: TrackingReadWrapper(resources.SessionParameter, ReadSessionParameter), + UpdateContext: TrackingUpdateWrapper(resources.SessionParameter, UpdateSessionParameter), + DeleteContext: TrackingDeleteWrapper(resources.SessionParameter, DeleteSessionParameter), Schema: sessionParameterSchema, Importer: &schema.ResourceImporter{ @@ -50,11 +53,11 @@ func SessionParameter() *schema.Resource { } // CreateSessionParameter implements schema.CreateFunc. 
-func CreateSessionParameter(d *schema.ResourceData, meta interface{}) error { +func CreateSessionParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client key := d.Get("key").(string) value := d.Get("value").(string) - ctx := context.Background() + onAccount := d.Get("on_account").(bool) user := d.Get("user").(string) parameter := sdk.SessionParameter(key) @@ -63,28 +66,28 @@ func CreateSessionParameter(d *schema.ResourceData, meta interface{}) error { if onAccount { err := client.Parameters.SetSessionParameterOnAccount(ctx, parameter, value) if err != nil { - return err + return diag.FromErr(err) } } else { if user == "" { - return fmt.Errorf("user is required if on_account is false") + return diag.FromErr(fmt.Errorf("user is required if on_account is false")) } userId := sdk.NewAccountObjectIdentifier(user) err = client.Parameters.SetSessionParameterOnUser(ctx, userId, parameter, value) if err != nil { - return fmt.Errorf("error creating session parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error creating session parameter err = %w", err)) } } d.SetId(key) - return ReadSessionParameter(d, meta) + return ReadSessionParameter(ctx, d, meta) } // ReadSessionParameter implements schema.ReadFunc. 
-func ReadSessionParameter(d *schema.ResourceData, meta interface{}) error { +func ReadSessionParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + parameter := d.Id() onAccount := d.Get("on_account").(bool) @@ -98,25 +101,24 @@ func ReadSessionParameter(d *schema.ResourceData, meta interface{}) error { p, err = client.Parameters.ShowUserParameter(ctx, sdk.UserParameter(parameter), userId) } if err != nil { - return fmt.Errorf("error reading session parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error reading session parameter err = %w", err)) } err = d.Set("value", p.Value) if err != nil { - return fmt.Errorf("error setting session parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error setting session parameter err = %w", err)) } return nil } // UpdateSessionParameter implements schema.UpdateFunc. -func UpdateSessionParameter(d *schema.ResourceData, meta interface{}) error { - return CreateSessionParameter(d, meta) +func UpdateSessionParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + return CreateSessionParameter(ctx, d, meta) } // DeleteSessionParameter implements schema.DeleteFunc. 
-func DeleteSessionParameter(d *schema.ResourceData, meta interface{}) error { +func DeleteSessionParameter(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client key := d.Get("key").(string) - ctx := context.Background() onAccount := d.Get("on_account").(bool) parameter := sdk.SessionParameter(key) @@ -124,27 +126,27 @@ func DeleteSessionParameter(d *schema.ResourceData, meta interface{}) error { if onAccount { defaultParameter, err := client.Parameters.ShowAccountParameter(ctx, sdk.AccountParameter(key)) if err != nil { - return err + return diag.FromErr(err) } defaultValue := defaultParameter.Default err = client.Parameters.SetSessionParameterOnAccount(ctx, parameter, defaultValue) if err != nil { - return fmt.Errorf("error creating session parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error creating session parameter err = %w", err)) } } else { user := d.Get("user").(string) if user == "" { - return fmt.Errorf("user is required if on_account is false") + return diag.FromErr(fmt.Errorf("user is required if on_account is false")) } userId := sdk.NewAccountObjectIdentifier(user) defaultParameter, err := client.Parameters.ShowSessionParameter(ctx, sdk.SessionParameter(key)) if err != nil { - return err + return diag.FromErr(err) } defaultValue := defaultParameter.Default err = client.Parameters.SetSessionParameterOnUser(ctx, userId, parameter, defaultValue) if err != nil { - return fmt.Errorf("error deleting session parameter err = %w", err) + return diag.FromErr(fmt.Errorf("error deleting session parameter err = %w", err)) } } diff --git a/pkg/resources/share.go b/pkg/resources/share.go index 8c4d5ebe33..334a931f30 100644 --- a/pkg/resources/share.go +++ b/pkg/resources/share.go @@ -8,6 +8,7 @@ import ( "time" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -47,10 +48,10 @@ var shareSchema = map[string]*schema.Schema{ // Share returns a pointer to the resource representing a share. func Share() *schema.Resource { return &schema.Resource{ - Create: CreateShare, - Read: ReadShare, - Update: UpdateShare, - Delete: DeleteShare, + CreateContext: TrackingCreateWrapper(resources.Share, CreateShare), + ReadContext: TrackingReadWrapper(resources.Share, ReadShare), + UpdateContext: TrackingUpdateWrapper(resources.Share, UpdateShare), + DeleteContext: TrackingDeleteWrapper(resources.Share, DeleteShare), Schema: shareSchema, Importer: &schema.ResourceImporter{ @@ -60,10 +61,10 @@ func Share() *schema.Resource { } // CreateShare implements schema.CreateFunc. -func CreateShare(d *schema.ResourceData, meta interface{}) error { +func CreateShare(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client name := d.Get("name").(string) - ctx := context.Background() + comment := d.Get("comment").(string) id := sdk.NewAccountObjectIdentifier(name) var opts sdk.CreateShareOptions @@ -73,7 +74,7 @@ func CreateShare(d *schema.ResourceData, meta interface{}) error { } } if err := client.Shares.Create(ctx, id, &opts); err != nil { - return fmt.Errorf("error creating share (%v) err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error creating share (%v) err = %w", d.Id(), err)) } d.SetId(name) @@ -89,10 +90,10 @@ func CreateShare(d *schema.ResourceData, meta interface{}) error { } err := setShareAccounts(ctx, client, shareID, accountIdentifiers) if err != nil { - return err + return diag.FromErr(err) } } - return ReadShare(d, meta) + return ReadShare(ctx, d, meta) } func setShareAccounts(ctx context.Context, client *sdk.Client, shareID sdk.AccountObjectIdentifier, accounts []sdk.AccountIdentifier) error { @@ -158,20 +159,19 @@ func 
setShareAccounts(ctx context.Context, client *sdk.Client, shareID sdk.Accou } // ReadShare implements schema.ReadFunc. -func ReadShare(d *schema.ResourceData, meta interface{}) error { +func ReadShare(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) - ctx := context.Background() share, err := client.Shares.ShowByID(ctx, id) if err != nil { - return fmt.Errorf("error reading share (%v) err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error reading share (%v) err = %w", d.Id(), err)) } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", share.Comment); err != nil { - return err + return diag.FromErr(err) } accounts := make([]string, len(share.To)) for i, accountIdentifier := range share.To { @@ -186,10 +186,10 @@ func ReadShare(d *schema.ResourceData, meta interface{}) error { accounts = reorderStringList(currentAccounts, accounts) } if err := d.Set("accounts", accounts); err != nil { - return err + return diag.FromErr(err) } - return err + return diag.FromErr(err) } func accountIdentifiersFromSlice(accounts []string) []sdk.AccountIdentifier { @@ -204,10 +204,10 @@ func accountIdentifiersFromSlice(accounts []string) []sdk.AccountIdentifier { } // UpdateShare implements schema.UpdateFunc. 
-func UpdateShare(d *schema.ResourceData, meta interface{}) error { +func UpdateShare(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) client := meta.(*provider.Context).Client - ctx := context.Background() + if d.HasChange("accounts") { o, n := d.GetChange("accounts") oldAccounts := expandStringList(o.([]interface{})) @@ -220,13 +220,13 @@ func UpdateShare(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return fmt.Errorf("error removing accounts from share (%v) err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error removing accounts from share (%v) err = %w", d.Id(), err)) } } else { accountIdentifiers := accountIdentifiersFromSlice(newAccounts) err := setShareAccounts(ctx, client, id, accountIdentifiers) if err != nil { - return err + return diag.FromErr(err) } } } @@ -238,21 +238,21 @@ func UpdateShare(d *schema.ResourceData, meta interface{}) error { }, }) if err != nil { - return fmt.Errorf("error updating share (%v) comment err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error updating share (%v) comment err = %w", d.Id(), err)) } } - return ReadShare(d, meta) + return ReadShare(ctx, d, meta) } // DeleteShare implements schema.DeleteFunc. 
-func DeleteShare(d *schema.ResourceData, meta interface{}) error { +func DeleteShare(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) client := meta.(*provider.Context).Client - ctx := context.Background() + err := client.Shares.Drop(ctx, id, &sdk.DropShareOptions{IfExists: sdk.Bool(true)}) if err != nil { - return fmt.Errorf("error deleting share (%v) err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error deleting share (%v) err = %w", d.Id(), err)) } return nil } diff --git a/pkg/resources/stream.go b/pkg/resources/stream.go index 1957dfedc4..219161a107 100644 --- a/pkg/resources/stream.go +++ b/pkg/resources/stream.go @@ -7,6 +7,7 @@ import ( "strings" providerresources "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -100,10 +101,10 @@ var streamSchema = map[string]*schema.Schema{ func Stream() *schema.Resource { return &schema.Resource{ - Create: CreateStream, - Read: ReadStream, - Update: UpdateStream, - Delete: DeleteStream, + CreateContext: TrackingCreateWrapper(providerresources.Stream, CreateStream), + ReadContext: TrackingReadWrapper(providerresources.Stream, ReadStream), + UpdateContext: TrackingUpdateWrapper(providerresources.Stream, UpdateStream), + DeleteContext: TrackingDeleteWrapper(providerresources.Stream, DeleteStream), DeprecationMessage: deprecatedResourceDescription( string(providerresources.StreamOnDirectoryTable), string(providerresources.StreamOnExternalTable), @@ -119,7 +120,7 @@ func Stream() *schema.Resource { } // CreateStream implements schema.CreateFunc. 
-func CreateStream(d *schema.ResourceData, meta interface{}) error { +func CreateStream(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -129,8 +130,6 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { showInitialRows := d.Get("show_initial_rows").(bool) id := sdk.NewSchemaObjectIdentifier(databaseName, schemaName, name) - ctx := context.Background() - onTable, onTableSet := d.GetOk("on_table") onView, onViewSet := d.GetOk("on_view") onStage, onStageSet := d.GetOk("on_stage") @@ -139,13 +138,13 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { case onTableSet: tableObjectIdentifier, err := helpers.DecodeSnowflakeParameterID(onTable.(string)) if err != nil { - return err + return diag.FromErr(err) } tableId := tableObjectIdentifier.(sdk.SchemaObjectIdentifier) table, err := client.Tables.ShowByID(ctx, tableId) if err != nil { - return err + return diag.FromErr(err) } if table.IsExternal { @@ -158,7 +157,7 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { } err := client.Streams.CreateOnExternalTable(ctx, req) if err != nil { - return fmt.Errorf("error creating stream %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating stream %v err = %w", name, err)) } } else { req := sdk.NewCreateOnTableStreamRequest(id, tableId) @@ -173,19 +172,19 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { } err := client.Streams.CreateOnTable(ctx, req) if err != nil { - return fmt.Errorf("error creating stream %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating stream %v err = %w", name, err)) } } case onViewSet: viewObjectIdentifier, err := helpers.DecodeSnowflakeParameterID(onView.(string)) viewId := viewObjectIdentifier.(sdk.SchemaObjectIdentifier) if err != nil { - return err + return diag.FromErr(err) } _, err 
= client.Views.ShowByID(ctx, viewId) if err != nil { - return err + return diag.FromErr(err) } req := sdk.NewCreateOnViewStreamRequest(id, viewId) @@ -200,20 +199,20 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { } err = client.Streams.CreateOnView(ctx, req) if err != nil { - return fmt.Errorf("error creating stream %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating stream %v err = %w", name, err)) } case onStageSet: stageObjectIdentifier, err := helpers.DecodeSnowflakeParameterID(onStage.(string)) stageId := stageObjectIdentifier.(sdk.SchemaObjectIdentifier) if err != nil { - return err + return diag.FromErr(err) } stageProperties, err := client.Stages.Describe(ctx, stageId) if err != nil { - return err + return diag.FromErr(err) } if findStagePropertyValueByName(stageProperties, "ENABLE") != "true" { - return fmt.Errorf("directory must be enabled on stage") + return diag.FromErr(fmt.Errorf("directory must be enabled on stage")) } req := sdk.NewCreateOnDirectoryTableStreamRequest(id, stageId) if v, ok := d.GetOk("comment"); ok { @@ -221,19 +220,19 @@ func CreateStream(d *schema.ResourceData, meta interface{}) error { } err = client.Streams.CreateOnDirectoryTable(ctx, req) if err != nil { - return fmt.Errorf("error creating stream %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating stream %v err = %w", name, err)) } } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadStream(d, meta) + return ReadStream(ctx, d, meta) } // ReadStream implements schema.ReadFunc. 
-func ReadStream(d *schema.ResourceData, meta interface{}) error { +func ReadStream(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) stream, err := client.Streams.ShowByID(ctx, id) if err != nil { @@ -242,56 +241,56 @@ func ReadStream(d *schema.ResourceData, meta interface{}) error { return nil } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("name", stream.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("database", stream.DatabaseName); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("schema", stream.SchemaName); err != nil { - return err + return diag.FromErr(err) } switch *stream.SourceType { case sdk.StreamSourceTypeStage: if err := d.Set("on_stage", *stream.TableName); err != nil { - return err + return diag.FromErr(err) } case sdk.StreamSourceTypeView: if err := d.Set("on_view", *stream.TableName); err != nil { - return err + return diag.FromErr(err) } default: if err := d.Set("on_table", *stream.TableName); err != nil { - return err + return diag.FromErr(err) } } if err := d.Set("append_only", *stream.Mode == "APPEND_ONLY"); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("insert_only", *stream.Mode == "INSERT_ONLY"); err != nil { - return err + return diag.FromErr(err) } // TODO: SHOW STREAMS doesn't return that value right now (I'm not sure if it ever did), but probably we can assume // the customers got 'false' every time and hardcode it (it's only on create thing, so it's not necessary // to track its value after creation). 
if err := d.Set("show_initial_rows", false); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", *stream.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("owner", *stream.Owner); err != nil { - return err + return diag.FromErr(err) } return nil } // UpdateStream implements schema.UpdateFunc. -func UpdateStream(d *schema.ResourceData, meta interface{}) error { +func UpdateStream(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) if d.HasChange("comment") { @@ -299,28 +298,28 @@ func UpdateStream(d *schema.ResourceData, meta interface{}) error { if comment == "" { err := client.Streams.Alter(ctx, sdk.NewAlterStreamRequest(id).WithUnsetComment(true)) if err != nil { - return fmt.Errorf("error unsetting stream comment on %v", d.Id()) + return diag.FromErr(fmt.Errorf("error unsetting stream comment on %v", d.Id())) } } else { err := client.Streams.Alter(ctx, sdk.NewAlterStreamRequest(id).WithSetComment(comment)) if err != nil { - return fmt.Errorf("error setting stream comment on %v", d.Id()) + return diag.FromErr(fmt.Errorf("error setting stream comment on %v", d.Id())) } } } - return ReadStream(d, meta) + return ReadStream(ctx, d, meta) } // DeleteStream implements schema.DeleteFunc. 
-func DeleteStream(d *schema.ResourceData, meta interface{}) error { +func DeleteStream(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + streamId := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.Streams.Drop(ctx, sdk.NewDropStreamRequest(streamId)) if err != nil { - return fmt.Errorf("error deleting stream %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error deleting stream %v err = %w", d.Id(), err)) } d.SetId("") diff --git a/pkg/resources/table.go b/pkg/resources/table.go index ce39f90765..017ad799d8 100644 --- a/pkg/resources/table.go +++ b/pkg/resources/table.go @@ -8,6 +8,8 @@ import ( "strings" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -15,7 +17,6 @@ import ( "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) @@ -208,10 +209,10 @@ var tableSchema = map[string]*schema.Schema{ func Table() *schema.Resource { return &schema.Resource{ - Create: CreateTable, - Read: ReadTable, - Update: UpdateTable, - Delete: DeleteTable, + CreateContext: TrackingCreateWrapper(resources.Table, CreateTable), + ReadContext: TrackingReadWrapper(resources.Table, ReadTable), + UpdateContext: TrackingUpdateWrapper(resources.Table, UpdateTable), + DeleteContext: 
TrackingDeleteWrapper(resources.Table, DeleteTable), CustomizeDiff: TrackingCustomDiffWrapper(resources.Table, customdiff.All( ComputedIfAnyAttributeChanged(tableSchema, FullyQualifiedNameAttributeName, "name"), @@ -568,9 +569,8 @@ func toColumnIdentityConfig(td sdk.TableColumnDetails) map[string]any { } // CreateTable implements schema.CreateFunc. -func CreateTable(d *schema.ResourceData, meta interface{}) error { +func CreateTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -622,18 +622,18 @@ func CreateTable(d *schema.ResourceData, meta interface{}) error { err := client.Tables.Create(ctx, createRequest) if err != nil { - return fmt.Errorf("error creating table %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating table %v err = %w", name, err)) } d.SetId(helpers.EncodeSnowflakeID(id)) - return ReadTable(d, meta) + return ReadTable(ctx, d, meta) } // ReadTable implements schema.ReadFunc. 
-func ReadTable(d *schema.ResourceData, meta interface{}) error { +func ReadTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) table, err := client.Tables.ShowByID(ctx, id) @@ -650,7 +650,7 @@ func ReadTable(d *schema.ResourceData, meta interface{}) error { return nil } if err := d.Set(FullyQualifiedNameAttributeName, id.FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } var schemaRetentionTime int64 // "retention_time" may sometimes be empty string instead of an integer @@ -662,13 +662,13 @@ func ReadTable(d *schema.ResourceData, meta interface{}) error { schemaRetentionTime, err = strconv.ParseInt(rt, 10, 64) if err != nil { - return err + return diag.FromErr(err) } } tableDescription, err := client.Tables.DescribeColumns(ctx, sdk.NewDescribeTableColumnsRequest(id)) if err != nil { - return err + return diag.FromErr(err) } // Set the relevant data in the state @@ -688,16 +688,16 @@ func ReadTable(d *schema.ResourceData, meta interface{}) error { for key, val := range toSet { if err := d.Set(key, val); err != nil { // lintignore:R001 - return err + return diag.FromErr(err) } } return nil } // UpdateTable implements schema.UpdateFunc. 
-func UpdateTable(d *schema.ResourceData, meta interface{}) error { +func UpdateTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) if d.HasChange("name") { @@ -705,7 +705,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithNewName(&newId)) if err != nil { - return fmt.Errorf("error renaming table %v err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error renaming table %v err = %w", d.Id(), err)) } d.SetId(helpers.EncodeSnowflakeID(newId)) @@ -747,14 +747,14 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { if runSetStatement { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithSet(setRequest)) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } if runUnsetStatement { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithUnset(unsetRequest)) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } @@ -764,12 +764,12 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { if len(cb) != 0 { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithClusteringAction(sdk.NewTableClusteringActionRequest().WithClusterBy(cb))) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } else { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithClusteringAction(sdk.NewTableClusteringActionRequest().WithDropClusteringKey(sdk.Bool(true)))) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } } @@ -785,7 
+785,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { } err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithDropColumns(snowflake.QuoteStringList(removedColumnNames)))) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } @@ -795,7 +795,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { if cA._default != nil { if cA._default._type() != "constant" { - return fmt.Errorf("failed to add column %v => Only adding a column as a constant is supported by Snowflake", cA.name) + return diag.FromErr(fmt.Errorf("failed to add column %v => Only adding a column as a constant is supported by Snowflake", cA.name)) } var expression string if sdk.IsStringType(cA.dataType) { @@ -824,7 +824,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithAdd(addRequest))) if err != nil { - return fmt.Errorf("error adding column: %w", err) + return diag.FromErr(fmt.Errorf("error adding column: %w", err)) } } for _, cA := range changed { @@ -835,7 +835,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { } err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithAlter([]sdk.TableColumnAlterActionRequest{*sdk.NewTableColumnAlterActionRequest(fmt.Sprintf("\"%s\"", cA.newColumn.name)).WithType(sdk.Pointer(sdk.DataType(cA.newColumn.dataType))).WithCollate(newCollation)}))) if err != nil { - return fmt.Errorf("error changing property on %v: err %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error changing property on %v: err %w", d.Id(), err)) } } if cA.changedNullConstraint { @@ -847,13 +847,13 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { } err := client.Tables.Alter(ctx, 
sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithAlter([]sdk.TableColumnAlterActionRequest{*sdk.NewTableColumnAlterActionRequest(fmt.Sprintf("\"%s\"", cA.newColumn.name)).WithNotNullConstraint(nullabilityRequest)}))) if err != nil { - return fmt.Errorf("error changing property on %v: err %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error changing property on %v: err %w", d.Id(), err)) } } if cA.dropedDefault { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithAlter([]sdk.TableColumnAlterActionRequest{*sdk.NewTableColumnAlterActionRequest(fmt.Sprintf("\"%s\"", cA.newColumn.name)).WithDropDefault(sdk.Bool(true))}))) if err != nil { - return fmt.Errorf("error changing property on %v: err %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error changing property on %v: err %w", d.Id(), err)) } } if cA.changedComment { @@ -866,7 +866,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(sdk.NewTableColumnActionRequest().WithAlter([]sdk.TableColumnAlterActionRequest{*columnAlterActionRequest}))) if err != nil { - return fmt.Errorf("error changing property on %v: err %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error changing property on %v: err %w", d.Id(), err)) } } if cA.changedMaskingPolicy { @@ -878,7 +878,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { } err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithColumnAction(columnAction)) if err != nil { - return fmt.Errorf("error changing property on %v: err %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error changing property on %v: err %w", d.Id(), err)) } } } @@ -897,7 +897,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { WithDrop(sdk.NewTableConstraintDropActionRequest().WithPrimaryKey(sdk.Bool(true))), )) if err != nil { - return 
fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } @@ -910,7 +910,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { sdk.NewTableConstraintActionRequest().WithAdd(constraint), )) if err != nil { - return fmt.Errorf("error updating table: %w", err) + return diag.FromErr(fmt.Errorf("error updating table: %w", err)) } } } @@ -921,7 +921,7 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { if len(unsetTags) > 0 { err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithUnsetTags(unsetTags)) if err != nil { - return fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err)) } } @@ -932,23 +932,23 @@ func UpdateTable(d *schema.ResourceData, meta interface{}) error { } err := client.Tables.Alter(ctx, sdk.NewAlterTableRequest(id).WithSetTags(tagAssociationRequests)) if err != nil { - return fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("error setting tags on %v, err = %w", d.Id(), err)) } } } - return ReadTable(d, meta) + return ReadTable(ctx, d, meta) } // DeleteTable implements schema.DeleteFunc. 
-func DeleteTable(d *schema.ResourceData, meta interface{}) error { +func DeleteTable(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() + id := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) err := client.Tables.Drop(ctx, sdk.NewDropTableRequest(id)) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/table_column_masking_policy_application.go b/pkg/resources/table_column_masking_policy_application.go index 71b722f2ad..e48d4447ad 100644 --- a/pkg/resources/table_column_masking_policy_application.go +++ b/pkg/resources/table_column_masking_policy_application.go @@ -1,8 +1,12 @@ package resources import ( + "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -32,10 +36,10 @@ var tableColumnMaskingPolicyApplicationSchema = map[string]*schema.Schema{ func TableColumnMaskingPolicyApplication() *schema.Resource { return &schema.Resource{ - Description: "Applies a masking policy to a table column.", - Create: CreateTableColumnMaskingPolicyApplication, - Read: ReadTableColumnMaskingPolicyApplication, - Delete: DeleteTableColumnMaskingPolicyApplication, + Description: "Applies a masking policy to a table column.", + CreateContext: TrackingCreateWrapper(resources.TableColumnMaskingPolicyApplication, CreateTableColumnMaskingPolicyApplication), + ReadContext: TrackingReadWrapper(resources.TableColumnMaskingPolicyApplication, ReadTableColumnMaskingPolicyApplication), + DeleteContext: TrackingDeleteWrapper(resources.TableColumnMaskingPolicyApplication, DeleteTableColumnMaskingPolicyApplication), Schema: tableColumnMaskingPolicyApplicationSchema, Importer: 
&schema.ResourceImporter{ @@ -45,7 +49,7 @@ func TableColumnMaskingPolicyApplication() *schema.Resource { } // CreateTableColumnMaskingPolicyApplication implements schema.CreateFunc. -func CreateTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta interface{}) error { +func CreateTableColumnMaskingPolicyApplication(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { manager := snowflake.NewTableColumnMaskingPolicyApplicationManager() input := &snowflake.TableColumnMaskingPolicyApplicationCreateInput{ @@ -62,25 +66,25 @@ func CreateTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta inte db := client.GetConn().DB _, err := db.Exec(stmt) if err != nil { - return fmt.Errorf("error applying masking policy: %w", err) + return diag.FromErr(fmt.Errorf("error applying masking policy: %w", err)) } d.SetId(TableColumnMaskingPolicyApplicationID(&input.TableColumnMaskingPolicyApplication)) - return ReadTableColumnMaskingPolicyApplication(d, meta) + return ReadTableColumnMaskingPolicyApplication(ctx, d, meta) } // ReadTableColumnMaskingPolicyApplication implements schema.ReadFunc. 
-func ReadTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta interface{}) error { +func ReadTableColumnMaskingPolicyApplication(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { manager := snowflake.NewTableColumnMaskingPolicyApplicationManager() table, column := TableColumnMaskingPolicyApplicationIdentifier(d.Id()) if err := d.Set("table", table.QualifiedName()); err != nil { - return fmt.Errorf("error setting table: %w", err) + return diag.FromErr(fmt.Errorf("error setting table: %w", err)) } if err := d.Set("column", column); err != nil { - return fmt.Errorf("error setting column: %w", err) + return diag.FromErr(fmt.Errorf("error setting column: %w", err)) } input := &snowflake.TableColumnMaskingPolicyApplicationReadInput{ @@ -94,24 +98,24 @@ func ReadTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta interf db := client.GetConn().DB rows, err := db.Query(stmt) if err != nil { - return fmt.Errorf("error querying password policy: %w", err) + return diag.FromErr(fmt.Errorf("error querying password policy: %w", err)) } defer rows.Close() maskingPolicy, err := manager.Parse(rows, column) if err != nil { - return fmt.Errorf("failed to parse result of describe: %w", err) + return diag.FromErr(fmt.Errorf("failed to parse result of describe: %w", err)) } if err = d.Set("masking_policy", maskingPolicy); err != nil { - return fmt.Errorf("error setting masking_policy: %w", err) + return diag.FromErr(fmt.Errorf("error setting masking_policy: %w", err)) } return nil } // DeleteTableColumnMaskingPolicyApplication implements schema.DeleteFunc. 
-func DeleteTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta interface{}) error { +func DeleteTableColumnMaskingPolicyApplication(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { manager := snowflake.NewTableColumnMaskingPolicyApplicationManager() input := &snowflake.TableColumnMaskingPolicyApplicationDeleteInput{ @@ -127,7 +131,7 @@ func DeleteTableColumnMaskingPolicyApplication(d *schema.ResourceData, meta inte db := client.GetConn().DB _, err := db.Exec(stmt) if err != nil { - return fmt.Errorf("error executing drop statement: %w", err) + return diag.FromErr(fmt.Errorf("error executing drop statement: %w", err)) } return nil diff --git a/pkg/resources/table_constraint.go b/pkg/resources/table_constraint.go index 415322c9fd..92e2f6a3f7 100644 --- a/pkg/resources/table_constraint.go +++ b/pkg/resources/table_constraint.go @@ -5,6 +5,9 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -179,10 +182,10 @@ var tableConstraintSchema = map[string]*schema.Schema{ func TableConstraint() *schema.Resource { return &schema.Resource{ - Create: CreateTableConstraint, - Read: ReadTableConstraint, - Update: UpdateTableConstraint, - Delete: DeleteTableConstraint, + CreateContext: TrackingCreateWrapper(resources.TableConstraint, CreateTableConstraint), + ReadContext: TrackingReadWrapper(resources.TableConstraint, ReadTableConstraint), + UpdateContext: TrackingUpdateWrapper(resources.TableConstraint, UpdateTableConstraint), + DeleteContext: TrackingDeleteWrapper(resources.TableConstraint, DeleteTableConstraint), Schema: tableConstraintSchema, Importer: &schema.ResourceImporter{ @@ -229,9 +232,8 @@ func 
getTableIdentifier(s string) (*sdk.SchemaObjectIdentifier, error) { } // CreateTableConstraint implements schema.CreateFunc. -func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { +func CreateTableConstraint(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) cType := d.Get("type").(string) @@ -239,12 +241,12 @@ func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { tableIdentifier, err := getTableIdentifier(tableID) if err != nil { - return err + return diag.FromErr(err) } constraintType, err := sdk.ToColumnConstraintType(cType) if err != nil { - return err + return diag.FromErr(err) } constraintRequest := sdk.NewOutOfLineConstraintRequest(constraintType).WithName(&name) @@ -290,11 +292,11 @@ func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { fkTableID := references["table_id"].(string) fkId, err := helpers.DecodeSnowflakeParameterID(fkTableID) if err != nil { - return fmt.Errorf("table id is incorrect: %s, err: %w", fkTableID, err) + return diag.FromErr(fmt.Errorf("table id is incorrect: %s, err: %w", fkTableID, err)) } referencedTableIdentifier, ok := fkId.(sdk.SchemaObjectIdentifier) if !ok { - return fmt.Errorf("table id is incorrect: %s", fkId) + return diag.FromErr(fmt.Errorf("table id is incorrect: %s", fkId)) } cols := references["columns"].([]interface{}) @@ -306,17 +308,17 @@ func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { matchType, err := sdk.ToMatchType(foreignKeyProperties["match"].(string)) if err != nil { - return err + return diag.FromErr(err) } foreignKeyRequest.WithMatch(&matchType) onUpdate, err := sdk.ToForeignKeyAction(foreignKeyProperties["on_update"].(string)) if err != nil { - return err + return diag.FromErr(err) } onDelete, err := sdk.ToForeignKeyAction(foreignKeyProperties["on_delete"].(string)) if err != nil { - 
return err + return diag.FromErr(err) } foreignKeyRequest.WithOn(sdk.NewForeignKeyOnAction(). WithOnDelete(&onDelete). @@ -328,7 +330,7 @@ func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { alterStatement := sdk.NewAlterTableRequest(*tableIdentifier).WithConstraintAction(sdk.NewTableConstraintActionRequest().WithAdd(constraintRequest)) err = client.Tables.Alter(ctx, alterStatement) if err != nil { - return fmt.Errorf("error creating table constraint %v err = %w", name, err) + return diag.FromErr(fmt.Errorf("error creating table constraint %v err = %w", name, err)) } tc := tableConstraintID{ @@ -338,11 +340,11 @@ func CreateTableConstraint(d *schema.ResourceData, meta interface{}) error { } d.SetId(tc.String()) - return ReadTableConstraint(d, meta) + return ReadTableConstraint(ctx, d, meta) } // ReadTableConstraint implements schema.ReadFunc. -func ReadTableConstraint(_ *schema.ResourceData, _ interface{}) error { +func ReadTableConstraint(ctx context.Context, _ *schema.ResourceData, _ interface{}) diag.Diagnostics { // TODO(issue-2683): Implement read operation // commenting this out since it requires an active warehouse to be set which may not be intuitive. // also it takes a while for the database to reflect changes. Would likely need to add a validation @@ -356,24 +358,24 @@ func ReadTableConstraint(_ *schema.ResourceData, _ interface{}) error { // just need to check to make sure it exists _, err := snowflake.ShowTableConstraint(tc.name, databaseName, schemaName, tableName, db) if err != nil { - return fmt.Errorf(fmt.Sprintf("error reading table constraint %v", tc.String())) + return diag.FromErr(fmt.Errorf(fmt.Sprintf("error reading table constraint %v", tc.String())) }*/ return nil } // UpdateTableConstraint implements schema.UpdateFunc. 
-func UpdateTableConstraint(d *schema.ResourceData, meta interface{}) error { +func UpdateTableConstraint(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { /* TODO(issue-2683): Update isn't be possible with non-existing Read operation. The Update logic is ready to be uncommented once the Read operation is ready. client := meta.(*provider.Context).Client - ctx := context.Background() + tc := tableConstraintID{} tc.parse(d.Id()) tableIdentifier, err := getTableIdentifier(tc.tableID) if err != nil { - return err + return diag.FromErr(err) } if d.HasChange("name") { @@ -383,7 +385,7 @@ func UpdateTableConstraint(d *schema.ResourceData, meta interface{}) error { err = client.Tables.Alter(ctx, alterStatement) if err != nil { - return fmt.Errorf("error renaming table constraint %s err = %w", tc.name, err) + return diag.FromErr(fmt.Errorf("error renaming table constraint %s err = %w", tc.name, err) } tc.name = newName @@ -391,20 +393,19 @@ func UpdateTableConstraint(d *schema.ResourceData, meta interface{}) error { } */ - return ReadTableConstraint(d, meta) + return ReadTableConstraint(ctx, d, meta) } // DeleteTableConstraint implements schema.DeleteFunc. 
-func DeleteTableConstraint(d *schema.ResourceData, meta interface{}) error { +func DeleteTableConstraint(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() tc := tableConstraintID{} tc.parse(d.Id()) tableIdentifier, err := getTableIdentifier(tc.tableID) if err != nil { - return err + return diag.FromErr(err) } dropRequest := sdk.NewTableConstraintDropActionRequest().WithConstraintName(&tc.name) @@ -416,7 +417,7 @@ func DeleteTableConstraint(d *schema.ResourceData, meta interface{}) error { d.SetId("") return nil } - return fmt.Errorf("error dropping table constraint %v err = %w", tc.name, err) + return diag.FromErr(fmt.Errorf("error dropping table constraint %v err = %w", tc.name, err)) } d.SetId("") diff --git a/pkg/resources/user_authentication_policy_attachment.go b/pkg/resources/user_authentication_policy_attachment.go index c29b54880f..7e03ef118b 100644 --- a/pkg/resources/user_authentication_policy_attachment.go +++ b/pkg/resources/user_authentication_policy_attachment.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -31,20 +34,19 @@ var userAuthenticationPolicyAttachmentSchema = map[string]*schema.Schema{ // UserAuthenticationPolicyAttachment returns a pointer to the resource representing a user authentication policy attachment. 
func UserAuthenticationPolicyAttachment() *schema.Resource { return &schema.Resource{ - Description: "Specifies the authentication policy to use for a certain user.", - Create: CreateUserAuthenticationPolicyAttachment, - Read: ReadUserAuthenticationPolicyAttachment, - Delete: DeleteUserAuthenticationPolicyAttachment, - Schema: userAuthenticationPolicyAttachmentSchema, + Description: "Specifies the authentication policy to use for a certain user.", + CreateContext: TrackingCreateWrapper(resources.UserAuthenticationPolicyAttachment, CreateUserAuthenticationPolicyAttachment), + ReadContext: TrackingReadWrapper(resources.UserAuthenticationPolicyAttachment, ReadUserAuthenticationPolicyAttachment), + DeleteContext: TrackingDeleteWrapper(resources.UserAuthenticationPolicyAttachment, DeleteUserAuthenticationPolicyAttachment), + Schema: userAuthenticationPolicyAttachmentSchema, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, } } -func CreateUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) error { +func CreateUserAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(d.Get("user_name").(string)) authenticationPolicy := sdk.NewSchemaObjectIdentifierFromFullyQualifiedName(d.Get("authentication_policy_name").(string)) @@ -55,28 +57,27 @@ func CreateUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) }, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeResourceIdentifier(userName.FullyQualifiedName(), authenticationPolicy.FullyQualifiedName())) - return ReadUserAuthenticationPolicyAttachment(d, meta) + return ReadUserAuthenticationPolicyAttachment(ctx, d, meta) } -func ReadUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) error { +func 
ReadUserAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() parts := helpers.ParseResourceIdentifier(d.Id()) if len(parts) != 2 { - return fmt.Errorf("required id format 'user_name|authentication_policy_name', but got: '%s'", d.Id()) + return diag.FromErr(fmt.Errorf("required id format 'user_name|authentication_policy_name', but got: '%s'", d.Id())) } // Note: there is no alphanumeric id for an attachment, so we retrieve the authentication policies attached to a certain user. userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(parts[0]) policyReferences, err := client.PolicyReferences.GetForEntity(ctx, sdk.NewGetForEntityPolicyReferenceRequest(userName, sdk.PolicyEntityDomainUser)) if err != nil { - return err + return diag.FromErr(err) } authenticationPolicyReferences := make([]sdk.PolicyReference, 0) @@ -88,7 +89,7 @@ func ReadUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) er // Note: this should never happen, but just in case: so far, Snowflake only allows one Authentication Policy per user. if len(authenticationPolicyReferences) > 1 { - return fmt.Errorf("internal error: multiple policy references attached to a user. This should never happen") + return diag.FromErr(fmt.Errorf("internal error: multiple policy references attached to a user. This should never happen")) } // Note: this means the resource has been deleted outside of Terraform. 
@@ -98,7 +99,7 @@ func ReadUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) er } if err := d.Set("user_name", userName.Name()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set( "authentication_policy_name", @@ -107,15 +108,14 @@ func ReadUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) er *authenticationPolicyReferences[0].PolicySchema, authenticationPolicyReferences[0].PolicyName, ).FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } - return err + return diag.FromErr(err) } -func DeleteUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) error { +func DeleteUserAuthenticationPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(d.Get("user_name").(string)) @@ -125,7 +125,7 @@ func DeleteUserAuthenticationPolicyAttachment(d *schema.ResourceData, meta any) }, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/user_password_policy_attachment.go b/pkg/resources/user_password_policy_attachment.go index 96bac9523a..84cdd42a1f 100644 --- a/pkg/resources/user_password_policy_attachment.go +++ b/pkg/resources/user_password_policy_attachment.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -31,20 +34,19 @@ var userPasswordPolicyAttachmentSchema = map[string]*schema.Schema{ // UserPasswordPolicyAttachment returns a pointer to the resource representing a user password policy attachment. 
func UserPasswordPolicyAttachment() *schema.Resource { return &schema.Resource{ - Description: "Specifies the password policy to use for a certain user.", - Create: CreateUserPasswordPolicyAttachment, - Read: ReadUserPasswordPolicyAttachment, - Delete: DeleteUserPasswordPolicyAttachment, - Schema: userPasswordPolicyAttachmentSchema, + Description: "Specifies the password policy to use for a certain user.", + CreateContext: TrackingCreateWrapper(resources.UserPasswordPolicyAttachment, CreateUserPasswordPolicyAttachment), + ReadContext: TrackingReadWrapper(resources.UserPasswordPolicyAttachment, ReadUserPasswordPolicyAttachment), + DeleteContext: TrackingDeleteWrapper(resources.UserPasswordPolicyAttachment, DeleteUserPasswordPolicyAttachment), + Schema: userPasswordPolicyAttachmentSchema, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, } } -func CreateUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { +func CreateUserPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(d.Get("user_name").(string)) passwordPolicy := sdk.NewSchemaObjectIdentifierFromFullyQualifiedName(d.Get("password_policy_name").(string)) @@ -55,28 +57,27 @@ func CreateUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error }, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeResourceIdentifier(userName.FullyQualifiedName(), passwordPolicy.FullyQualifiedName())) - return ReadUserPasswordPolicyAttachment(d, meta) + return ReadUserPasswordPolicyAttachment(ctx, d, meta) } -func ReadUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { +func ReadUserPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := 
context.Background() parts := helpers.ParseResourceIdentifier(d.Id()) if len(parts) != 2 { - return fmt.Errorf("required id format 'user_name|password_policy_name', but got: '%s'", d.Id()) + return diag.FromErr(fmt.Errorf("required id format 'user_name|password_policy_name', but got: '%s'", d.Id())) } // Note: there is no alphanumeric id for an attachment, so we retrieve the password policies attached to a certain user. userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(parts[0]) policyReferences, err := client.PolicyReferences.GetForEntity(ctx, sdk.NewGetForEntityPolicyReferenceRequest(userName, sdk.PolicyEntityDomainUser)) if err != nil { - return err + return diag.FromErr(err) } passwordPolicyReferences := make([]sdk.PolicyReference, 0) @@ -88,7 +89,7 @@ func ReadUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { // Note: this should never happen, but just in case: so far, Snowflake only allows one Password Policy per user. if len(passwordPolicyReferences) > 1 { - return fmt.Errorf("internal error: multiple policy references attached to a user. This should never happen") + return diag.FromErr(fmt.Errorf("internal error: multiple policy references attached to a user. This should never happen")) } // Note: this means the resource has been deleted outside of Terraform. 
@@ -98,7 +99,7 @@ func ReadUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { } if err := d.Set("user_name", userName.Name()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set( "password_policy_name", @@ -107,15 +108,14 @@ func ReadUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { *passwordPolicyReferences[0].PolicySchema, passwordPolicyReferences[0].PolicyName, ).FullyQualifiedName()); err != nil { - return err + return diag.FromErr(err) } - return err + return diag.FromErr(err) } -func DeleteUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error { +func DeleteUserPasswordPolicyAttachment(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() userName := sdk.NewAccountObjectIdentifierFromFullyQualifiedName(d.Get("user_name").(string)) @@ -125,7 +125,7 @@ func DeleteUserPasswordPolicyAttachment(d *schema.ResourceData, meta any) error }, }) if err != nil { - return err + return diag.FromErr(err) } d.SetId("") diff --git a/pkg/resources/user_public_keys.go b/pkg/resources/user_public_keys.go index 6f97a1e9a6..c46b0990c7 100644 --- a/pkg/resources/user_public_keys.go +++ b/pkg/resources/user_public_keys.go @@ -8,6 +8,9 @@ import ( "log" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -51,10 +54,10 @@ var userPublicKeysSchema = map[string]*schema.Schema{ func UserPublicKeys() *schema.Resource { return &schema.Resource{ - Create: CreateUserPublicKeys, - Read: ReadUserPublicKeys, - Update: UpdateUserPublicKeys, - Delete: DeleteUserPublicKeys, + CreateContext: 
TrackingCreateWrapper(resources.UserPublicKeys, CreateUserPublicKeys), + ReadContext: TrackingReadWrapper(resources.UserPublicKeys, ReadUserPublicKeys), + UpdateContext: TrackingUpdateWrapper(resources.UserPublicKeys, UpdateUserPublicKeys), + DeleteContext: TrackingDeleteWrapper(resources.UserPublicKeys, DeleteUserPublicKeys), Schema: userPublicKeysSchema, Importer: &schema.ResourceImporter{ @@ -63,9 +66,7 @@ func UserPublicKeys() *schema.Resource { } } -func checkUserExists(client *sdk.Client, userId sdk.AccountObjectIdentifier) (bool, error) { - ctx := context.Background() - +func checkUserExists(ctx context.Context, client *sdk.Client, userId sdk.AccountObjectIdentifier) (bool, error) { // First check if user exists _, err := client.Users.Describe(ctx, userId) if errors.Is(err, sdk.ErrObjectNotExistOrAuthorized) { @@ -79,13 +80,13 @@ func checkUserExists(client *sdk.Client, userId sdk.AccountObjectIdentifier) (bo return true, nil } -func ReadUserPublicKeys(d *schema.ResourceData, meta interface{}) error { +func ReadUserPublicKeys(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client id := helpers.DecodeSnowflakeID(d.Id()).(sdk.AccountObjectIdentifier) - exists, err := checkUserExists(client, id) + exists, err := checkUserExists(ctx, client, id) if err != nil { - return err + return diag.FromErr(err) } // If not found, mark resource to be removed from state file during apply or refresh if !exists { @@ -96,7 +97,7 @@ func ReadUserPublicKeys(d *schema.ResourceData, meta interface{}) error { return nil } -func CreateUserPublicKeys(d *schema.ResourceData, meta interface{}) error { +func CreateUserPublicKeys(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB name := d.Get("name").(string) @@ -108,15 +109,15 @@ func CreateUserPublicKeys(d *schema.ResourceData, meta interface{}) error { } err := updateUserPublicKeys(db, 
name, prop, publicKey.(string)) if err != nil { - return err + return diag.FromErr(err) } } d.SetId(name) - return ReadUserPublicKeys(d, meta) + return ReadUserPublicKeys(ctx, d, meta) } -func UpdateUserPublicKeys(d *schema.ResourceData, meta interface{}) error { +func UpdateUserPublicKeys(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB name := d.Id() @@ -142,7 +143,7 @@ func UpdateUserPublicKeys(d *schema.ResourceData, meta interface{}) error { for prop, value := range propsToSet { err := updateUserPublicKeys(db, name, prop, value) if err != nil { - return err + return diag.FromErr(err) } } @@ -150,14 +151,14 @@ func UpdateUserPublicKeys(d *schema.ResourceData, meta interface{}) error { for k := range propsToUnset { err := unsetUserPublicKeys(db, name, k) if err != nil { - return err + return diag.FromErr(err) } } // re-sync - return ReadUserPublicKeys(d, meta) + return ReadUserPublicKeys(ctx, d, meta) } -func DeleteUserPublicKeys(d *schema.ResourceData, meta interface{}) error { +func DeleteUserPublicKeys(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB name := d.Id() @@ -165,7 +166,7 @@ func DeleteUserPublicKeys(d *schema.ResourceData, meta interface{}) error { for _, prop := range userPublicKeyProperties { err := unsetUserPublicKeys(db, name, prop) if err != nil { - return err + return diag.FromErr(err) } } diff --git a/pkg/sdk/dynamic_table.go b/pkg/sdk/dynamic_table.go index 7d697e9709..dac13dc576 100644 --- a/pkg/sdk/dynamic_table.go +++ b/pkg/sdk/dynamic_table.go @@ -4,6 +4,8 @@ import ( "context" "database/sql" "time" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" ) type DynamicTables interface { @@ -172,7 +174,7 @@ func (dtr dynamicTableRow) convert() *DynamicTable { RefreshMode: DynamicTableRefreshMode(dtr.RefreshMode), Warehouse: 
dtr.Warehouse, Comment: dtr.Comment, - Text: dtr.Text, + Text: tracking.TrimMetadata(dtr.Text), AutomaticClustering: dtr.AutomaticClustering == "ON", // "ON" or "OFF SchedulingState: DynamicTableSchedulingState(dtr.SchedulingState), IsClone: dtr.IsClone, diff --git a/pkg/sdk/materialized_views_impl_gen.go b/pkg/sdk/materialized_views_impl_gen.go index d422d59b04..9f1393d30c 100644 --- a/pkg/sdk/materialized_views_impl_gen.go +++ b/pkg/sdk/materialized_views_impl_gen.go @@ -4,6 +4,7 @@ import ( "context" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" ) var _ MaterializedViews = (*materializedViews)(nil) @@ -168,7 +169,7 @@ func (r materializedViewDBRow) convert() *MaterializedView { Owner: r.Owner, Invalid: r.Invalid, BehindBy: r.BehindBy, - Text: r.Text, + Text: tracking.TrimMetadata(r.Text), IsSecure: r.IsSecure, } if r.Reserved.Valid { diff --git a/pkg/sdk/testint/dynamic_table_integration_test.go b/pkg/sdk/testint/dynamic_table_integration_test.go index 778334f561..b7025d4e04 100644 --- a/pkg/sdk/testint/dynamic_table_integration_test.go +++ b/pkg/sdk/testint/dynamic_table_integration_test.go @@ -3,8 +3,12 @@ package testint import ( "context" "errors" + "fmt" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers/random" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/stretchr/testify/assert" @@ -49,6 +53,23 @@ func TestInt_DynamicTableCreateAndDrop(t *testing.T) { assert.Equal(t, "ROLE", dynamicTableById.OwnerRoleType) }) + t.Run("create with usage tracking comment", func(t *testing.T) { + id := testClientHelper().Ids.RandomSchemaObjectIdentifier() + plainQuery := fmt.Sprintf("SELECT id FROM %s", 
tableTest.ID().FullyQualifiedName()) + query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.DynamicTable, tracking.CreateOperation)) + require.NoError(t, err) + + err = client.DynamicTables.Create(ctx, sdk.NewCreateDynamicTableRequest(id, testClientHelper().Ids.WarehouseId(), sdk.TargetLag{ + MaximumDuration: sdk.String("2 minutes"), + }, query)) + require.NoError(t, err) + + dynamicTable, err := client.DynamicTables.ShowByID(ctx, id) + require.NoError(t, err) + + assert.Equal(t, fmt.Sprintf("CREATE DYNAMIC TABLE %s lag = '2 minutes' refresh_mode = 'AUTO' initialize = 'ON_CREATE' warehouse = %s AS %s", id.FullyQualifiedName(), testClientHelper().Ids.WarehouseId().FullyQualifiedName(), plainQuery), dynamicTable.Text) + }) + t.Run("test complete with target lag", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() targetLag := sdk.TargetLag{ diff --git a/pkg/sdk/testint/materialized_views_gen_integration_test.go b/pkg/sdk/testint/materialized_views_gen_integration_test.go index 98ba774f94..6ef58925e9 100644 --- a/pkg/sdk/testint/materialized_views_gen_integration_test.go +++ b/pkg/sdk/testint/materialized_views_gen_integration_test.go @@ -5,6 +5,9 @@ import ( "fmt" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/stretchr/testify/assert" @@ -109,6 +112,18 @@ func TestInt_MaterializedViews(t *testing.T) { assertMaterializedView(t, view, request.GetName()) }) + t.Run("create materialized view: with usage tracking comment", func(t *testing.T) { + id := testClientHelper().Ids.RandomSchemaObjectIdentifier() + plainQuery := fmt.Sprintf("SELECT id FROM %s", table.ID().FullyQualifiedName()) + query, err := 
tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.MaterializedView, tracking.CreateOperation)) + require.NoError(t, err) + + view := createMaterializedViewWithRequest(t, sdk.NewCreateMaterializedViewRequest(id, query)) + + assertMaterializedView(t, view, sdk.NewCreateMaterializedViewRequest(id, query).GetName()) + assert.Equal(t, fmt.Sprintf("CREATE MATERIALIZED VIEW %s AS %s", id.FullyQualifiedName(), plainQuery), view.Text) + }) + t.Run("create materialized view: almost complete case", func(t *testing.T) { rowAccessPolicy, rowAccessPolicyCleanup := testClientHelper().RowAccessPolicy.CreateRowAccessPolicy(t) t.Cleanup(rowAccessPolicyCleanup) diff --git a/pkg/sdk/testint/views_gen_integration_test.go b/pkg/sdk/testint/views_gen_integration_test.go index 7fda5f15ae..3682a3ee09 100644 --- a/pkg/sdk/testint/views_gen_integration_test.go +++ b/pkg/sdk/testint/views_gen_integration_test.go @@ -7,6 +7,9 @@ import ( "slices" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" + assertions "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/objectassert" @@ -166,6 +169,19 @@ func TestInt_Views(t *testing.T) { assertView(t, view, request.GetName()) }) + t.Run("create view: with usage tracking comment", func(t *testing.T) { + id := testClientHelper().Ids.RandomSchemaObjectIdentifier() + plainQuery := "SELECT NULL AS TYPE" + query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.View, tracking.CreateOperation)) + require.NoError(t, err) + request := sdk.NewCreateViewRequest(id, query) + + view := createViewWithRequest(t, request) + + assertView(t, view, request.GetName()) + assert.Equal(t, fmt.Sprintf("CREATE VIEW %s AS %s", id.FullyQualifiedName(), plainQuery), 
view.Text) + }) + t.Run("create view: almost complete case - without masking and projection policies", func(t *testing.T) { rowAccessPolicy, rowAccessPolicyCleanup := testClientHelper().RowAccessPolicy.CreateRowAccessPolicy(t) t.Cleanup(rowAccessPolicyCleanup) diff --git a/pkg/sdk/views_impl_gen.go b/pkg/sdk/views_impl_gen.go index d9a939a268..a9a2783fb4 100644 --- a/pkg/sdk/views_impl_gen.go +++ b/pkg/sdk/views_impl_gen.go @@ -3,6 +3,8 @@ package sdk import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" ) @@ -277,7 +279,7 @@ func (r viewDBRow) convert() *View { view.Comment = r.Comment.String } if r.Text.Valid { - view.Text = r.Text.String + view.Text = tracking.TrimMetadata(r.Text.String) } if r.Kind.Valid { view.Kind = r.Kind.String From 8210bb84b69fe91e0fff22ac836feb79d6e9a402 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Cie=C5=9Blak?= Date: Tue, 26 Nov 2024 16:14:00 +0100 Subject: [PATCH 07/10] chore: Add support for usage tracking to data sources (#3224) The last part of usage tracking, changes: - Added support for usage tracking in all data sources (\+ acceptance test) - Added schema version and datasource field to metadata --- pkg/datasources/accounts.go | 16 ++-- pkg/datasources/alerts.go | 14 ++-- pkg/datasources/common.go | 12 +++ pkg/datasources/connections.go | 4 +- pkg/datasources/cortex_search_services.go | 13 +-- pkg/datasources/current_account.go | 16 ++-- pkg/datasources/current_role.go | 12 +-- pkg/datasources/database.go | 30 +++---- pkg/datasources/database_role.go | 16 ++-- pkg/datasources/database_roles.go | 4 +- pkg/datasources/databases.go | 4 +- pkg/datasources/dynamic_tables.go | 13 +-- pkg/datasources/external_functions.go | 4 +- pkg/datasources/external_tables.go | 12 +-- pkg/datasources/failover_groups.go | 14 ++-- pkg/datasources/file_formats.go | 14 ++-- pkg/datasources/functions.go | 4 +- 
pkg/datasources/grants.go | 4 +- pkg/datasources/masking_policies.go | 4 +- pkg/datasources/materialized_views.go | 12 +-- pkg/datasources/network_policies.go | 4 +- pkg/datasources/parameters.go | 18 ++-- pkg/datasources/pipes.go | 14 ++-- pkg/datasources/procedures.go | 4 +- pkg/datasources/resource_monitors.go | 4 +- pkg/datasources/role.go | 14 ++-- pkg/datasources/roles.go | 4 +- pkg/datasources/row_access_policies.go | 4 +- pkg/datasources/schemas.go | 4 +- pkg/datasources/secrets.go | 4 +- pkg/datasources/security_integrations.go | 4 +- pkg/datasources/sequences.go | 14 ++-- pkg/datasources/shares.go | 14 ++-- pkg/datasources/stages.go | 4 +- pkg/datasources/storage_integrations.go | 18 ++-- pkg/datasources/streamlits.go | 4 +- pkg/datasources/streams.go | 4 +- .../system_generate_scim_access_token.go | 12 ++- .../system_get_aws_sns_iam_policy.go | 14 ++-- .../system_get_privatelink_config.go | 28 ++++--- .../system_get_snowflake_platform_info.go | 19 +++-- pkg/datasources/tables.go | 12 +-- pkg/datasources/tags.go | 4 +- pkg/datasources/tasks.go | 7 +- .../usage_tracking_acceptance_test.go | 84 +++++++++++++++++++ pkg/datasources/users.go | 4 +- pkg/datasources/views.go | 4 +- pkg/datasources/warehouses.go | 4 +- pkg/internal/tracking/context.go | 51 +++++++---- pkg/internal/tracking/context_test.go | 6 +- pkg/internal/tracking/query_test.go | 11 +-- pkg/provider/datasources/datasources.go | 63 ++++++++++++++ pkg/resources/common.go | 12 +-- .../usage_tracking_acceptance_test.go | 12 ++- pkg/sdk/testint/client_integration_test.go | 2 +- .../testint/dynamic_table_integration_test.go | 2 +- ...materialized_views_gen_integration_test.go | 2 +- pkg/sdk/testint/views_gen_integration_test.go | 2 +- 58 files changed, 496 insertions(+), 217 deletions(-) create mode 100644 pkg/datasources/usage_tracking_acceptance_test.go create mode 100644 pkg/provider/datasources/datasources.go diff --git a/pkg/datasources/accounts.go b/pkg/datasources/accounts.go index 
7f3079074e..87c706152c 100644 --- a/pkg/datasources/accounts.go +++ b/pkg/datasources/accounts.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/snowflakeroles" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -110,19 +113,18 @@ var accountsSchema = map[string]*schema.Schema{ // Accounts Snowflake Accounts resource. func Accounts() *schema.Resource { return &schema.Resource{ - Read: ReadAccounts, - Schema: accountsSchema, + ReadContext: TrackingReadWrapper(datasources.Accounts, ReadAccounts), + Schema: accountsSchema, } } // ReadAccounts lists accounts. -func ReadAccounts(d *schema.ResourceData, meta interface{}) error { +func ReadAccounts(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() ok, err := client.ContextFunctions.IsRoleInSession(ctx, snowflakeroles.Orgadmin) if err != nil { - return err + return diag.FromErr(err) } if !ok { log.Printf("[DEBUG] ORGADMIN role is not in current session, cannot read accounts") @@ -136,7 +138,7 @@ func ReadAccounts(d *schema.ResourceData, meta interface{}) error { } accounts, err := client.Accounts.Show(ctx, opts) if err != nil { - return err + return diag.FromErr(err) } d.SetId("accounts") accountsFlatten := []map[string]interface{}{} @@ -161,7 +163,7 @@ func ReadAccounts(d *schema.ResourceData, meta interface{}) error { accountsFlatten = append(accountsFlatten, m) } if err := d.Set("accounts", accountsFlatten); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/alerts.go b/pkg/datasources/alerts.go index 554a7a7cd1..ac0d643528 100644 --- a/pkg/datasources/alerts.go +++ 
b/pkg/datasources/alerts.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -76,15 +79,14 @@ var alertsSchema = map[string]*schema.Schema{ // Alerts Snowflake Roles resource. func Alerts() *schema.Resource { return &schema.Resource{ - Read: ReadAlerts, - Schema: alertsSchema, + ReadContext: TrackingReadWrapper(datasources.Alerts, ReadAlerts), + Schema: alertsSchema, } } // ReadAlerts Reads the database metadata information. -func ReadAlerts(d *schema.ResourceData, meta interface{}) error { +func ReadAlerts(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() d.SetId("alerts_read") @@ -114,7 +116,7 @@ func ReadAlerts(d *schema.ResourceData, meta interface{}) error { if err != nil { log.Printf("[DEBUG] failed to list alerts in schema (%s)", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } alerts := make([]map[string]any, 0, len(listAlerts)) @@ -127,7 +129,7 @@ func ReadAlerts(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("alerts", alerts); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/common.go b/pkg/datasources/common.go index 24c68fcb85..9b4f354dda 100644 --- a/pkg/datasources/common.go +++ b/pkg/datasources/common.go @@ -1,10 +1,15 @@ package datasources import ( + "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -205,3 +210,10 @@ func handleExtendedIn(d *schema.ResourceData, setField **sdk.ExtendedIn) error { } return nil } + +func TrackingReadWrapper(datasourceName datasources.Datasource, readImplementation schema.ReadContextFunc) schema.ReadContextFunc { + return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { + ctx = tracking.NewContext(ctx, tracking.NewVersionedDatasourceMetadata(datasourceName)) + return readImplementation(ctx, d, meta) + } +} diff --git a/pkg/datasources/connections.go b/pkg/datasources/connections.go index ef03ffa7f1..bc3c59c378 100644 --- a/pkg/datasources/connections.go +++ b/pkg/datasources/connections.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -34,7 +36,7 @@ var connectionsSchema = map[string]*schema.Schema{ func Connections() *schema.Resource { return &schema.Resource{ - ReadContext: ReadConnections, + ReadContext: TrackingReadWrapper(datasources.Connections, ReadConnections), Schema: connectionsSchema, Description: "Datasource used to get details of filtered connections. Filtering is aligned with the current possibilities for [SHOW CONNECTIONS](https://docs.snowflake.com/en/sql-reference/sql/show-connections) query. 
The results of SHOW is encapsulated in one output collection `connections`.", } diff --git a/pkg/datasources/cortex_search_services.go b/pkg/datasources/cortex_search_services.go index aa58a98b4b..b657d9f156 100644 --- a/pkg/datasources/cortex_search_services.go +++ b/pkg/datasources/cortex_search_services.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -108,13 +111,13 @@ var cortexSearchServicesSchema = map[string]*schema.Schema{ // CortexSearchServices Snowflake Cortex search services resource. func CortexSearchServices() *schema.Resource { return &schema.Resource{ - Read: ReadCortexSearchServices, - Schema: cortexSearchServicesSchema, + ReadContext: TrackingReadWrapper(datasources.CortexSearchServices, ReadCortexSearchServices), + Schema: cortexSearchServicesSchema, } } // ReadCortexSearchServices Reads the cortex search services metadata information. 
-func ReadCortexSearchServices(d *schema.ResourceData, meta interface{}) error { +func ReadCortexSearchServices(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client request := sdk.NewShowCortexSearchServiceRequest() @@ -167,7 +170,7 @@ func ReadCortexSearchServices(d *schema.ResourceData, meta interface{}) error { if err != nil { log.Printf("[DEBUG] snowflake_cortex_search_services.go: %v", err) d.SetId("") - return err + return diag.FromErr(err) } d.SetId("cortex_search_services") records := make([]map[string]any, 0, len(dts)) @@ -181,7 +184,7 @@ func ReadCortexSearchServices(d *schema.ResourceData, meta interface{}) error { records = append(records, record) } if err := d.Set("cortex_search_services", records); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/current_account.go b/pkg/datasources/current_account.go index d6a0d1d291..c3ba23e1fa 100644 --- a/pkg/datasources/current_account.go +++ b/pkg/datasources/current_account.go @@ -5,6 +5,9 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -33,15 +36,14 @@ var currentAccountSchema = map[string]*schema.Schema{ // CurrentAccount the Snowflake current account resource. func CurrentAccount() *schema.Resource { return &schema.Resource{ - Read: ReadCurrentAccount, - Schema: currentAccountSchema, + ReadContext: TrackingReadWrapper(datasources.CurrentAccount, ReadCurrentAccount), + Schema: currentAccountSchema, } } // ReadCurrentAccount read the current snowflake account information. 
-func ReadCurrentAccount(d *schema.ResourceData, meta interface{}) error { +func ReadCurrentAccount(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() current, err := client.ContextFunctions.CurrentSessionDetails(ctx) if err != nil { @@ -53,11 +55,11 @@ func ReadCurrentAccount(d *schema.ResourceData, meta interface{}) error { d.SetId(fmt.Sprintf("%s.%s", current.Account, current.Region)) accountErr := d.Set("account", current.Account) if accountErr != nil { - return accountErr + return diag.FromErr(accountErr) } regionErr := d.Set("region", current.Region) if regionErr != nil { - return regionErr + return diag.FromErr(regionErr) } url, err := current.AccountURL() if err != nil { @@ -67,7 +69,7 @@ func ReadCurrentAccount(d *schema.ResourceData, meta interface{}) error { urlErr := d.Set("url", url) if urlErr != nil { - return urlErr + return diag.FromErr(urlErr) } return nil } diff --git a/pkg/datasources/current_role.go b/pkg/datasources/current_role.go index 254bb70dbc..e078773590 100644 --- a/pkg/datasources/current_role.go +++ b/pkg/datasources/current_role.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -19,14 +22,13 @@ var currentRoleSchema = map[string]*schema.Schema{ func CurrentRole() *schema.Resource { return &schema.Resource{ - Read: ReadCurrentRole, - Schema: currentRoleSchema, + ReadContext: TrackingReadWrapper(datasources.CurrentRole, ReadCurrentRole), + Schema: currentRoleSchema, } } -func ReadCurrentRole(d *schema.ResourceData, meta interface{}) error { +func ReadCurrentRole(ctx context.Context, d 
*schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() role, err := client.ContextFunctions.CurrentRole(ctx) if err != nil { @@ -38,7 +40,7 @@ func ReadCurrentRole(d *schema.ResourceData, meta interface{}) error { d.SetId(helpers.EncodeSnowflakeID(role)) err = d.Set("name", role.Name()) if err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/database.go b/pkg/datasources/database.go index 0be0368121..d04595cad8 100644 --- a/pkg/datasources/database.go +++ b/pkg/datasources/database.go @@ -3,6 +3,9 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -54,52 +57,51 @@ var databaseSchema = map[string]*schema.Schema{ // Database the Snowflake Database resource. func Database() *schema.Resource { return &schema.Resource{ - Read: ReadDatabase, - Schema: databaseSchema, + ReadContext: TrackingReadWrapper(datasources.Database, ReadDatabase), + Schema: databaseSchema, } } // ReadDatabase read the database meta-data information. 
-func ReadDatabase(d *schema.ResourceData, meta interface{}) error { +func ReadDatabase(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() name := d.Get("name").(string) id := sdk.NewAccountObjectIdentifier(name) database, err := client.Databases.ShowByID(ctx, id) if err != nil { - return err + return diag.FromErr(err) } d.SetId(helpers.EncodeResourceIdentifier(database.ID())) if err := d.Set("name", database.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", database.Comment); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("owner", database.Owner); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("is_default", database.IsDefault); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("is_current", database.IsCurrent); err != nil { - return err + return diag.FromErr(err) } var origin string if database.Origin != nil { origin = database.Origin.FullyQualifiedName() } if err := d.Set("origin", origin); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("retention_time", database.RetentionTime); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("created_on", database.CreatedOn.String()); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("options", database.Options); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/database_role.go b/pkg/datasources/database_role.go index 4379954ec3..dc33ac89fa 100644 --- a/pkg/datasources/database_role.go +++ b/pkg/datasources/database_role.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -36,34 +39,33 @@ var databaseRoleSchema = map[string]*schema.Schema{ // DatabaseRole Snowflake Database Role resource. func DatabaseRole() *schema.Resource { return &schema.Resource{ - Read: ReadDatabaseRole, - Schema: databaseRoleSchema, + ReadContext: TrackingReadWrapper(datasources.DatabaseRole, ReadDatabaseRole), + Schema: databaseRoleSchema, } } // ReadDatabaseRole Reads the database role metadata information. -func ReadDatabaseRole(d *schema.ResourceData, meta interface{}) error { +func ReadDatabaseRole(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client databaseName := d.Get("database").(string) roleName := d.Get("name").(string) - ctx := context.Background() dbObjId := sdk.NewDatabaseObjectIdentifier(databaseName, roleName) databaseRole, err := client.DatabaseRoles.ShowByID(ctx, dbObjId) if err != nil { log.Printf("[DEBUG] unable to show database role %s in db (%s)", roleName, databaseName) d.SetId("") - return err + return diag.FromErr(err) } err = d.Set("comment", databaseRole.Comment) if err != nil { - return err + return diag.FromErr(err) } err = d.Set("owner", databaseRole.Owner) if err != nil { - return err + return diag.FromErr(err) } d.SetId("database_role_read") diff --git a/pkg/datasources/database_roles.go b/pkg/datasources/database_roles.go index 615951e890..570b75fd60 100644 --- a/pkg/datasources/database_roles.go +++ b/pkg/datasources/database_roles.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -64,7 +66,7 @@ var databaseRolesSchema = map[string]*schema.Schema{ func DatabaseRoles() *schema.Resource { 
return &schema.Resource{ - ReadContext: ReadDatabaseRoles, + ReadContext: TrackingReadWrapper(datasources.DatabaseRoles, ReadDatabaseRoles), Schema: databaseRolesSchema, Description: "Datasource used to get details of filtered database roles. Filtering is aligned with the current possibilities for [SHOW DATABASE ROLES](https://docs.snowflake.com/en/sql-reference/sql/show-database-roles) query (`like` and `limit` are supported). The results of SHOW is encapsulated in show_output collection.", } diff --git a/pkg/datasources/databases.go b/pkg/datasources/databases.go index a9a0f6d2d1..21fb414aed 100644 --- a/pkg/datasources/databases.go +++ b/pkg/datasources/databases.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -91,7 +93,7 @@ var databasesSchema = map[string]*schema.Schema{ func Databases() *schema.Resource { return &schema.Resource{ - ReadContext: ReadDatabases, + ReadContext: TrackingReadWrapper(datasources.Databases, ReadDatabases), Schema: databasesSchema, Description: "Datasource used to get details of filtered databases. Filtering is aligned with the current possibilities for [SHOW DATABASES](https://docs.snowflake.com/en/sql-reference/sql/show-databases) query (`like`, `starts_with`, and `limit` are all supported). 
The results of SHOW, DESCRIBE, and SHOW PARAMETERS IN are encapsulated in one output collection.", } diff --git a/pkg/datasources/dynamic_tables.go b/pkg/datasources/dynamic_tables.go index 4431201cc4..8b5468ef00 100644 --- a/pkg/datasources/dynamic_tables.go +++ b/pkg/datasources/dynamic_tables.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -194,13 +197,13 @@ var dynamicTablesSchema = map[string]*schema.Schema{ // DynamicTables Snowflake Dynamic Tables resource. func DynamicTables() *schema.Resource { return &schema.Resource{ - Read: ReadDynamicTables, - Schema: dynamicTablesSchema, + ReadContext: TrackingReadWrapper(datasources.DynamicTables, ReadDynamicTables), + Schema: dynamicTablesSchema, } } // ReadDynamicTables Reads the dynamic tables metadata information. 
-func ReadDynamicTables(d *schema.ResourceData, meta interface{}) error { +func ReadDynamicTables(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client request := sdk.NewShowDynamicTableRequest() if v, ok := d.GetOk("like"); ok { @@ -252,7 +255,7 @@ func ReadDynamicTables(d *schema.ResourceData, meta interface{}) error { if err != nil { log.Printf("[DEBUG] snowflake_dynamic_tables.go: %v", err) d.SetId("") - return err + return diag.FromErr(err) } d.SetId("dynamic_tables") records := make([]map[string]any, 0, len(dts)) @@ -287,7 +290,7 @@ func ReadDynamicTables(d *schema.ResourceData, meta interface{}) error { records = append(records, record) } if err := d.Set("records", records); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/external_functions.go b/pkg/datasources/external_functions.go index 01e5b956e6..9a85bd08c3 100644 --- a/pkg/datasources/external_functions.go +++ b/pkg/datasources/external_functions.go @@ -5,6 +5,8 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -59,7 +61,7 @@ var externalFunctionsSchema = map[string]*schema.Schema{ func ExternalFunctions() *schema.Resource { return &schema.Resource{ - ReadContext: ReadContextExternalFunctions, + ReadContext: TrackingReadWrapper(datasources.ExternalFunctions, ReadContextExternalFunctions), Schema: externalFunctionsSchema, } } diff --git a/pkg/datasources/external_tables.go b/pkg/datasources/external_tables.go index 429fac4a3c..7c8c40ff52 100644 --- a/pkg/datasources/external_tables.go +++ b/pkg/datasources/external_tables.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -54,14 +57,13 @@ var externalTablesSchema = map[string]*schema.Schema{ func ExternalTables() *schema.Resource { return &schema.Resource{ - Read: ReadExternalTables, - Schema: externalTablesSchema, + ReadContext: TrackingReadWrapper(datasources.ExternalTables, ReadExternalTables), + Schema: externalTablesSchema, } } -func ReadExternalTables(d *schema.ResourceData, meta interface{}) error { +func ReadExternalTables(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -86,5 +88,5 @@ func ReadExternalTables(d *schema.ResourceData, meta interface{}) error { d.SetId(helpers.EncodeSnowflakeID(schemaId)) - return d.Set("external_tables", externalTablesObjects) + return diag.FromErr(d.Set("external_tables", externalTablesObjects)) } diff --git a/pkg/datasources/failover_groups.go b/pkg/datasources/failover_groups.go index f1bdf428af..85c50fd2c0 100644 --- a/pkg/datasources/failover_groups.go +++ b/pkg/datasources/failover_groups.go @@ -3,6 +3,9 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -117,15 +120,14 @@ var failoverGroupsSchema = map[string]*schema.Schema{ // FailoverGroups Snowflake FailoverGroups resource. 
func FailoverGroups() *schema.Resource { return &schema.Resource{ - Read: ReadFailoverGroups, - Schema: failoverGroupsSchema, + ReadContext: TrackingReadWrapper(datasources.FailoverGroups, ReadFailoverGroups), + Schema: failoverGroupsSchema, } } // ReadFailoverGroups lists failover groups. -func ReadFailoverGroups(d *schema.ResourceData, meta interface{}) error { +func ReadFailoverGroups(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() inAccount := d.Get("in_account").(string) opts := sdk.ShowFailoverGroupOptions{} @@ -134,7 +136,7 @@ func ReadFailoverGroups(d *schema.ResourceData, meta interface{}) error { } failoverGroups, err := client.FailoverGroups.Show(ctx, &opts) if err != nil { - return err + return diag.FromErr(err) } d.SetId("failover_groups") failoverGroupsFlatten := []map[string]interface{}{} @@ -173,7 +175,7 @@ func ReadFailoverGroups(d *schema.ResourceData, meta interface{}) error { failoverGroupsFlatten = append(failoverGroupsFlatten, m) } if err := d.Set("failover_groups", failoverGroupsFlatten); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/file_formats.go b/pkg/datasources/file_formats.go index 0bb77aa79e..465ea03c4a 100644 --- a/pkg/datasources/file_formats.go +++ b/pkg/datasources/file_formats.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -56,14 +59,13 @@ var fileFormatsSchema = map[string]*schema.Schema{ func FileFormats() *schema.Resource { return &schema.Resource{ - Read: ReadFileFormats, - Schema: fileFormatsSchema, + ReadContext: TrackingReadWrapper(datasources.FileFormats, ReadFileFormats), + Schema: 
fileFormatsSchema, } } -func ReadFileFormats(d *schema.ResourceData, meta interface{}) error { +func ReadFileFormats(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -75,7 +77,7 @@ func ReadFileFormats(d *schema.ResourceData, meta interface{}) error { }) if err != nil { d.SetId("") - return err + return diag.FromErr(err) } fileFormats := []map[string]interface{}{} @@ -93,5 +95,5 @@ func ReadFileFormats(d *schema.ResourceData, meta interface{}) error { } d.SetId(fmt.Sprintf(`%v|%v`, databaseName, schemaName)) - return d.Set("file_formats", fileFormats) + return diag.FromErr(d.Set("file_formats", fileFormats)) } diff --git a/pkg/datasources/functions.go b/pkg/datasources/functions.go index b54213442c..480ed24b6f 100644 --- a/pkg/datasources/functions.go +++ b/pkg/datasources/functions.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -64,7 +66,7 @@ var functionsSchema = map[string]*schema.Schema{ func Functions() *schema.Resource { return &schema.Resource{ - ReadContext: ReadContextFunctions, + ReadContext: TrackingReadWrapper(datasources.Functions, ReadContextFunctions), Schema: functionsSchema, } } diff --git a/pkg/datasources/grants.go b/pkg/datasources/grants.go index 87ea97752f..e4e24e68e7 100644 --- a/pkg/datasources/grants.go +++ b/pkg/datasources/grants.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" @@ -320,7 +322,7 @@ var grantsSchema = map[string]*schema.Schema{ func Grants() *schema.Resource { return &schema.Resource{ - ReadContext: ReadGrants, + ReadContext: TrackingReadWrapper(datasources.Grants, ReadGrants), Schema: grantsSchema, } } diff --git a/pkg/datasources/masking_policies.go b/pkg/datasources/masking_policies.go index 0435c1907b..670e29b760 100644 --- a/pkg/datasources/masking_policies.go +++ b/pkg/datasources/masking_policies.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -117,7 +119,7 @@ var maskingPoliciesSchema = map[string]*schema.Schema{ func MaskingPolicies() *schema.Resource { return &schema.Resource{ - ReadContext: ReadMaskingPolicies, + ReadContext: TrackingReadWrapper(datasources.MaskingPolicies, ReadMaskingPolicies), Schema: maskingPoliciesSchema, Description: "Datasource used to get details of filtered masking policies. Filtering is aligned with the current possibilities for [SHOW MASKING POLICIES](https://docs.snowflake.com/en/sql-reference/sql/show-masking-policies) query. 
The results of SHOW and DESCRIBE are encapsulated in one output collection `masking_policies`.", } diff --git a/pkg/datasources/materialized_views.go b/pkg/datasources/materialized_views.go index ca5f929570..18f3b796e7 100644 --- a/pkg/datasources/materialized_views.go +++ b/pkg/datasources/materialized_views.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -52,17 +55,16 @@ var materializedViewsSchema = map[string]*schema.Schema{ func MaterializedViews() *schema.Resource { return &schema.Resource{ - Read: ReadMaterializedViews, - Schema: materializedViewsSchema, + ReadContext: TrackingReadWrapper(datasources.MaterializedViews, ReadMaterializedViews), + Schema: materializedViewsSchema, } } -func ReadMaterializedViews(d *schema.ResourceData, meta interface{}) error { +func ReadMaterializedViews(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) - ctx := context.Background() schemaId := sdk.NewDatabaseObjectIdentifier(databaseName, schemaName) extractedMaterializedViews, err := client.MaterializedViews.Show(ctx, sdk.NewShowMaterializedViewRequest().WithIn( &sdk.In{Schema: schemaId}, @@ -85,5 +87,5 @@ func ReadMaterializedViews(d *schema.ResourceData, meta interface{}) error { } d.SetId(helpers.EncodeSnowflakeID(databaseName, schemaName)) - return d.Set("materialized_views", materializedViews) + return diag.FromErr(d.Set("materialized_views", materializedViews)) } diff --git a/pkg/datasources/network_policies.go b/pkg/datasources/network_policies.go index c67db55b2f..6aa615d136 100644 --- a/pkg/datasources/network_policies.go +++ 
b/pkg/datasources/network_policies.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -52,7 +54,7 @@ var networkPoliciesSchema = map[string]*schema.Schema{ func NetworkPolicies() *schema.Resource { return &schema.Resource{ - ReadContext: ReadNetworkPolicies, + ReadContext: TrackingReadWrapper(datasources.NetworkPolicies, ReadNetworkPolicies), Schema: networkPoliciesSchema, Description: "Datasource used to get details of filtered network policies. Filtering is aligned with the current possibilities for [SHOW NETWORK POLICIES](https://docs.snowflake.com/en/sql-reference/sql/show-network-policies) query (`like` is supported). The results of SHOW and DESCRIBE are encapsulated in one output collection.", } diff --git a/pkg/datasources/parameters.go b/pkg/datasources/parameters.go index 9ed876e404..8c88eb1222 100644 --- a/pkg/datasources/parameters.go +++ b/pkg/datasources/parameters.go @@ -5,6 +5,9 @@ import ( "fmt" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -79,14 +82,13 @@ var parametersSchema = map[string]*schema.Schema{ func Parameters() *schema.Resource { return &schema.Resource{ - Read: ReadParameters, - Schema: parametersSchema, + ReadContext: TrackingReadWrapper(datasources.Parameters, ReadParameters), + Schema: parametersSchema, } } -func ReadParameters(d *schema.ResourceData, meta interface{}) error { +func ReadParameters(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client 
:= meta.(*provider.Context).Client - ctx := context.Background() p, ok := d.GetOk("pattern") pattern := "" if ok { @@ -107,7 +109,7 @@ func ReadParameters(d *schema.ResourceData, meta interface{}) error { case "SESSION": user := d.Get("user").(string) if user == "" { - return fmt.Errorf("user is required when parameter_type is set to SESSION") + return diag.FromErr(fmt.Errorf("user is required when parameter_type is set to SESSION")) } opts.In.User = sdk.NewAccountObjectIdentifier(user) case "OBJECT": @@ -125,12 +127,12 @@ func ReadParameters(d *schema.ResourceData, meta interface{}) error { case sdk.ObjectTypeTable: opts.In.Table = sdk.NewSchemaObjectIdentifierFromFullyQualifiedName(objectName) default: - return fmt.Errorf("object_type %s is not supported", objectType) + return diag.FromErr(fmt.Errorf("object_type %s is not supported", objectType)) } } parameters, err = client.Parameters.ShowParameters(ctx, &opts) if err != nil { - return fmt.Errorf("error listing parameters: %w", err) + return diag.FromErr(fmt.Errorf("error listing parameters: %w", err)) } d.SetId("parameters") @@ -146,5 +148,5 @@ func ReadParameters(d *schema.ResourceData, meta interface{}) error { params = append(params, paramMap) } - return d.Set("parameters", params) + return diag.FromErr(d.Set("parameters", params)) } diff --git a/pkg/datasources/pipes.go b/pkg/datasources/pipes.go index d157251187..9cca5a20b2 100644 --- a/pkg/datasources/pipes.go +++ b/pkg/datasources/pipes.go @@ -5,6 +5,9 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -57,14 +60,13 @@ var pipesSchema = map[string]*schema.Schema{ func Pipes() *schema.Resource { return &schema.Resource{ - Read: ReadPipes, - Schema: pipesSchema, + ReadContext: 
TrackingReadWrapper(datasources.Pipes, ReadPipes), + Schema: pipesSchema, } } -func ReadPipes(d *schema.ResourceData, meta interface{}) error { +func ReadPipes(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -77,7 +79,7 @@ func ReadPipes(d *schema.ResourceData, meta interface{}) error { if err != nil { log.Printf("[DEBUG] unable to parse pipes in schema (%s)", d.Id()) d.SetId("") - return err + return diag.FromErr(err) } pipes := make([]map[string]any, 0, len(extractedPipes)) @@ -94,5 +96,5 @@ func ReadPipes(d *schema.ResourceData, meta interface{}) error { } d.SetId(fmt.Sprintf(`%v|%v`, databaseName, schemaName)) - return d.Set("pipes", pipes) + return diag.FromErr(d.Set("pipes", pipes)) } diff --git a/pkg/datasources/procedures.go b/pkg/datasources/procedures.go index 28a1ddc563..a20c338a68 100644 --- a/pkg/datasources/procedures.go +++ b/pkg/datasources/procedures.go @@ -6,6 +6,8 @@ import ( "regexp" "strings" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -65,7 +67,7 @@ var proceduresSchema = map[string]*schema.Schema{ func Procedures() *schema.Resource { return &schema.Resource{ - ReadContext: ReadContextProcedures, + ReadContext: TrackingReadWrapper(datasources.Procedures, ReadContextProcedures), Schema: proceduresSchema, } } diff --git a/pkg/datasources/resource_monitors.go b/pkg/datasources/resource_monitors.go index 0af5724842..8a3825034f 100644 --- a/pkg/datasources/resource_monitors.go +++ b/pkg/datasources/resource_monitors.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -39,7 +41,7 @@ var resourceMonitorsSchema = map[string]*schema.Schema{ func ResourceMonitors() *schema.Resource { return &schema.Resource{ - ReadContext: ReadResourceMonitors, + ReadContext: TrackingReadWrapper(datasources.ResourceMonitors, ReadResourceMonitors), Schema: resourceMonitorsSchema, Description: "Datasource used to get details of filtered resource monitors. Filtering is aligned with the current possibilities for [SHOW RESOURCE MONITORS](https://docs.snowflake.com/en/sql-reference/sql/show-resource-monitors) query (`like` is supported). The results of SHOW is encapsulated in show_output collection.", } diff --git a/pkg/datasources/role.go b/pkg/datasources/role.go index 4bf1ddbb1b..4f2ccb5898 100644 --- a/pkg/datasources/role.go +++ b/pkg/datasources/role.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -28,7 +31,7 @@ var roleSchema = map[string]*schema.Schema{ // Role Snowflake Role resource. func Role() *schema.Resource { return &schema.Resource{ - Read: ReadRole, + ReadContext: TrackingReadWrapper(datasources.Role, ReadRole), Schema: roleSchema, DeprecationMessage: "This resource is deprecated and will be removed in a future major version release. Please use snowflake_roles instead.", Importer: &schema.ResourceImporter{ @@ -38,13 +41,12 @@ func Role() *schema.Resource { } // ReadRole Reads the database metadata information. 
-func ReadRole(d *schema.ResourceData, meta interface{}) error { +func ReadRole(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() roleId, err := sdk.ParseAccountObjectIdentifier(d.Get("name").(string)) if err != nil { - return err + return diag.FromErr(err) } role, err := client.Roles.ShowByID(ctx, roleId) @@ -56,10 +58,10 @@ func ReadRole(d *schema.ResourceData, meta interface{}) error { d.SetId(helpers.EncodeResourceIdentifier(role.ID())) if err := d.Set("name", role.Name); err != nil { - return err + return diag.FromErr(err) } if err := d.Set("comment", role.Comment); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/roles.go b/pkg/datasources/roles.go index 6af7cf88cf..7278988f7d 100644 --- a/pkg/datasources/roles.go +++ b/pkg/datasources/roles.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -47,7 +49,7 @@ var rolesSchema = map[string]*schema.Schema{ func Roles() *schema.Resource { return &schema.Resource{ - ReadContext: ReadRoles, + ReadContext: TrackingReadWrapper(datasources.Roles, ReadRoles), Schema: rolesSchema, Description: "Datasource used to get details of filtered roles. Filtering is aligned with the current possibilities for [SHOW ROLES](https://docs.snowflake.com/en/sql-reference/sql/show-roles) query (`like` and `in_class` are all supported). 
The results of SHOW are encapsulated in one output collection.", } diff --git a/pkg/datasources/row_access_policies.go b/pkg/datasources/row_access_policies.go index eb6196b584..22d8607422 100644 --- a/pkg/datasources/row_access_policies.go +++ b/pkg/datasources/row_access_policies.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -113,7 +115,7 @@ var rowAccessPoliciesSchema = map[string]*schema.Schema{ func RowAccessPolicies() *schema.Resource { return &schema.Resource{ - ReadContext: ReadRowAccessPolicies, + ReadContext: TrackingReadWrapper(datasources.RowAccessPolicies, ReadRowAccessPolicies), Schema: rowAccessPoliciesSchema, Description: "Datasource used to get details of filtered row access policies. Filtering is aligned with the current possibilities for [SHOW ROW ACCESS POLICIES](https://docs.snowflake.com/en/sql-reference/sql/show-row-access-policies) query. 
The results of SHOW and DESCRIBE are encapsulated in one output collection `row_access_policies`.", } diff --git a/pkg/datasources/schemas.go b/pkg/datasources/schemas.go index 07c5be6d67..bab420e5e3 100644 --- a/pkg/datasources/schemas.go +++ b/pkg/datasources/schemas.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -127,7 +129,7 @@ var schemasSchema = map[string]*schema.Schema{ func Schemas() *schema.Resource { return &schema.Resource{ - ReadContext: ReadSchemas, + ReadContext: TrackingReadWrapper(datasources.Schemas, ReadSchemas), Schema: schemasSchema, Description: "Datasource used to get details of filtered schemas. Filtering is aligned with the current possibilities for [SHOW SCHEMAS](https://docs.snowflake.com/en/sql-reference/sql/show-schemas) query. 
The results of SHOW, DESCRIBE, and SHOW PARAMETERS IN are encapsulated in one output collection.", } diff --git a/pkg/datasources/secrets.go b/pkg/datasources/secrets.go index 41101c1ae4..c40ec7aa6e 100644 --- a/pkg/datasources/secrets.go +++ b/pkg/datasources/secrets.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -96,7 +98,7 @@ var secretsSchema = map[string]*schema.Schema{ func Secrets() *schema.Resource { return &schema.Resource{ - ReadContext: ReadSecrets, + ReadContext: TrackingReadWrapper(datasources.Secrets, ReadSecrets), Schema: secretsSchema, Description: "Datasource used to get details of filtered secrets. Filtering is aligned with the current possibilities for [SHOW SECRETS](https://docs.snowflake.com/en/sql-reference/sql/show-secrets) query. 
The results of SHOW and DESCRIBE are encapsulated in one output collection `secrets`.", } diff --git a/pkg/datasources/security_integrations.go b/pkg/datasources/security_integrations.go index c020cc78c2..6418f6e4fc 100644 --- a/pkg/datasources/security_integrations.go +++ b/pkg/datasources/security_integrations.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -52,7 +54,7 @@ var securityIntegrationsSchema = map[string]*schema.Schema{ func SecurityIntegrations() *schema.Resource { return &schema.Resource{ - ReadContext: ReadSecurityIntegrations, + ReadContext: TrackingReadWrapper(datasources.SecurityIntegrations, ReadSecurityIntegrations), Schema: securityIntegrationsSchema, Description: "Datasource used to get details of filtered security integrations. Filtering is aligned with the current possibilities for [SHOW SECURITY INTEGRATIONS](https://docs.snowflake.com/en/sql-reference/sql/show-integrations) query (only `like` is supported). 
The results of SHOW and DESCRIBE are encapsulated in one output collection `security_integrations`.", } diff --git a/pkg/datasources/sequences.go b/pkg/datasources/sequences.go index d59078266d..e9c6b7b53e 100644 --- a/pkg/datasources/sequences.go +++ b/pkg/datasources/sequences.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -51,14 +54,13 @@ var sequencesSchema = map[string]*schema.Schema{ func Sequences() *schema.Resource { return &schema.Resource{ - Read: ReadSequences, - Schema: sequencesSchema, + ReadContext: TrackingReadWrapper(datasources.Sequences, ReadSequences), + Schema: sequencesSchema, } } -func ReadSequences(d *schema.ResourceData, meta interface{}) error { +func ReadSequences(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -67,7 +69,7 @@ func ReadSequences(d *schema.ResourceData, meta interface{}) error { }) seqs, err := client.Sequences.Show(ctx, req) if err != nil { - return err + return diag.FromErr(err) } sequences := []map[string]interface{}{} for _, seq := range seqs { @@ -81,5 +83,5 @@ func ReadSequences(d *schema.ResourceData, meta interface{}) error { } d.SetId(fmt.Sprintf(`%v|%v`, databaseName, schemaName)) - return d.Set("sequences", sequences) + return diag.FromErr(d.Set("sequences", sequences)) } diff --git a/pkg/datasources/shares.go b/pkg/datasources/shares.go index afe4902450..721debb9ed 100644 --- a/pkg/datasources/shares.go +++ b/pkg/datasources/shares.go @@ -3,6 +3,9 @@ package datasources import ( "context" + 
"github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -55,18 +58,17 @@ var sharesSchema = map[string]*schema.Schema{ // Shares Snowflake Shares resource. func Shares() *schema.Resource { return &schema.Resource{ - Read: ReadShares, - Schema: sharesSchema, + ReadContext: TrackingReadWrapper(datasources.Shares, ReadShares), + Schema: sharesSchema, } } // ReadShares Reads the database metadata information. -func ReadShares(d *schema.ResourceData, meta interface{}) error { +func ReadShares(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client d.SetId("shares_read") pattern := d.Get("pattern").(string) - ctx := context.Background() var opts sdk.ShowShareOptions if pattern != "" { opts.Like = &sdk.Like{ @@ -75,7 +77,7 @@ func ReadShares(d *schema.ResourceData, meta interface{}) error { } shares, err := client.Shares.Show(ctx, &opts) if err != nil { - return err + return diag.FromErr(err) } sharesFlatten := []map[string]interface{}{} for _, share := range shares { @@ -93,7 +95,7 @@ func ReadShares(d *schema.ResourceData, meta interface{}) error { } if err := d.Set("shares", sharesFlatten); err != nil { - return err + return diag.FromErr(err) } return nil } diff --git a/pkg/datasources/stages.go b/pkg/datasources/stages.go index 65d665fbf6..695e785776 100644 --- a/pkg/datasources/stages.go +++ b/pkg/datasources/stages.go @@ -4,6 +4,8 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -58,7 +60,7 @@ var stagesSchema = map[string]*schema.Schema{ func Stages() *schema.Resource { 
return &schema.Resource{ - ReadContext: ReadStages, + ReadContext: TrackingReadWrapper(datasources.Stages, ReadStages), Schema: stagesSchema, } } diff --git a/pkg/datasources/storage_integrations.go b/pkg/datasources/storage_integrations.go index 5a523e065b..2e17b29701 100644 --- a/pkg/datasources/storage_integrations.go +++ b/pkg/datasources/storage_integrations.go @@ -4,6 +4,9 @@ import ( "context" "fmt" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" @@ -42,25 +45,24 @@ var storageIntegrationsSchema = map[string]*schema.Schema{ func StorageIntegrations() *schema.Resource { return &schema.Resource{ - Read: ReadStorageIntegrations, - Schema: storageIntegrationsSchema, + ReadContext: TrackingReadWrapper(datasources.StorageIntegrations, ReadStorageIntegrations), + Schema: storageIntegrationsSchema, } } -func ReadStorageIntegrations(d *schema.ResourceData, meta interface{}) error { +func ReadStorageIntegrations(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() account, err := client.ContextFunctions.CurrentAccount(ctx) if err != nil { d.SetId("") - return fmt.Errorf("[DEBUG] unable to retrieve current account") + return diag.FromErr(fmt.Errorf("[DEBUG] unable to retrieve current account")) } region, err := client.ContextFunctions.CurrentRegion(ctx) if err != nil { d.SetId("") - return fmt.Errorf("[DEBUG] unable to retrieve current region") + return diag.FromErr(fmt.Errorf("[DEBUG] unable to retrieve current region")) } d.SetId(fmt.Sprintf("%s.%s", account, region)) @@ -68,7 +70,7 @@ func ReadStorageIntegrations(d *schema.ResourceData, meta interface{}) error { storageIntegrations, err := client.StorageIntegrations.Show(ctx, 
sdk.NewShowStorageIntegrationRequest()) if err != nil { d.SetId("") - return fmt.Errorf("unable to retrieve storage integrations in account (%s), err = %w", d.Id(), err) + return diag.FromErr(fmt.Errorf("unable to retrieve storage integrations in account (%s), err = %w", d.Id(), err)) } storageIntegrationMaps := make([]map[string]any, len(storageIntegrations)) @@ -82,5 +84,5 @@ func ReadStorageIntegrations(d *schema.ResourceData, meta interface{}) error { } } - return d.Set("storage_integrations", storageIntegrationMaps) + return diag.FromErr(d.Set("storage_integrations", storageIntegrationMaps)) } diff --git a/pkg/datasources/streamlits.go b/pkg/datasources/streamlits.go index 4fed061263..889a23548f 100644 --- a/pkg/datasources/streamlits.go +++ b/pkg/datasources/streamlits.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -100,7 +102,7 @@ var streamlitsSchema = map[string]*schema.Schema{ func Streamlits() *schema.Resource { return &schema.Resource{ - ReadContext: ReadStreamlits, + ReadContext: TrackingReadWrapper(datasources.Streamlits, ReadStreamlits), Schema: streamlitsSchema, Description: "Datasource used to get details of filtered streamlits. Filtering is aligned with the current possibilities for [SHOW STREAMLITS](https://docs.snowflake.com/en/sql-reference/sql/show-streamlits) query (only `like` is supported). 
The results of SHOW and DESCRIBE are encapsulated in one output collection `streamlits`.", } diff --git a/pkg/datasources/streams.go b/pkg/datasources/streams.go index d992f2d669..4323fb19d2 100644 --- a/pkg/datasources/streams.go +++ b/pkg/datasources/streams.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -52,7 +54,7 @@ var streamsSchema = map[string]*schema.Schema{ func Streams() *schema.Resource { return &schema.Resource{ - ReadContext: ReadStreams, + ReadContext: TrackingReadWrapper(datasources.Streams, ReadStreams), Schema: streamsSchema, Description: "Datasource used to get details of filtered streams. Filtering is aligned with the current possibilities for [SHOW STREAMS](https://docs.snowflake.com/en/sql-reference/sql/show-streams) query. 
The results of SHOW and DESCRIBE are encapsulated in one output collection `streams`.", } diff --git a/pkg/datasources/system_generate_scim_access_token.go b/pkg/datasources/system_generate_scim_access_token.go index 5630b58cae..f40eb00306 100644 --- a/pkg/datasources/system_generate_scim_access_token.go +++ b/pkg/datasources/system_generate_scim_access_token.go @@ -1,10 +1,14 @@ package datasources import ( + "context" "database/sql" "errors" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" @@ -28,13 +32,13 @@ var systemGenerateSCIMAccesstokenSchema = map[string]*schema.Schema{ func SystemGenerateSCIMAccessToken() *schema.Resource { return &schema.Resource{ - Read: ReadSystemGenerateSCIMAccessToken, - Schema: systemGenerateSCIMAccesstokenSchema, + ReadContext: TrackingReadWrapper(datasources.SystemGenerateScimAccessToken, ReadSystemGenerateSCIMAccessToken), + Schema: systemGenerateSCIMAccesstokenSchema, } } // ReadSystemGetAWSSNSIAMPolicy implements schema.ReadFunc. 
-func ReadSystemGenerateSCIMAccessToken(d *schema.ResourceData, meta interface{}) error { +func ReadSystemGenerateSCIMAccessToken(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB @@ -57,5 +61,5 @@ func ReadSystemGenerateSCIMAccessToken(d *schema.ResourceData, meta interface{}) } d.SetId(integrationName) - return d.Set("access_token", accessToken.Token) + return diag.FromErr(d.Set("access_token", accessToken.Token)) } diff --git a/pkg/datasources/system_get_aws_sns_iam_policy.go b/pkg/datasources/system_get_aws_sns_iam_policy.go index 2f27b43736..a772d370ad 100644 --- a/pkg/datasources/system_get_aws_sns_iam_policy.go +++ b/pkg/datasources/system_get_aws_sns_iam_policy.go @@ -1,10 +1,14 @@ package datasources import ( + "context" "database/sql" "errors" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -26,13 +30,13 @@ var systemGetAWSSNSIAMPolicySchema = map[string]*schema.Schema{ func SystemGetAWSSNSIAMPolicy() *schema.Resource { return &schema.Resource{ - Read: ReadSystemGetAWSSNSIAMPolicy, - Schema: systemGetAWSSNSIAMPolicySchema, + ReadContext: TrackingReadWrapper(datasources.SystemGetAwsSnsIamPolicy, ReadSystemGetAWSSNSIAMPolicy), + Schema: systemGetAWSSNSIAMPolicySchema, } } // ReadSystemGetAWSSNSIAMPolicy implements schema.ReadFunc. 
-func ReadSystemGetAWSSNSIAMPolicy(d *schema.ResourceData, meta interface{}) error { +func ReadSystemGetAWSSNSIAMPolicy(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB awsSNSTopicArn := d.Get("aws_sns_topic_arn").(string) @@ -47,9 +51,9 @@ func ReadSystemGetAWSSNSIAMPolicy(d *schema.ResourceData, meta interface{}) erro return nil } if err != nil { - return err + return diag.FromErr(err) } d.SetId(awsSNSTopicArn) - return d.Set("aws_sns_topic_policy_json", policy.Policy) + return diag.FromErr(d.Set("aws_sns_topic_policy_json", policy.Policy)) } diff --git a/pkg/datasources/system_get_privatelink_config.go b/pkg/datasources/system_get_privatelink_config.go index 947519f433..ae13316ad7 100644 --- a/pkg/datasources/system_get_privatelink_config.go +++ b/pkg/datasources/system_get_privatelink_config.go @@ -1,10 +1,14 @@ package datasources import ( + "context" "database/sql" "errors" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -69,13 +73,13 @@ var systemGetPrivateLinkConfigSchema = map[string]*schema.Schema{ func SystemGetPrivateLinkConfig() *schema.Resource { return &schema.Resource{ - Read: ReadSystemGetPrivateLinkConfig, - Schema: systemGetPrivateLinkConfigSchema, + ReadContext: TrackingReadWrapper(datasources.SystemGetPrivateLinkConfig, ReadSystemGetPrivateLinkConfig), + Schema: systemGetPrivateLinkConfigSchema, } } // ReadSystemGetPrivateLinkConfig implements schema.ReadFunc. 
-func ReadSystemGetPrivateLinkConfig(d *schema.ResourceData, meta interface{}) error { +func ReadSystemGetPrivateLinkConfig(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB @@ -100,56 +104,56 @@ func ReadSystemGetPrivateLinkConfig(d *schema.ResourceData, meta interface{}) er d.SetId(config.AccountName) accNameErr := d.Set("account_name", config.AccountName) if accNameErr != nil { - return accNameErr + return diag.FromErr(accNameErr) } accURLErr := d.Set("account_url", config.AccountURL) if accURLErr != nil { - return accURLErr + return diag.FromErr(accURLErr) } ocspURLErr := d.Set("ocsp_url", config.OCSPURL) if ocspURLErr != nil { - return ocspURLErr + return diag.FromErr(ocspURLErr) } if config.AwsVpceID != "" { awsVpceIDErr := d.Set("aws_vpce_id", config.AwsVpceID) if awsVpceIDErr != nil { - return awsVpceIDErr + return diag.FromErr(awsVpceIDErr) } } if config.AzurePrivateLinkServiceID != "" { azurePlsIDErr := d.Set("azure_pls_id", config.AzurePrivateLinkServiceID) if azurePlsIDErr != nil { - return azurePlsIDErr + return diag.FromErr(azurePlsIDErr) } } if config.InternalStage != "" { intStgErr := d.Set("internal_stage", config.InternalStage) if intStgErr != nil { - return intStgErr + return diag.FromErr(intStgErr) } } if config.SnowsightURL != "" { snowSigURLErr := d.Set("snowsight_url", config.SnowsightURL) if snowSigURLErr != nil { - return snowSigURLErr + return diag.FromErr(snowSigURLErr) } } if config.RegionlessSnowsightURL != "" { reglssSnowURLErr := d.Set("regionless_snowsight_url", config.RegionlessSnowsightURL) if reglssSnowURLErr != nil { - return reglssSnowURLErr + return diag.FromErr(reglssSnowURLErr) } } if config.RegionlessAccountURL != "" { reglssAccURLErr := d.Set("regionless_account_url", config.RegionlessAccountURL) if reglssAccURLErr != nil { - return reglssAccURLErr + return diag.FromErr(reglssAccURLErr) } } diff --git 
a/pkg/datasources/system_get_snowflake_platform_info.go b/pkg/datasources/system_get_snowflake_platform_info.go index 793e37f76e..e9f54ac4ce 100644 --- a/pkg/datasources/system_get_snowflake_platform_info.go +++ b/pkg/datasources/system_get_snowflake_platform_info.go @@ -7,6 +7,9 @@ import ( "fmt" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/snowflake" @@ -30,13 +33,13 @@ var systemGetSnowflakePlatformInfoSchema = map[string]*schema.Schema{ func SystemGetSnowflakePlatformInfo() *schema.Resource { return &schema.Resource{ - Read: ReadSystemGetSnowflakePlatformInfo, - Schema: systemGetSnowflakePlatformInfoSchema, + ReadContext: TrackingReadWrapper(datasources.SystemGetSnowflakePlatformInfo, ReadSystemGetSnowflakePlatformInfo), + Schema: systemGetSnowflakePlatformInfoSchema, } } // ReadSystemGetSnowflakePlatformInfo implements schema.ReadFunc. 
-func ReadSystemGetSnowflakePlatformInfo(d *schema.ResourceData, meta interface{}) error { +func ReadSystemGetSnowflakePlatformInfo(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client db := client.GetConn().DB @@ -48,7 +51,7 @@ func ReadSystemGetSnowflakePlatformInfo(d *schema.ResourceData, meta interface{} // If not found, mark resource to be removed from state file during apply or refresh d.SetId("") log.Println("[DEBUG] current_account failed to decode") - return fmt.Errorf("error current_account err = %w", err) + return diag.FromErr(fmt.Errorf("error current_account err = %w", err)) } d.SetId(fmt.Sprintf("%s.%s", acc.Account, acc.Region)) @@ -57,22 +60,22 @@ func ReadSystemGetSnowflakePlatformInfo(d *schema.ResourceData, meta interface{} if errors.Is(err, sql.ErrNoRows) { // If not found, mark resource to be removed from state file during apply or refresh log.Println("[DEBUG] system_get_snowflake_platform_info not found") - return fmt.Errorf("error system_get_snowflake_platform_info err = %w", err) + return diag.FromErr(fmt.Errorf("error system_get_snowflake_platform_info err = %w", err)) } info, err := rawInfo.GetStructuredConfig() if err != nil { log.Println("[DEBUG] system_get_snowflake_platform_info failed to decode") d.SetId("") - return fmt.Errorf("error system_get_snowflake_platform_info err = %w", err) + return diag.FromErr(fmt.Errorf("error system_get_snowflake_platform_info err = %w", err)) } if err := d.Set("azure_vnet_subnet_ids", info.AzureVnetSubnetIds); err != nil { - return fmt.Errorf("error system_get_snowflake_platform_info err = %w", err) + return diag.FromErr(fmt.Errorf("error system_get_snowflake_platform_info err = %w", err)) } if err := d.Set("aws_vpc_ids", info.AwsVpcIds); err != nil { - return fmt.Errorf("error system_get_snowflake_platform_info err = %w", err) + return diag.FromErr(fmt.Errorf("error system_get_snowflake_platform_info err = %w", err)) } return nil diff 
--git a/pkg/datasources/tables.go b/pkg/datasources/tables.go index 09944aeeda..fac59da574 100644 --- a/pkg/datasources/tables.go +++ b/pkg/datasources/tables.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/helpers" @@ -52,14 +55,13 @@ var tablesSchema = map[string]*schema.Schema{ func Tables() *schema.Resource { return &schema.Resource{ - Read: ReadTables, - Schema: tablesSchema, + ReadContext: TrackingReadWrapper(datasources.Tables, ReadTables), + Schema: tablesSchema, } } -func ReadTables(d *schema.ResourceData, meta interface{}) error { +func ReadTables(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { client := meta.(*provider.Context).Client - ctx := context.Background() databaseName := d.Get("database").(string) schemaName := d.Get("schema").(string) @@ -91,5 +93,5 @@ func ReadTables(d *schema.ResourceData, meta interface{}) error { } d.SetId(helpers.EncodeSnowflakeID(databaseName, schemaName)) - return d.Set("tables", tables) + return diag.FromErr(d.Set("tables", tables)) } diff --git a/pkg/datasources/tags.go b/pkg/datasources/tags.go index 05b551fc0b..6fee2d84d6 100644 --- a/pkg/datasources/tags.go +++ b/pkg/datasources/tags.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -36,7 +38,7 @@ var tagsSchema = map[string]*schema.Schema{ func Tags() *schema.Resource { return &schema.Resource{ - ReadContext: ReadTags, + ReadContext: 
TrackingReadWrapper(datasources.Tags, ReadTags), Schema: tagsSchema, Description: "Datasource used to get details of filtered tags. Filtering is aligned with the current possibilities for [SHOW TAGS](https://docs.snowflake.com/en/sql-reference/sql/show-tags) query. The results of SHOW are encapsulated in one output collection `tags`.", } diff --git a/pkg/datasources/tasks.go b/pkg/datasources/tasks.go index a43cad365d..356556dbf7 100644 --- a/pkg/datasources/tasks.go +++ b/pkg/datasources/tasks.go @@ -3,11 +3,12 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -57,7 +58,7 @@ var tasksSchema = map[string]*schema.Schema{ func Tasks() *schema.Resource { return &schema.Resource{ - ReadContext: ReadTasks, + ReadContext: TrackingReadWrapper(datasources.Tasks, ReadTasks), Schema: tasksSchema, Description: "Data source used to get details of filtered tasks. Filtering is aligned with the current possibilities for [SHOW TASKS](https://docs.snowflake.com/en/sql-reference/sql/show-tasks) query. 
The results of SHOW and SHOW PARAMETERS IN are encapsulated in one output collection `tasks`.", } diff --git a/pkg/datasources/usage_tracking_acceptance_test.go b/pkg/datasources/usage_tracking_acceptance_test.go new file mode 100644 index 0000000000..a2146f7bed --- /dev/null +++ b/pkg/datasources/usage_tracking_acceptance_test.go @@ -0,0 +1,84 @@ +package datasources_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/sdk" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/testenvs" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/collections" + "github.com/hashicorp/terraform-plugin-testing/terraform" + + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/helpers" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/tracking" + + acc "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/assert/resourceassert" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/acceptance/bettertestspoc/config/model" + "github.com/hashicorp/terraform-plugin-testing/tfversion" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" +) + +func TestAcc_CompleteUsageTracking(t *testing.T) { + _ = testenvs.GetOrSkipTest(t, testenvs.EnableAcceptance) + acc.TestAccPreCheck(t) + + id := acc.TestClient().Ids.RandomDatabaseObjectIdentifier() + schemaModel := model.Schema("test", id.DatabaseName(), id.Name()) + + assertQueryMetadataExists := func(t *testing.T, query string) resource.TestCheckFunc { + t.Helper() + return func(state *terraform.State) 
error { + queryHistory := acc.TestClient().InformationSchema.GetQueryHistory(t, 100) + expectedMetadata := tracking.NewVersionedDatasourceMetadata(datasources.Schemas) + if _, err := collections.FindFirst(queryHistory, func(history helpers.QueryHistory) bool { + metadata, err := tracking.ParseMetadata(history.QueryText) + return err == nil && + expectedMetadata == metadata && + strings.Contains(history.QueryText, query) + }); err != nil { + return fmt.Errorf("query history does not contain query metadata: %v with query containing: %s", expectedMetadata, query) + } + return nil + } + } + + resource.Test(t, resource.TestCase{ + ProtoV6ProviderFactories: acc.TestAccProtoV6ProviderFactories, + TerraformVersionChecks: []tfversion.TerraformVersionCheck{ + tfversion.RequireAbove(tfversion.Version1_5_0), + }, + PreCheck: func() { acc.TestAccPreCheck(t) }, + Steps: []resource.TestStep{ + { + Config: config.FromModel(t, schemaModel) + schemaDatasourceConfigWithDependency(schemaModel.ResourceReference(), id), + Check: assert.AssertThat(t, + resourceassert.SchemaResource(t, schemaModel.ResourceReference()). 
+ HasNameString(id.Name()), + assert.Check(assertQueryMetadataExists(t, fmt.Sprintf(`SHOW SCHEMAS LIKE '%s' IN DATABASE "%s"`, id.Name(), id.DatabaseName()))), + // SHOW PARAMETERS IN SCHEMA "acc_test_db_AT_1AB7E1DE_1A10_89C3_C13C_899754A250B6"."FPGDHEAT_1AB7E1DE_1A10_89C3_C13C_899754A250B6" --terraform_provider_usage_tracking {"json_schema_version":"1","version":"v0.99.0","datasource":"snowflake_schemas","operation":"read"} + assert.Check(assertQueryMetadataExists(t, fmt.Sprintf(`SHOW PARAMETERS IN SCHEMA %s`, id.FullyQualifiedName()))), + assert.Check(assertQueryMetadataExists(t, fmt.Sprintf(`DESCRIBE SCHEMA %s`, id.FullyQualifiedName()))), + ), + }, + }, + }) +} + +func schemaDatasourceConfigWithDependency(schemaResourceReference string, id sdk.DatabaseObjectIdentifier) string { + return fmt.Sprintf(` +data "snowflake_schemas" "test" { + depends_on = [ %[1]s ] + in { + database = "%[2]s" + } + like = "%[3]s" +} +`, schemaResourceReference, id.DatabaseName(), id.Name()) +} diff --git a/pkg/datasources/users.go b/pkg/datasources/users.go index a2e576a2ac..5afe984886 100644 --- a/pkg/datasources/users.go +++ b/pkg/datasources/users.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -91,7 +93,7 @@ var usersSchema = map[string]*schema.Schema{ func Users() *schema.Resource { return &schema.Resource{ - ReadContext: ReadUsers, + ReadContext: TrackingReadWrapper(datasources.Users, ReadUsers), Schema: usersSchema, Description: "Datasource used to get details of filtered users. Filtering is aligned with the current possibilities for [SHOW USERS](https://docs.snowflake.com/en/sql-reference/sql/show-users) query. 
The results of SHOW, DESCRIBE, and SHOW PARAMETERS IN are encapsulated in one output collection. Important note is that when querying users you don't have permissions to, the querying options are limited. You won't get almost any field in `show_output` (only empty or default values), the DESCRIBE command cannot be called, so you have to set `with_describe = false`. Only `parameters` output is not affected by the lack of privileges.", } diff --git a/pkg/datasources/views.go b/pkg/datasources/views.go index fa99c1becf..12a1bcdf14 100644 --- a/pkg/datasources/views.go +++ b/pkg/datasources/views.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -106,7 +108,7 @@ var viewsSchema = map[string]*schema.Schema{ func Views() *schema.Resource { return &schema.Resource{ - ReadContext: ReadViews, + ReadContext: TrackingReadWrapper(datasources.Views, ReadViews), Schema: viewsSchema, Description: "Datasource used to get details of filtered views. Filtering is aligned with the current possibilities for [SHOW VIEWS](https://docs.snowflake.com/en/sql-reference/sql/show-views) query (only `like` is supported). 
The results of SHOW and DESCRIBE are encapsulated in one output collection `views`.", } diff --git a/pkg/datasources/warehouses.go b/pkg/datasources/warehouses.go index e52c09e9aa..9c42fb5e07 100644 --- a/pkg/datasources/warehouses.go +++ b/pkg/datasources/warehouses.go @@ -3,6 +3,8 @@ package datasources import ( "context" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/internal/provider" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/resources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/schemas" @@ -66,7 +68,7 @@ var warehousesSchema = map[string]*schema.Schema{ func Warehouses() *schema.Resource { return &schema.Resource{ - ReadContext: ReadWarehouses, + ReadContext: TrackingReadWrapper(datasources.Warehouses, ReadWarehouses), Schema: warehousesSchema, Description: "Datasource used to get details of filtered warehouses. Filtering is aligned with the current possibilities for [SHOW WAREHOUSES](https://docs.snowflake.com/en/sql-reference/sql/show-warehouses) query (only `like` is supported). 
The results of SHOW, DESCRIBE, and SHOW PARAMETERS IN are encapsulated in one output collection.", } diff --git a/pkg/internal/tracking/context.go b/pkg/internal/tracking/context.go index 9519bf1bb4..8fdbf8c03f 100644 --- a/pkg/internal/tracking/context.go +++ b/pkg/internal/tracking/context.go @@ -4,12 +4,14 @@ import ( "context" "errors" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" ) const ( - ProviderVersion string = "v0.99.0" // TODO(SNOW-1814934): Currently hardcoded, make it computed - MetadataPrefix string = "terraform_provider_usage_tracking" + CurrentSchemaVersion string = "1" + ProviderVersion string = "v0.99.0" // TODO(SNOW-1814934): Currently hardcoded, make it computed + MetadataPrefix string = "terraform_provider_usage_tracking" ) type key struct{} @@ -28,18 +30,23 @@ const ( ) type Metadata struct { - Version string `json:"version,omitempty"` - Resource string `json:"resource,omitempty"` - Operation Operation `json:"operation,omitempty"` + SchemaVersion string `json:"json_schema_version,omitempty"` + Version string `json:"version,omitempty"` + Resource string `json:"resource,omitempty"` + Datasource string `json:"datasource,omitempty"` + Operation Operation `json:"operation,omitempty"` } func (m Metadata) validate() error { errs := make([]error, 0) + if m.SchemaVersion == "" { + errs = append(errs, errors.New("schema version for metadata should not be empty")) + } if m.Version == "" { - errs = append(errs, errors.New("version for metadata should not be empty")) + errs = append(errs, errors.New("provider version for metadata should not be empty")) } - if m.Resource == "" { - errs = append(errs, errors.New("resource name for metadata should not be empty")) + if m.Resource == "" && m.Datasource == "" { + errs = append(errs, errors.New("either resource or data source name for metadata should be specified")) } if m.Operation == "" { errs 
= append(errs, errors.New("operation for metadata should not be empty")) @@ -47,19 +54,31 @@ func (m Metadata) validate() error { return errors.Join(errs...) } -func NewMetadata(version string, resource resources.Resource, operation Operation) Metadata { +// newTestMetadata is a helper constructor that is used only for testing purposes +func newTestMetadata(version string, resource resources.Resource, operation Operation) Metadata { + return Metadata{ + SchemaVersion: CurrentSchemaVersion, + Version: version, + Resource: resource.String(), + Operation: operation, + } +} + +func NewVersionedResourceMetadata(resource resources.Resource, operation Operation) Metadata { return Metadata{ - Version: version, - Resource: resource.String(), - Operation: operation, + SchemaVersion: CurrentSchemaVersion, + Version: ProviderVersion, + Resource: resource.String(), + Operation: operation, } } -func NewVersionedMetadata(resource resources.Resource, operation Operation) Metadata { +func NewVersionedDatasourceMetadata(datasource datasources.Datasource) Metadata { return Metadata{ - Version: ProviderVersion, - Resource: resource.String(), - Operation: operation, + SchemaVersion: CurrentSchemaVersion, + Version: ProviderVersion, + Datasource: datasource.String(), + Operation: ReadOperation, } } diff --git a/pkg/internal/tracking/context_test.go b/pkg/internal/tracking/context_test.go index 96e38f75a3..18bb38d588 100644 --- a/pkg/internal/tracking/context_test.go +++ b/pkg/internal/tracking/context_test.go @@ -4,13 +4,15 @@ import ( "context" "testing" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/datasources" + "github.com/Snowflake-Labs/terraform-provider-snowflake/pkg/provider/resources" "github.com/stretchr/testify/require" ) func Test_Context(t *testing.T) { - metadata := NewMetadata("123", resources.Account, CreateOperation) - newMetadata := NewMetadata("321", resources.Database, UpdateOperation) + metadata := newTestMetadata("123", resources.Account, 
CreateOperation) + newMetadata := NewVersionedDatasourceMetadata(datasources.Databases) ctx := context.Background() // no metadata in context diff --git a/pkg/internal/tracking/query_test.go b/pkg/internal/tracking/query_test.go index 0261d77684..83696b928b 100644 --- a/pkg/internal/tracking/query_test.go +++ b/pkg/internal/tracking/query_test.go @@ -47,7 +47,7 @@ func TestTrimMetadata(t *testing.T) { } func TestAppendMetadata(t *testing.T) { - metadata := NewMetadata("123", resources.Account, CreateOperation) + metadata := newTestMetadata("123", resources.Account, CreateOperation) sql := "SELECT 1" bytes, err := json.Marshal(metadata) @@ -61,7 +61,7 @@ func TestAppendMetadata(t *testing.T) { } func TestParseMetadata(t *testing.T) { - metadata := NewMetadata("123", resources.Account, CreateOperation) + metadata := newTestMetadata("123", resources.Account, CreateOperation) bytes, err := json.Marshal(metadata) require.NoError(t, err) sql := fmt.Sprintf("SELECT 1 --%s %s", MetadataPrefix, string(bytes)) @@ -75,8 +75,9 @@ func TestParseInvalidMetadataKeys(t *testing.T) { sql := fmt.Sprintf(`SELECT 1 --%s {"key": "value"}`, MetadataPrefix) parsedMetadata, err := ParseMetadata(sql) - require.ErrorContains(t, err, "version for metadata should not be empty") - require.ErrorContains(t, err, "resource name for metadata should not be empty") + require.ErrorContains(t, err, "schema version for metadata should not be empty") + require.ErrorContains(t, err, "provider version for metadata should not be empty") + require.ErrorContains(t, err, "either resource or data source name for metadata should be specified") require.ErrorContains(t, err, "operation for metadata should not be empty") require.Equal(t, Metadata{}, parsedMetadata) } @@ -90,7 +91,7 @@ func TestParseInvalidMetadataJson(t *testing.T) { } func TestParseMetadataFromInvalidSqlCommentPrefix(t *testing.T) { - metadata := NewMetadata("123", resources.Account, CreateOperation) + metadata := newTestMetadata("123", 
resources.Account, CreateOperation) sql := "SELECT 1" bytes, err := json.Marshal(metadata) diff --git a/pkg/provider/datasources/datasources.go b/pkg/provider/datasources/datasources.go new file mode 100644 index 0000000000..56ea68b1c3 --- /dev/null +++ b/pkg/provider/datasources/datasources.go @@ -0,0 +1,63 @@ +package datasources + +type datasource string + +const ( + Accounts datasource = "snowflake_accounts" + Alerts datasource = "snowflake_alerts" + Connections datasource = "snowflake_connections" + CortexSearchServices datasource = "snowflake_cortex_search_services" + CurrentAccount datasource = "snowflake_current_account" + CurrentRole datasource = "snowflake_current_role" + Database datasource = "snowflake_database" + DatabaseRole datasource = "snowflake_database_role" + DatabaseRoles datasource = "snowflake_database_roles" + Databases datasource = "snowflake_databases" + DynamicTables datasource = "snowflake_dynamic_tables" + ExternalFunctions datasource = "snowflake_external_functions" + ExternalTables datasource = "snowflake_external_tables" + FailoverGroups datasource = "snowflake_failover_groups" + FileFormats datasource = "snowflake_file_formats" + Functions datasource = "snowflake_functions" + Grants datasource = "snowflake_grants" + MaskingPolicies datasource = "snowflake_masking_policies" + MaterializedViews datasource = "snowflake_materialized_views" + NetworkPolicies datasource = "snowflake_network_policies" + Parameters datasource = "snowflake_parameters" + Pipes datasource = "snowflake_pipes" + Procedures datasource = "snowflake_procedures" + ResourceMonitors datasource = "snowflake_resource_monitors" + Role datasource = "snowflake_role" + Roles datasource = "snowflake_roles" + RowAccessPolicies datasource = "snowflake_row_access_policies" + Schemas datasource = "snowflake_schemas" + Secrets datasource = "snowflake_secrets" + SecurityIntegrations datasource = "snowflake_security_integrations" + Sequences datasource = "snowflake_sequences" + 
Shares datasource = "snowflake_shares" + Stages datasource = "snowflake_stages" + StorageIntegrations datasource = "snowflake_storage_integrations" + Streams datasource = "snowflake_streams" + Streamlits datasource = "snowflake_streamlits" + SystemGenerateScimAccessToken datasource = "snowflake_system_generate_scim_access_token" + SystemGetAwsSnsIamPolicy datasource = "snowflake_system_get_aws_sns_iam_policy" + SystemGetPrivateLinkConfig datasource = "snowflake_system_get_privatelink_config" + SystemGetSnowflakePlatformInfo datasource = "snowflake_system_get_snowflake_platform_info" + Tables datasource = "snowflake_tables" + Tags datasource = "snowflake_tags" + Tasks datasource = "snowflake_tasks" + Users datasource = "snowflake_users" + Views datasource = "snowflake_views" + Warehouses datasource = "snowflake_warehouses" +) + +type Datasource interface { + xxxProtected() + String() string +} + +func (r datasource) xxxProtected() {} + +func (r datasource) String() string { + return string(r) +} diff --git a/pkg/resources/common.go b/pkg/resources/common.go index 36a1da648a..643524f9d9 100644 --- a/pkg/resources/common.go +++ b/pkg/resources/common.go @@ -108,42 +108,42 @@ func ImportName[T sdk.AccountObjectIdentifier | sdk.DatabaseObjectIdentifier | s func TrackingImportWrapper(resourceName resources.Resource, importImplementation schema.StateContextFunc) schema.StateContextFunc { return func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.ImportOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.ImportOperation)) return importImplementation(ctx, d, meta) } } func TrackingCreateWrapper(resourceName resources.Resource, createImplementation schema.CreateContextFunc) schema.CreateContextFunc { return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - ctx = 
tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.CreateOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.CreateOperation)) return createImplementation(ctx, d, meta) } } func TrackingReadWrapper(resourceName resources.Resource, readImplementation schema.ReadContextFunc) schema.ReadContextFunc { return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.ReadOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.ReadOperation)) return readImplementation(ctx, d, meta) } } func TrackingUpdateWrapper(resourceName resources.Resource, updateImplementation schema.UpdateContextFunc) schema.UpdateContextFunc { return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.UpdateOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.UpdateOperation)) return updateImplementation(ctx, d, meta) } } func TrackingDeleteWrapper(resourceName resources.Resource, deleteImplementation schema.DeleteContextFunc) schema.DeleteContextFunc { return func(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { - ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, tracking.DeleteOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.DeleteOperation)) return deleteImplementation(ctx, d, meta) } } func TrackingCustomDiffWrapper(resourceName resources.Resource, customdiffImplementation schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { return func(ctx context.Context, diff *schema.ResourceDiff, meta any) error { - ctx = tracking.NewContext(ctx, tracking.NewVersionedMetadata(resourceName, 
tracking.CustomDiffOperation)) + ctx = tracking.NewContext(ctx, tracking.NewVersionedResourceMetadata(resourceName, tracking.CustomDiffOperation)) return customdiffImplementation(ctx, diff, meta) } } diff --git a/pkg/resources/usage_tracking_acceptance_test.go b/pkg/resources/usage_tracking_acceptance_test.go index a50dbc855f..fcd866afcd 100644 --- a/pkg/resources/usage_tracking_acceptance_test.go +++ b/pkg/resources/usage_tracking_acceptance_test.go @@ -38,14 +38,12 @@ func TestAcc_CompleteUsageTracking(t *testing.T) { t.Helper() return func(state *terraform.State) error { queryHistory := acc.TestClient().InformationSchema.GetQueryHistory(t, 60) - expectedMetadata := tracking.NewVersionedMetadata(resources.Schema, operation) + expectedMetadata := tracking.NewVersionedResourceMetadata(resources.Schema, operation) if _, err := collections.FindFirst(queryHistory, func(history helpers.QueryHistory) bool { - if metadata, err := tracking.ParseMetadata(history.QueryText); err == nil { - if expectedMetadata == metadata && strings.Contains(history.QueryText, query) { - return true - } - } - return false + metadata, err := tracking.ParseMetadata(history.QueryText) + return err == nil && + expectedMetadata == metadata && + strings.Contains(history.QueryText, query) }); err != nil { return fmt.Errorf("query history does not contain query metadata: %v with query containing: %s", expectedMetadata, query) } diff --git a/pkg/sdk/testint/client_integration_test.go b/pkg/sdk/testint/client_integration_test.go index 8cacd6b540..b670fbfe34 100644 --- a/pkg/sdk/testint/client_integration_test.go +++ b/pkg/sdk/testint/client_integration_test.go @@ -12,7 +12,7 @@ import ( func TestInt_Client_AdditionalMetadata(t *testing.T) { client := testClient(t) - metadata := tracking.NewMetadata("v1.13.1002-rc-test", resources.Database, tracking.CreateOperation) + metadata := tracking.Metadata{SchemaVersion: "1", Version: "v1.13.1002-rc-test", Resource: resources.Database.String(), Operation: 
tracking.CreateOperation} assertQueryMetadata := func(t *testing.T, queryId string) { t.Helper() diff --git a/pkg/sdk/testint/dynamic_table_integration_test.go b/pkg/sdk/testint/dynamic_table_integration_test.go index b7025d4e04..8eb130486c 100644 --- a/pkg/sdk/testint/dynamic_table_integration_test.go +++ b/pkg/sdk/testint/dynamic_table_integration_test.go @@ -56,7 +56,7 @@ func TestInt_DynamicTableCreateAndDrop(t *testing.T) { t.Run("create with usage tracking comment", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() plainQuery := fmt.Sprintf("SELECT id FROM %s", tableTest.ID().FullyQualifiedName()) - query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.DynamicTable, tracking.CreateOperation)) + query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedResourceMetadata(resources.DynamicTable, tracking.CreateOperation)) require.NoError(t, err) err = client.DynamicTables.Create(ctx, sdk.NewCreateDynamicTableRequest(id, testClientHelper().Ids.WarehouseId(), sdk.TargetLag{ diff --git a/pkg/sdk/testint/materialized_views_gen_integration_test.go b/pkg/sdk/testint/materialized_views_gen_integration_test.go index 6ef58925e9..6cad7bcb93 100644 --- a/pkg/sdk/testint/materialized_views_gen_integration_test.go +++ b/pkg/sdk/testint/materialized_views_gen_integration_test.go @@ -115,7 +115,7 @@ func TestInt_MaterializedViews(t *testing.T) { t.Run("create materialized view: with usage tracking comment", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() plainQuery := fmt.Sprintf("SELECT id FROM %s", table.ID().FullyQualifiedName()) - query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.MaterializedView, tracking.CreateOperation)) + query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedResourceMetadata(resources.MaterializedView, tracking.CreateOperation)) require.NoError(t, err) view := 
createMaterializedViewWithRequest(t, sdk.NewCreateMaterializedViewRequest(id, query)) diff --git a/pkg/sdk/testint/views_gen_integration_test.go b/pkg/sdk/testint/views_gen_integration_test.go index 3682a3ee09..df8db37ab8 100644 --- a/pkg/sdk/testint/views_gen_integration_test.go +++ b/pkg/sdk/testint/views_gen_integration_test.go @@ -172,7 +172,7 @@ func TestInt_Views(t *testing.T) { t.Run("create view: with usage tracking comment", func(t *testing.T) { id := testClientHelper().Ids.RandomSchemaObjectIdentifier() plainQuery := "SELECT NULL AS TYPE" - query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedMetadata(resources.View, tracking.CreateOperation)) + query, err := tracking.AppendMetadata(plainQuery, tracking.NewVersionedResourceMetadata(resources.View, tracking.CreateOperation)) require.NoError(t, err) request := sdk.NewCreateViewRequest(id, query) From 9f6745743daba831422627b5171df404373e9650 Mon Sep 17 00:00:00 2001 From: Jakub Michalak Date: Tue, 26 Nov 2024 16:52:25 +0100 Subject: [PATCH 08/10] fix: Small fixes and adjustments (#3226) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes included (they are really small or fix some wrong changes done during tag rework that were not released yet): - smaller fixes extracted from https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3210/files - support tagging account for identifiers with org name - add notes about manually unassigning policies from objects, add a todo with an issue number - fix a wrong issue number in essential objects list Changes NOT included - rework tag_association resource - return nil from GetTag instead of failing - add more tests regarding tag/masking policy: assert that ALTER MASKING POLICY SET TAG differs from ALTER TAG SET MASKING POLICY - support IF EXISTS for unsetting tags ## Test Plan * [ ] acceptance tests * [ ] … ## References https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3210 
--- docs/resources/authentication_policy.md | 3 ++ docs/resources/masking_policy.md | 3 ++ docs/resources/network_policy.md | 3 ++ docs/resources/password_policy.md | 3 ++ docs/resources/row_access_policy.md | 3 ++ pkg/acceptance/helpers/context_client.go | 9 +++++ pkg/resources/authentication_policy.go | 1 + pkg/resources/masking_policy.go | 1 + pkg/resources/network_policy.go | 1 + pkg/resources/password_policy.go | 2 + pkg/resources/row_access_policy.go | 1 + pkg/sdk/tags_impl.go | 17 +++++++++ pkg/sdk/tags_test.go | 12 ------ pkg/sdk/tags_validations.go | 6 --- pkg/sdk/testint/tags_integration_test.go | 18 ++++++++- .../resources/authentication_policy.md.tmpl | 37 +++++++++++++++++++ templates/resources/masking_policy.md.tmpl | 3 ++ templates/resources/network_policy.md.tmpl | 3 ++ templates/resources/password_policy.md.tmpl | 37 +++++++++++++++++++ templates/resources/row_access_policy.md.tmpl | 3 ++ v1-preparations/ESSENTIAL_GA_OBJECTS.MD | 2 +- v1-preparations/REMAINING_GA_OBJECTS.MD | 2 +- 22 files changed, 149 insertions(+), 21 deletions(-) create mode 100644 templates/resources/authentication_policy.md.tmpl create mode 100644 templates/resources/password_policy.md.tmpl diff --git a/docs/resources/authentication_policy.md b/docs/resources/authentication_policy.md index e53ccb42ca..926acdd4fb 100644 --- a/docs/resources/authentication_policy.md +++ b/docs/resources/authentication_policy.md @@ -5,6 +5,9 @@ description: |- Resource used to manage authentication policy objects. For more information, check authentication policy documentation https://docs.snowflake.com/en/sql-reference/sql/create-authentication-policy. --- +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-authentication-policy#usage-notes), an authentication policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. 
Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>''));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # snowflake_authentication_policy (Resource) Resource used to manage authentication policy objects. For more information, check [authentication policy documentation](https://docs.snowflake.com/en/sql-reference/sql/create-authentication-policy). diff --git a/docs/resources/masking_policy.md b/docs/resources/masking_policy.md index aa9bb89af4..cb34591a7f 100644 --- a/docs/resources/masking_policy.md +++ b/docs/resources/masking_policy.md @@ -7,6 +7,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-masking-policy#usage-notes), a masking policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>''));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # snowflake_masking_policy (Resource) Resource used to manage masking policies. For more information, check [masking policies documentation](https://docs.snowflake.com/en/sql-reference/sql/create-masking-policy). 
diff --git a/docs/resources/network_policy.md b/docs/resources/network_policy.md index aaa3bc02c5..f6eda6e0d8 100644 --- a/docs/resources/network_policy.md +++ b/docs/resources/network_policy.md @@ -7,6 +7,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0920--v0930) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-network-policy#usage-notes), a network policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>''));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # snowflake_network_policy (Resource) Resource used to control network traffic. For more information, check an [official guide](https://docs.snowflake.com/en/user-guide/network-policies) on controlling network traffic with network policies. diff --git a/docs/resources/password_policy.md b/docs/resources/password_policy.md index 3214efcf68..c88f79b2f5 100644 --- a/docs/resources/password_policy.md +++ b/docs/resources/password_policy.md @@ -5,6 +5,9 @@ description: |- A password policy specifies the requirements that must be met to create and reset a password to authenticate to Snowflake. 
--- +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-password-policy#usage-notes), a password policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>''));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # snowflake_password_policy (Resource) A password policy specifies the requirements that must be met to create and reset a password to authenticate to Snowflake. diff --git a/docs/resources/row_access_policy.md b/docs/resources/row_access_policy.md index 1d1951dcb0..0023f99391 100644 --- a/docs/resources/row_access_policy.md +++ b/docs/resources/row_access_policy.md @@ -7,6 +7,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-row-access-policy#usage-notes), a row access policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>''));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. 
+ # snowflake_row_access_policy (Resource) Resource used to manage row access policy objects. For more information, check [row access policy documentation](https://docs.snowflake.com/en/sql-reference/sql/create-row-access-policy). diff --git a/pkg/acceptance/helpers/context_client.go b/pkg/acceptance/helpers/context_client.go index e2186026e9..6c5b4bda46 100644 --- a/pkg/acceptance/helpers/context_client.go +++ b/pkg/acceptance/helpers/context_client.go @@ -63,6 +63,15 @@ func (c *ContextClient) CurrentUser(t *testing.T) sdk.AccountObjectIdentifier { return currentUser } +func (c *ContextClient) CurrentAccountIdentifier(t *testing.T) sdk.AccountIdentifier { + t.Helper() + + details, err := c.client().CurrentSessionDetails(context.Background()) + require.NoError(t, err) + + return sdk.NewAccountIdentifier(details.OrganizationName, details.AccountName) +} + func (c *ContextClient) IsRoleInSession(t *testing.T, id sdk.AccountObjectIdentifier) bool { t.Helper() ctx := context.Background() diff --git a/pkg/resources/authentication_policy.go b/pkg/resources/authentication_policy.go index 85c00356a1..851cbe67d9 100644 --- a/pkg/resources/authentication_policy.go +++ b/pkg/resources/authentication_policy.go @@ -491,6 +491,7 @@ func DeleteContextAuthenticationPolicy(ctx context.Context, d *schema.ResourceDa } } + // TODO(SNOW-1818849): unassign policies before dropping if err := client.AuthenticationPolicies.Drop(ctx, sdk.NewDropAuthenticationPolicyRequest(id).WithIfExists(true)); err != nil { return diag.FromErr(err) } diff --git a/pkg/resources/masking_policy.go b/pkg/resources/masking_policy.go index 282e8ce644..4acd32bbb1 100644 --- a/pkg/resources/masking_policy.go +++ b/pkg/resources/masking_policy.go @@ -366,6 +366,7 @@ func DeleteMaskingPolicy(ctx context.Context, d *schema.ResourceData, meta any) return diag.FromErr(err) } + // TODO(SNOW-1818849): unassign policies before dropping err = client.MaskingPolicies.Drop(ctx, id, &sdk.DropMaskingPolicyOptions{IfExists: 
sdk.Pointer(true)}) if err != nil { return diag.Diagnostics{ diff --git a/pkg/resources/network_policy.go b/pkg/resources/network_policy.go index 07cab78f28..26ab6a1b31 100644 --- a/pkg/resources/network_policy.go +++ b/pkg/resources/network_policy.go @@ -377,6 +377,7 @@ func DeleteContextNetworkPolicy(ctx context.Context, d *schema.ResourceData, met return diag.FromErr(err) } + // TODO(SNOW-1818849): unassign policies before dropping err = client.NetworkPolicies.Drop(ctx, sdk.NewDropNetworkPolicyRequest(id).WithIfExists(true)) if err != nil { return diag.Diagnostics{ diff --git a/pkg/resources/password_policy.go b/pkg/resources/password_policy.go index da3cf85038..f450ef6104 100644 --- a/pkg/resources/password_policy.go +++ b/pkg/resources/password_policy.go @@ -437,6 +437,8 @@ func DeletePasswordPolicy(ctx context.Context, d *schema.ResourceData, meta any) client := meta.(*provider.Context).Client objectIdentifier := helpers.DecodeSnowflakeID(d.Id()).(sdk.SchemaObjectIdentifier) + + // TODO(SNOW-1818849): unassign policies before dropping err := client.PasswordPolicies.Drop(ctx, objectIdentifier, nil) if err != nil { return diag.FromErr(err) diff --git a/pkg/resources/row_access_policy.go b/pkg/resources/row_access_policy.go index f1028a7f82..12c3050cb3 100644 --- a/pkg/resources/row_access_policy.go +++ b/pkg/resources/row_access_policy.go @@ -306,6 +306,7 @@ func DeleteRowAccessPolicy(ctx context.Context, d *schema.ResourceData, meta any client := meta.(*provider.Context).Client + // TODO(SNOW-1818849): unassign policies before dropping err = client.RowAccessPolicies.Drop(ctx, sdk.NewDropRowAccessPolicyRequest(id).WithIfExists(sdk.Pointer(true))) if err != nil { return diag.Diagnostics{ diff --git a/pkg/sdk/tags_impl.go b/pkg/sdk/tags_impl.go index d0e534ae57..e80c934ff1 100644 --- a/pkg/sdk/tags_impl.go +++ b/pkg/sdk/tags_impl.go @@ -149,6 +149,14 @@ func (s *SetTagRequest) toOpts() *setTagOptions { o.column = String(id.Name()) } } + // TODO(SNOW-1818976): 
Remove this workaround. Currently ALTER "ORGNAME"."ACCOUNTNAME" SET TAG does not work, but ALTER "ACCOUNTNAME" does. + if o.objectType == ObjectTypeAccount { + id, ok := o.objectName.(AccountIdentifier) + if ok { + o.objectName = NewAccountIdentifierFromFullyQualifiedName(id.AccountName()) + } + } + return o } @@ -167,5 +175,14 @@ func (s *UnsetTagRequest) toOpts() *unsetTagOptions { o.column = String(id.Name()) } } + + // TODO(SNOW-1818976): Remove this workaround. Currently ALTER "ORGNAME"."ACCOUNTNAME" SET TAG does not work, but ALTER "ACCOUNTNAME" does. + if o.objectType == ObjectTypeAccount { + id, ok := o.objectName.(AccountIdentifier) + if ok { + o.objectName = NewAccountIdentifierFromFullyQualifiedName(id.AccountName()) + } + } + return o } diff --git a/pkg/sdk/tags_test.go b/pkg/sdk/tags_test.go index 0979537b27..94ed4c641c 100644 --- a/pkg/sdk/tags_test.go +++ b/pkg/sdk/tags_test.go @@ -377,12 +377,6 @@ func TestTagSet(t *testing.T) { assertOptsInvalidJoinedErrors(t, opts, errors.New("tagging for object type SEQUENCE is not supported")) }) - t.Run("validation: unsupported account", func(t *testing.T) { - opts := defaultOpts() - opts.objectType = ObjectTypeAccount - assertOptsInvalidJoinedErrors(t, opts, errors.New("tagging for object type ACCOUNT is not supported - use Tags.SetOnCurrentAccount instead")) - }) - t.Run("set with all optional", func(t *testing.T) { opts := defaultOpts() opts.SetTags = []TagAssociation{ @@ -434,12 +428,6 @@ func TestTagUnset(t *testing.T) { assertOptsInvalidJoinedErrors(t, opts, errors.New("tagging for object type SEQUENCE is not supported")) }) - t.Run("validation: unsupported account", func(t *testing.T) { - opts := defaultOpts() - opts.objectType = ObjectTypeAccount - assertOptsInvalidJoinedErrors(t, opts, errors.New("tagging for object type ACCOUNT is not supported - use Tags.UnsetOnCurrentAccount instead")) - }) - t.Run("unset with all optional", func(t *testing.T) { opts := defaultOpts() opts.UnsetTags = 
[]ObjectIdentifier{ diff --git a/pkg/sdk/tags_validations.go b/pkg/sdk/tags_validations.go index f6fd74b83b..25a219c533 100644 --- a/pkg/sdk/tags_validations.go +++ b/pkg/sdk/tags_validations.go @@ -154,9 +154,6 @@ func (opts *setTagOptions) validate() error { if !canBeAssociatedWithTag(opts.objectType) { return fmt.Errorf("tagging for object type %s is not supported", opts.objectType) } - if opts.objectType == ObjectTypeAccount { - return fmt.Errorf("tagging for object type ACCOUNT is not supported - use Tags.SetOnCurrentAccount instead") - } return errors.Join(errs...) } @@ -171,8 +168,5 @@ func (opts *unsetTagOptions) validate() error { if !canBeAssociatedWithTag(opts.objectType) { return fmt.Errorf("tagging for object type %s is not supported", opts.objectType) } - if opts.objectType == ObjectTypeAccount { - return fmt.Errorf("tagging for object type ACCOUNT is not supported - use Tags.UnsetOnCurrentAccount instead") - } return errors.Join(errs...) } diff --git a/pkg/sdk/testint/tags_integration_test.go b/pkg/sdk/testint/tags_integration_test.go index d16d74d661..c3d9414750 100644 --- a/pkg/sdk/testint/tags_integration_test.go +++ b/pkg/sdk/testint/tags_integration_test.go @@ -324,7 +324,7 @@ func TestInt_TagsAssociations(t *testing.T) { require.ErrorContains(t, err, "sql: Scan error on column index 0, name \"TAG\": converting NULL to string is unsupported") } - t.Run("TestInt_TagAssociationForAccount", func(t *testing.T) { + t.Run("TestInt_TagAssociationForAccountLocator", func(t *testing.T) { id := testClientHelper().Ids.AccountIdentifierWithLocator() err := client.Accounts.Alter(ctx, &sdk.AlterAccountOptions{ SetTag: tags, @@ -358,6 +358,22 @@ func TestInt_TagsAssociations(t *testing.T) { require.ErrorContains(t, err, "sql: Scan error on column index 0, name \"TAG\": converting NULL to string is unsupported") }) + t.Run("TestInt_TagAssociationForAccount", func(t *testing.T) { + id := testClientHelper().Context.CurrentAccountIdentifier(t) + err := 
client.Tags.Set(ctx, sdk.NewSetTagRequest(sdk.ObjectTypeAccount, id).WithSetTags(tags)) + require.NoError(t, err) + + returnedTagValue, err := client.SystemFunctions.GetTag(ctx, tag.ID(), id, sdk.ObjectTypeAccount) + require.NoError(t, err) + assert.Equal(t, tagValue, returnedTagValue) + + err = client.Tags.Unset(ctx, sdk.NewUnsetTagRequest(sdk.ObjectTypeAccount, id).WithUnsetTags(unsetTags)) + require.NoError(t, err) + + _, err = client.SystemFunctions.GetTag(ctx, tag.ID(), id, sdk.ObjectTypeAccount) + require.ErrorContains(t, err, "sql: Scan error on column index 0, name \"TAG\": converting NULL to string is unsupported") + }) + + accountObjectTestCases := []struct { + name string + objectType sdk.ObjectType diff --git a/templates/resources/authentication_policy.md.tmpl b/templates/resources/authentication_policy.md.tmpl new file mode 100644 index 0000000000..93b46362ee --- /dev/null +++ b/templates/resources/authentication_policy.md.tmpl @@ -0,0 +1,37 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ if gt (len (split .Description "<deprecation>")) 1 -}} +{{ index (split .Description "<deprecation>") 1 | plainmarkdown | trimspace | prefixlines " " }} +{{- else -}} +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +{{- end }} +--- + +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-authentication-policy#usage-notes), an authentication policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>'<policy_name>'));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. 
+ +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile (printf "examples/resources/%s/resource.tf" .Name)}} + +-> **Note** Instead of using fully_qualified_name, you can reference objects managed outside Terraform by constructing a correct ID, consult [identifiers guide](https://registry.terraform.io/providers/Snowflake-Labs/snowflake/latest/docs/guides/identifiers#new-computed-fully-qualified-name-field-in-resources). + + +{{- end }} + +{{ .SchemaMarkdown | trimspace }} +{{- if .HasImport }} + +## Import + +Import is supported using the following syntax: + +{{ codefile "shell" (printf "examples/resources/%s/import.sh" .Name)}} +{{- end }} diff --git a/templates/resources/masking_policy.md.tmpl b/templates/resources/masking_policy.md.tmpl index e09e58f03f..8c42e823de 100644 --- a/templates/resources/masking_policy.md.tmpl +++ b/templates/resources/masking_policy.md.tmpl @@ -11,6 +11,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-masking-policy#usage-notes), a masking policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. 
Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>'<policy_name>'));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # {{.Name}} ({{.Type}}) {{ .Description | trimspace }} diff --git a/templates/resources/network_policy.md.tmpl b/templates/resources/network_policy.md.tmpl index 28e2af568d..c509e6a3e9 100644 --- a/templates/resources/network_policy.md.tmpl +++ b/templates/resources/network_policy.md.tmpl @@ -11,6 +11,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0920--v0930) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-network-policy#usage-notes), a network policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>'<policy_name>'));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. 
+ # {{.Name}} ({{.Type}}) {{ .Description | trimspace }} diff --git a/templates/resources/password_policy.md.tmpl b/templates/resources/password_policy.md.tmpl new file mode 100644 index 0000000000..28771e2c07 --- /dev/null +++ b/templates/resources/password_policy.md.tmpl @@ -0,0 +1,37 @@ +--- +page_title: "{{.Name}} {{.Type}} - {{.ProviderName}}" +subcategory: "" +description: |- +{{ if gt (len (split .Description "<deprecation>")) 1 -}} +{{ index (split .Description "<deprecation>") 1 | plainmarkdown | trimspace | prefixlines " " }} +{{- else -}} +{{ .Description | plainmarkdown | trimspace | prefixlines " " }} +{{- end }} +--- + +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-password-policy#usage-notes), a password policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>'<policy_name>'));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + +# {{.Name}} ({{.Type}}) + +{{ .Description | trimspace }} + +{{ if .HasExample -}} +## Example Usage + +{{ tffile (printf "examples/resources/%s/resource.tf" .Name)}} + +-> **Note** Instead of using fully_qualified_name, you can reference objects managed outside Terraform by constructing a correct ID, consult [identifiers guide](https://registry.terraform.io/providers/Snowflake-Labs/snowflake/latest/docs/guides/identifiers#new-computed-fully-qualified-name-field-in-resources). 
+ + +{{- end }} + +{{ .SchemaMarkdown | trimspace }} +{{- if .HasImport }} + +## Import + +Import is supported using the following syntax: + +{{ codefile "shell" (printf "examples/resources/%s/import.sh" .Name)}} +{{- end }} diff --git a/templates/resources/row_access_policy.md.tmpl b/templates/resources/row_access_policy.md.tmpl index e09e58f03f..eed337762b 100644 --- a/templates/resources/row_access_policy.md.tmpl +++ b/templates/resources/row_access_policy.md.tmpl @@ -11,6 +11,9 @@ description: |- !> **V1 release candidate** This resource was reworked and is a release candidate for the V1. We do not expect significant changes in it before the V1. We will welcome any feedback and adjust the resource if needed. Any errors reported will be resolved with a higher priority. We encourage checking this resource out before the V1 release. Please follow the [migration guide](https://github.com/Snowflake-Labs/terraform-provider-snowflake/blob/main/MIGRATION_GUIDE.md#v0950--v0960) to use it. +> [!WARNING] +> According to Snowflake [docs](https://docs.snowflake.com/en/sql-reference/sql/drop-row-access-policy#usage-notes), a row access policy cannot be dropped successfully if it is currently assigned to another object. Currently, the provider does not unassign such objects automatically. Before dropping the resource, list the assigned objects with `SELECT * from table(information_schema.policy_references(policy_name=>'<policy_name>'));` and unassign them manually with `ALTER ...` or with updated Terraform configuration, if possible. + # {{.Name}} ({{.Type}}) {{ .Description | trimspace }} diff --git a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD index 96c1eea1d7..2f69537f57 100644 --- a/v1-preparations/ESSENTIAL_GA_OBJECTS.MD +++ b/v1-preparations/ESSENTIAL_GA_OBJECTS.MD @@ -32,7 +32,7 @@ newer provider versions. 
We will address these while working on the given object | STREAM | 🚀 | [#2975](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2975), [#2413](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2413), [#2201](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2201), [#1150](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1150) | | STREAMLIT | 🚀 | - | | TABLE | 👨‍💻 | [#2997](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2997), [#2844](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2844), [#2839](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2839), [#2735](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2735), [#2733](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2733), [#2683](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2683), [#2676](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2676), [#2674](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2674), [#2629](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2629), [#2418](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2418), [#2415](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2415), [#2406](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2406), [#2236](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2236), [#2035](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2035), [#1823](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1823), [#1799](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1799), [#1764](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1764), [#1600](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1600), 
[#1387](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1387), [#1272](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1272), [#1271](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1271), [#1248](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1248), [#1241](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1241), [#1146](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1146), [#1032](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1032), [#420](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/420) | -| TAG | 👨‍💻 | [#2943](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2902), [#2598](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2598), [#1910](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1910), [#1909](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1909), [#1862](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1862), [#1806](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1806), [#1657](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1657), [#1496](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1496), [#1443](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1443), [#1394](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1394), [#1372](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1372), [#1074](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1074) | +| TAG | 👨‍💻 | [#2943](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2943), [#2598](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2598), [#1910](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1910), 
[#1909](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1909), [#1862](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1862), [#1806](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1806), [#1657](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1657), [#1496](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1496), [#1443](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1443), [#1394](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1394), [#1372](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1372), [#1074](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1074) | | TASK | 👨‍💻 | [#3136](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3136), [#1419](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1419), [#1250](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1250), [#1194](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1194), [#1088](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1088) | | VIEW | 🚀 | issues in the older versions: [resources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Aresource%3Aview+) and [datasources](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues?q=label%3Adata_source%3Aviews+) | | snowflake_unsafe_execute | 👨‍💻 | [#2934](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2934) | diff --git a/v1-preparations/REMAINING_GA_OBJECTS.MD b/v1-preparations/REMAINING_GA_OBJECTS.MD index fae960f8d3..97f8f7bade 100644 --- a/v1-preparations/REMAINING_GA_OBJECTS.MD +++ b/v1-preparations/REMAINING_GA_OBJECTS.MD @@ -22,7 +22,7 @@ Known issues lists open issues touching the given object. 
Note that some of thes | EXTERNAL ACCESS INTEGRATION | ❌ | [#2546](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2546) | | FAILOVER GROUP | ❌ | [#2516](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2516), [#2332](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2332), [#1418](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1418) | | MANAGED ACCOUNT | ❌ | - | -| NOTIFICATION INTEGRATION | ❌ | [#2966](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2966), [#2965](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2965), [#1051](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1051) | +| NOTIFICATION INTEGRATION | ❌ | [#3138](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3138), [#2966](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2966), [#2965](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2965), [#1051](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1051) | | REPLICATION GROUP | ❌ | [#1602](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1602) | | SHARE | ❌ | [#3051](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3051), [#2189](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2189), [#1279](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1279), [#630](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/630) | | STORAGE INTEGRATION | ❌ | [#3082](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3082), [#2624](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/2624), [#1445](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/1445) | From 57c0c2caf01d935469ded934cd7042ca41a5894b Mon Sep 17 00:00:00 2001 From: "snowflake-release-please[bot]" 
<105954990+snowflake-release-please[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 19:13:48 +0100 Subject: [PATCH 09/10] chore(main): release 0.99.0 (#3201) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit :robot: I have created a release *beep* *boop* --- ## [0.99.0](https://github.com/Snowflake-Labs/terraform-provider-snowflake/compare/v0.98.0...v0.99.0) (2024-11-26) ### 🎉 **What's new:** * Add tags data source ([#3211](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3211)) ([8907d9d](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/8907d9dfea69d6b8ac26fc0a9e249676f332f8b3)) * Tag resource v1 ([#3197](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3197)) ([77b3bf0](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/77b3bf0c9998c05a30951730439f8b03a2e418ac)) * Tasks v1 readiness ([#3222](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3222)) ([e2284d9](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/e2284d98d23586031514934d7bc7c67139f5e272)) ### 🔧 **Misc** * Add support for usage tracking to data sources ([#3224](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3224)) ([8210bb8](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/8210bb84b69fe91e0fff22ac836feb79d6e9a402)) * Add usage tracking for the rest of the resources and fix views ([#3223](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3223)) ([231f653](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/231f65323611f110564117a325062355e7ed7cf6)) * Basic object tracking ([#3205](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3205)) ([1f0dc94](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/1f0dc94e6ac95940ac5fd0e0b5f62152b8f821a5)) * basic object tracking part 2 
([#3214](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3214)) ([e44f2e1](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/e44f2e1938807285ed4d521b56d2efeab7b927bb)) * Improve tags integration tests ([#3193](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3193)) ([7736e0a](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/7736e0a5fa6a97f9e5551507cea955fb62dd1e90)) * parser and secret tests ([#3192](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3192)) ([5ec9c86](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/5ec9c86fdc3450f6f07820a4a5fe7f74779c7c41)) * Storage integration with custom protocol ([#3213](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3213)) ([a3a44ae](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/a3a44ae5a6eca2a9623369499d8cac4516a87004)) * Unskip auth config tests ([#3180](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3180)) ([46ab142](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/46ab142ad74e5fdc5deb6cc6edc409f487434862)) ### 🐛 **Bug fixes:** * Small fixes and adjustments ([#3226](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3226)) ([9f67457](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/9f6745743daba831422627b5171df404373e9650)) --- This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please). 
Co-authored-by: snowflake-release-please[bot] <105954990+snowflake-release-please[bot]@users.noreply.github.com> --- CHANGELOG.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ba8439c12..233351f618 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Changelog +## [0.99.0](https://github.com/Snowflake-Labs/terraform-provider-snowflake/compare/v0.98.0...v0.99.0) (2024-11-26) + + +### 🎉 **What's new:** + +* Add tags data source ([#3211](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3211)) ([8907d9d](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/8907d9dfea69d6b8ac26fc0a9e249676f332f8b3)) +* Tag resource v1 ([#3197](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3197)) ([77b3bf0](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/77b3bf0c9998c05a30951730439f8b03a2e418ac)) +* Tasks v1 readiness ([#3222](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3222)) ([e2284d9](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/e2284d98d23586031514934d7bc7c67139f5e272)) + + +### 🔧 **Misc** + +* Add support for usage tracking to data sources ([#3224](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3224)) ([8210bb8](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/8210bb84b69fe91e0fff22ac836feb79d6e9a402)) +* Add usage tracking for the rest of the resources and fix views ([#3223](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3223)) ([231f653](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/231f65323611f110564117a325062355e7ed7cf6)) +* Basic object tracking ([#3205](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3205)) ([1f0dc94](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/1f0dc94e6ac95940ac5fd0e0b5f62152b8f821a5)) +* basic object tracking 
part 2 ([#3214](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3214)) ([e44f2e1](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/e44f2e1938807285ed4d521b56d2efeab7b927bb)) +* Improve tags integration tests ([#3193](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3193)) ([7736e0a](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/7736e0a5fa6a97f9e5551507cea955fb62dd1e90)) +* parser and secret tests ([#3192](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3192)) ([5ec9c86](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/5ec9c86fdc3450f6f07820a4a5fe7f74779c7c41)) +* Storage integration with custom protocol ([#3213](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3213)) ([a3a44ae](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/a3a44ae5a6eca2a9623369499d8cac4516a87004)) +* Unskip auth config tests ([#3180](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3180)) ([46ab142](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/46ab142ad74e5fdc5deb6cc6edc409f487434862)) + + +### 🐛 **Bug fixes:** + +* Small fixes and adjustments ([#3226](https://github.com/Snowflake-Labs/terraform-provider-snowflake/issues/3226)) ([9f67457](https://github.com/Snowflake-Labs/terraform-provider-snowflake/commit/9f6745743daba831422627b5171df404373e9650)) + ## [0.98.0](https://github.com/Snowflake-Labs/terraform-provider-snowflake/compare/v0.97.0...v0.98.0) (2024-11-08) From c209a8ae6c15fa9515e933d18add962070b60257 Mon Sep 17 00:00:00 2001 From: Artur Sawicki Date: Thu, 28 Nov 2024 10:07:17 +0100 Subject: [PATCH 10/10] chore: Apply masking (#3234) Based on the: - experiments here: https://github.com/Snowflake-Labs/terraform-provider-snowflake/pull/3232 - docs here: 
https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions#masking-a-value-in-a-log --- .github/workflows/tests.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3ec814e652..824bd0cb0e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -36,6 +36,14 @@ jobs: go-version-file: ./go.mod cache: false + - name: Apply masking + run: | + while IFS= read -r line || [[ -n $line ]]; do + echo "::add-mask::$line" + done < <(printf '%s' "$SF_TF_GH_MASKING") + env: + SF_TF_GH_MASKING: ${{ secrets.SF_TF_GH_MASKING }} + - name: Install dependencies run: make dev-setup