From 90efc256e9a18bef4767b6abf40642e59035e34e Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Tue, 10 Dec 2024 12:08:50 +0100 Subject: [PATCH] [Internal] Bump Go SDK and generate TF structs (#4300) ## Changes Bump Go SDK to latest and generate TF structs to same OpenAPI spec as Go SDK ## Tests N/A - [ ] `make test` run locally - [ ] relevant change in `docs/` folder - [ ] covered with integration tests in `internal/acceptance` - [ ] relevant acceptance tests are passing - [ ] using Go SDK --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + go.mod | 2 +- go.sum | 2 + internal/service/catalog_tf/model.go | 84 ++-- internal/service/cleanrooms_tf/model.go | 608 ++++++++++++++++++++++++ internal/service/dashboards_tf/model.go | 31 ++ internal/service/jobs_tf/model.go | 31 +- internal/service/settings_tf/model.go | 167 +++++++ internal/service/sharing_tf/model.go | 31 +- 10 files changed, 904 insertions(+), 55 deletions(-) create mode 100755 internal/service/cleanrooms_tf/model.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a2ba58aa56..68cd2f4be8 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file +7016dcbf2e011459416cf408ce21143bcc4b3a25 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 4f9942b0df..576702427b 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1,6 +1,7 @@ internal/service/apps_tf/model.go linguist-generated=true internal/service/billing_tf/model.go linguist-generated=true internal/service/catalog_tf/model.go linguist-generated=true +internal/service/cleanrooms_tf/model.go linguist-generated=true internal/service/compute_tf/model.go linguist-generated=true internal/service/dashboards_tf/model.go linguist-generated=true internal/service/files_tf/model.go linguist-generated=true diff --git a/go.mod b/go.mod index 2911926b60..35d2c1acfd 100644 --- a/go.mod +++ 
b/go.mod @@ -5,7 +5,7 @@ go 1.22.0 toolchain go1.22.5 require ( - github.com/databricks/databricks-sdk-go v0.52.0 + github.com/databricks/databricks-sdk-go v0.53.0 github.com/golang-jwt/jwt/v4 v4.5.1 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 3e6b5ad7ba..b1eaf6382b 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,8 @@ github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53E github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw= github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.53.0 h1:rZMXaTC3HNKZt+m4C4I/dY3EdZj+kl/sVd/Kdq55Qfo= +github.com/databricks/databricks-sdk-go v0.53.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go index aef640cdfa..eb62e58b32 100755 --- a/internal/service/catalog_tf/model.go +++ b/internal/service/catalog_tf/model.go @@ -311,7 +311,8 @@ func (newState *AzureManagedIdentityResponse) SyncEffectiveFieldsDuringCreateOrU func (newState *AzureManagedIdentityResponse) SyncEffectiveFieldsDuringRead(existingState AzureManagedIdentityResponse) { } -// The Azure service principal configuration. +// The Azure service principal configuration. Only applicable when purpose is +// **STORAGE**. type AzureServicePrincipal struct { // The application ID of the application registration within the referenced // AAD tenant. 
@@ -461,7 +462,7 @@ type ColumnInfo struct { TypeIntervalType types.String `tfsdk:"type_interval_type" tf:"optional"` // Full data type specification, JSON-serialized. TypeJson types.String `tfsdk:"type_json" tf:"optional"` - // Name of type (INT, STRUCT, MAP, etc.). + TypeName types.String `tfsdk:"type_name" tf:"optional"` // Digits of precision; required for DecimalTypes. TypePrecision types.Int64 `tfsdk:"type_precision" tf:"optional"` @@ -616,13 +617,14 @@ type CreateCredentialRequest struct { AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` - // The Azure service principal configuration. + // The Azure service principal configuration. Only applicable when purpose + // is **STORAGE**. AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` - // TODO(UC-978): Document GCP service account key usage for service - // credentials. - GcpServiceAccountKey []GcpServiceAccountKey `tfsdk:"gcp_service_account_key" tf:"optional,object"` + // GCP long-lived credential. Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount []DatabricksGcpServiceAccount `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The credential name. The name must be unique among storage and service // credentials within the metastore. Name types.String `tfsdk:"name" tf:""` @@ -949,7 +951,8 @@ type CredentialInfo struct { AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` - // The Azure service principal configuration. + // The Azure service principal configuration. 
Only applicable when purpose + // is **STORAGE**. AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` @@ -957,6 +960,9 @@ type CredentialInfo struct { CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` // Username of credential creator. CreatedBy types.String `tfsdk:"created_by" tf:"optional"` + // GCP long-lived credential. Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount []DatabricksGcpServiceAccount `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // The full name of the credential. FullName types.String `tfsdk:"full_name" tf:"optional"` // The unique identifier of the credential. @@ -1016,6 +1022,26 @@ func (newState *CurrentWorkspaceBindings) SyncEffectiveFieldsDuringCreateOrUpdat func (newState *CurrentWorkspaceBindings) SyncEffectiveFieldsDuringRead(existingState CurrentWorkspaceBindings) { } +// GCP long-lived credential. Databricks-created Google Cloud Storage service +// account. +type DatabricksGcpServiceAccount struct { + // The Databricks internal ID that represents this managed identity. This + // field is only used to persist the credential_id once it is fetched from + // the credentials manager - as we only use the protobuf serializer to store + // credentials, this ID gets persisted to the database + CredentialId types.String `tfsdk:"credential_id" tf:"optional"` + // The email of the service account. 
+ Email types.String `tfsdk:"email" tf:"optional"` + // The ID that represents the private key for this Service Account + PrivateKeyId types.String `tfsdk:"private_key_id" tf:"optional"` +} + +func (newState *DatabricksGcpServiceAccount) SyncEffectiveFieldsDuringCreateOrUpdate(plan DatabricksGcpServiceAccount) { +} + +func (newState *DatabricksGcpServiceAccount) SyncEffectiveFieldsDuringRead(existingState DatabricksGcpServiceAccount) { +} + type DatabricksGcpServiceAccountRequest struct { } @@ -1696,7 +1722,7 @@ type FunctionParameterInfo struct { TypeIntervalType types.String `tfsdk:"type_interval_type" tf:"optional"` // Full data type spec, JSON-serialized. TypeJson types.String `tfsdk:"type_json" tf:"optional"` - // Name of type (INT, STRUCT, MAP, etc.). + TypeName types.String `tfsdk:"type_name" tf:""` // Digits of precision; required on Create for DecimalTypes. TypePrecision types.Int64 `tfsdk:"type_precision" tf:"optional"` @@ -1736,23 +1762,7 @@ func (newState *GcpOauthToken) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpO func (newState *GcpOauthToken) SyncEffectiveFieldsDuringRead(existingState GcpOauthToken) { } -// GCP long-lived credential. GCP Service Account. -type GcpServiceAccountKey struct { - // The email of the service account. [Create:REQ Update:OPT]. - Email types.String `tfsdk:"email" tf:"optional"` - // The service account's RSA private key. [Create:REQ Update:OPT] - PrivateKey types.String `tfsdk:"private_key" tf:"optional"` - // The ID of the service account's private key. 
[Create:REQ Update:OPT] - PrivateKeyId types.String `tfsdk:"private_key_id" tf:"optional"` -} - -func (newState *GcpServiceAccountKey) SyncEffectiveFieldsDuringCreateOrUpdate(plan GcpServiceAccountKey) { -} - -func (newState *GcpServiceAccountKey) SyncEffectiveFieldsDuringRead(existingState GcpServiceAccountKey) { -} - -// Options to customize the requested temporary credential +// The Azure cloud options to customize the requested temporary credential type GenerateTemporaryServiceCredentialAzureOptions struct { // The resources to which the temporary Azure credential should apply. These // resources are the scopes that are passed to the token provider (see @@ -1766,12 +1776,28 @@ func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFie func (newState *GenerateTemporaryServiceCredentialAzureOptions) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialAzureOptions) { } +// The GCP cloud options to customize the requested temporary credential +type GenerateTemporaryServiceCredentialGcpOptions struct { + // The scopes to which the temporary GCP credential should apply. 
These + // resources are the scopes that are passed to the token provider (see + // https://google-auth.readthedocs.io/en/latest/reference/google.auth.html#google.auth.credentials.Credentials) + Scopes []types.String `tfsdk:"scopes" tf:"optional"` +} + +func (newState *GenerateTemporaryServiceCredentialGcpOptions) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialGcpOptions) { +} + +func (newState *GenerateTemporaryServiceCredentialGcpOptions) SyncEffectiveFieldsDuringRead(existingState GenerateTemporaryServiceCredentialGcpOptions) { +} + type GenerateTemporaryServiceCredentialRequest struct { - // Options to customize the requested temporary credential + // The Azure cloud options to customize the requested temporary credential AzureOptions []GenerateTemporaryServiceCredentialAzureOptions `tfsdk:"azure_options" tf:"optional,object"` // The name of the service credential used to generate a temporary // credential CredentialName types.String `tfsdk:"credential_name" tf:""` + // The GCP cloud options to customize the requested temporary credential + GcpOptions []GenerateTemporaryServiceCredentialGcpOptions `tfsdk:"gcp_options" tf:"optional,object"` } func (newState *GenerateTemporaryServiceCredentialRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GenerateTemporaryServiceCredentialRequest) { @@ -4032,10 +4058,14 @@ type UpdateCredentialRequest struct { AwsIamRole []AwsIamRole `tfsdk:"aws_iam_role" tf:"optional,object"` // The Azure managed identity configuration. AzureManagedIdentity []AzureManagedIdentity `tfsdk:"azure_managed_identity" tf:"optional,object"` - // The Azure service principal configuration. + // The Azure service principal configuration. Only applicable when purpose + // is **STORAGE**. AzureServicePrincipal []AzureServicePrincipal `tfsdk:"azure_service_principal" tf:"optional,object"` // Comment associated with the credential. Comment types.String `tfsdk:"comment" tf:"optional"` + // GCP long-lived credential. 
Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount []DatabricksGcpServiceAccount `tfsdk:"databricks_gcp_service_account" tf:"optional,object"` // Force an update even if there are dependent services (when purpose is // **SERVICE**) or dependent external locations and external tables (when // purpose is **STORAGE**). diff --git a/internal/service/cleanrooms_tf/model.go b/internal/service/cleanrooms_tf/model.go new file mode 100755 index 0000000000..fbd3e6b40f --- /dev/null +++ b/internal/service/cleanrooms_tf/model.go @@ -0,0 +1,608 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +/* +These generated types are for terraform plugin framework to interact with the terraform state conveniently. + +These types follow the same structure as the types in go-sdk. +The only difference is that the primitive types are no longer using the go-native types, but with tfsdk types. +Plus the json tags get converted into tfsdk tags. +We use go-native types for lists and maps intentionally for the ease for converting these types into the go-sdk types. +*/ + +package cleanrooms_tf + +import ( + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/databricks/databricks-sdk-go/service/sharing" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type CleanRoom struct { + // Whether clean room access is restricted due to [CSP] + // + // [CSP]: https://docs.databricks.com/en/security/privacy/security-profile.html + AccessRestricted types.String `tfsdk:"access_restricted" tf:"optional"` + + Comment types.String `tfsdk:"comment" tf:"optional"` + // When the clean room was created, in epoch milliseconds. + CreatedAt types.Int64 `tfsdk:"created_at" tf:"optional"` + // The alias of the collaborator tied to the local clean room. 
+ LocalCollaboratorAlias types.String `tfsdk:"local_collaborator_alias" tf:"optional"` + // The name of the clean room. It should follow [UC securable naming + // requirements]. + // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + Name types.String `tfsdk:"name" tf:"optional"` + // Output catalog of the clean room. It is an output only field. Output + // catalog is manipulated using the separate CreateCleanRoomOutputCatalog + // API. + OutputCatalog []CleanRoomOutputCatalog `tfsdk:"output_catalog" tf:"optional,object"` + // This is Databricks username of the owner of the local clean room + // securable for permission management. + Owner types.String `tfsdk:"owner" tf:"optional"` + // Central clean room details. During creation, users need to specify + // cloud_vendor, region, and collaborators.global_metastore_id. This field + // will not be filled in the ListCleanRooms call. + RemoteDetailedInfo []CleanRoomRemoteDetail `tfsdk:"remote_detailed_info" tf:"optional,object"` + // Clean room status. + Status types.String `tfsdk:"status" tf:"optional"` + // When the clean room was last updated, in epoch milliseconds. + UpdatedAt types.Int64 `tfsdk:"updated_at" tf:"optional"` +} + +func (newState *CleanRoom) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoom) { +} + +func (newState *CleanRoom) SyncEffectiveFieldsDuringRead(existingState CleanRoom) { +} + +// Metadata of the clean room asset +type CleanRoomAsset struct { + // When the asset is added to the clean room, in epoch milliseconds. + AddedAt types.Int64 `tfsdk:"added_at" tf:"optional"` + // The type of the asset. + AssetType types.String `tfsdk:"asset_type" tf:"optional"` + // Foreign table details available to all collaborators of the clean room. 
+ // Present if and only if **asset_type** is **FOREIGN_TABLE** + ForeignTable []CleanRoomAssetForeignTable `tfsdk:"foreign_table" tf:"optional,object"` + // Local details for a foreign table that are only available to its owner. Present + // if and only if **asset_type** is **FOREIGN_TABLE** + ForeignTableLocalDetails []CleanRoomAssetForeignTableLocalDetails `tfsdk:"foreign_table_local_details" tf:"optional,object"` + // A fully qualified name that uniquely identifies the asset within the + // clean room. This is also the name displayed in the clean room UI. + // + // For UC securable assets (tables, volumes, etc.), the format is + // *shared_catalog*.*shared_schema*.*asset_name* + // + // For notebooks, the name is the notebook file name. + Name types.String `tfsdk:"name" tf:"optional"` + // Notebook details available to all collaborators of the clean room. + // Present if and only if **asset_type** is **NOTEBOOK_FILE** + Notebook []CleanRoomAssetNotebook `tfsdk:"notebook" tf:"optional,object"` + // The alias of the collaborator who owns this asset + OwnerCollaboratorAlias types.String `tfsdk:"owner_collaborator_alias" tf:"optional"` + // Status of the asset + Status types.String `tfsdk:"status" tf:"optional"` + // Table details available to all collaborators of the clean room. Present + // if and only if **asset_type** is **TABLE** + Table []CleanRoomAssetTable `tfsdk:"table" tf:"optional,object"` + // Local details for a table that are only available to its owner. Present + // if and only if **asset_type** is **TABLE** + TableLocalDetails []CleanRoomAssetTableLocalDetails `tfsdk:"table_local_details" tf:"optional,object"` + // View details available to all collaborators of the clean room. Present if + // and only if **asset_type** is **VIEW** + View []CleanRoomAssetView `tfsdk:"view" tf:"optional,object"` + // Local details for a view that are only available to its owner. 
Present if + // and only if **asset_type** is **VIEW** + ViewLocalDetails []CleanRoomAssetViewLocalDetails `tfsdk:"view_local_details" tf:"optional,object"` + // Local details for a volume that are only available to its owner. Present + // if and only if **asset_type** is **VOLUME** + VolumeLocalDetails []CleanRoomAssetVolumeLocalDetails `tfsdk:"volume_local_details" tf:"optional,object"` +} + +func (newState *CleanRoomAsset) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAsset) { +} + +func (newState *CleanRoomAsset) SyncEffectiveFieldsDuringRead(existingState CleanRoomAsset) { +} + +type CleanRoomAssetForeignTable struct { + // The metadata information of the columns in the foreign table + Columns catalog.ColumnInfo `tfsdk:"columns" tf:"optional"` +} + +func (newState *CleanRoomAssetForeignTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetForeignTable) { +} + +func (newState *CleanRoomAssetForeignTable) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetForeignTable) { +} + +type CleanRoomAssetForeignTableLocalDetails struct { + // The fully qualified name of the foreign table in its owner's local + // metastore, in the format of *catalog*.*schema*.*foreign_table_name* + LocalName types.String `tfsdk:"local_name" tf:"optional"` +} + +func (newState *CleanRoomAssetForeignTableLocalDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetForeignTableLocalDetails) { +} + +func (newState *CleanRoomAssetForeignTableLocalDetails) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetForeignTableLocalDetails) { +} + +type CleanRoomAssetNotebook struct { + // Server generated checksum that represents the notebook version. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Base 64 representation of the notebook contents. This is the same format + // as returned by :method:workspace/export with the format of **HTML**. 
+ NotebookContent types.String `tfsdk:"notebook_content" tf:"optional"` +} + +func (newState *CleanRoomAssetNotebook) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetNotebook) { +} + +func (newState *CleanRoomAssetNotebook) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetNotebook) { +} + +type CleanRoomAssetTable struct { + // The metadata information of the columns in the table + Columns catalog.ColumnInfo `tfsdk:"columns" tf:"optional"` +} + +func (newState *CleanRoomAssetTable) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetTable) { +} + +func (newState *CleanRoomAssetTable) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetTable) { +} + +type CleanRoomAssetTableLocalDetails struct { + // The fully qualified name of the table in its owner's local metastore, in + // the format of *catalog*.*schema*.*table_name* + LocalName types.String `tfsdk:"local_name" tf:"optional"` + // Partition filtering specification for a shared table. + Partitions sharing.PartitionSpecificationPartition `tfsdk:"partitions" tf:"optional"` +} + +func (newState *CleanRoomAssetTableLocalDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetTableLocalDetails) { +} + +func (newState *CleanRoomAssetTableLocalDetails) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetTableLocalDetails) { +} + +type CleanRoomAssetView struct { + // The metadata information of the columns in the view + Columns catalog.ColumnInfo `tfsdk:"columns" tf:"optional"` +} + +func (newState *CleanRoomAssetView) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetView) { +} + +func (newState *CleanRoomAssetView) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetView) { +} + +type CleanRoomAssetViewLocalDetails struct { + // The fully qualified name of the view in its owner's local metastore, in + // the format of *catalog*.*schema*.*view_name* + LocalName types.String `tfsdk:"local_name" tf:"optional"` +} + +func (newState 
*CleanRoomAssetViewLocalDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetViewLocalDetails) { +} + +func (newState *CleanRoomAssetViewLocalDetails) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetViewLocalDetails) { +} + +type CleanRoomAssetVolumeLocalDetails struct { + // The fully qualified name of the volume in its owner's local metastore, in + // the format of *catalog*.*schema*.*volume_name* + LocalName types.String `tfsdk:"local_name" tf:"optional"` +} + +func (newState *CleanRoomAssetVolumeLocalDetails) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomAssetVolumeLocalDetails) { +} + +func (newState *CleanRoomAssetVolumeLocalDetails) SyncEffectiveFieldsDuringRead(existingState CleanRoomAssetVolumeLocalDetails) { +} + +// Publicly visible clean room collaborator. +type CleanRoomCollaborator struct { + // Collaborator alias specified by the clean room creator. It is unique + // across all collaborators of this clean room, and used to derive multiple + // values internally such as catalog alias and clean room name for single + // metastore clean rooms. It should follow [UC securable naming + // requirements]. + // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + CollaboratorAlias types.String `tfsdk:"collaborator_alias" tf:"optional"` + // Generated display name for the collaborator. In the case of a single + // metastore clean room, it is the clean room name. For x-metastore clean + // rooms, it is the organization name of the metastore. It is not restricted + // to these values and could change in the future + DisplayName types.String `tfsdk:"display_name" tf:"optional"` + // The global Unity Catalog metastore id of the collaborator. The identifier + // is of format cloud:region:metastore-uuid. 
+ GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` + // Email of the user who is receiving the clean room "invitation". It should + // be empty for the creator of the clean room, and non-empty for the + // invitees of the clean room. It is only returned in the output when clean + // room creator calls GET + InviteRecipientEmail types.String `tfsdk:"invite_recipient_email" tf:"optional"` + // Workspace ID of the user who is receiving the clean room "invitation". + // Must be specified if invite_recipient_email is specified. It should be + // empty when the collaborator is the creator of the clean room. + InviteRecipientWorkspaceId types.Int64 `tfsdk:"invite_recipient_workspace_id" tf:"optional"` + // [Organization + // name](:method:metastores/list#metastores-delta_sharing_organization_name) + // configured in the metastore + OrganizationName types.String `tfsdk:"organization_name" tf:"optional"` +} + +func (newState *CleanRoomCollaborator) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomCollaborator) { +} + +func (newState *CleanRoomCollaborator) SyncEffectiveFieldsDuringRead(existingState CleanRoomCollaborator) { +} + +// Stores information about a single task run. +type CleanRoomNotebookTaskRun struct { + // Job run info of the task in the runner's local workspace. This field is + // only included in the LIST API. if the task was run within the same + // workspace the API is being called. If the task run was in a different + // workspace under the same metastore, only the workspace_id is included. + CollaboratorJobRunInfo []CollaboratorJobRunInfo `tfsdk:"collaborator_job_run_info" tf:"optional,object"` + // State of the task run. + NotebookJobRunState jobs.CleanRoomTaskRunState `tfsdk:"notebook_job_run_state" tf:"optional,object"` + // Asset name of the notebook executed in this task run. 
+ NotebookName types.String `tfsdk:"notebook_name" tf:"optional"` + // Expiration time of the output schema of the task run (if any), in epoch + // milliseconds. + OutputSchemaExpirationTime types.Int64 `tfsdk:"output_schema_expiration_time" tf:"optional"` + // Name of the output schema associated with the clean rooms notebook task + // run. + OutputSchemaName types.String `tfsdk:"output_schema_name" tf:"optional"` + // Duration of the task run, in milliseconds. + RunDuration types.Int64 `tfsdk:"run_duration" tf:"optional"` + // When the task run started, in epoch milliseconds. + StartTime types.Int64 `tfsdk:"start_time" tf:"optional"` +} + +func (newState *CleanRoomNotebookTaskRun) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomNotebookTaskRun) { +} + +func (newState *CleanRoomNotebookTaskRun) SyncEffectiveFieldsDuringRead(existingState CleanRoomNotebookTaskRun) { +} + +type CleanRoomOutputCatalog struct { + // The name of the output catalog in UC. It should follow [UC securable + // naming requirements]. The field will always exist if status is CREATED. + // + // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements + CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` + + Status types.String `tfsdk:"status" tf:"optional"` +} + +func (newState *CleanRoomOutputCatalog) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomOutputCatalog) { +} + +func (newState *CleanRoomOutputCatalog) SyncEffectiveFieldsDuringRead(existingState CleanRoomOutputCatalog) { +} + +// Publicly visible central clean room details. +type CleanRoomRemoteDetail struct { + // Central clean room ID. + CentralCleanRoomId types.String `tfsdk:"central_clean_room_id" tf:"optional"` + // Cloud vendor (aws,azure,gcp) of the central clean room. + CloudVendor types.String `tfsdk:"cloud_vendor" tf:"optional"` + // Collaborators in the central clean room. 
There should be one and only one + // collaborator in the list that satisfies the owner condition: + // + // 1. It has the creator's global_metastore_id (determined by caller of + // CreateCleanRoom). + // + // 2. Its invite_recipient_email is empty. + Collaborators []CleanRoomCollaborator `tfsdk:"collaborators" tf:"optional"` + // The compliance security profile used to process regulated data following + // compliance standards. + ComplianceSecurityProfile []ComplianceSecurityProfile `tfsdk:"compliance_security_profile" tf:"optional,object"` + // Collaborator who creates the clean room. + Creator []CleanRoomCollaborator `tfsdk:"creator" tf:"optional,object"` + // Egress network policy to apply to the central clean room workspace. + EgressNetworkPolicy settings.EgressNetworkPolicy `tfsdk:"egress_network_policy" tf:"optional,object"` + // Region of the central clean room. + Region types.String `tfsdk:"region" tf:"optional"` +} + +func (newState *CleanRoomRemoteDetail) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomRemoteDetail) { +} + +func (newState *CleanRoomRemoteDetail) SyncEffectiveFieldsDuringRead(existingState CleanRoomRemoteDetail) { +} + +type CollaboratorJobRunInfo struct { + // Alias of the collaborator that triggered the task run. + CollaboratorAlias types.String `tfsdk:"collaborator_alias" tf:"optional"` + // Job ID of the task run in the collaborator's workspace. + CollaboratorJobId types.Int64 `tfsdk:"collaborator_job_id" tf:"optional"` + // Job run ID of the task run in the collaborator's workspace. + CollaboratorJobRunId types.Int64 `tfsdk:"collaborator_job_run_id" tf:"optional"` + // Task run ID of the task run in the collaborator's workspace. + CollaboratorTaskRunId types.Int64 `tfsdk:"collaborator_task_run_id" tf:"optional"` + // ID of the collaborator's workspace that triggered the task run. 
+ CollaboratorWorkspaceId types.Int64 `tfsdk:"collaborator_workspace_id" tf:"optional"` +} + +func (newState *CollaboratorJobRunInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan CollaboratorJobRunInfo) { +} + +func (newState *CollaboratorJobRunInfo) SyncEffectiveFieldsDuringRead(existingState CollaboratorJobRunInfo) { +} + +// The compliance security profile used to process regulated data following +// compliance standards. +type ComplianceSecurityProfile struct { + // The list of compliance standards that the compliance security profile is + // configured to enforce. + ComplianceStandards settings.ComplianceStandard `tfsdk:"compliance_standards" tf:"optional"` + // Whether the compliance security profile is enabled. + IsEnabled types.Bool `tfsdk:"is_enabled" tf:"optional"` +} + +func (newState *ComplianceSecurityProfile) SyncEffectiveFieldsDuringCreateOrUpdate(plan ComplianceSecurityProfile) { +} + +func (newState *ComplianceSecurityProfile) SyncEffectiveFieldsDuringRead(existingState ComplianceSecurityProfile) { +} + +// Create an asset +type CreateCleanRoomAssetRequest struct { + // Metadata of the clean room asset + Asset []CleanRoomAsset `tfsdk:"asset" tf:"optional,object"` + // Name of the clean room. + CleanRoomName types.String `tfsdk:"-"` +} + +func (newState *CreateCleanRoomAssetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoomAssetRequest) { +} + +func (newState *CreateCleanRoomAssetRequest) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoomAssetRequest) { +} + +// Create an output catalog +type CreateCleanRoomOutputCatalogRequest struct { + // Name of the clean room. 
+ CleanRoomName types.String `tfsdk:"-"` + + OutputCatalog []CleanRoomOutputCatalog `tfsdk:"output_catalog" tf:"optional,object"` +} + +func (newState *CreateCleanRoomOutputCatalogRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoomOutputCatalogRequest) { +} + +func (newState *CreateCleanRoomOutputCatalogRequest) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoomOutputCatalogRequest) { +} + +type CreateCleanRoomOutputCatalogResponse struct { + OutputCatalog []CleanRoomOutputCatalog `tfsdk:"output_catalog" tf:"optional,object"` +} + +func (newState *CreateCleanRoomOutputCatalogResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoomOutputCatalogResponse) { +} + +func (newState *CreateCleanRoomOutputCatalogResponse) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoomOutputCatalogResponse) { +} + +// Create a clean room +type CreateCleanRoomRequest struct { + CleanRoom []CleanRoom `tfsdk:"clean_room" tf:"optional,object"` +} + +func (newState *CreateCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan CreateCleanRoomRequest) { +} + +func (newState *CreateCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState CreateCleanRoomRequest) { +} + +// Delete an asset +type DeleteCleanRoomAssetRequest struct { + // The fully qualified name of the asset, it is same as the name field in + // CleanRoomAsset. + AssetFullName types.String `tfsdk:"-"` + // The type of the asset. + AssetType types.String `tfsdk:"-"` + // Name of the clean room. + CleanRoomName types.String `tfsdk:"-"` +} + +func (newState *DeleteCleanRoomAssetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomAssetRequest) { +} + +func (newState *DeleteCleanRoomAssetRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomAssetRequest) { +} + +// Response for delete clean room request. Using an empty message since the +// generic Empty proto does not extend UnshadedMessageMarker. 
+type DeleteCleanRoomAssetResponse struct { +} + +func (newState *DeleteCleanRoomAssetResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomAssetResponse) { +} + +func (newState *DeleteCleanRoomAssetResponse) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomAssetResponse) { +} + +// Delete a clean room +type DeleteCleanRoomRequest struct { + // Name of the clean room. + Name types.String `tfsdk:"-"` +} + +func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteCleanRoomRequest) { +} + +func (newState *DeleteCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState DeleteCleanRoomRequest) { +} + +type DeleteResponse struct { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteResponse) { +} + +func (newState *DeleteResponse) SyncEffectiveFieldsDuringRead(existingState DeleteResponse) { +} + +// Get an asset +type GetCleanRoomAssetRequest struct { + // The fully qualified name of the asset, it is same as the name field in + // CleanRoomAsset. + AssetFullName types.String `tfsdk:"-"` + // The type of the asset. + AssetType types.String `tfsdk:"-"` + // Name of the clean room. + CleanRoomName types.String `tfsdk:"-"` +} + +func (newState *GetCleanRoomAssetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCleanRoomAssetRequest) { +} + +func (newState *GetCleanRoomAssetRequest) SyncEffectiveFieldsDuringRead(existingState GetCleanRoomAssetRequest) { +} + +// Get a clean room +type GetCleanRoomRequest struct { + Name types.String `tfsdk:"-"` +} + +func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan GetCleanRoomRequest) { +} + +func (newState *GetCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState GetCleanRoomRequest) { +} + +// List assets +type ListCleanRoomAssetsRequest struct { + // Name of the clean room. + CleanRoomName types.String `tfsdk:"-"` + // Opaque pagination token to go to next page based on previous query. 
+ PageToken types.String `tfsdk:"-"`
+}
+
+func (newState *ListCleanRoomAssetsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomAssetsRequest) {
+}
+
+func (newState *ListCleanRoomAssetsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomAssetsRequest) {
+}
+
+type ListCleanRoomAssetsResponse struct {
+ // Assets in the clean room.
+ Assets []CleanRoomAsset `tfsdk:"assets" tf:"optional"`
+ // Opaque token to retrieve the next page of results. Absent if there are no
+ // more pages. page_token should be set to this value for the next request
+ // (for the next page of results).
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"`
+}
+
+func (newState *ListCleanRoomAssetsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomAssetsResponse) {
+}
+
+func (newState *ListCleanRoomAssetsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomAssetsResponse) {
+}
+
+// List notebook task runs
+type ListCleanRoomNotebookTaskRunsRequest struct {
+ // Name of the clean room.
+ CleanRoomName types.String `tfsdk:"-"`
+ // Notebook name
+ NotebookName types.String `tfsdk:"-"`
+ // The maximum number of task runs to return
+ PageSize types.Int64 `tfsdk:"-"`
+ // Opaque pagination token to go to next page based on previous query.
+ PageToken types.String `tfsdk:"-"`
+}
+
+func (newState *ListCleanRoomNotebookTaskRunsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomNotebookTaskRunsRequest) {
+}
+
+func (newState *ListCleanRoomNotebookTaskRunsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomNotebookTaskRunsRequest) {
+}
+
+type ListCleanRoomNotebookTaskRunsResponse struct {
+ // Opaque token to retrieve the next page of results. Absent if there are no
+ // more pages. page_token should be set to this value for the next request
+ // (for the next page of results).
+ NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"`
+ // The list of notebook task runs.
+ Runs []CleanRoomNotebookTaskRun `tfsdk:"runs" tf:"optional"` +} + +func (newState *ListCleanRoomNotebookTaskRunsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomNotebookTaskRunsResponse) { +} + +func (newState *ListCleanRoomNotebookTaskRunsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomNotebookTaskRunsResponse) { +} + +// List clean rooms +type ListCleanRoomsRequest struct { + // Maximum number of clean rooms to return (i.e., the page length). Defaults + // to 100. + PageSize types.Int64 `tfsdk:"-"` + // Opaque pagination token to go to next page based on previous query. + PageToken types.String `tfsdk:"-"` +} + +func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsRequest) { +} + +func (newState *ListCleanRoomsRequest) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsRequest) { +} + +type ListCleanRoomsResponse struct { + CleanRooms []CleanRoom `tfsdk:"clean_rooms" tf:"optional"` + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. page_token should be set to this value for the next request + // (for the next page of results). + NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"` +} + +func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan ListCleanRoomsResponse) { +} + +func (newState *ListCleanRoomsResponse) SyncEffectiveFieldsDuringRead(existingState ListCleanRoomsResponse) { +} + +// Update an asset +type UpdateCleanRoomAssetRequest struct { + // Metadata of the clean room asset + Asset []CleanRoomAsset `tfsdk:"asset" tf:"optional,object"` + // The type of the asset. + AssetType types.String `tfsdk:"-"` + // Name of the clean room. + CleanRoomName types.String `tfsdk:"-"` + // A fully qualified name that uniquely identifies the asset within the + // clean room. This is also the name displayed in the clean room UI. 
+ // + // For UC securable assets (tables, volumes, etc.), the format is + // *shared_catalog*.*shared_schema*.*asset_name* + // + // For notebooks, the name is the notebook file name. + Name types.String `tfsdk:"-"` +} + +func (newState *UpdateCleanRoomAssetRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCleanRoomAssetRequest) { +} + +func (newState *UpdateCleanRoomAssetRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCleanRoomAssetRequest) { +} + +type UpdateCleanRoomRequest struct { + CleanRoom []CleanRoom `tfsdk:"clean_room" tf:"optional,object"` + // Name of the clean room. + Name types.String `tfsdk:"-"` +} + +func (newState *UpdateCleanRoomRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateCleanRoomRequest) { +} + +func (newState *UpdateCleanRoomRequest) SyncEffectiveFieldsDuringRead(existingState UpdateCleanRoomRequest) { +} diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index 2b6ff5a197..9e0c94224f 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -575,6 +575,7 @@ func (newState *PublishedDashboard) SyncEffectiveFieldsDuringRead(existingState } type QueryAttachment struct { + CachedQuerySchema []QuerySchema `tfsdk:"cached_query_schema" tf:"optional,object"` // Description of the query Description types.String `tfsdk:"description" tf:"optional"` @@ -599,6 +600,36 @@ func (newState *QueryAttachment) SyncEffectiveFieldsDuringCreateOrUpdate(plan Qu func (newState *QueryAttachment) SyncEffectiveFieldsDuringRead(existingState QueryAttachment) { } +type QuerySchema struct { + Columns []QuerySchemaColumn `tfsdk:"columns" tf:"optional"` + // Used to determine if the stored query schema is compatible with the + // latest run. The service should always clear the schema when the query is + // re-executed. 
+ StatementId types.String `tfsdk:"statement_id" tf:"optional"` +} + +func (newState *QuerySchema) SyncEffectiveFieldsDuringCreateOrUpdate(plan QuerySchema) { +} + +func (newState *QuerySchema) SyncEffectiveFieldsDuringRead(existingState QuerySchema) { +} + +type QuerySchemaColumn struct { + // Populated from + // https://docs.databricks.com/sql/language-manual/sql-ref-datatypes.html + DataType types.String `tfsdk:"data_type" tf:""` + + Name types.String `tfsdk:"name" tf:""` + // Corresponds to type desc + TypeText types.String `tfsdk:"type_text" tf:""` +} + +func (newState *QuerySchemaColumn) SyncEffectiveFieldsDuringCreateOrUpdate(plan QuerySchemaColumn) { +} + +func (newState *QuerySchemaColumn) SyncEffectiveFieldsDuringRead(existingState QuerySchemaColumn) { +} + type Result struct { // If result is truncated IsTruncated types.Bool `tfsdk:"is_truncated" tf:"optional"` diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index 51e158e64f..80b3056671 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -221,6 +221,22 @@ func (newState *CancelRunResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan func (newState *CancelRunResponse) SyncEffectiveFieldsDuringRead(existingState CancelRunResponse) { } +// Stores the run state of the clean room notebook V1 task. +type CleanRoomTaskRunState struct { + // A value indicating the run's current lifecycle state. This field is + // always available in the response. + LifeCycleState types.String `tfsdk:"life_cycle_state" tf:"optional"` + // A value indicating the run's result. This field is only available for + // terminal lifecycle states. 
+ ResultState types.String `tfsdk:"result_state" tf:"optional"` +} + +func (newState *CleanRoomTaskRunState) SyncEffectiveFieldsDuringCreateOrUpdate(plan CleanRoomTaskRunState) { +} + +func (newState *CleanRoomTaskRunState) SyncEffectiveFieldsDuringRead(existingState CleanRoomTaskRunState) { +} + type ClusterInstance struct { // The canonical identifier for the cluster used by a run. This field is // always available for runs on existing clusters. For runs on new clusters, @@ -385,9 +401,8 @@ type CreateJob struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` - // Write-only setting. Specifies the user, service principal or group that - // the job/pipeline runs as. If not specified, the job/pipeline runs as the - // user who created the job/pipeline. + // Write-only setting. Specifies the user or service principal that the job + // runs as. If not specified, the job runs as the user who created the job. // // Either `user_name` or `service_principal_name` should be specified. If // not, an error is thrown. @@ -1174,9 +1189,8 @@ func (newState *JobPermissionsRequest) SyncEffectiveFieldsDuringCreateOrUpdate(p func (newState *JobPermissionsRequest) SyncEffectiveFieldsDuringRead(existingState JobPermissionsRequest) { } -// Write-only setting. Specifies the user, service principal or group that the -// job/pipeline runs as. If not specified, the job/pipeline runs as the user who -// created the job/pipeline. +// Write-only setting. Specifies the user or service principal that the job runs +// as. If not specified, the job runs as the user who created the job. // // Either `user_name` or `service_principal_name` should be specified. If not, // an error is thrown. @@ -1269,9 +1283,8 @@ type JobSettings struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. 
Queue []QueueSettings `tfsdk:"queue" tf:"optional,object"` - // Write-only setting. Specifies the user, service principal or group that - // the job/pipeline runs as. If not specified, the job/pipeline runs as the - // user who created the job/pipeline. + // Write-only setting. Specifies the user or service principal that the job + // runs as. If not specified, the job runs as the user who created the job. // // Either `user_name` or `service_principal_name` should be specified. If // not, an error is thrown. diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go index e34e7c4d5f..ca15612fc6 100755 --- a/internal/service/settings_tf/model.go +++ b/internal/service/settings_tf/model.go @@ -478,6 +478,78 @@ func (newState *DeleteAccountIpAccessListRequest) SyncEffectiveFieldsDuringCreat func (newState *DeleteAccountIpAccessListRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAccountIpAccessListRequest) { } +// Delete the AI/BI dashboard embedding access policy +type DeleteAibiDashboardEmbeddingAccessPolicySettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag types.String `tfsdk:"-"` +} + +func (newState *DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +func (newState *DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) { +} + +// The etag is returned. +type DeleteAibiDashboardEmbeddingAccessPolicySettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +func (newState *DeleteAibiDashboardEmbeddingAccessPolicySettingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAibiDashboardEmbeddingAccessPolicySettingResponse) { +} + +func (newState *DeleteAibiDashboardEmbeddingAccessPolicySettingResponse) SyncEffectiveFieldsDuringRead(existingState DeleteAibiDashboardEmbeddingAccessPolicySettingResponse) { +} + +// Delete AI/BI dashboard embedding approved domains +type DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. 
That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +func (newState *DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +func (newState *DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) SyncEffectiveFieldsDuringRead(existingState DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) { +} + +// The etag is returned. +type DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +func (newState *DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) SyncEffectiveFieldsDuringCreateOrUpdate(plan DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) { +} + +func (newState *DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) SyncEffectiveFieldsDuringRead(existingState DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) { +} + // Delete the default namespace setting type DeleteDefaultNamespaceSettingRequest struct { // etag used for versioning. 
The response is at least as fresh as the eTag @@ -845,6 +917,101 @@ func (newState *DisableLegacyFeatures) SyncEffectiveFieldsDuringCreateOrUpdate(p func (newState *DisableLegacyFeatures) SyncEffectiveFieldsDuringRead(existingState DisableLegacyFeatures) { } +// The network policies applying for egress traffic. This message is used by the +// UI/REST API. We translate this message to the format expected by the +// dataplane in Lakehouse Network Manager (for the format expected by the +// dataplane, see networkconfig.textproto). +type EgressNetworkPolicy struct { + // The access policy enforced for egress traffic to the internet. + InternetAccess []EgressNetworkPolicyInternetAccessPolicy `tfsdk:"internet_access" tf:"optional,object"` +} + +func (newState *EgressNetworkPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan EgressNetworkPolicy) { +} + +func (newState *EgressNetworkPolicy) SyncEffectiveFieldsDuringRead(existingState EgressNetworkPolicy) { +} + +type EgressNetworkPolicyInternetAccessPolicy struct { + AllowedInternetDestinations []EgressNetworkPolicyInternetAccessPolicyInternetDestination `tfsdk:"allowed_internet_destinations" tf:"optional"` + + AllowedStorageDestinations []EgressNetworkPolicyInternetAccessPolicyStorageDestination `tfsdk:"allowed_storage_destinations" tf:"optional"` + // Optional. If not specified, assume the policy is enforced for all + // workloads. + LogOnlyMode []EgressNetworkPolicyInternetAccessPolicyLogOnlyMode `tfsdk:"log_only_mode" tf:"optional,object"` + // At which level can Databricks and Databricks managed compute access + // Internet. FULL_ACCESS: Databricks can access Internet. No blocking rules + // will apply. RESTRICTED_ACCESS: Databricks can only access explicitly + // allowed internet and storage destinations, as well as UC connections and + // external locations. PRIVATE_ACCESS_ONLY (not used): Databricks can only + // access destinations via private link. 
+ RestrictionMode types.String `tfsdk:"restriction_mode" tf:"optional"` +} + +func (newState *EgressNetworkPolicyInternetAccessPolicy) SyncEffectiveFieldsDuringCreateOrUpdate(plan EgressNetworkPolicyInternetAccessPolicy) { +} + +func (newState *EgressNetworkPolicyInternetAccessPolicy) SyncEffectiveFieldsDuringRead(existingState EgressNetworkPolicyInternetAccessPolicy) { +} + +// Users can specify accessible internet destinations when outbound access is +// restricted. We only support domain name (FQDN) destinations for the time +// being, though going forwards we want to support host names and IP addresses. +type EgressNetworkPolicyInternetAccessPolicyInternetDestination struct { + Destination types.String `tfsdk:"destination" tf:"optional"` + // The filtering protocol used by the DP. For private and public preview, + // SEG will only support TCP filtering (i.e. DNS based filtering, filtering + // by destination IP address), so protocol will be set to TCP by default and + // hidden from the user. In the future, users may be able to select HTTP + // filtering (i.e. SNI based filtering, filtering by FQDN). 
+ Protocol types.String `tfsdk:"protocol" tf:"optional"` + + Type types.String `tfsdk:"type" tf:"optional"` +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyInternetDestination) SyncEffectiveFieldsDuringCreateOrUpdate(plan EgressNetworkPolicyInternetAccessPolicyInternetDestination) { +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyInternetDestination) SyncEffectiveFieldsDuringRead(existingState EgressNetworkPolicyInternetAccessPolicyInternetDestination) { +} + +type EgressNetworkPolicyInternetAccessPolicyLogOnlyMode struct { + LogOnlyModeType types.String `tfsdk:"log_only_mode_type" tf:"optional"` + + Workloads []types.String `tfsdk:"workloads" tf:"optional"` +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyLogOnlyMode) SyncEffectiveFieldsDuringCreateOrUpdate(plan EgressNetworkPolicyInternetAccessPolicyLogOnlyMode) { +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyLogOnlyMode) SyncEffectiveFieldsDuringRead(existingState EgressNetworkPolicyInternetAccessPolicyLogOnlyMode) { +} + +// Users can specify accessible storage destinations. 
+type EgressNetworkPolicyInternetAccessPolicyStorageDestination struct { + AllowedPaths []types.String `tfsdk:"allowed_paths" tf:"optional"` + + AzureContainer types.String `tfsdk:"azure_container" tf:"optional"` + + AzureDnsZone types.String `tfsdk:"azure_dns_zone" tf:"optional"` + + AzureStorageAccount types.String `tfsdk:"azure_storage_account" tf:"optional"` + + AzureStorageService types.String `tfsdk:"azure_storage_service" tf:"optional"` + + BucketName types.String `tfsdk:"bucket_name" tf:"optional"` + + Region types.String `tfsdk:"region" tf:"optional"` + + Type types.String `tfsdk:"type" tf:"optional"` +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyStorageDestination) SyncEffectiveFieldsDuringCreateOrUpdate(plan EgressNetworkPolicyInternetAccessPolicyStorageDestination) { +} + +func (newState *EgressNetworkPolicyInternetAccessPolicyStorageDestination) SyncEffectiveFieldsDuringRead(existingState EgressNetworkPolicyInternetAccessPolicyStorageDestination) { +} + type EmailConfig struct { // Email addresses to notify. Addresses []types.String `tfsdk:"addresses" tf:"optional"` diff --git a/internal/service/sharing_tf/model.go b/internal/service/sharing_tf/model.go index 6bde086372..6de053a937 100755 --- a/internal/service/sharing_tf/model.go +++ b/internal/service/sharing_tf/model.go @@ -354,13 +354,24 @@ func (newState *Partition) SyncEffectiveFieldsDuringCreateOrUpdate(plan Partitio func (newState *Partition) SyncEffectiveFieldsDuringRead(existingState Partition) { } +type PartitionSpecificationPartition struct { + // An array of partition values. + Values []PartitionValue `tfsdk:"value" tf:"optional"` +} + +func (newState *PartitionSpecificationPartition) SyncEffectiveFieldsDuringCreateOrUpdate(plan PartitionSpecificationPartition) { +} + +func (newState *PartitionSpecificationPartition) SyncEffectiveFieldsDuringRead(existingState PartitionSpecificationPartition) { +} + type PartitionValue struct { // The name of the partition column. 
Name types.String `tfsdk:"name" tf:"optional"` // The operator to apply for the value. Op types.String `tfsdk:"op" tf:"optional"` // The key of a Delta Sharing recipient's property. For example - // `databricks-account-id`. When this field is set, field `value` can not be + // "databricks-account-id". When this field is set, field `value` can not be // set. RecipientPropertyKey types.String `tfsdk:"recipient_property_key" tf:"optional"` // The value of the partition column. When this value is not set, it means @@ -606,8 +617,7 @@ type ShareInfo struct { // A list of shared data objects within the share. Objects []SharedDataObject `tfsdk:"object" tf:"optional"` // Username of current owner of share. - Owner types.String `tfsdk:"owner" tf:"optional"` - EffectiveOwner types.String `tfsdk:"effective_owner" tf:"computed,optional"` + Owner types.String `tfsdk:"owner" tf:"computed,optional"` // Storage Location URL (full path) for the share. StorageLocation types.String `tfsdk:"storage_location" tf:"optional"` // Storage root URL for the share. @@ -619,15 +629,9 @@ type ShareInfo struct { } func (newState *ShareInfo) SyncEffectiveFieldsDuringCreateOrUpdate(plan ShareInfo) { - newState.EffectiveOwner = newState.Owner - newState.Owner = plan.Owner } func (newState *ShareInfo) SyncEffectiveFieldsDuringRead(existingState ShareInfo) { - newState.EffectiveOwner = existingState.EffectiveOwner - if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { - newState.Owner = existingState.Owner - } } // Get recipient share permissions @@ -836,8 +840,7 @@ type UpdateShare struct { // New name for the share. NewName types.String `tfsdk:"new_name" tf:"optional"` // Username of current owner of share. - Owner types.String `tfsdk:"owner" tf:"optional"` - EffectiveOwner types.String `tfsdk:"effective_owner" tf:"computed,optional"` + Owner types.String `tfsdk:"owner" tf:"computed,optional"` // Storage root URL for the share. 
StorageRoot types.String `tfsdk:"storage_root" tf:"optional"` // Array of shared data object updates. @@ -845,15 +848,9 @@ type UpdateShare struct { } func (newState *UpdateShare) SyncEffectiveFieldsDuringCreateOrUpdate(plan UpdateShare) { - newState.EffectiveOwner = newState.Owner - newState.Owner = plan.Owner } func (newState *UpdateShare) SyncEffectiveFieldsDuringRead(existingState UpdateShare) { - newState.EffectiveOwner = existingState.EffectiveOwner - if existingState.EffectiveOwner.ValueString() == newState.Owner.ValueString() { - newState.Owner = existingState.Owner - } } type UpdateSharePermissions struct {