diff --git a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go index fc97eae941..310793e882 100644 --- a/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go +++ b/internal/service/backupcompliancepolicy/resource_backup_compliance_policy_test.go @@ -380,7 +380,7 @@ func configOverwriteIncompatibleBackupPoliciesError(projectName, orgID, projectO retention_value = 1 } } - `, info.ClusterTerraformStr, info.ClusterResourceName) + `, info.TerraformStr, info.ResourceName) } func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string, info *acc.ClusterInfo) string { @@ -402,7 +402,7 @@ func configClusterWithBackupSchedule(projectName, orgID, projectOwnerID string, should_copy_oplogs = false } } - `, info.ClusterTerraformStr, info.ClusterResourceName) + `, info.TerraformStr, info.ResourceName) } func basicChecks() []resource.TestCheckFunc { diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go index 2caacc8108..435f2e103a 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_migration_test.go @@ -30,8 +30,8 @@ func TestMigBackupRSCloudBackupSchedule_basic(t *testing.T) { Config: config, Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), diff --git a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go index 66f13b6235..b2f26f32fd 100644 --- a/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go +++ b/internal/service/cloudbackupschedule/resource_cloud_backup_schedule_test.go @@ -36,7 +36,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -45,7 +45,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_weekly.#", "0"), resource.TestCheckResourceAttr(resourceName, "policy_item_monthly.#", "0"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.#", "0"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", 
clusterInfo.Name), resource.TestCheckResourceAttrSet(dataSourceName, "reference_hour_of_day"), resource.TestCheckResourceAttrSet(dataSourceName, "reference_minute_of_hour"), resource.TestCheckResourceAttrSet(dataSourceName, "restore_window_days"), @@ -64,7 +64,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }, true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -93,7 +93,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.frequency_interval", "1"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_unit", "years"), resource.TestCheckResourceAttr(resourceName, "policy_item_yearly.0.retention_value", "1"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttrSet(dataSourceName, "reference_hour_of_day"), resource.TestCheckResourceAttrSet(dataSourceName, "reference_minute_of_hour"), resource.TestCheckResourceAttrSet(dataSourceName, "restore_window_days"), @@ -107,7 +107,7 @@ func TestAccBackupRSCloudBackupSchedule_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "false"), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), @@ -167,7 +167,7 @@ func TestAccBackupRSCloudBackupSchedule_export(t *testing.T) { Config: configExportPolicies(&clusterInfo, policyName, roleName, bucketName), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "auto_export_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "20"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "5"), @@ -199,7 +199,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -233,7 +233,7 @@ func TestAccBackupRSCloudBackupSchedule_onePolicy(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, 
"cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "0"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "0"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "7"), @@ -259,9 +259,9 @@ func TestAccBackupRSCloudBackupSchedule_copySettings(t *testing.T) { }, PitEnabled: true, // you cannot copy oplogs when pit is not enabled }) - clusterName = clusterInfo.ClusterName - terraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterName = clusterInfo.Name + terraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName projectID = clusterInfo.ProjectID checkMap = map[string]string{ "cluster_name": clusterName, @@ -345,7 +345,7 @@ func TestAccBackupRSCloudBackupScheduleImport_basic(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "reference_hour_of_day", "3"), resource.TestCheckResourceAttr(resourceName, "reference_minute_of_hour", "45"), resource.TestCheckResourceAttr(resourceName, "restore_window_days", "4"), @@ -400,7 +400,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "1"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "1")), @@ -413,7 +413,7 @@ func TestAccBackupRSCloudBackupSchedule_azure(t *testing.T) { }), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.frequency_interval", "2"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_unit", "days"), resource.TestCheckResourceAttr(resourceName, "policy_item_hourly.0.retention_value", "3"), @@ -473,10 +473,10 @@ func checkDestroy(s *terraform.State) error { } func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -485,16 +485,16 @@ func configNoPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), 
p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -529,9 +529,9 @@ func configDefault(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) s data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configCopySettings(terraformStr, projectID, clusterResourceName string, emptyCopySettings bool, p *admin.DiskBackupSnapshotSchedule) string { @@ -592,10 +592,10 @@ func configCopySettings(terraformStr, projectID, clusterResourceName string, emp } func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -607,7 +607,7 @@ func configOnePolicy(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) retention_value = 1 } } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule, useYearly bool) string { @@ -622,10 +622,10 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedul ` } - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q reference_hour_of_day = %[3]d reference_minute_of_hour = %[4]d @@ -656,16 +656,16 @@ func configNewPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedul data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays(), strYearly) } func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q policy_item_hourly { frequency_interval = %[3]d @@ -676,16 +676,16 @@ func configAzure(info *acc.ClusterInfo, policy *admin.DiskBackupApiPolicyItem) s data "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, 
policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) + `, info.TerraformNameRef, info.ProjectID, policy.GetFrequencyInterval(), policy.GetRetentionUnit(), policy.GetRetentionValue()) } func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSchedule) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q auto_export_enabled = false reference_hour_of_day = %[3]d @@ -728,14 +728,14 @@ func configAdvancedPolicies(info *acc.ClusterInfo, p *admin.DiskBackupSnapshotSc retention_value = 1 } } - `, info.ClusterNameStr, info.ProjectIDStr, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) + `, info.TerraformNameRef, info.ProjectID, p.GetReferenceHourOfDay(), p.GetReferenceMinuteOfHour(), p.GetRestoreWindowDays()) } func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketName string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_schedule" "schedule_test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q auto_export_enabled = true reference_hour_of_day = 20 reference_minute_of_hour = "05" @@ -775,12 +775,12 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } resource "mongodbatlas_cloud_provider_access_setup" "setup_only" { - project_id = %[2]s + project_id = %[2]q provider_name = "AWS" } resource "mongodbatlas_cloud_provider_access_authorization" "auth_role" { - project_id = %[2]s + project_id = %[2]q role_id = mongodbatlas_cloud_provider_access_setup.setup_only.role_id aws { iam_assumed_role_arn = aws_iam_role.test_role.arn @@ -788,7 +788,7 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } resource "mongodbatlas_cloud_backup_snapshot_export_bucket" "test" { - project_id = %[2]s + project_id = %[2]q iam_role_id = mongodbatlas_cloud_provider_access_authorization.auth_role.role_id bucket_name = aws_s3_bucket.backup.bucket cloud_provider = "AWS" @@ -837,7 +837,7 @@ func configExportPolicies(info *acc.ClusterInfo, policyName, roleName, bucketNam } EOF } - `, info.ClusterNameStr, info.ProjectIDStr, policyName, roleName, bucketName) + `, info.TerraformNameRef, info.ProjectID, policyName, roleName, bucketName) } func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go index f15e1c1f93..164cab06a7 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_migration_test.go @@ -29,8 +29,8 @@ func TestMigBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(resourceName, "members.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + 
resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "retention_in_days", retentionInDays), diff --git a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go index b93c361c65..993eebd793 100644 --- a/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go +++ b/internal/service/cloudbackupsnapshot/resource_cloud_backup_snapshot_test.go @@ -38,8 +38,8 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(resourceName, "members.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(resourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(resourceName, "description", description), resource.TestCheckResourceAttr(resourceName, "retention_in_days", retentionInDays), @@ -47,8 +47,8 @@ func TestAccBackupRSCloudBackupSnapshot_basic(t *testing.T) { resource.TestCheckResourceAttr(dataSourceName, "type", "replicaSet"), resource.TestCheckResourceAttr(dataSourceName, "members.#", "0"), resource.TestCheckResourceAttr(dataSourceName, "snapshot_ids.#", "0"), - resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.ClusterName), - resource.TestCheckResourceAttr(dataSourceName, "replica_set_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(dataSourceName, "cluster_name", clusterInfo.Name), + resource.TestCheckResourceAttr(dataSourceName, "replica_set_name", clusterInfo.Name), resource.TestCheckResourceAttr(dataSourceName, "cloud_provider", "AWS"), resource.TestCheckResourceAttr(dataSourceName, "description", description), resource.TestCheckResourceAttrSet(dataSourcePluralSimpleName, "results.#"), @@ -147,10 +147,10 @@ func importStateIDFunc(resourceName string) resource.ImportStateIdFunc { } func configBasic(info *acc.ClusterInfo, description, retentionInDays string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_cloud_backup_snapshot" "test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q description = %[3]q retention_in_days = %[4]q } @@ -158,21 +158,21 @@ func configBasic(info *acc.ClusterInfo, description, retentionInDays string) str data "mongodbatlas_cloud_backup_snapshot" "test" { snapshot_id = mongodbatlas_cloud_backup_snapshot.test.snapshot_id cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } data "mongodbatlas_cloud_backup_snapshots" "test" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } data "mongodbatlas_cloud_backup_snapshots" "pagination" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q page_num = 1 items_per_page = 5 } - `, info.ClusterNameStr, info.ProjectIDStr, description, retentionInDays) + `, info.TerraformNameRef, info.ProjectID, description, 
retentionInDays) } func configSharded(projectID, clusterName, description, retentionInDays string) string { diff --git a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go index 4b451363e5..7ebf7f5694 100644 --- a/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go +++ b/internal/service/cloudbackupsnapshotexportjob/resource_cloud_backup_snapshot_export_job_test.go @@ -58,7 +58,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configBasic(projectID, bucketName, roleName, policyName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr), + Config: configBasic(projectID, bucketName, roleName, policyName, clusterInfo.TerraformNameRef, clusterInfo.TerraformStr), Check: resource.ComposeAggregateTestCheckFunc(checks...), }, { diff --git a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go index e14a88c3b8..3f27e3a900 100644 --- a/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go +++ b/internal/service/cloudbackupsnapshotrestorejob/resource_cloud_backup_snapshot_restore_job_test.go @@ -34,12 +34,12 @@ func TestAccCloudBackupSnapshotRestoreJob_basic(t *testing.T) { func TestAccCloudBackupSnapshotRestoreJob_basicDownload(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" useSnapshotID = true - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -69,7 +69,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { snapshotsDataSourceName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.test" snapshotsDataSourcePaginationName = "data.mongodbatlas_cloud_backup_snapshot_restore_jobs.pagination" clusterInfo = acc.GetClusterInfo(tb, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name description = fmt.Sprintf("My description in %s", clusterName) retentionInDays = "1" ) @@ -80,7 +80,7 @@ func basicTestCase(tb testing.TB) *resource.TestCase { CheckDestroy: checkDestroy, Steps: []resource.TestStep{ { - Config: configBasic(clusterInfo.ClusterTerraformStr, clusterInfo.ClusterResourceName, description, retentionInDays), + Config: configBasic(clusterInfo.TerraformStr, clusterInfo.ResourceName, description, retentionInDays), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttr(resourceName, "delivery_type_config.0.automated", "true"), diff --git a/internal/service/cluster/resource_cluster_test.go b/internal/service/cluster/resource_cluster_test.go index 50dedc053c..4e891aced7 100644 --- a/internal/service/cluster/resource_cluster_test.go +++ b/internal/service/cluster/resource_cluster_test.go @@ -603,7 +603,7 @@ func TestAccCluster_Global(t *testing.T) { CheckDestroy: acc.CheckDestroyCluster, Steps: []resource.TestStep{ { - Config: 
acc.ConfigClusterGlobal(orgID, projectName, clusterName), + Config: configClusterGlobal(orgID, projectName, clusterName), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "mongo_uri"), @@ -2290,6 +2290,51 @@ resource "mongodbatlas_cluster" "test" { `, projectID, name, backupEnabled, paused) } +func configClusterGlobal(orgID, projectName, clusterName string) string { + return fmt.Sprintf(` + + resource "mongodbatlas_project" "test" { + org_id = %[1]q + name = %[2]q + } + + resource "mongodbatlas_cluster" test { + project_id = mongodbatlas_project.test.id + name = %[3]q + disk_size_gb = 80 + num_shards = 1 + cloud_backup = false + cluster_type = "GEOSHARDED" + + // Provider Settings "block" + provider_name = "AWS" + provider_instance_size_name = "M30" + + replication_specs { + zone_name = "Zone 1" + num_shards = 2 + regions_config { + region_name = "US_EAST_1" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + + replication_specs { + zone_name = "Zone 2" + num_shards = 2 + regions_config { + region_name = "US_WEST_2" + electable_nodes = 3 + priority = 7 + read_only_nodes = 0 + } + } + } + `, orgID, projectName, clusterName) +} + func TestIsMultiRegionCluster(t *testing.T) { tests := []struct { name string diff --git a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go index fedf03e316..cd0eb7dae5 100644 --- a/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go +++ b/internal/service/clusteroutagesimulation/resource_cluster_outage_simulation_test.go @@ -29,7 +29,7 @@ func singleRegionTestCase(t *testing.T) *resource.TestCase { }, } clusterInfo = acc.GetClusterInfo(t, &singleRegionRequest) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name ) return &resource.TestCase{ PreCheck: func() { acc.PreCheckBasic(t) }, @@ -76,7 +76,7 @@ func multiRegionTestCase(t *testing.T) *resource.TestCase { }, }} clusterInfo = acc.GetClusterInfo(t, &multiRegionRequest) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name ) return &resource.TestCase{ @@ -124,7 +124,7 @@ func configSingleRegion(info *acc.ClusterInfo) string { cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) + `, info.TerraformStr, info.ProjectID, info.Name, info.ResourceName) } func configMultiRegion(info *acc.ClusterInfo) string { @@ -150,7 +150,7 @@ func configMultiRegion(info *acc.ClusterInfo) string { cluster_name = %[3]q depends_on = [mongodbatlas_cluster_outage_simulation.test_outage] } - `, info.ClusterTerraformStr, info.ProjectID, info.ClusterName, info.ClusterResourceName) + `, info.TerraformStr, info.ProjectID, info.Name, info.ResourceName) } func checkDestroy(s *terraform.State) error { diff --git a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go index 8dc6667b01..7bba2984eb 100644 --- a/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go +++ b/internal/service/federateddatabaseinstance/resource_federated_database_instance_test.go @@ -129,7 +129,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { ResourceSuffix: "cluster2", } cluster2Info = 
acc.GetClusterInfo(t, &clusterRequest2) - dependencyTerraform = fmt.Sprintf("%s\n%s", clusterInfo.ClusterTerraformStr, cluster2Info.ClusterTerraformStr) + dependencyTerraform = fmt.Sprintf("%s\n%s", clusterInfo.TerraformStr, cluster2Info.TerraformStr) ) resource.ParallelTest(t, resource.TestCase{ @@ -138,7 +138,7 @@ func TestAccFederatedDatabaseInstance_atlasCluster(t *testing.T) { Steps: []resource.TestStep{ { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, - Config: configWithCluster(dependencyTerraform, projectID, clusterInfo.ClusterResourceName, cluster2Info.ClusterResourceName, name), + Config: configWithCluster(dependencyTerraform, projectID, clusterInfo.ResourceName, cluster2Info.ResourceName, name), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, "project_id"), resource.TestCheckResourceAttr(resourceName, "name", name), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go index c70697344c..7353bc22cd 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_migration_test.go @@ -27,7 +27,7 @@ func TestMigClusterRSGlobalCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_custom_shard_key_hashed", "false"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_shard_key_unique", "false"), diff --git a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go index 68cdc51f5e..522305f543 100644 --- a/internal/service/globalclusterconfig/resource_global_cluster_config_test.go +++ b/internal/service/globalclusterconfig/resource_global_cluster_config_test.go @@ -31,7 +31,7 @@ func TestAccClusterRSGlobalCluster_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.#", "1"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_custom_shard_key_hashed", "false"), resource.TestCheckResourceAttr(resourceName, "managed_namespaces.0.is_shard_key_unique", "false"), @@ -64,7 +64,7 @@ func TestAccClusterRSGlobalCluster_withAWSAndBackup(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.%"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.CA"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + 
resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), ), }, { @@ -103,7 +103,7 @@ func TestAccClusterRSGlobalCluster_database(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.IE"), resource.TestCheckResourceAttrSet(resourceName, "custom_zone_mapping.DE"), resource.TestCheckResourceAttrSet(resourceName, "project_id"), - resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.ClusterName), + resource.TestCheckResourceAttr(resourceName, "cluster_name", clusterInfo.Name), ), }, { @@ -174,10 +174,10 @@ func checkDestroy(s *terraform.State) error { } func configBasic(info *acc.ClusterInfo, isCustomShard, isShardKeyUnique bool) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q managed_namespaces { db = "mydata" @@ -195,16 +195,16 @@ func configBasic(info *acc.ClusterInfo, isCustomShard, isShardKeyUnique bool) st data "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q } - `, info.ClusterNameStr, info.ProjectIDStr, isCustomShard, isShardKeyUnique) + `, info.TerraformNameRef, info.ProjectID, isCustomShard, isShardKeyUnique) } func configWithDBConfig(info *acc.ClusterInfo, zones string) string { - return info.ClusterTerraformStr + fmt.Sprintf(` + return info.TerraformStr + fmt.Sprintf(` resource "mongodbatlas_global_cluster_config" "config" { cluster_name = %[1]s - project_id = %[2]s + project_id = %[2]q managed_namespaces { db = "horizonv2-sg" @@ -233,7 +233,7 @@ func configWithDBConfig(info *acc.ClusterInfo, zones string) string { } %[3]s } - `, info.ClusterNameStr, info.ProjectIDStr, zones) + `, info.TerraformNameRef, info.ProjectID, zones) } const ( diff --git a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go index f9eeba0eac..5fb300be5c 100644 --- a/internal/service/ldapconfiguration/resource_ldap_configuration_test.go +++ b/internal/service/ldapconfiguration/resource_ldap_configuration_test.go @@ -37,7 +37,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { }, }) projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr ) resource.Test(t, resource.TestCase{ @@ -45,7 +45,7 @@ func TestAccLDAPConfiguration_withVerify_CACertificateComplete(t *testing.T) { ProtoV6ProviderFactories: acc.TestAccProviderV6Factories, Steps: []resource.TestStep{ { - Config: configWithVerify(clusterTerraformStr, clusterInfo.ClusterResourceName, projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), + Config: configWithVerify(clusterTerraformStr, clusterInfo.ResourceName, projectID, hostname, username, password, caCertificate, cast.ToInt(port), true), Check: resource.ComposeAggregateTestCheckFunc( checkExists(resourceName), resource.TestCheckResourceAttrSet(resourceName, "project_id"), diff --git a/internal/service/onlinearchive/resource_online_archive_migration_test.go b/internal/service/onlinearchive/resource_online_archive_migration_test.go index 96fe9c4d1f..6035a59544 100644 --- a/internal/service/onlinearchive/resource_online_archive_migration_test.go +++ b/internal/service/onlinearchive/resource_online_archive_migration_test.go @@ -13,10 +13,10 @@ func 
TestMigBackupRSOnlineArchiveWithNoChangeBetweenVersions(t *testing.T) { var ( onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName deleteExpirationDays = 0 ) if mig.IsProviderVersionAtLeast("1.12.2") { diff --git a/internal/service/onlinearchive/resource_online_archive_test.go b/internal/service/onlinearchive/resource_online_archive_test.go index ecb3de2241..5f2e95b16d 100644 --- a/internal/service/onlinearchive/resource_online_archive_test.go +++ b/internal/service/onlinearchive/resource_online_archive_test.go @@ -30,10 +30,10 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" onlineArchivesDataSourceName = "data.mongodbatlas_online_archives.all" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterName = clusterInfo.ClusterName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr - clusterResourceName = clusterInfo.ClusterResourceName + clusterTerraformStr = clusterInfo.TerraformStr + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ @@ -127,11 +127,11 @@ func TestAccBackupRSOnlineArchive(t *testing.T) { func TestAccBackupRSOnlineArchiveBasic(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterResourceName = clusterInfo.ClusterResourceName - clusterName = clusterInfo.ClusterName + clusterResourceName = clusterInfo.ResourceName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr ) resource.ParallelTest(t, resource.TestCase{ @@ -175,10 +175,10 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { onlineArchiveResourceName = "mongodbatlas_online_archive.users_archive" onlineArchiveDataSourceName = "data.mongodbatlas_online_archive.read_archive" clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterResourceName = clusterInfo.ClusterResourceName - clusterName = clusterInfo.ClusterName + clusterResourceName = clusterInfo.ResourceName + clusterName = clusterInfo.Name projectID = clusterInfo.ProjectID - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr cloudProvider = "AWS" processRegion = "US_EAST_1" ) @@ -221,9 +221,9 @@ func TestAccBackupRSOnlineArchiveWithProcessRegion(t *testing.T) { func TestAccBackupRSOnlineArchiveInvalidProcessRegion(t *testing.T) { var ( clusterInfo = acc.GetClusterInfo(t, clusterRequest()) - clusterTerraformStr = clusterInfo.ClusterTerraformStr + clusterTerraformStr = clusterInfo.TerraformStr cloudProvider = "AWS" - clusterResourceName = clusterInfo.ClusterResourceName + clusterResourceName = clusterInfo.ResourceName ) resource.ParallelTest(t, resource.TestCase{ diff --git a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go index a020aa6aa8..93be48622b 
100644 --- a/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go +++ b/internal/service/privateendpointregionalmode/resource_private_endpoint_regional_mode_test.go @@ -30,13 +30,13 @@ func TestAccPrivateEndpointRegionalMode_conn(t *testing.T) { spec2 = acc.ReplicationSpecRequest{Region: "US_WEST_2", ProviderName: providerName, ZoneName: "Zone 2"} clusterInfo = acc.GetClusterInfo(t, &acc.ClusterRequest{Geosharded: true, DiskSizeGb: 80, ReplicationSpecs: []acc.ReplicationSpecRequest{spec1, spec2}}) projectID = clusterInfo.ProjectID - clusterResourceName = clusterInfo.ClusterResourceName + clusterResourceName = clusterInfo.ResourceName clusterDataName = "data.mongodbatlas_advanced_cluster.test" endpointResources = testConfigUnmanagedAWS( awsAccessKey, awsSecretKey, projectID, providerName, region, endpointResourceSuffix, ) clusterDataSource = modeClusterData(clusterResourceName, resourceName, privatelinkEndpointServiceResourceName) - dependencies = []string{clusterInfo.ClusterTerraformStr, clusterDataSource, endpointResources} + dependencies = []string{clusterInfo.TerraformStr, clusterDataSource, endpointResources} ) resource.Test(t, resource.TestCase{ diff --git a/internal/testutil/acc/advanced_cluster.go b/internal/testutil/acc/advanced_cluster.go index 31c6b27a04..45ccad7a9e 100644 --- a/internal/testutil/acc/advanced_cluster.go +++ b/internal/testutil/acc/advanced_cluster.go @@ -40,51 +40,6 @@ func CheckDestroyCluster(s *terraform.State) error { return nil } -func ConfigClusterGlobal(orgID, projectName, clusterName string) string { - return fmt.Sprintf(` - - resource "mongodbatlas_project" "test" { - org_id = %[1]q - name = %[2]q - } - - resource "mongodbatlas_cluster" test { - project_id = mongodbatlas_project.test.id - name = %[3]q - disk_size_gb = 80 - num_shards = 1 - cloud_backup = false - cluster_type = "GEOSHARDED" - - // Provider Settings "block" - provider_name = "AWS" - provider_instance_size_name = "M30" - - replication_specs { - zone_name = "Zone 1" - num_shards = 2 - regions_config { - region_name = "US_EAST_1" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - - replication_specs { - zone_name = "Zone 2" - num_shards = 2 - regions_config { - region_name = "US_WEST_2" - electable_nodes = 3 - priority = 7 - read_only_nodes = 0 - } - } - } - `, orgID, projectName, clusterName) -} - func ImportStateClusterIDFunc(resourceName string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { rs, ok := s.RootModule().Resources[resourceName] diff --git a/internal/testutil/acc/cluster.go b/internal/testutil/acc/cluster.go index ec6fbabec3..9298e1da19 100644 --- a/internal/testutil/acc/cluster.go +++ b/internal/testutil/acc/cluster.go @@ -10,6 +10,8 @@ import ( "go.mongodb.org/atlas-sdk/v20240530002/admin" ) +// ClusterRequest contains configuration for a cluster where all fields are optional and AddDefaults is used for required fields. +// Used together with GetClusterInfo which will set ProjectID if it is unset. type ClusterRequest struct { Tags map[string]string ProjectID string @@ -26,6 +28,7 @@ type ClusterRequest struct { PitEnabled bool } +// AddDefaults ensures the required fields are populated to generate a resource. 
func (r *ClusterRequest) AddDefaults() {
	if r.ResourceSuffix == "" {
		r.ResourceSuffix = defaultClusterResourceSuffix
@@ -38,49 +41,52 @@ func (r *ClusterRequest) AddDefaults() {
	}
}

+func (r *ClusterRequest) ClusterType() string {
+	if r.Geosharded {
+		return "GEOSHARDED"
+	}
+	return "REPLICASET"
+}
+
type ClusterInfo struct {
-	ProjectIDStr        string
-	ProjectID           string
-	ClusterName         string
-	ClusterResourceName string
-	ClusterNameStr      string
-	ClusterTerraformStr string
+	ProjectID        string
+	Name             string
+	ResourceName     string
+	TerraformNameRef string
+	TerraformStr     string
}

const defaultClusterResourceSuffix = "cluster_info"

// GetClusterInfo is used to obtain a project and cluster configuration resource.
-// When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, creation of resources is avoided. This is useful for local execution but not intended for CI executions.
-// Clusters will be created in project ProjectIDExecution.
+// When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, a data source is created instead. This is useful for local execution but not intended for CI execution.
+// Clusters will be created in project ProjectIDExecution or in req.ProjectID, which can be either a direct id, e.g., `664610ec80cc36255e634074`, or a config reference, e.g., `mongodbatlas_project.test.id`.
func GetClusterInfo(tb testing.TB, req *ClusterRequest) ClusterInfo {
	tb.Helper()
	if req == nil {
		req = new(ClusterRequest)
	}
+	hclCreator := ClusterResourceHcl
	if req.ProjectID == "" {
		if ExistingClusterUsed() {
			projectID, clusterName := existingProjectIDClusterName()
-			return ClusterInfo{
-				ProjectIDStr:        fmt.Sprintf("%q", projectID),
-				ProjectID:           projectID,
-				ClusterName:         clusterName,
-				ClusterNameStr:      fmt.Sprintf("%q", clusterName),
-				ClusterTerraformStr: "",
-			}
+			req.ProjectID = projectID
+			req.ClusterName = clusterName
+			hclCreator = ClusterDatasourceHcl
+		} else {
+			req.ProjectID = ProjectIDExecution(tb)
		}
-		req.ProjectID = ProjectIDExecution(tb)
	}
-	clusterTerraformStr, clusterName, clusterResourceName, err := ClusterResourceHcl(req)
+	clusterTerraformStr, clusterName, clusterResourceName, err := hclCreator(req)
	if err != nil {
		tb.Error(err)
	}
	return ClusterInfo{
-		ProjectIDStr:        fmt.Sprintf("%q", req.ProjectID),
-		ProjectID:           req.ProjectID,
-		ClusterName:         clusterName,
-		ClusterNameStr:      fmt.Sprintf("%s.name", clusterResourceName),
-		ClusterResourceName: clusterResourceName,
-		ClusterTerraformStr: clusterTerraformStr,
+		ProjectID:        req.ProjectID,
+		Name:             clusterName,
+		TerraformNameRef: fmt.Sprintf("%s.name", clusterResourceName),
+		ResourceName:     clusterResourceName,
+		TerraformStr:     clusterTerraformStr,
	}
}

@@ -93,6 +99,9 @@ func existingProjectIDClusterName() (projectID, clusterName string) {
	return os.Getenv("MONGODB_ATLAS_PROJECT_ID"), os.Getenv("MONGODB_ATLAS_CLUSTER_NAME")
}

+// ReplicationSpecRequest can be used to customize the ReplicationSpecs of a Cluster.
+// No fields are required.
+// Use `ExtraRegionConfigs` to specify multiple region configs.
type ReplicationSpecRequest struct { ZoneName string Region string @@ -128,15 +137,16 @@ func (r *ReplicationSpecRequest) AddDefaults() { } func (r *ReplicationSpecRequest) AllRegionConfigs() []admin.CloudRegionConfig { - config := CloudRegionConfig(*r) + config := cloudRegionConfig(*r) configs := []admin.CloudRegionConfig{config} - for _, extra := range r.ExtraRegionConfigs { - configs = append(configs, CloudRegionConfig(extra)) + for i := range r.ExtraRegionConfigs { + extra := r.ExtraRegionConfigs[i] + configs = append(configs, cloudRegionConfig(extra)) } return configs } -func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { +func replicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { if req == nil { req = new(ReplicationSpecRequest) } @@ -150,7 +160,7 @@ func ReplicationSpec(req *ReplicationSpecRequest) admin.ReplicationSpec { } } -func CloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { +func cloudRegionConfig(req ReplicationSpecRequest) admin.CloudRegionConfig { req.AddDefaults() var readOnly admin.DedicatedHardwareSpec if req.NodeCountReadOnly != 0 { diff --git a/internal/testutil/acc/config_cluster.go b/internal/testutil/acc/config_cluster.go new file mode 100644 index 0000000000..2968356ff9 --- /dev/null +++ b/internal/testutil/acc/config_cluster.go @@ -0,0 +1,160 @@ +package acc + +import ( + "errors" + "fmt" + "strings" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" + "go.mongodb.org/atlas-sdk/v20240530002/admin" +) + +func ClusterDatasourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { + if req == nil || req.ProjectID == "" || req.ClusterName == "" { + return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID and ClusterName set") + } + req.AddDefaults() + f := hclwrite.NewEmptyFile() + root := f.Body() + resourceType := "mongodbatlas_advanced_cluster" + resourceSuffix := req.ResourceSuffix + cluster := root.AppendNewBlock("data", []string{resourceType, resourceSuffix}).Body() + clusterResourceName := fmt.Sprintf("data.%s.%s", resourceType, resourceSuffix) + clusterName = req.ClusterName + clusterRootAttributes := map[string]any{ + "name": clusterName, + } + projectID := req.ProjectID + if strings.Contains(req.ProjectID, ".") { + err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) + if err != nil { + return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) + } + } else { + clusterRootAttributes["project_id"] = projectID + } + addPrimitiveAttributes(cluster, clusterRootAttributes) + return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err +} + +func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) { + if req == nil || req.ProjectID == "" { + return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set") + } + projectID := req.ProjectID + req.AddDefaults() + specRequests := req.ReplicationSpecs + specs := make([]admin.ReplicationSpec, len(specRequests)) + for i := range specRequests { + specRequest := specRequests[i] + specs[i] = replicationSpec(&specRequest) + } + clusterName = req.ClusterName + resourceSuffix := req.ResourceSuffix + clusterType := req.ClusterType() + + f := hclwrite.NewEmptyFile() + root := f.Body() + resourceType := "mongodbatlas_advanced_cluster" + cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body() + clusterRootAttributes := map[string]any{ + 
"cluster_type": clusterType, + "name": clusterName, + "backup_enabled": req.CloudBackup, + "pit_enabled": req.PitEnabled, + "mongo_db_major_version": req.MongoDBMajorVersion, + } + if strings.Contains(req.ProjectID, ".") { + err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID)) + if err != nil { + return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID) + } + } else { + clusterRootAttributes["project_id"] = projectID + } + if req.DiskSizeGb != 0 { + clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb + } + if req.RetainBackupsEnabled { + clusterRootAttributes["retain_backups_enabled"] = req.RetainBackupsEnabled + } + addPrimitiveAttributes(cluster, clusterRootAttributes) + cluster.AppendNewline() + if len(req.AdvancedConfiguration) > 0 { + for _, key := range sortStringMapKeysAny(req.AdvancedConfiguration) { + if !knownAdvancedConfig[key] { + return "", "", "", fmt.Errorf("unknown key in advanced configuration: %s", key) + } + } + advancedClusterBlock := cluster.AppendNewBlock("advanced_configuration", nil).Body() + addPrimitiveAttributes(advancedClusterBlock, req.AdvancedConfiguration) + cluster.AppendNewline() + } + for i, spec := range specs { + err = writeReplicationSpec(cluster, spec) + if err != nil { + return "", "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err) + } + } + if len(req.Tags) > 0 { + for _, key := range sortStringMapKeys(req.Tags) { + value := req.Tags[key] + tagBlock := cluster.AppendNewBlock("tags", nil).Body() + tagBlock.SetAttributeValue("key", cty.StringVal(key)) + tagBlock.SetAttributeValue("value", cty.StringVal(value)) + } + } + cluster.AppendNewline() + if req.ResourceDependencyName != "" { + if !strings.Contains(req.ResourceDependencyName, ".") { + return "", "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'") + } + err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName)) + if err != nil { + return "", "", "", err + } + } + clusterResourceName := fmt.Sprintf("%s.%s", resourceType, resourceSuffix) + return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err +} + +func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error { + replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body() + err := addPrimitiveAttributesViaJSON(replicationBlock, spec) + if err != nil { + return err + } + for _, rc := range spec.GetRegionConfigs() { + if rc.Priority == nil { + rc.SetPriority(7) + } + replicationBlock.AppendNewline() + rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body() + err = addPrimitiveAttributesViaJSON(rcBlock, rc) + if err != nil { + return err + } + autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body() + if rc.AutoScaling == nil { + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false)) + } else { + autoScaling := rc.GetAutoScaling() + asDisk := autoScaling.GetDiskGB() + autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(asDisk.GetEnabled())) + if autoScaling.Compute != nil { + return fmt.Errorf("auto_scaling.compute is not supportd yet %v", autoScaling) + } + } + nodeSpec := rc.GetElectableSpecs() + nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body() + err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec) + + readOnlySpecs := rc.GetReadOnlySpecs() + if readOnlySpecs.GetNodeCount() != 0 { + readOnlyBlock := rcBlock.AppendNewBlock("read_only_specs", nil).Body() + err = 
addPrimitiveAttributesViaJSON(readOnlyBlock, readOnlySpecs) + } + } + return err +} diff --git a/internal/testutil/acc/config_cluster_test.go b/internal/testutil/acc/config_cluster_test.go new file mode 100644 index 0000000000..724e0ec8d6 --- /dev/null +++ b/internal/testutil/acc/config_cluster_test.go @@ -0,0 +1,396 @@ +package acc_test + +import ( + "testing" + + "github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant" + "github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var standardClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var overrideClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + project_id = mongodbatlas_project.test.id + backup_enabled = true + cluster_type = "GEOSHARDED" + mongo_db_major_version = "6.0" + name = "my-name" + pit_enabled = true + retain_backups_enabled = true + + advanced_configuration { + oplog_min_retention_hours = 8 + } + + replication_specs { + num_shards = 1 + zone_name = "Zone X" + + region_configs { + priority = 7 + provider_name = "AZURE" + region_name = "MY_REGION_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + ebs_volume_type = "STANDARD" + instance_size = "M30" + node_count = 30 + } + } + } + +} +` + +var dependsOnClusterResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_project.project_execution] +} +` +var dependsOnMultiResource = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + + depends_on = [mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple] +} +` +var twoReplicationSpecs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + replication_specs { + num_shards = 1 + zone_name = "Zone 2" + + region_configs { + priority = 7 + provider_name = "AWS" + 
region_name = "EU_WEST_2" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` +var twoRegionConfigs = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "EU_WEST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + +} +` + +var autoScalingDiskEnabled = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 7 + provider_name = "AWS" + region_name = "US_WEST_2" + auto_scaling { + disk_gb_enabled = true + } + electable_specs { + instance_size = "M10" + node_count = 3 + } + } + } + tags { + key = "ArchiveTest" + value = "true" + } + tags { + key = "Owner" + value = "test" + } + +} +` +var readOnlyAndPriority = ` +resource "mongodbatlas_advanced_cluster" "cluster_info" { + backup_enabled = false + cluster_type = "REPLICASET" + name = "my-name" + pit_enabled = false + project_id = "project" + + replication_specs { + num_shards = 1 + zone_name = "Zone 1" + + region_configs { + priority = 5 + provider_name = "AWS" + region_name = "US_EAST_1" + auto_scaling { + disk_gb_enabled = false + } + electable_specs { + instance_size = "M10" + node_count = 5 + } + read_only_specs { + instance_size = "M10" + node_count = 1 + } + } + } + +} +` + +func Test_ClusterResourceHcl(t *testing.T) { + var ( + clusterName = "my-name" + testCases = map[string]struct { + expected string + req acc.ClusterRequest + }{ + "defaults": { + standardClusterResource, + acc.ClusterRequest{ClusterName: clusterName}, + }, + "dependsOn": { + dependsOnClusterResource, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"}, + }, + "dependsOnMulti": { + dependsOnMultiResource, + acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"}, + }, + "twoReplicationSpecs": { + twoReplicationSpecs, + acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "US_WEST_1", ZoneName: "Zone 1"}, + {Region: "EU_WEST_2", ZoneName: "Zone 2"}, + }}, + }, + "overrideClusterResource": { + overrideClusterResource, + acc.ClusterRequest{ + ProjectID: "mongodbatlas_project.test.id", + ClusterName: clusterName, + Geosharded: true, + CloudBackup: true, + MongoDBMajorVersion: "6.0", + RetainBackupsEnabled: true, + ReplicationSpecs: []acc.ReplicationSpecRequest{ + {Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"}, + }, + PitEnabled: true, + AdvancedConfiguration: map[string]any{ + acc.ClusterAdvConfigOplogMinRetentionHours: 8, + }, + }, + }, + "twoRegionConfigs": { + twoRegionConfigs, + acc.ClusterRequest{ClusterName: 
+func Test_ClusterResourceHcl(t *testing.T) {
+	var (
+		clusterName = "my-name"
+		testCases   = map[string]struct {
+			expected string
+			req      acc.ClusterRequest
+		}{
+			"defaults": {
+				standardClusterResource,
+				acc.ClusterRequest{ClusterName: clusterName},
+			},
+			"dependsOn": {
+				dependsOnClusterResource,
+				acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"},
+			},
+			"dependsOnMulti": {
+				dependsOnMultiResource,
+				acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"},
+			},
+			"twoReplicationSpecs": {
+				twoReplicationSpecs,
+				acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{
+					{Region: "US_WEST_1", ZoneName: "Zone 1"},
+					{Region: "EU_WEST_2", ZoneName: "Zone 2"},
+				}},
+			},
+			"overrideClusterResource": {
+				overrideClusterResource,
+				acc.ClusterRequest{
+					ProjectID:            "mongodbatlas_project.test.id",
+					ClusterName:          clusterName,
+					Geosharded:           true,
+					CloudBackup:          true,
+					MongoDBMajorVersion:  "6.0",
+					RetainBackupsEnabled: true,
+					ReplicationSpecs: []acc.ReplicationSpecRequest{
+						{Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"},
+					},
+					PitEnabled: true,
+					AdvancedConfiguration: map[string]any{
+						acc.ClusterAdvConfigOplogMinRetentionHours: 8,
+					},
+				},
+			},
+			"twoRegionConfigs": {
+				twoRegionConfigs,
+				acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{
+					{
+						Region:             "US_WEST_1",
+						InstanceSize:       "M10",
+						NodeCount:          3,
+						ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}},
+					},
+				},
+				},
+			},
+			"autoScalingDiskEnabled": {
+				autoScalingDiskEnabled,
+				acc.ClusterRequest{ClusterName: clusterName, Tags: map[string]string{
+					"ArchiveTest": "true", "Owner": "test",
+				}, ReplicationSpecs: []acc.ReplicationSpecRequest{
+					{AutoScalingDiskGbEnabled: true},
+				}},
+			},
+			"readOnlyAndPriority": {
+				readOnlyAndPriority,
+				acc.ClusterRequest{
+					ClusterName: clusterName,
+					ReplicationSpecs: []acc.ReplicationSpecRequest{
+						{Priority: 5, NodeCount: 5, Region: "US_EAST_1", NodeCountReadOnly: 1},
+					}},
+			},
+		}
+	)
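+	// ProjectID is defaulted below so that only the "overrideClusterResource" case needs to set it explicitly.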
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			req := tc.req
+			if req.ProjectID == "" {
+				req.ProjectID = "project"
+			}
+			config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req)
+			require.NoError(t, err)
+			assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName)
+			assert.Equal(t, clusterName, actualClusterName)
+			assert.Equal(t, tc.expected, config)
+		})
+	}
+}
+
+var expectedDatasource = `
+data "mongodbatlas_advanced_cluster" "cluster_info" {
+  name       = "my-datasource-cluster"
+  project_id = "datasource-project"
+}
+`
+
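+// Test_ClusterDatasourceHcl covers the data-source variant, which only renders
+// name and project_id.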
+func Test_ClusterDatasourceHcl(t *testing.T) {
+	expectedClusterName := "my-datasource-cluster"
+	config, clusterName, resourceName, err := acc.ClusterDatasourceHcl(&acc.ClusterRequest{
+		ClusterName: expectedClusterName,
+		ProjectID:   "datasource-project",
+	})
+	require.NoError(t, err)
+	assert.Equal(t, "data.mongodbatlas_advanced_cluster.cluster_info", resourceName)
+	assert.Equal(t, expectedClusterName, clusterName)
+	assert.Equal(t, expectedDatasource, config)
+}
diff --git a/internal/testutil/acc/config_formatter.go b/internal/testutil/acc/config_formatter.go
index 595ee4009d..6ee705c87f 100644
--- a/internal/testutil/acc/config_formatter.go
+++ b/internal/testutil/acc/config_formatter.go
@@ -2,7 +2,6 @@ package acc
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"regexp"
 	"sort"
@@ -12,7 +11,6 @@ import (
 	"github.com/hashicorp/hcl/v2/hclsyntax"
 	"github.com/hashicorp/hcl/v2/hclwrite"
 	"github.com/zclconf/go-cty/cty"
-	"go.mongodb.org/atlas-sdk/v20240530002/admin"
 )
 
 func FormatToHCLMap(m map[string]string, indent, varName string) string {
@@ -83,129 +81,6 @@ var (
 	}
 )
 
-func ClusterResourceHcl(req *ClusterRequest) (configStr, clusterName, resourceName string, err error) {
-	if req == nil || req.ProjectID == "" {
-		return "", "", "", errors.New("must specify a ClusterRequest with at least ProjectID set")
-	}
-	projectID := req.ProjectID
-	req.AddDefaults()
-	specRequests := req.ReplicationSpecs
-	specs := make([]admin.ReplicationSpec, len(specRequests))
-	for i, specRequest := range specRequests {
-		specs[i] = ReplicationSpec(&specRequest)
-	}
-	clusterName = req.ClusterName
-	resourceSuffix := req.ResourceSuffix
-	clusterTypeStr := "REPLICASET"
-	if req.Geosharded {
-		clusterTypeStr = "GEOSHARDED"
-	}
-
-	f := hclwrite.NewEmptyFile()
-	root := f.Body()
-	resourceType := "mongodbatlas_advanced_cluster"
-	cluster := root.AppendNewBlock("resource", []string{resourceType, resourceSuffix}).Body()
-	clusterRootAttributes := map[string]any{
-		"cluster_type":           clusterTypeStr,
-		"name":                   clusterName,
-		"backup_enabled":         req.CloudBackup,
-		"pit_enabled":            req.PitEnabled,
-		"mongo_db_major_version": req.MongoDBMajorVersion,
-	}
-	if strings.Contains(req.ProjectID, ".") {
-		err = setAttributeHcl(cluster, fmt.Sprintf("project_id = %s", projectID))
-		if err != nil {
-			return "", "", "", fmt.Errorf("failed to set project_id = %s", projectID)
-		}
-	} else {
-		clusterRootAttributes["project_id"] = projectID
-	}
-	if req.DiskSizeGb != 0 {
-		clusterRootAttributes["disk_size_gb"] = req.DiskSizeGb
-	}
-	if req.RetainBackupsEnabled {
-		clusterRootAttributes["retain_backups_enabled"] = req.RetainBackupsEnabled
-	}
-	addPrimitiveAttributes(cluster, clusterRootAttributes)
-	cluster.AppendNewline()
-	if len(req.AdvancedConfiguration) > 0 {
-		for _, key := range sortStringMapKeysAny(req.AdvancedConfiguration) {
-			if !knownAdvancedConfig[key] {
-				return "", "", "", fmt.Errorf("unknown key in advanced configuration: %s", key)
-			}
-		}
-		advancedClusterBlock := cluster.AppendNewBlock("advanced_configuration", nil).Body()
-		addPrimitiveAttributes(advancedClusterBlock, req.AdvancedConfiguration)
-		cluster.AppendNewline()
-	}
-	for i, spec := range specs {
-		err = writeReplicationSpec(cluster, spec)
-		if err != nil {
-			return "", "", "", fmt.Errorf("error writing hcl for replication spec %d: %w", i, err)
-		}
-	}
-	if len(req.Tags) > 0 {
-		for _, key := range sortStringMapKeys(req.Tags) {
-			value := req.Tags[key]
-			tagBlock := cluster.AppendNewBlock("tags", nil).Body()
-			tagBlock.SetAttributeValue("key", cty.StringVal(key))
-			tagBlock.SetAttributeValue("value", cty.StringVal(value))
-		}
-	}
-	cluster.AppendNewline()
-	if req.ResourceDependencyName != "" {
-		if !strings.Contains(req.ResourceDependencyName, ".") {
-			return "", "", "", fmt.Errorf("req.ResourceDependencyName must have a '.'")
-		}
-		err = setAttributeHcl(cluster, fmt.Sprintf("depends_on = [%s]", req.ResourceDependencyName))
-		if err != nil {
-			return "", "", "", err
-		}
-	}
-	clusterResourceName := fmt.Sprintf("%s.%s", resourceType, resourceSuffix)
-	return "\n" + string(f.Bytes()), clusterName, clusterResourceName, err
-}
-
-func writeReplicationSpec(cluster *hclwrite.Body, spec admin.ReplicationSpec) error {
-	replicationBlock := cluster.AppendNewBlock("replication_specs", nil).Body()
-	err := addPrimitiveAttributesViaJSON(replicationBlock, spec)
-	if err != nil {
-		return err
-	}
-	for _, rc := range spec.GetRegionConfigs() {
-		if rc.Priority == nil {
-			rc.SetPriority(7)
-		}
-		replicationBlock.AppendNewline()
-		rcBlock := replicationBlock.AppendNewBlock("region_configs", nil).Body()
-		err = addPrimitiveAttributesViaJSON(rcBlock, rc)
-		if err != nil {
-			return err
-		}
-		autoScalingBlock := rcBlock.AppendNewBlock("auto_scaling", nil).Body()
-		if rc.AutoScaling == nil {
-			autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(false))
-		} else {
-			autoScaling := rc.GetAutoScaling()
-			asDisk := autoScaling.GetDiskGB()
-			autoScalingBlock.SetAttributeValue("disk_gb_enabled", cty.BoolVal(asDisk.GetEnabled()))
-			if autoScaling.Compute != nil {
-				return fmt.Errorf("auto_scaling.compute is not supportd yet %v", autoScaling)
-			}
-		}
-		nodeSpec := rc.GetElectableSpecs()
-		nodeSpecBlock := rcBlock.AppendNewBlock("electable_specs", nil).Body()
-		err = addPrimitiveAttributesViaJSON(nodeSpecBlock, nodeSpec)
-
-		readOnlySpecs := rc.GetReadOnlySpecs()
-		if readOnlySpecs.GetNodeCount() != 0 {
-			readOnlyBlock := rcBlock.AppendNewBlock("read_only_specs", nil).Body()
-			err = addPrimitiveAttributesViaJSON(readOnlyBlock, readOnlySpecs)
-		}
-	}
-	return err
-}
-
 // addPrimitiveAttributesViaJSON adds "primitive" bool/string/int/float attributes of a struct.
 func addPrimitiveAttributesViaJSON(b *hclwrite.Body, obj any) error {
 	var objMap map[string]any
diff --git a/internal/testutil/acc/config_formatter_test.go b/internal/testutil/acc/config_formatter_test.go
index 88984a47f9..16ac5ef7f8 100644
--- a/internal/testutil/acc/config_formatter_test.go
+++ b/internal/testutil/acc/config_formatter_test.go
@@ -4,10 +4,8 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant"
 	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
 	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func projectTemplateWithExtra(extra string) string {
@@ -106,370 +104,3 @@ func TestFormatToHCLLifecycleIgnore(t *testing.T) {
 		})
 	}
 }
-
-var standardClusterResource = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_2"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-
-}
-`
-var overrideClusterResource = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  project_id             = mongodbatlas_project.test.id
-  backup_enabled         = true
-  cluster_type           = "GEOSHARDED"
-  mongo_db_major_version = "6.0"
-  name                   = "my-name"
-  pit_enabled            = true
-  retain_backups_enabled = true
-
-  advanced_configuration {
-    oplog_min_retention_hours = 8
-  }
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone X"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AZURE"
-      region_name   = "MY_REGION_1"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        ebs_volume_type = "STANDARD"
-        instance_size   = "M30"
-        node_count      = 30
-      }
-    }
-  }
-
-}
-`
-
-var dependsOnClusterResource = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_2"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-
-  depends_on = [mongodbatlas_project.project_execution]
-}
-`
-var dependsOnMultiResource = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_2"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-
-  depends_on = [mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple]
-}
-`
-var twoReplicationSpecs = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_1"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 2"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "EU_WEST_2"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-
-}
-`
-var twoRegionConfigs = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_1"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "EU_WEST_1"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-
-}
-`
-
-var autoScalingDiskEnabled = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 7
-      provider_name = "AWS"
-      region_name   = "US_WEST_2"
-      auto_scaling {
-        disk_gb_enabled = true
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 3
-      }
-    }
-  }
-  tags {
-    key   = "ArchiveTest"
-    value = "true"
-  }
-  tags {
-    key   = "Owner"
-    value = "test"
-  }
-
-}
-`
-var readOnlyAndPriority = `
-resource "mongodbatlas_advanced_cluster" "cluster_info" {
-  backup_enabled = false
-  cluster_type   = "REPLICASET"
-  name           = "my-name"
-  pit_enabled    = false
-  project_id     = "project"
-
-  replication_specs {
-    num_shards = 1
-    zone_name  = "Zone 1"
-
-    region_configs {
-      priority      = 5
-      provider_name = "AWS"
-      region_name   = "US_EAST_1"
-      auto_scaling {
-        disk_gb_enabled = false
-      }
-      electable_specs {
-        instance_size = "M10"
-        node_count    = 5
-      }
-      read_only_specs {
-        instance_size = "M10"
-        node_count    = 1
-      }
-    }
-  }
-
-}
-`
-
-func Test_ClusterResourceHcl(t *testing.T) {
-	var (
-		clusterName = "my-name"
-		testCases   = map[string]struct {
-			expected string
-			req      acc.ClusterRequest
-		}{
-			"defaults": {
-				standardClusterResource,
-				acc.ClusterRequest{ClusterName: clusterName},
-			},
-			"dependsOn": {
-				dependsOnClusterResource,
-				acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_project.project_execution"},
-			},
-			"dependsOnMulti": {
-				dependsOnMultiResource,
-				acc.ClusterRequest{ClusterName: clusterName, ResourceDependencyName: "mongodbatlas_private_endpoint_regional_mode.atlasrm, mongodbatlas_privatelink_endpoint_service.atlasple"},
-			},
-			"twoReplicationSpecs": {
-				twoReplicationSpecs,
-				acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{
-					{Region: "US_WEST_1", ZoneName: "Zone 1"},
-					{Region: "EU_WEST_2", ZoneName: "Zone 2"},
-				}},
-			},
-			"overrideClusterResource": {
-				overrideClusterResource,
-				acc.ClusterRequest{
-					ProjectID:            "mongodbatlas_project.test.id",
-					ClusterName:          clusterName,
-					Geosharded:           true,
-					CloudBackup:          true,
-					MongoDBMajorVersion:  "6.0",
-					RetainBackupsEnabled: true,
-					ReplicationSpecs: []acc.ReplicationSpecRequest{
-						{Region: "MY_REGION_1", ZoneName: "Zone X", InstanceSize: "M30", NodeCount: 30, ProviderName: constant.AZURE, EbsVolumeType: "STANDARD"},
-					},
-					PitEnabled: true,
-					AdvancedConfiguration: map[string]any{
-						acc.ClusterAdvConfigOplogMinRetentionHours: 8,
-					},
-				},
-			},
-			"twoRegionConfigs": {
-				twoRegionConfigs,
-				acc.ClusterRequest{ClusterName: clusterName, ReplicationSpecs: []acc.ReplicationSpecRequest{
-					{
-						Region:             "US_WEST_1",
-						InstanceSize:       "M10",
-						NodeCount:          3,
-						ExtraRegionConfigs: []acc.ReplicationSpecRequest{{Region: "EU_WEST_1", InstanceSize: "M10", NodeCount: 3, ProviderName: constant.AWS}},
-					},
-				},
-				},
-			},
-			"autoScalingDiskEnabled": {
-				autoScalingDiskEnabled,
-				acc.ClusterRequest{ClusterName: clusterName, Tags: map[string]string{
-					"ArchiveTest": "true", "Owner": "test",
-				}, ReplicationSpecs: []acc.ReplicationSpecRequest{
-					{AutoScalingDiskGbEnabled: true},
-				}},
-			},
-			"readOnlyAndPriority": {
-				readOnlyAndPriority,
-				acc.ClusterRequest{
-					ClusterName: clusterName,
-					ReplicationSpecs: []acc.ReplicationSpecRequest{
-						{Priority: 5, NodeCount: 5, Region: "US_EAST_1", NodeCountReadOnly: 1},
-					}},
-			},
-		}
-	)
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			req := tc.req
-			if req.ProjectID == "" {
-				req.ProjectID = "project"
-			}
-			config, actualClusterName, actualResourceName, err := acc.ClusterResourceHcl(&req)
-			require.NoError(t, err)
-			assert.Equal(t, "mongodbatlas_advanced_cluster.cluster_info", actualResourceName)
-			assert.Equal(t, clusterName, actualClusterName)
-			assert.Equal(t, tc.expected, config)
-		})
-	}
-}