Skip to content

Commit

Permalink
feat: Support disk_size_gb at region config level while also preserving backwards compatibility with root level value (#2405)
Browse files Browse the repository at this point in the history

* wip

* define function for setting root disk_size_gb when calling new api in read

* add test with old shard schema and disk size gb at inner level

* adjust check which was searching for readOnly spec

* define root size gb at root level of data source when using new API

* add unit testing and adjust acceptance test

* fix float comparison

* fix merge conflicts

* include structs defined in test case
  • Loading branch information
AgustinBettati authored Jul 12, 2024
1 parent 7ef7db3 commit 3c04d6b
Show file tree
Hide file tree
Showing 6 changed files with 196 additions and 29 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -303,6 +303,11 @@ func dataSourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.

clusterID = clusterDescLatest.GetId()

// root disk_size_gb defined for backwards compatibility avoiding breaking changes
if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(clusterDescLatest)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

replicationSpecs, err = flattenAdvancedReplicationSpecsDS(ctx, clusterDescLatest.GetReplicationSpecs(), d, connV2)
if err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "replication_specs", clusterName, err))
Expand Down
19 changes: 18 additions & 1 deletion internal/service/advancedcluster/model_advanced_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,20 @@ func IsSharedTier(instanceSize string) bool {
return instanceSize == "M0" || instanceSize == "M2" || instanceSize == "M5"
}

// GetDiskSizeGBFromReplicationSpec obtains the diskSizeGB value by looking into the electable spec of the first replication spec.
// Independent storage size scaling is not supported (CLOUDP-201331), meaning all electable/analytics/readOnly configs in all replication specs are the same.
// Returns 0 when the cluster has no replication specs or no region configs.
func GetDiskSizeGBFromReplicationSpec(cluster *admin.ClusterDescription20250101) float64 {
	replicationSpecs := cluster.GetReplicationSpecs()
	if len(replicationSpecs) == 0 {
		return 0
	}
	regionConfigs := replicationSpecs[0].GetRegionConfigs()
	if len(regionConfigs) == 0 {
		return 0
	}
	// ElectableSpecs may be nil; the SDK-generated getter is nil-receiver safe and yields 0.
	return regionConfigs[0].ElectableSpecs.GetDiskSizeGB()
}

func UpgradeRefreshFunc(ctx context.Context, name, projectID string, client admin20231115.ClustersApi) retry.StateRefreshFunc {
return func() (any, string, error) {
cluster, resp, err := client.GetCluster(ctx, projectID, name).Execute()
Expand Down Expand Up @@ -942,8 +956,11 @@ func expandRegionConfigSpec(tfList []any, providerName string, rootDiskSizeGB *f
apiObject.NodeCount = conversion.Pointer(v.(int))
}

// TODO: CLOUDP-258270 once disk_size_gb schema attribute is added at this level, we will check if defined in config and prioritize over value defined at root level (deprecated and to be removed)
apiObject.DiskSizeGB = rootDiskSizeGB
// disk size gb defined in inner level will take precedence over root level.
if v, ok := tfMap["disk_size_gb"]; ok && v.(float64) != 0 {
apiObject.DiskSizeGB = conversion.Pointer(v.(float64))
}

return apiObject
}
Expand Down
41 changes: 41 additions & 0 deletions internal/service/advancedcluster/model_advanced_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package advancedcluster_test
import (
"context"
"errors"
"fmt"
"net/http"
"testing"

Expand Down Expand Up @@ -150,6 +151,46 @@ func TestFlattenReplicationSpecs(t *testing.T) {
}
}

// TestGetDiskSizeGBFromReplicationSpec covers the three shapes the helper must handle:
// disk size present on the electable spec, region config without an electable spec, and
// a cluster description with no replication specs at all.
func TestGetDiskSizeGBFromReplicationSpec(t *testing.T) {
	diskSizeGBValue := 40.0

	testCases := map[string]struct {
		clusterDescription     admin.ClusterDescription20250101
		expectedDiskSizeResult float64
	}{
		"cluster description with disk size gb value at electable spec": {
			clusterDescription: admin.ClusterDescription20250101{
				ReplicationSpecs: &[]admin.ReplicationSpec20250101{{
					RegionConfigs: &[]admin.CloudRegionConfig20250101{{
						ElectableSpecs: &admin.HardwareSpec20250101{
							DiskSizeGB: admin.PtrFloat64(diskSizeGBValue),
						},
					}},
				}},
			},
			expectedDiskSizeResult: diskSizeGBValue,
		},
		"cluster description with no electable spec": {
			clusterDescription: admin.ClusterDescription20250101{
				ReplicationSpecs: &[]admin.ReplicationSpec20250101{
					{RegionConfigs: &[]admin.CloudRegionConfig20250101{{}}},
				},
			},
			expectedDiskSizeResult: 0,
		},
		"cluster description with no replication spec": {
			clusterDescription:     admin.ClusterDescription20250101{},
			expectedDiskSizeResult: 0,
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			result := advancedcluster.GetDiskSizeGBFromReplicationSpec(&tc.clusterDescription)
			// Use a tolerance-based comparison instead of formatting with "%.f": that verb
			// rounds to whole numbers, so e.g. an actual value of 40.4 would have compared
			// equal to an expected 40.0 and silently hidden a real difference.
			assert.InDelta(t, tc.expectedDiskSizeResult, result, 1e-9)
		})
	}
}

type Result struct {
response any
error error
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -540,7 +540,10 @@ func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Di
return diag.FromErr(fmt.Errorf(errorRead, clusterName, err))
}

// TODO: CLOUDP-258270 set disk size gb at root level
// root disk_size_gb defined for backwards compatibility avoiding breaking changes
if err := d.Set("disk_size_gb", GetDiskSizeGBFromReplicationSpec(cluster)); err != nil {
return diag.FromErr(fmt.Errorf(ErrorClusterAdvancedSetting, "disk_size_gb", clusterName, err))
}

replicationSpecs, err = flattenAdvancedReplicationSpecs(ctx, cluster.GetReplicationSpecs(), d.Get("replication_specs").([]any), d, connV2)
if err != nil {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ func TestMigAdvancedCluster_singleAWSProvider(t *testing.T) {
{
ExternalProviders: mig.ExternalProviders(),
Config: config,
Check: checkSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName, false),
},
mig.TestStepCheckEmptyPlan(config),
},
Expand Down
153 changes: 127 additions & 26 deletions internal/service/advancedcluster/resource_advanced_cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ func TestAccClusterAdvancedCluster_singleProvider(t *testing.T) {
Steps: []resource.TestStep{
{
Config: configSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName),
Check: checkSingleProvider(projectID, clusterName, true),
},
{
ResourceName: resourceName,
Expand Down Expand Up @@ -408,8 +408,8 @@ func TestAccClusterAdvancedClusterConfig_replicationSpecsAndShardUpdating(t *tes
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
Check: checkMultiZoneWithShards(clusterName, 1, 1),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 1, 1),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 2, false),
Expand Down Expand Up @@ -460,15 +460,15 @@ func TestAccClusterAdvancedClusterConfig_selfManagedSharding(t *testing.T) {
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, true),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, true),
Check: resource.ComposeAggregateTestCheckFunc(
checkExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "global_cluster_self_managed_sharding", "true"),
resource.TestCheckResourceAttr(dataSourceName, "global_cluster_self_managed_sharding", "true"),
),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 1, 1, false),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 1, 1, false),
ExpectError: regexp.MustCompile("CANNOT_MODIFY_GLOBAL_CLUSTER_MANAGEMENT_SETTING"),
},
},
Expand Down Expand Up @@ -508,12 +508,33 @@ func TestAccClusterAdvancedClusterConfig_symmetricGeoShardedOldSchema(t *testing
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 2, 2, false),
Check: checkMultiZoneWithShards(clusterName, 2, 2),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 2, 2, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 2, 2),
},
{
Config: configMultiZoneWithShards(orgID, projectName, clusterName, 3, 3, false),
Check: checkMultiZoneWithShards(clusterName, 3, 3),
Config: configMultiZoneWithShardsOldSchema(orgID, projectName, clusterName, 3, 3, false),
Check: checkMultiZoneWithShardsOldSchema(clusterName, 3, 3),
},
},
})
}

func TestAccClusterAdvancedClusterConfig_symmetricShardedOldSchemaDiskSizeGBAtElectableLevel(t *testing.T) {
acc.SkipTestForCI(t) // TODO: CLOUDP-260154 for ensuring this use case is supported
var (
orgID = os.Getenv("MONGODB_ATLAS_ORG_ID")
projectName = acc.RandomProjectName()
clusterName = acc.RandomClusterName()
)

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { acc.PreCheckBasic(t) },
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
CheckDestroy: acc.CheckDestroyCluster,
Steps: []resource.TestStep{
{
Config: configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, clusterName),
Check: checkShardedOldSchemaDiskSizeGBElectableLevel(),
},
},
})
Expand Down Expand Up @@ -711,6 +732,7 @@ func configSingleProvider(projectID, name string) string {
name = %[2]q
cluster_type = "REPLICASET"
retain_backups_enabled = "true"
disk_size_gb = 60
replication_specs {
region_configs {
Expand All @@ -736,15 +758,28 @@ func configSingleProvider(projectID, name string) string {
`, projectID, name)
}

func checkSingleProvider(projectID, name string) resource.TestCheckFunc {
func checkSingleProvider(projectID, name string, checkDiskSizeGBInnerLevel bool) resource.TestCheckFunc {
additionalChecks := []resource.TestCheckFunc{
resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
}
if checkDiskSizeGBInnerLevel {
additionalChecks = append(additionalChecks,
checkAggr([]string{}, map[string]string{
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
}),
)
}
return checkAggr(
[]string{"replication_specs.#", "replication_specs.0.region_configs.#"},
map[string]string{
"project_id": projectID,
"name": name},
resource.TestCheckResourceAttr(resourceName, "retain_backups_enabled", "true"),
resource.TestCheckResourceAttrWith(resourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)),
resource.TestCheckResourceAttrWith(dataSourceName, "replication_specs.0.region_configs.0.electable_specs.0.disk_iops", acc.IntGreatThan(0)))
"project_id": projectID,
"disk_size_gb": "60",
"name": name},
additionalChecks...,
)
}

func configIncorrectTypeGobalClusterSelfManagedSharding(projectID, name string) string {
Expand Down Expand Up @@ -1136,7 +1171,7 @@ func configReplicationSpecsAnalyticsAutoScaling(projectID, clusterName string, p
`, projectID, clusterName, p.Compute.GetEnabled(), p.DiskGB.GetEnabled(), p.Compute.GetMaxInstanceSize())
}

func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
func configMultiZoneWithShardsOldSchema(orgID, projectName, name string, numShardsFirstZone, numShardsSecondZone int, selfManagedSharding bool) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
org_id = %[1]q
Expand All @@ -1150,6 +1185,7 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo
mongo_db_major_version = "7.0"
cluster_type = "GEOSHARDED"
global_cluster_self_managed_sharding = %[6]t
disk_size_gb = 60
replication_specs {
zone_name = "zone n1"
Expand Down Expand Up @@ -1197,6 +1233,72 @@ func configMultiZoneWithShards(orgID, projectName, name string, numShardsFirstZo
`, orgID, projectName, name, numShardsFirstZone, numShardsSecondZone, selfManagedSharding)
}

// checkMultiZoneWithShardsOldSchema verifies a multi-zone cluster defined with the old
// (num_shards-based) schema, including the root disk_size_gb and its propagation to the
// electable and analytics specs of the first region config.
func checkMultiZoneWithShardsOldSchema(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
	expectedAttrs := map[string]string{
		"name":         name,
		"disk_size_gb": "60",
		"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
		"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
		"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
		"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
	}
	return checkAggr([]string{"project_id"}, expectedAttrs)
}

// configShardedOldSchemaDiskSizeGBElectableLevel returns HCL for a SHARDED cluster using the
// old schema (num_shards inside replication_specs) with disk_size_gb defined at the
// electable_specs and analytics_specs level rather than at the cluster root, plus a matching
// data source. Arguments fill %[1]q..%[3]q: org ID, project name, cluster name.
func configShardedOldSchemaDiskSizeGBElectableLevel(orgID, projectName, name string) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
org_id = %[1]q
name = %[2]q
}
resource "mongodbatlas_advanced_cluster" "test" {
project_id = mongodbatlas_project.cluster_project.id
name = %[3]q
backup_enabled = false
mongo_db_major_version = "7.0"
cluster_type = "SHARDED"
replication_specs {
num_shards = 2
region_configs {
electable_specs {
instance_size = "M10"
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = "M10"
node_count = 0
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
region_name = "US_EAST_1"
}
}
}
data "mongodbatlas_advanced_cluster" "test" {
project_id = mongodbatlas_advanced_cluster.test.project_id
name = mongodbatlas_advanced_cluster.test.name
}
`, orgID, projectName, name)
}

// checkShardedOldSchemaDiskSizeGBElectableLevel verifies that a disk size defined at the
// electable spec level is surfaced both at the inner electable/analytics specs and at the
// deprecated root-level disk_size_gb attribute.
func checkShardedOldSchemaDiskSizeGBElectableLevel() resource.TestCheckFunc {
	expectedAttrs := map[string]string{
		"replication_specs.0.num_shards": "2",
		"disk_size_gb":                   "60",
		"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
		"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
	}
	return checkAggr([]string{}, expectedAttrs)
}

func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanceSizeSpec2 string, diskIopsSpec1, diskIopsSpec2 int) string {
return fmt.Sprintf(`
resource "mongodbatlas_project" "cluster_project" {
Expand All @@ -1216,10 +1318,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
instance_size = %[4]q
disk_iops = %[6]d
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = %[4]q
node_count = 1
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
Expand All @@ -1233,10 +1337,12 @@ func configShardedNewSchema(orgID, projectName, name, instanceSizeSpec1, instanc
instance_size = %[5]q
disk_iops = %[7]d
node_count = 3
disk_size_gb = 60
}
analytics_specs {
instance_size = %[5]q
node_count = 1
disk_size_gb = 60
}
provider_name = "AWS"
priority = 7
Expand All @@ -1257,21 +1363,16 @@ func checkShardedNewSchema(instanceSizeSpec1, instanceSizeSpec2, diskIopsSpec1,
return checkAggr(
[]string{"replication_specs.0.external_id", "replication_specs.0.zone_id", "replication_specs.1.external_id", "replication_specs.1.zone_id"},
map[string]string{
"disk_size_gb": "60",
"replication_specs.#": "2",
"replication_specs.0.id": "",
"replication_specs.0.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec1,
"replication_specs.1.region_configs.0.electable_specs.0.instance_size": instanceSizeSpec2,
"replication_specs.0.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.1.region_configs.0.electable_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.analytics_specs.0.disk_size_gb": "60",
"replication_specs.1.region_configs.0.analytics_specs.0.disk_size_gb": "60",
"replication_specs.0.region_configs.0.electable_specs.0.disk_iops": diskIopsSpec1,
"replication_specs.1.region_configs.0.electable_specs.0.disk_iops": diskIopsSpec2,
})
}

func checkMultiZoneWithShards(name string, numShardsFirstZone, numShardsSecondZone int) resource.TestCheckFunc {
return checkAggr(
[]string{"project_id"},
map[string]string{
"name": name,
"replication_specs.0.num_shards": strconv.Itoa(numShardsFirstZone),
"replication_specs.1.num_shards": strconv.Itoa(numShardsSecondZone),
})
}

0 comments on commit 3c04d6b

Please sign in to comment.