Skip to content

Commit edacc1e

Browse files
authored
chore: Always use the new schema when moving from cluster (#3004)
* Always use the new sharding schema when moving from cluster * Add multisharding move test * Remove leftover code * Apply review feedback * Clarify sharding bool parameter
1 parent 3057211 commit edacc1e

File tree

2 files changed

+112
-37
lines changed

2 files changed

+112
-37
lines changed

internal/service/advancedclustertpf/move_upgrade_state.go

Lines changed: 25 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -34,11 +34,13 @@ func stateMover(ctx context.Context, req resource.MoveStateRequest, resp *resour
3434
if req.SourceTypeName != "mongodbatlas_cluster" || !strings.HasSuffix(req.SourceProviderAddress, "/mongodbatlas") {
3535
return
3636
}
37-
setStateResponse(ctx, &resp.Diagnostics, req.SourceRawState, &resp.TargetState)
37+
// Use always new sharding config when moving from cluster to adv_cluster
38+
setStateResponse(ctx, &resp.Diagnostics, req.SourceRawState, &resp.TargetState, false)
3839
}
3940

4041
func stateUpgraderFromV1(ctx context.Context, req resource.UpgradeStateRequest, resp *resource.UpgradeStateResponse) {
41-
setStateResponse(ctx, &resp.Diagnostics, req.RawState, &resp.State)
42+
// Use same sharding config as in SDKv2 when upgrading to TPF
43+
setStateResponse(ctx, &resp.Diagnostics, req.RawState, &resp.State, true)
4244
}
4345

4446
// stateAttrs has the attributes needed from source schema.
@@ -65,7 +67,7 @@ var stateAttrs = map[string]tftypes.Type{
6567
},
6668
}
6769

68-
func setStateResponse(ctx context.Context, diags *diag.Diagnostics, stateIn *tfprotov6.RawState, stateOut *tfsdk.State) {
70+
func setStateResponse(ctx context.Context, diags *diag.Diagnostics, stateIn *tfprotov6.RawState, stateOut *tfsdk.State, allowOldShardingConfig bool) {
6971
rawStateValue, err := stateIn.UnmarshalWithOpts(tftypes.Object{
7072
AttributeTypes: stateAttrs,
7173
}, tfprotov6.UnmarshalOpts{ValueFromJSONOpts: tftypes.ValueFromJSONOpts{IgnoreUndefinedAttributes: true}})
@@ -94,11 +96,12 @@ func setStateResponse(ctx context.Context, diags *diag.Diagnostics, stateIn *tfp
9496
return
9597
}
9698
setOptionalModelAttrs(ctx, stateObj, model)
97-
99+
if allowOldShardingConfig {
100+
setReplicationSpecNumShardsAttr(ctx, stateObj, model)
101+
}
98102
// Set tags and labels to null instead of empty so there is no plan change if there are no tags or labels when Read is called.
99103
model.Tags = types.MapNull(types.StringType)
100104
model.Labels = types.MapNull(types.StringType)
101-
102105
diags.Append(stateOut.Set(ctx, model)...)
103106
}
104107

@@ -155,21 +158,26 @@ func setOptionalModelAttrs(ctx context.Context, stateObj map[string]tftypes.Valu
155158
if mongoDBMajorVersion := getAttrFromStateObj[string](stateObj, "mongo_db_major_version"); mongoDBMajorVersion != nil {
156159
model.MongoDBMajorVersion = types.StringPointerValue(mongoDBMajorVersion)
157160
}
158-
if specsVal := getAttrFromStateObj[[]tftypes.Value](stateObj, "replication_specs"); specsVal != nil {
159-
var specModels []TFReplicationSpecsModel
160-
for _, specVal := range *specsVal {
161-
var specObj map[string]tftypes.Value
162-
if err := specVal.As(&specObj); err != nil {
163-
continue
164-
}
165-
if specModel := replicationSpecModelWithNumShards(specObj["num_shards"]); specModel != nil {
166-
specModels = append(specModels, *specModel)
167-
}
161+
}
162+
163+
func setReplicationSpecNumShardsAttr(ctx context.Context, stateObj map[string]tftypes.Value, model *TFModel) {
164+
specsVal := getAttrFromStateObj[[]tftypes.Value](stateObj, "replication_specs")
165+
if specsVal == nil {
166+
return
167+
}
168+
var specModels []TFReplicationSpecsModel
169+
for _, specVal := range *specsVal {
170+
var specObj map[string]tftypes.Value
171+
if err := specVal.As(&specObj); err != nil {
172+
continue
168173
}
169-
if len(specModels) > 0 {
170-
model.ReplicationSpecs, _ = types.ListValueFrom(ctx, ReplicationSpecsObjType, specModels)
174+
if specModel := replicationSpecModelWithNumShards(specObj["num_shards"]); specModel != nil {
175+
specModels = append(specModels, *specModel)
171176
}
172177
}
178+
if len(specModels) > 0 {
179+
model.ReplicationSpecs, _ = types.ListValueFrom(ctx, ReplicationSpecsObjType, specModels)
180+
}
173181
}
174182

175183
func replicationSpecModelWithNumShards(numShardsVal tftypes.Value) *TFReplicationSpecsModel {

internal/service/advancedclustertpf/move_upgrade_state_test.go

Lines changed: 87 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@ package advancedclustertpf_test
33
import (
44
"fmt"
55
"regexp"
6+
"strings"
67
"testing"
78

89
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
@@ -13,8 +14,7 @@ import (
1314

1415
func TestAccAdvancedCluster_moveBasic(t *testing.T) {
1516
var (
16-
projectID = acc.ProjectIDExecution(t)
17-
clusterName = acc.RandomClusterName()
17+
projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 3)
1818
)
1919
resource.ParallelTest(t, resource.TestCase{
2020
TerraformVersionChecks: []tfversion.TerraformVersionCheck{
@@ -24,10 +24,36 @@ func TestAccAdvancedCluster_moveBasic(t *testing.T) {
2424
CheckDestroy: acc.CheckDestroyCluster,
2525
Steps: []resource.TestStep{
2626
{
27-
Config: configMoveFirst(projectID, clusterName),
27+
Config: configMoveFirst(projectID, clusterName, 1),
2828
},
2929
{
30-
Config: configMoveSecond(projectID, clusterName),
30+
Config: configMoveSecond(projectID, clusterName, 1),
31+
ConfigPlanChecks: resource.ConfigPlanChecks{
32+
PostApplyPreRefresh: []plancheck.PlanCheck{
33+
plancheck.ExpectEmptyPlan(),
34+
},
35+
},
36+
},
37+
},
38+
})
39+
}
40+
41+
func TestAccAdvancedCluster_moveMultisharding(t *testing.T) {
42+
var (
43+
projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 9)
44+
)
45+
resource.ParallelTest(t, resource.TestCase{
46+
TerraformVersionChecks: []tfversion.TerraformVersionCheck{
47+
tfversion.SkipBelow(tfversion.Version1_8_0),
48+
},
49+
ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
50+
CheckDestroy: acc.CheckDestroyCluster,
51+
Steps: []resource.TestStep{
52+
{
53+
Config: configMoveFirst(projectID, clusterName, 3),
54+
},
55+
{
56+
Config: configMoveSecond(projectID, clusterName, 3),
3157
ConfigPlanChecks: resource.ConfigPlanChecks{
3258
PostApplyPreRefresh: []plancheck.PlanCheck{
3359
plancheck.ExpectEmptyPlan(),
@@ -40,8 +66,7 @@ func TestAccAdvancedCluster_moveBasic(t *testing.T) {
4066

4167
func TestAccAdvancedCluster_moveInvalid(t *testing.T) {
4268
var (
43-
projectID = acc.ProjectIDExecution(t)
44-
clusterName = acc.RandomClusterName()
69+
projectID, clusterName = acc.ProjectIDExecutionWithCluster(t, 0)
4570
)
4671
resource.ParallelTest(t, resource.TestCase{
4772
TerraformVersionChecks: []tfversion.TerraformVersionCheck{
@@ -64,25 +89,76 @@ func TestAccAdvancedCluster_moveInvalid(t *testing.T) {
6489
})
6590
}
6691

67-
func configMoveFirst(projectID, clusterName string) string {
92+
func configMoveFirst(projectID, clusterName string, numShards int) string {
93+
clusterTypeStr := "REPLICASET"
94+
if numShards > 1 {
95+
clusterTypeStr = "GEOSHARDED"
96+
}
6897
return fmt.Sprintf(`
6998
resource "mongodbatlas_cluster" "old" {
7099
project_id = %[1]q
71100
name = %[2]q
72101
disk_size_gb = 10
73-
cluster_type = "REPLICASET"
102+
cluster_type = %[3]q
74103
provider_name = "AWS"
75104
provider_instance_size_name = "M10"
76105
replication_specs {
77-
num_shards = 1
106+
num_shards = %[4]d
78107
regions_config {
79108
region_name = "US_EAST_1"
80109
electable_nodes = 3
81110
priority = 7
82111
}
83112
}
84113
}
85-
`, projectID, clusterName)
114+
`, projectID, clusterName, clusterTypeStr, numShards)
115+
}
116+
117+
func configMoveBasic(projectID, clusterName string, numShards int) string {
118+
clusterTypeStr := "REPLICASET"
119+
if numShards > 1 {
120+
clusterTypeStr = "GEOSHARDED"
121+
}
122+
var replicationSpecsStr []string
123+
for range numShards {
124+
replicationSpecsStr = append(replicationSpecsStr, `
125+
{
126+
region_configs = [{
127+
priority = 7
128+
provider_name = "AWS"
129+
region_name = "US_EAST_1"
130+
auto_scaling = {
131+
compute_scale_down_enabled = false # necessary to have similar SDKv2 request
132+
compute_enabled = false # necessary to have similar SDKv2 request
133+
disk_gb_enabled = true
134+
}
135+
electable_specs = {
136+
node_count = 3
137+
instance_size = "M10"
138+
disk_size_gb = 10
139+
}
140+
}]
141+
}
142+
`)
143+
}
144+
145+
return fmt.Sprintf(`
146+
resource "mongodbatlas_advanced_cluster" "test" {
147+
project_id = %[1]q
148+
name = %[2]q
149+
cluster_type = %[3]q
150+
replication_specs = [%[4]s]
151+
}
152+
`, projectID, clusterName, clusterTypeStr, strings.Join(replicationSpecsStr, ","))
153+
}
154+
155+
func configMoveSecond(projectID, clusterName string, numShards int) string {
156+
return `
157+
moved {
158+
from = mongodbatlas_cluster.old
159+
to = mongodbatlas_advanced_cluster.test
160+
}
161+
` + configMoveBasic(projectID, clusterName, numShards)
86162
}
87163

88164
func configMoveFirstInvalid(projectID, clusterName string) string {
@@ -100,20 +176,11 @@ func configMoveFirstInvalid(projectID, clusterName string) string {
100176
`, projectID, clusterName)
101177
}
102178

103-
func configMoveSecond(projectID, clusterName string) string {
104-
return `
105-
moved {
106-
from = mongodbatlas_cluster.old
107-
to = mongodbatlas_advanced_cluster.test
108-
}
109-
` + configBasic(projectID, clusterName, "")
110-
}
111-
112179
func configMoveSecondInvalid(projectID, clusterName string) string {
113180
return `
114181
moved {
115182
from = mongodbatlas_database_user.old
116183
to = mongodbatlas_advanced_cluster.test
117184
}
118-
` + configBasic(projectID, clusterName, "")
185+
` + configMoveBasic(projectID, clusterName, 1)
119186
}

0 commit comments

Comments (0)