Skip to content

Commit 72e27d0

Browse files
authored
Merge pull request #5680 from nojnhuh/clusterctl-upgrade-aks
Add clusterctl upgrade tests with AKS workload clusters
2 parents d809e63 + 0909161 commit 72e27d0

12 files changed

+567
-17
lines changed

Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -531,6 +531,7 @@ generate-e2e-templates: $(KUSTOMIZE) ## Generate Azure infrastructure templates
531531
$(KUSTOMIZE) build $(AZURE_TEMPLATES)/v1beta1/cluster-template-node-drain --load-restrictor LoadRestrictionsNone > $(AZURE_TEMPLATES)/v1beta1/cluster-template-node-drain.yaml
532532
$(KUSTOMIZE) build $(AZURE_TEMPLATES)/v1beta1/cluster-template-upgrades --load-restrictor LoadRestrictionsNone > $(AZURE_TEMPLATES)/v1beta1/cluster-template-upgrades.yaml
533533
$(KUSTOMIZE) build $(AZURE_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in --load-restrictor LoadRestrictionsNone > $(AZURE_TEMPLATES)/v1beta1/cluster-template-kcp-scale-in.yaml
534+
$(KUSTOMIZE) build $(AZURE_TEMPLATES)/v1beta1/cluster-template-aks --load-restrictor LoadRestrictionsNone > $(AZURE_TEMPLATES)/v1beta1/cluster-template-aks.yaml
534535

535536
.PHONY: generate-addons
536537
generate-addons: fetch-calico-manifests ## Generate metric-server, calico, calico-ipv6, azure cni v1 addons.

test/e2e/aks_machinepools.go

Lines changed: 37 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@ import (
3838
)
3939

4040
type AKSMachinePoolSpecInput struct {
41+
MgmtCluster framework.ClusterProxy
4142
Cluster *clusterv1.Cluster
4243
MachinePools []*expv1.MachinePool
4344
WaitIntervals []interface{}
@@ -57,7 +58,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
5758

5859
Byf("Scaling machine pool %s out", mp.Name)
5960
framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
60-
ClusterProxy: bootstrapClusterProxy,
61+
ClusterProxy: input.MgmtCluster,
6162
Cluster: input.Cluster,
6263
Replicas: ptr.Deref(mp.Spec.Replicas, 0) + 1,
6364
MachinePools: []*expv1.MachinePool{mp},
@@ -66,7 +67,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
6667

6768
Byf("Scaling machine pool %s in", mp.Name)
6869
framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
69-
ClusterProxy: bootstrapClusterProxy,
70+
ClusterProxy: input.MgmtCluster,
7071
Cluster: input.Cluster,
7172
Replicas: ptr.Deref(mp.Spec.Replicas, 0) - 1,
7273
MachinePools: []*expv1.MachinePool{mp},
@@ -78,7 +79,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
7879
switch mp.Spec.Template.Spec.InfrastructureRef.Kind {
7980
case infrav1.AzureManagedMachinePoolKind:
8081
ammp := &infrav1.AzureManagedMachinePool{}
81-
err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
82+
err := input.MgmtCluster.GetClient().Get(ctx, types.NamespacedName{
8283
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
8384
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
8485
}, ammp)
@@ -89,7 +90,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
8990
}
9091
case infrav1.AzureASOManagedMachinePoolKind:
9192
ammp := &infrav1.AzureASOManagedMachinePool{}
92-
err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{
93+
err := input.MgmtCluster.GetClient().Get(ctx, types.NamespacedName{
9394
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
9495
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
9596
}, ammp)
@@ -104,7 +105,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
104105
// mode may not be set in spec. Get the ASO object and check in status.
105106
resource.SetNamespace(ammp.Namespace)
106107
agentPool := &asocontainerservicev1.ManagedClustersAgentPool{}
107-
Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(resource), agentPool)).To(Succeed())
108+
Expect(input.MgmtCluster.GetClient().Get(ctx, client.ObjectKeyFromObject(resource), agentPool)).To(Succeed())
108109
if ptr.Deref(agentPool.Status.Mode, "") != asocontainerservicev1.AgentPoolMode_STATUS_System {
109110
isUserPool = true
110111
}
@@ -115,7 +116,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
115116
if isUserPool {
116117
Byf("Scaling the machine pool %s to zero", mp.Name)
117118
framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
118-
ClusterProxy: bootstrapClusterProxy,
119+
ClusterProxy: input.MgmtCluster,
119120
Cluster: input.Cluster,
120121
Replicas: 0,
121122
MachinePools: []*expv1.MachinePool{mp},
@@ -125,7 +126,7 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
125126

126127
Byf("Restoring initial replica count for machine pool %s", mp.Name)
127128
framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{
128-
ClusterProxy: bootstrapClusterProxy,
129+
ClusterProxy: input.MgmtCluster,
129130
Cluster: input.Cluster,
130131
Replicas: originalReplicas,
131132
MachinePools: []*expv1.MachinePool{mp},
@@ -136,3 +137,32 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp
136137

137138
wg.Wait()
138139
}
140+
141+
type AKSMachinePoolPostUpgradeSpecInput struct {
142+
MgmtCluster framework.ClusterProxy
143+
ClusterName string
144+
ClusterNamespace string
145+
}
146+
147+
func AKSMachinePoolPostUpgradeSpec(ctx context.Context, inputGetter func() AKSMachinePoolPostUpgradeSpecInput) {
148+
input := inputGetter()
149+
150+
cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
151+
Getter: input.MgmtCluster.GetClient(),
152+
Name: input.ClusterName,
153+
Namespace: input.ClusterNamespace,
154+
})
155+
mps := framework.GetMachinePoolsByCluster(ctx, framework.GetMachinePoolsByClusterInput{
156+
Lister: input.MgmtCluster.GetClient(),
157+
ClusterName: input.ClusterName,
158+
Namespace: input.ClusterNamespace,
159+
})
160+
AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
161+
return AKSMachinePoolSpecInput{
162+
MgmtCluster: input.MgmtCluster,
163+
Cluster: cluster,
164+
MachinePools: mps,
165+
WaitIntervals: e2eConfig.GetIntervals("default", "wait-machine-pool-nodes"),
166+
}
167+
})
168+
}

test/e2e/azure_test.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -821,6 +821,7 @@ var _ = Describe("Workload cluster creation", func() {
821821
By("Exercising machine pools", func() {
822822
AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
823823
return AKSMachinePoolSpecInput{
824+
MgmtCluster: bootstrapClusterProxy,
824825
Cluster: result.Cluster,
825826
MachinePools: result.MachinePools,
826827
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
@@ -982,6 +983,7 @@ var _ = Describe("Workload cluster creation", func() {
982983
By("Exercising machine pools", func() {
983984
AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
984985
return AKSMachinePoolSpecInput{
986+
MgmtCluster: bootstrapClusterProxy,
985987
Cluster: result.Cluster,
986988
MachinePools: result.MachinePools,
987989
WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),

test/e2e/capi_test.go

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ import (
3030
. "github.com/onsi/ginkgo/v2"
3131
. "github.com/onsi/gomega"
3232
"k8s.io/utils/ptr"
33+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3334
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
3435
"sigs.k8s.io/cluster-api/test/framework"
3536
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
@@ -181,6 +182,8 @@ var _ = Describe("Running the Cluster API E2E tests", func() {
181182

182183
if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" {
183184
Context("API Version Upgrade", func() {
185+
var aksKubernetesVersion string
186+
184187
BeforeEach(func() {
185188
// Unset resource group and vnet env variables, since the upgrade test creates 2 clusters,
186189
// and will result in both the clusters using the same vnet and resource group.
@@ -199,6 +202,9 @@ var _ = Describe("Running the Cluster API E2E tests", func() {
199202
identity, err := identityClient.Get(ctx, identityRG, identityName, nil)
200203
Expect(err).NotTo(HaveOccurred())
201204
Expect(os.Setenv("AZURE_CLIENT_ID_CLOUD_PROVIDER", *identity.Properties.ClientID)).To(Succeed())
205+
206+
aksKubernetesVersion, err = GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
207+
Expect(err).NotTo(HaveOccurred())
202208
})
203209

204210
Context("upgrade from an old version of v1beta1 to current, and scale workload clusters created in the old version", func() {
@@ -250,6 +256,82 @@ var _ = Describe("Running the Cluster API E2E tests", func() {
250256
}
251257
})
252258
})
259+
260+
Context("upgrade from an old version of v1beta1 to current, and scale AKS workload clusters created in the old version", func() {
261+
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
262+
return capi_e2e.ClusterctlUpgradeSpecInput{
263+
E2EConfig: e2eConfig,
264+
ClusterctlConfigPath: clusterctlConfigPath,
265+
WorkloadFlavor: "aks",
266+
WorkloadKubernetesVersion: aksKubernetesVersion,
267+
ControlPlaneMachineCount: ptr.To[int64](0),
268+
BootstrapClusterProxy: bootstrapClusterProxy,
269+
ArtifactFolder: artifactFolder,
270+
SkipCleanup: skipCleanup,
271+
PreInit: getPreInitFunc(ctx),
272+
InitWithProvidersContract: "v1beta1",
273+
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
274+
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
275+
},
276+
InitWithKubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersionAPIUpgradeFrom),
277+
InitWithBinary: fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/releases/download/%s/clusterctl-{OS}-{ARCH}", e2eConfig.MustGetVariable(OldCAPIUpgradeVersion)),
278+
InitWithCoreProvider: "cluster-api:" + e2eConfig.MustGetVariable(OldCAPIUpgradeVersion),
279+
InitWithInfrastructureProviders: []string{"azure:" + e2eConfig.MustGetVariable(OldProviderUpgradeVersion)},
280+
Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{
281+
{
282+
Contract: clusterv1.GroupVersion.Version,
283+
PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) {
284+
AKSMachinePoolPostUpgradeSpec(ctx, func() AKSMachinePoolPostUpgradeSpecInput {
285+
return AKSMachinePoolPostUpgradeSpecInput{
286+
MgmtCluster: managementClusterProxy,
287+
ClusterName: clusterName,
288+
ClusterNamespace: clusterNamespace,
289+
}
290+
})
291+
},
292+
},
293+
},
294+
}
295+
})
296+
})
297+
298+
Context("upgrade from the latest version of v1beta1 to current, and scale AKS workload clusters created in the old version", func() {
299+
capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
300+
return capi_e2e.ClusterctlUpgradeSpecInput{
301+
E2EConfig: e2eConfig,
302+
ClusterctlConfigPath: clusterctlConfigPath,
303+
WorkloadFlavor: "aks",
304+
WorkloadKubernetesVersion: aksKubernetesVersion,
305+
ControlPlaneMachineCount: ptr.To[int64](0),
306+
BootstrapClusterProxy: bootstrapClusterProxy,
307+
ArtifactFolder: artifactFolder,
308+
SkipCleanup: skipCleanup,
309+
PreInit: getPreInitFunc(ctx),
310+
InitWithProvidersContract: "v1beta1",
311+
ControlPlaneWaiters: clusterctl.ControlPlaneWaiters{
312+
WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
313+
},
314+
InitWithKubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersionAPIUpgradeFrom),
315+
InitWithBinary: fmt.Sprintf("https://github.com/kubernetes-sigs/cluster-api/releases/download/%s/clusterctl-{OS}-{ARCH}", e2eConfig.MustGetVariable(LatestCAPIUpgradeVersion)),
316+
InitWithCoreProvider: "cluster-api:" + e2eConfig.MustGetVariable(LatestCAPIUpgradeVersion),
317+
InitWithInfrastructureProviders: []string{"azure:" + e2eConfig.MustGetVariable(LatestProviderUpgradeVersion)},
318+
Upgrades: []capi_e2e.ClusterctlUpgradeSpecInputUpgrade{
319+
{
320+
Contract: clusterv1.GroupVersion.Version,
321+
PostUpgrade: func(managementClusterProxy framework.ClusterProxy, clusterNamespace, clusterName string) {
322+
AKSMachinePoolPostUpgradeSpec(ctx, func() AKSMachinePoolPostUpgradeSpecInput {
323+
return AKSMachinePoolPostUpgradeSpecInput{
324+
MgmtCluster: managementClusterProxy,
325+
ClusterName: clusterName,
326+
ClusterNamespace: clusterNamespace,
327+
}
328+
})
329+
},
330+
},
331+
},
332+
}
333+
})
334+
})
253335
})
254336
}
255337

test/e2e/config/azure-dev.yaml

Lines changed: 14 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -83,29 +83,33 @@ providers:
8383
- name: azure
8484
type: InfrastructureProvider
8585
versions:
86-
- name: v1.18.2 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
87-
value: https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.18.2/infrastructure-components.yaml
86+
- name: v1.18.5 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
87+
value: https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.18.5/infrastructure-components.yaml
8888
type: url
8989
contract: v1beta1
9090
files:
9191
- sourcePath: "../data/shared/v1beta1_provider/metadata.yaml"
92-
- sourcePath: "../data/infrastructure-azure/v1.18.2/cluster-template-prow.yaml"
92+
- sourcePath: "../data/infrastructure-azure/v1.18.5/cluster-template-prow.yaml"
9393
targetName: "cluster-template.yaml"
94-
- sourcePath: "../data/infrastructure-azure/v1.18.2/cluster-template-prow-machine-and-machine-pool.yaml"
94+
- sourcePath: "../data/infrastructure-azure/v1.18.5/cluster-template-prow-machine-and-machine-pool.yaml"
9595
targetName: "cluster-template-machine-and-machine-pool.yaml"
96+
- sourcePath: "../data/infrastructure-azure/v1.18.5/cluster-template-aks.yaml"
97+
targetName: "cluster-template-aks.yaml"
9698
replacements:
9799
- old: "imagePullPolicy: Always"
98100
new: "imagePullPolicy: IfNotPresent"
99-
- name: v1.19.1 # latest patch of latest minor in supported v1beta1 releases; this is used for v1beta1 latest --> v1beta1 current clusterctl upgrades test only.
100-
value: https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.19.1/infrastructure-components.yaml
101+
- name: v1.19.4 # latest patch of latest minor in supported v1beta1 releases; this is used for v1beta1 latest --> v1beta1 current clusterctl upgrades test only.
102+
value: https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/download/v1.19.4/infrastructure-components.yaml
101103
type: url
102104
contract: v1beta1
103105
files:
104106
- sourcePath: "../data/shared/v1beta1_provider/metadata.yaml"
105-
- sourcePath: "../data/infrastructure-azure/v1.19.1/cluster-template-prow.yaml"
107+
- sourcePath: "../data/infrastructure-azure/v1.19.4/cluster-template-prow.yaml"
106108
targetName: "cluster-template.yaml"
107-
- sourcePath: "../data/infrastructure-azure/v1.19.1/cluster-template-prow-machine-and-machine-pool.yaml"
109+
- sourcePath: "../data/infrastructure-azure/v1.19.4/cluster-template-prow-machine-and-machine-pool.yaml"
108110
targetName: "cluster-template-machine-and-machine-pool.yaml"
111+
- sourcePath: "../data/infrastructure-azure/v1.19.4/cluster-template-aks.yaml"
112+
targetName: "cluster-template-aks.yaml"
109113
replacements:
110114
- old: "imagePullPolicy: Always"
111115
new: "imagePullPolicy: IfNotPresent"
@@ -240,8 +244,8 @@ variables:
240244
AZURE_CNI_V1_MANIFEST_PATH: "${PWD}/templates/addons/azure-cni-v1.yaml"
241245
OLD_CAPI_UPGRADE_VERSION: "v1.9.7"
242246
LATEST_CAPI_UPGRADE_VERSION: "v1.10.2"
243-
OLD_PROVIDER_UPGRADE_VERSION: "v1.18.2"
244-
LATEST_PROVIDER_UPGRADE_VERSION: "v1.19.1"
247+
OLD_PROVIDER_UPGRADE_VERSION: "v1.18.5"
248+
LATEST_PROVIDER_UPGRADE_VERSION: "v1.19.4"
245249
OLD_CAAPH_UPGRADE_VERSION: "v0.1.0-alpha.10"
246250
LATEST_CAAPH_UPGRADE_VERSION: "v0.2.5"
247251
CI_RG: "${CI_RG:-capz-ci}"

0 commit comments

Comments (0)