From 3e062b8d5050cb2ee24607b18a7fab8cc85a6937 Mon Sep 17 00:00:00 2001 From: sivchari Date: Sun, 6 Jul 2025 21:19:39 +0900 Subject: [PATCH 01/31] migrate from v1beta1 to v1beta2 Signed-off-by: sivchari --- .../main/bases/cluster-with-kcp.yaml | 14 ++-- .../main/bases/cluster-with-topology.yaml | 2 +- .../infrastructure-docker/main/bases/crs.yaml | 2 +- .../infrastructure-docker/main/bases/md.yaml | 10 +-- .../infrastructure-docker/main/bases/mp.yaml | 10 +-- .../cluster-template-ignition/ignition.yaml | 4 +- .../main/cluster-template-in-memory.yaml | 2 +- .../cluster-template-ipv6/cluster-ipv6.yaml | 4 +- .../main/cluster-template-ipv6/kcp-ipv6.yaml | 2 +- .../main/cluster-template-ipv6/md-ipv6.yaml | 2 +- .../step1/cluster-with-cp0.yaml | 16 ++--- .../cluster.yaml | 4 +- .../cluster-with-kcp.yaml | 14 ++-- .../cluster-template-kcp-remediation/mhc.yaml | 2 +- .../kustomization.yaml | 2 +- .../cluster-template-md-remediation/md.yaml | 2 +- .../cluster-template-md-remediation/mhc.yaml | 2 +- .../cluster-autoscaler.yaml | 2 +- .../cluster.yaml | 2 +- .../cluster.yaml | 2 +- .../kustomization.yaml | 2 +- .../kustomization.yaml | 4 +- .../cluster-runtimesdk.yaml | 2 +- .../main/clusterclass-in-memory.yaml | 22 +++--- .../kustomization.yaml | 6 +- .../clusterclass-quick-start-runtimesdk.yaml | 28 ++++---- .../main/clusterclass-quick-start.yaml | 68 +++++++++---------- 27 files changed, 116 insertions(+), 116 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml index f6e60681e1d5..61234cf750bb 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml @@ -1,6 +1,6 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: 
'${CLUSTER_NAME}' @@ -26,7 +26,7 @@ spec: # Cluster object with # - Reference to the KubeadmControlPlane object # - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -40,16 +40,16 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-control-plane" @@ -65,7 +65,7 @@ spec: # KubeadmControlPlane referenced by the Cluster object with # - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. 
kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" labels: @@ -75,7 +75,7 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 28ed20208e86..02130ade7e0a 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/bases/crs.yaml b/test/e2e/data/infrastructure-docker/main/bases/crs.yaml index b1b61237dc62..c0880053b4d2 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/crs.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/crs.yaml @@ -10,7 +10,7 @@ binaryData: --- # ClusterResourceSet object with # a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: name: "${CLUSTER_NAME}-crs-0" diff --git a/test/e2e/data/infrastructure-docker/main/bases/md.yaml b/test/e2e/data/infrastructure-docker/main/bases/md.yaml index 5d42a2cf5e6a..d0f085d4b28f 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/md.yaml @@ -1,7 +1,7 @@ --- # DockerMachineTemplate referenced by the MachineDeployment and with # - extraMounts for the docker sock, thus allowing self-hosting test 
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -15,7 +15,7 @@ spec: preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- # KubeadmConfigTemplate referenced by the MachineDeployment -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -26,7 +26,7 @@ spec: nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- # MachineDeployment object -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-md-0" @@ -42,10 +42,10 @@ spec: bootstrap: configRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate infrastructureRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate failureDomain: fd4 diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index 11ca197c8acd..b560313cf453 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -1,6 +1,6 @@ --- # MachinePool which references the DockerMachinePool and KubeadmConfigTemplate below -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachinePool metadata: name: "${CLUSTER_NAME}-mp-0" @@ -11,12 +11,12 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig name: "${CLUSTER_NAME}-mp-0-config" clusterName: '${CLUSTER_NAME}' infrastructureRef: - apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePool name: "${CLUSTER_NAME}-dmp-0" version: "${KUBERNETES_VERSION}" @@ -28,7 +28,7 @@ spec: - fd8 --- # DockerMachinePool using default values referenced by the MachinePool -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePool metadata: name: "${CLUSTER_NAME}-dmp-0" @@ -38,7 +38,7 @@ spec: preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- # KubeadmConfigTemplate referenced by the MachinePool -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig metadata: name: "${CLUSTER_NAME}-mp-0-config" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml index 42cde258b69e..26f66ef119a1 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml @@ -1,5 +1,5 @@ kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: @@ -24,7 +24,7 @@ spec: cgroup-root: "/kubelet" runtime-cgroups: "/system.slice/containerd.service" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml index 4c5357ccb844..7e645182c915 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: 
cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml index 0401174fbd89..1f977967b45f 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml @@ -1,5 +1,5 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -10,7 +10,7 @@ spec: pods: cidrBlocks: ['${DOCKER_POD_IPV6_CIDRS}'] --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml index 9315c5472381..328f7616d05f 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml @@ -1,6 +1,6 @@ --- kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml index 715888fdf3d6..ea1ac6716ce0 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml @@ -1,5 +1,5 @@ --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: 
"${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml index 2b1224d2710e..320068b1e77d 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml @@ -1,6 +1,6 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' @@ -8,7 +8,7 @@ metadata: # Cluster object with # - No reference to the KubeadmControlPlane object # - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -22,19 +22,19 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster name: '${CLUSTER_NAME}' --- # DockerMachine referenced by the Machine cp0 -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachine metadata: name: "${CLUSTER_NAME}-control-plane-0" spec: {} --- # KubeadmConfig referenced by the Machine cp0 -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig metadata: name: "${CLUSTER_NAME}-control-plane-0" @@ -48,7 +48,7 @@ spec: nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- # cp0 Machine -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Machine metadata: name: "${CLUSTER_NAME}-control-plane-0" @@ -60,9 +60,9 @@ spec: bootstrap: configRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig infrastructureRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachine diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml index 805ca5e732f5..766059c9b587 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -9,4 +9,4 @@ spec: controlPlane: metadata: annotations: - pre-drain.delete.hook.machine.cluster.x-k8s.io/kcp-ready-check: "true" \ No newline at end of file + pre-drain.delete.hook.machine.cluster.x-k8s.io/kcp-ready-check: "true" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml index 40a6baf85385..6102f006363a 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml @@ -1,6 +1,6 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: 
name: '${CLUSTER_NAME}' @@ -8,7 +8,7 @@ metadata: # Cluster object with # - Reference to the KubeadmControlPlane object # - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -22,16 +22,16 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-control-plane" @@ -42,7 +42,7 @@ spec: --- # KubeadmControlPlane referenced by the Cluster kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: @@ -50,7 +50,7 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml index 39187cec0a40..f6492de8a547 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml @@ 
-3,7 +3,7 @@ # - a selector that targets all the machines with label cluster.x-k8s.io/control-plane="" and the mhc-test: "fail" (the label is used to trigger remediation in a controlled way - by adding CP under MHC control intentionally -) # - nodeStartupTimeout: 30s (to force remediation on nodes still provisioning) # - unhealthyConditions triggering remediation after 10s the e2e.remediation.condition condition is set to false (to force remediation on nodes already provisioned) -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml index 420dc3c07245..9985506ce769 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml @@ -6,5 +6,5 @@ patches: - path: kcp-scale-in-variable.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml index 13968556b60a..d317bf328a99 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml index c10722590945..6881a4880c2e 100644 --- 
a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml @@ -2,7 +2,7 @@ # MachineHealthCheck object with # - a selector that targets all the machines with label e2e.remediation.label="" # - unhealthyConditions triggering remediation after 10s the condition is set -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml index 36466c480d94..d6454a2e70bc 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml index 4becb1d1d900..61396936da61 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml index 
d43dc35b4c8a..7af9a1ae630b 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml @@ -1,5 +1,5 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml index f4a499e6ed8b..fdd9b64d3ad9 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml @@ -5,5 +5,5 @@ patches: - path: cluster-topology-class.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml index e2b7e414c57e..cb1ffb120cb6 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml @@ -6,10 +6,10 @@ patches: - path: disable-control-plane-taint-variable.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster - path: remove-topology-workers.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index c27d9bb62220..059c0408f7bf 100644 --- 
a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index fbdbf653040c..2f61818e1381 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate metadata: name: in-memory-cluster @@ -8,7 +8,7 @@ spec: backend: inMemory: {} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: in-memory-control-plane @@ -34,7 +34,7 @@ spec: kubeletExtraArgs: eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-control-plane @@ -60,7 +60,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-default-worker-machinetemplate @@ -86,7 +86,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: in-memory-default-worker-bootstraptemplate @@ -99,7 +99,7 @@ spec: kubeletExtraArgs: eviction-hard: 
nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: in-memory @@ -109,11 +109,11 @@ spec: annotations: machineInfrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-control-plane ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: @@ -126,7 +126,7 @@ spec: timeout: 300s infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate name: in-memory-cluster workers: @@ -135,12 +135,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml index 90dd95d33b84..079c5d9f5c1d 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml @@ -5,15 +5,15 @@ patches: - path: remove-workers.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass - path: clusterclass-name.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass - 
path: remove-worker-patches.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index 13f986f2d33d..cca1d56e72cd 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -1,23 +1,23 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start-runtimesdk spec: controlPlane: ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: ref: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster infrastructureNamingStrategy: @@ -30,12 +30,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -45,12 +45,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: 
quick-start-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: @@ -60,7 +60,7 @@ spec: validateExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -86,7 +86,7 @@ spec: controlPlane: false --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: @@ -104,7 +104,7 @@ spec: joinConfiguration: nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -116,7 +116,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -128,7 +128,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -141,7 +141,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 8dc1b002892f..814e5c7f04fc 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start @@ -10,13 +10,13 @@ spec: annotations: ClusterClass.controlPlane.annotation: "ClusterClass.controlPlane.annotationValue" ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: ref: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane machineHealthCheck: maxUnhealthy: 100% @@ -26,7 +26,7 @@ spec: timeout: 20s infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -40,12 +40,12 @@ spec: ClusterClass.machineDeployment.annotation: "ClusterClass.machineDeployment.annotationValue" bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-md-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: @@ -61,12 +61,12 @@ spec: ClusterClass.machinePool.annotation: "ClusterClass.machinePool.annotationValue" bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-mp-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: @@ -166,7 +166,7 @@ spec: - name: lbImageRepository definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate matchResources: infrastructureCluster: true @@ -181,7 +181,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -197,7 +197,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -211,7 +211,7 @@ spec: description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: machineDeploymentClass: @@ -224,7 +224,7 @@ spec: template: | kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -235,7 +235,7 @@ spec: template: | kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -254,7 +254,7 @@ spec: and reduce load to public registries. definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -273,7 +273,7 @@ spec: and reduce load to public registries. 
definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -289,7 +289,7 @@ spec: enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}' definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -302,7 +302,7 @@ spec: enabledIf: "{{ not .controlPlaneTaint }}" definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -318,7 +318,7 @@ spec: description: "Configures kubelet to run with an external cloud provider for control plane nodes." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -334,7 +334,7 @@ spec: description: "Configures kubelet to run with an external cloud provider for machineDeployment nodes." definitions: - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -345,7 +345,7 @@ spec: path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" value: "external" - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machinePoolClass: @@ -360,7 +360,7 @@ spec: description: "Configures KCP to use IPv6 for its localAPIEndpoint." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -373,7 +373,7 @@ spec: description: "Adds an admission configuration for PodSecurity to the kube-apiserver." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -419,7 +419,7 @@ spec: description: "Configures control plane components and kubelet to run at the log level specified in the variable `kubeControlPlaneLogLevel`." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -441,7 +441,7 @@ spec: description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -459,7 +459,7 @@ spec: description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." 
definitions: - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -471,7 +471,7 @@ spec: valueFrom: variable: kubeletLogLevel - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machinePoolClass: @@ -483,7 +483,7 @@ spec: valueFrom: variable: kubeletLogLevel --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -518,7 +518,7 @@ spec: controlPlane: false --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane labels: @@ -565,7 +565,7 @@ spec: kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -586,7 +586,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -607,7 +607,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -629,7 +629,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: 
${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-md-default-worker-bootstraptemplate @@ -650,7 +650,7 @@ spec: kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-mp-default-worker-bootstraptemplate From d220b02c5aeea7e4ec97b338da58b43ed2e1f0a8 Mon Sep 17 00:00:00 2001 From: sivchari Date: Sun, 6 Jul 2025 21:25:52 +0900 Subject: [PATCH 02/31] use v1beta2 in runtime extension Signed-off-by: sivchari --- .../main/bases/cluster-with-kcp.yaml | 26 ++-- .../main/bases/cluster-with-topology.yaml | 2 +- .../infrastructure-docker/main/bases/md.yaml | 4 +- .../infrastructure-docker/main/bases/mp.yaml | 14 +- .../step1/cluster-with-cp0.yaml | 6 +- .../cluster-with-kcp.yaml | 6 +- .../cluster-runtimesdk.yaml | 2 +- .../main/clusterclass-in-memory.yaml | 10 +- .../clusterclass-quick-start-runtimesdk.yaml | 18 +-- .../main/clusterclass-quick-start.yaml | 14 +- .../handlers/topologymutation/handler.go | 46 +++---- .../handlers/topologymutation/handler_test.go | 122 +++++++++--------- .../clusterclass-quick-start-runtimesdk.yaml | 43 +++--- 13 files changed, 153 insertions(+), 160 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml index 61234cf750bb..5dcb619935e8 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml @@ -6,21 +6,21 @@ metadata: name: '${CLUSTER_NAME}' spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 
controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- # Cluster object with @@ -40,12 +40,12 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + apiGroup: controlplane.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object @@ -75,15 +75,11 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 02130ade7e0a..a756d4aa6fdb 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -67,7 +67,7 @@ spec: minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - fd4 + - name: fd4 variables: # We set an empty value to use the default tag kubeadm init is using. - name: etcdImageTag diff --git a/test/e2e/data/infrastructure-docker/main/bases/md.yaml b/test/e2e/data/infrastructure-docker/main/bases/md.yaml index d0f085d4b28f..622d7b665500 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/md.yaml @@ -42,10 +42,10 @@ spec: bootstrap: configRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate infrastructureRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachineTemplate failureDomain: fd4 diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index b560313cf453..0b70a4f2746e 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -11,21 +11,21 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfig name: "${CLUSTER_NAME}-mp-0-config" clusterName: '${CLUSTER_NAME}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachinePool name: "${CLUSTER_NAME}-dmp-0" version: 
"${KUBERNETES_VERSION}" failureDomains: - - fd4 - - fd5 - - fd6 - - fd7 - - fd8 + - name: fd4 + - name: fd5 + - name: fd6 + - name: fd7 + - name: fd8 --- # DockerMachinePool using default values referenced by the MachinePool apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml index 320068b1e77d..01f323159666 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml @@ -22,7 +22,7 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' --- @@ -60,9 +60,9 @@ spec: bootstrap: configRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfig infrastructureRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachine diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml index 6102f006363a..dfe594fa573d 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml @@ -22,12 +22,12 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + apiGroup: controlplane.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object @@ -50,7 +50,7 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 059c0408f7bf..23af0a76fda1 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -37,7 +37,7 @@ spec: minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - fd4 + - name: fd4 variables: - name: kubeadmControlPlaneMaxSurge value: "1" diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index 2f61818e1381..44d3ceb64880 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -109,11 +109,11 @@ spec: annotations: machineInfrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DevMachineTemplate name: in-memory-control-plane ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + apiGroup: controlplane.cluster.x-k8s.io kind: KubeadmControlPlaneTemplate name: 
in-memory-control-plane machineHealthCheck: @@ -126,7 +126,7 @@ spec: timeout: 300s infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DevClusterTemplate name: in-memory-cluster workers: @@ -135,12 +135,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index cca1d56e72cd..abaabc9337a2 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -5,19 +5,19 @@ metadata: spec: controlPlane: ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + apiGroup: controlplane.cluster.x-k8s.io kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: ref: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerClusterTemplate name: quick-start-cluster infrastructureNamingStrategy: @@ -30,12 +30,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: ref: - 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -45,12 +45,12 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: @@ -99,10 +99,6 @@ spec: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 814e5c7f04fc..acf07ff72b1f 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -10,13 +10,13 @@ spec: annotations: ClusterClass.controlPlane.annotation: "ClusterClass.controlPlane.annotationValue" ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + apiGroup: controlplane.cluster.x-k8s.io kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: ref: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io name: quick-start-control-plane machineHealthCheck: maxUnhealthy: 100% @@ -26,7 +26,7 @@ spec: timeout: 20s infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -40,12 +40,12 @@ spec: ClusterClass.machineDeployment.annotation: "ClusterClass.machineDeployment.annotationValue" bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate name: quick-start-md-default-worker-bootstraptemplate infrastructure: ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: @@ -61,12 +61,12 @@ spec: ClusterClass.machinePool.annotation: "ClusterClass.machinePool.annotationValue" bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate name: quick-start-mp-default-worker-bootstraptemplate infrastructure: ref: - 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index adbe34358ddd..8c079ca7ab00 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -33,13 +33,13 @@ import ( intstrutil "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" - bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" - controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" - infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" - infraexpv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta2" "sigs.k8s.io/cluster-api/test/infrastructure/kind" ) @@ -57,16 +57,16 @@ type ExtensionHandlers struct { // NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. 
func NewExtensionHandlers() *ExtensionHandlers { scheme := runtime.NewScheme() - _ = infrav1beta1.AddToScheme(scheme) - _ = infraexpv1beta1.AddToScheme(scheme) - _ = bootstrapv1beta1.AddToScheme(scheme) - _ = controlplanev1beta1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) + _ = infraexpv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) return &ExtensionHandlers{ // Add the apiGroups being handled to the decoder decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( - infrav1beta1.GroupVersion, - bootstrapv1beta1.GroupVersion, - controlplanev1beta1.GroupVersion, + infrav1.GroupVersion, + bootstrapv1.GroupVersion, + controlplanev1.GroupVersion, ), } } @@ -87,18 +87,18 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log := ctrl.LoggerFrom(ctx) switch obj := obj.(type) { - case *infrav1beta1.DockerClusterTemplate: + case *infrav1.DockerClusterTemplate: if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerClusterTemplate") return errors.Wrap(err, "error patching DockerClusterTemplate") } - case *controlplanev1beta1.KubeadmControlPlaneTemplate: + case *controlplanev1.KubeadmControlPlaneTemplate: err := patchKubeadmControlPlaneTemplate(ctx, obj, variables) if err != nil { log.Error(err, "Error patching KubeadmControlPlaneTemplate") return errors.Wrapf(err, "error patching KubeadmControlPlaneTemplate") } - case *bootstrapv1beta1.KubeadmConfigTemplate: + case *bootstrapv1.KubeadmConfigTemplate: // NOTE: KubeadmConfigTemplate could be linked to one or more of the existing MachineDeployment class; // the patchKubeadmConfigTemplate func shows how to implement patches only for KubeadmConfigTemplates // linked to a specific MachineDeployment class; another option is to check the holderRef value and call @@ -107,7 +107,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, 
"Error patching KubeadmConfigTemplate") return errors.Wrap(err, "error patching KubeadmConfigTemplate") } - case *infrav1beta1.DockerMachineTemplate: + case *infrav1.DockerMachineTemplate: // NOTE: DockerMachineTemplate could be linked to the ControlPlane or one or more of the existing MachineDeployment class; // the patchDockerMachineTemplate func shows how to implement different patches for DockerMachineTemplate // linked to ControlPlane or for DockerMachineTemplate linked to MachineDeployment classes; another option @@ -116,7 +116,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, "Error patching DockerMachineTemplate") return errors.Wrap(err, "error patching DockerMachineTemplate") } - case *infraexpv1beta1.DockerMachinePoolTemplate: + case *infraexpv1.DockerMachinePoolTemplate: if err := patchDockerMachinePoolTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerMachinePoolTemplate") return errors.Wrap(err, "error patching DockerMachinePoolTemplate") @@ -129,7 +129,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // patchDockerClusterTemplate patches the DockerClusterTemplate. // It sets the LoadBalancer.ImageRepository if the imageRepository variable is provided. // NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. 
-func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav1beta1.DockerClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav1.DockerClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { imageRepo, err := topologymutation.GetStringVariable(templateVariables, "imageRepository") if err != nil { if topologymutation.IsNotFoundError(err) { @@ -146,7 +146,7 @@ func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav // patchKubeadmControlPlaneTemplate patches the ControlPlaneTemplate. // It sets the RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided. // NOTE: RolloutStrategy.RollingUpdate.MaxSurge patch is not required for any special reason, it is used for testing the patch machinery itself. -func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlplanev1beta1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlplanev1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // 1) Patch RolloutStrategy RollingUpdate MaxSurge with the value from the Cluster Topology variable. 
@@ -163,17 +163,17 @@ func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlp kubeadmControlPlaneMaxSurgeIntOrString := intstrutil.Parse(kcpControlPlaneMaxSurge) log.Info(fmt.Sprintf("Setting KubeadmControlPlaneMaxSurge to %q", kubeadmControlPlaneMaxSurgeIntOrString.String())) if kcpTemplate.Spec.Template.Spec.RolloutStrategy == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1beta1.RolloutStrategy{} + kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1.RolloutStrategy{} } if kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1beta1.RollingUpdate{} + kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} } kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString return nil } // patchKubeadmConfigTemplate patches the ControlPlaneTemplate. -func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1beta1.KubeadmConfigTemplate, _ map[string]apiextensionsv1.JSON) error { +func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1.KubeadmConfigTemplate, _ map[string]apiextensionsv1.JSON) error { return nil } @@ -182,7 +182,7 @@ func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1beta1.KubeadmCo // the DockerMachineTemplate belongs to. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as example of version aware patches. 
-func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infrav1beta1.DockerMachineTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infrav1.DockerMachineTemplate, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachineTemplate belongs to the ControlPlane, set the images using the ControlPlane version. @@ -237,7 +237,7 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr // It sets the CustomImage to an image for the version in use by the MachinePool. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as example of version aware patches. -func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1beta1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachinePoolTemplate belongs to a MachinePool, set the images the MachinePool version. diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go index e11949714b6f..2f31fd1b3a47 100644 --- a/test/extension/handlers/topologymutation/handler_test.go +++ b/test/extension/handlers/topologymutation/handler_test.go @@ -30,11 +30,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" . 
"sigs.k8s.io/controller-runtime/pkg/envtest/komega" - bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" - controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" - infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" - infraexpv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta2" ) var ( @@ -42,10 +42,10 @@ var ( ) func init() { - _ = infrav1beta1.AddToScheme(testScheme) - _ = infraexpv1beta1.AddToScheme(testScheme) - _ = controlplanev1beta1.AddToScheme(testScheme) - _ = bootstrapv1beta1.AddToScheme(testScheme) + _ = infrav1.AddToScheme(testScheme) + _ = infraexpv1.AddToScheme(testScheme) + _ = controlplanev1.AddToScheme(testScheme) + _ = bootstrapv1.AddToScheme(testScheme) } func Test_patchDockerClusterTemplate(t *testing.T) { @@ -53,29 +53,29 @@ func Test_patchDockerClusterTemplate(t *testing.T) { tests := []struct { name string - template *infrav1beta1.DockerClusterTemplate + template *infrav1.DockerClusterTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infrav1beta1.DockerClusterTemplate + expectedTemplate *infrav1.DockerClusterTemplate expectedErr bool }{ { name: "no op if imageRepository is not set", - template: &infrav1beta1.DockerClusterTemplate{}, + template: &infrav1.DockerClusterTemplate{}, variables: nil, - expectedTemplate: &infrav1beta1.DockerClusterTemplate{}, + expectedTemplate: &infrav1.DockerClusterTemplate{}, }, { name: "set LoadBalancer.ImageRepository if imageRepository is set", - template: &infrav1beta1.DockerClusterTemplate{}, + template: 
&infrav1.DockerClusterTemplate{}, variables: map[string]apiextensionsv1.JSON{ "imageRepository": {Raw: toJSON("testImage")}, }, - expectedTemplate: &infrav1beta1.DockerClusterTemplate{ - Spec: infrav1beta1.DockerClusterTemplateSpec{ - Template: infrav1beta1.DockerClusterTemplateResource{ - Spec: infrav1beta1.DockerClusterSpec{ - LoadBalancer: infrav1beta1.DockerLoadBalancer{ - ImageMeta: infrav1beta1.ImageMeta{ + expectedTemplate: &infrav1.DockerClusterTemplate{ + Spec: infrav1.DockerClusterTemplateSpec{ + Template: infrav1.DockerClusterTemplateResource{ + Spec: infrav1.DockerClusterSpec{ + LoadBalancer: infrav1.DockerLoadBalancer{ + ImageMeta: infrav1.ImageMeta{ ImageRepository: "testImage", }, }, @@ -101,14 +101,14 @@ func Test_patchDockerClusterTemplate(t *testing.T) { func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { tests := []struct { name string - template *controlplanev1beta1.KubeadmControlPlaneTemplate + template *controlplanev1.KubeadmControlPlaneTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *controlplanev1beta1.KubeadmControlPlaneTemplate + expectedTemplate *controlplanev1.KubeadmControlPlaneTemplate expectedErr bool }{ { name: "sets RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided", - template: &controlplanev1beta1.KubeadmControlPlaneTemplate{}, + template: &controlplanev1.KubeadmControlPlaneTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -117,12 +117,12 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { })}, "kubeadmControlPlaneMaxSurge": {Raw: toJSON("1")}, }, - expectedTemplate: &controlplanev1beta1.KubeadmControlPlaneTemplate{ - Spec: controlplanev1beta1.KubeadmControlPlaneTemplateSpec{ - Template: controlplanev1beta1.KubeadmControlPlaneTemplateResource{ - Spec: controlplanev1beta1.KubeadmControlPlaneTemplateResourceSpec{ - RolloutStrategy: 
&controlplanev1beta1.RolloutStrategy{ - RollingUpdate: &controlplanev1beta1.RollingUpdate{MaxSurge: &intstr.IntOrString{IntVal: 1}}, + expectedTemplate: &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + RolloutStrategy: &controlplanev1.RolloutStrategy{ + RollingUpdate: &controlplanev1.RollingUpdate{MaxSurge: &intstr.IntOrString{IntVal: 1}}, }, }, }, @@ -150,21 +150,21 @@ func Test_patchDockerMachineTemplate(t *testing.T) { tests := []struct { name string - template *infrav1beta1.DockerMachineTemplate + template *infrav1.DockerMachineTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infrav1beta1.DockerMachineTemplate + expectedTemplate *infrav1.DockerMachineTemplate expectedErr bool }{ { name: "fails if builtin.controlPlane.version nor builtin.machineDeployment.version is not set", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: nil, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{}, + expectedTemplate: &infrav1.DockerMachineTemplate{}, expectedErr: true, }, { name: "sets customImage for templates linked to ControlPlane", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -172,10 +172,10 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{ - Spec: infrav1beta1.DockerMachineTemplateSpec{ - Template: infrav1beta1.DockerMachineTemplateResource{ - Spec: infrav1beta1.DockerMachineSpec{ + expectedTemplate: &infrav1.DockerMachineTemplate{ + Spec: infrav1.DockerMachineTemplateSpec{ + Template: infrav1.DockerMachineTemplateResource{ 
+ Spec: infrav1.DockerMachineSpec{ CustomImage: "kindest/node:v1.23.0", }, }, @@ -184,7 +184,7 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, { name: "sets customImage for templates linked to ControlPlane for pre versions", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -192,10 +192,10 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{ - Spec: infrav1beta1.DockerMachineTemplateSpec{ - Template: infrav1beta1.DockerMachineTemplateResource{ - Spec: infrav1beta1.DockerMachineSpec{ + expectedTemplate: &infrav1.DockerMachineTemplate{ + Spec: infrav1.DockerMachineTemplateSpec{ + Template: infrav1.DockerMachineTemplateResource{ + Spec: infrav1.DockerMachineSpec{ CustomImage: "kindest/node:v1.23.0-rc.0", }, }, @@ -221,21 +221,21 @@ func Test_patchDockerMachinePoolTemplate(t *testing.T) { tests := []struct { name string - template *infraexpv1beta1.DockerMachinePoolTemplate + template *infraexpv1.DockerMachinePoolTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infraexpv1beta1.DockerMachinePoolTemplate + expectedTemplate *infraexpv1.DockerMachinePoolTemplate expectedErr bool }{ { name: "fails if builtin.controlPlane.version nor builtin.machinePool.version is not set", - template: &infraexpv1beta1.DockerMachinePoolTemplate{}, + template: &infraexpv1.DockerMachinePoolTemplate{}, variables: nil, - expectedTemplate: &infraexpv1beta1.DockerMachinePoolTemplate{}, + expectedTemplate: &infraexpv1.DockerMachinePoolTemplate{}, expectedErr: true, }, { name: "sets customImage for templates linked to ControlPlane", - template: &infraexpv1beta1.DockerMachinePoolTemplate{}, + template: &infraexpv1.DockerMachinePoolTemplate{}, variables: map[string]apiextensionsv1.JSON{ 
runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -247,11 +247,11 @@ func Test_patchDockerMachinePoolTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infraexpv1beta1.DockerMachinePoolTemplate{ - Spec: infraexpv1beta1.DockerMachinePoolTemplateSpec{ - Template: infraexpv1beta1.DockerMachinePoolTemplateResource{ - Spec: infraexpv1beta1.DockerMachinePoolSpec{ - Template: infraexpv1beta1.DockerMachinePoolMachineTemplate{ + expectedTemplate: &infraexpv1.DockerMachinePoolTemplate{ + Spec: infraexpv1.DockerMachinePoolTemplateSpec{ + Template: infraexpv1.DockerMachinePoolTemplateResource{ + Spec: infraexpv1.DockerMachinePoolSpec{ + Template: infraexpv1.DockerMachinePoolMachineTemplate{ CustomImage: "kindest/node:v1.23.0", }, }, @@ -305,28 +305,28 @@ func TestHandler_GeneratePatches(t *testing.T) { }, }), } - kubeadmControlPlaneTemplate := controlplanev1beta1.KubeadmControlPlaneTemplate{ + kubeadmControlPlaneTemplate := controlplanev1.KubeadmControlPlaneTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmControlPlaneTemplate", - APIVersion: controlplanev1beta1.GroupVersion.String(), + APIVersion: controlplanev1.GroupVersion.String(), }, } - dockerMachineTemplate := infrav1beta1.DockerMachineTemplate{ + dockerMachineTemplate := infrav1.DockerMachineTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerMachineTemplate", - APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } - dockerMachinePoolTemplate := infraexpv1beta1.DockerMachinePoolTemplate{ + dockerMachinePoolTemplate := infraexpv1.DockerMachinePoolTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerMachinePoolTemplate", - APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } - dockerClusterTemplate := infrav1beta1.DockerClusterTemplate{ + dockerClusterTemplate := infrav1.DockerClusterTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerClusterTemplate", - 
APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } tests := []struct { @@ -349,7 +349,7 @@ func TestHandler_GeneratePatches(t *testing.T) { }, Items: []runtimehooksv1.GeneratePatchesResponseItem{ responseItem("1", `[ -{"op":"add","path":"/spec/template/spec/rolloutStrategy","value":{"rollingUpdate":{"maxSurge":3}}} + {"op":"add","path":"/spec","value":{"template": {"spec":{"rolloutStrategy": {"rollingUpdate":{"maxSurge":3}}}}}} ]`), responseItem("2", `[ {"op":"add","path":"/spec/template/spec/customImage","value":"kindest/node:v1.23.0"} diff --git a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml index 114a3c3a895f..ec3c6b398e80 100644 --- a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml +++ b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml @@ -5,19 +5,19 @@ metadata: spec: controlPlane: templateRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: templateRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -28,12 +28,12 @@ spec: template: bootstrap: templateRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: templateRef: - 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -43,12 +43,12 @@ spec: template: bootstrap: templateRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: @@ -58,7 +58,7 @@ spec: validateTopologyExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -66,26 +66,27 @@ spec: template: spec: failureDomains: - fd1: + - + name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false loadBalancer: {} --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: @@ -103,7 +104,7 @@ spec: joinConfiguration: nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -114,7 +115,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -125,7 +126,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -137,7 +138,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate From 207f82d53ff55da6f4c5578780eb8ac2af8e6b32 Mon Sep 17 00:00:00 2001 From: sivchari Date: Mon, 7 Jul 2025 16:15:12 +0900 Subject: [PATCH 03/31] migrate topology to v1beta2 Signed-off-by: sivchari --- .../main/bases/cluster-with-topology.yaml | 5 +++-- .../main/cluster-template-in-memory.yaml | 5 +++-- .../main/cluster-template-kcp-pre-drain/cluster.yaml | 3 ++- .../cluster-autoscaler.yaml | 3 ++- .../cluster.yaml | 3 ++- .../cluster.yaml | 3 ++- .../cluster-runtimesdk.yaml | 5 +++-- 7 files changed, 17 insertions(+), 10 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index a756d4aa6fdb..0feccda2e752 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -13,8 +13,9 @@ spec: 
cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start" - classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + classRef: + name: "quick-start" + namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml index 7e645182c915..b867758b7a64 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml @@ -11,8 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: in-memory - classNamespace: ${NAMESPACE} + classRef: + name: in-memory + namespace: ${NAMESPACE} version: ${KUBERNETES_VERSION} controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml index 766059c9b587..1358b7dfc6b5 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml @@ -5,7 +5,8 @@ metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start controlPlane: metadata: annotations: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml index d6454a2e70bc..a57c4745048f 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml +++ 
b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml @@ -13,7 +13,8 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start" + classRef: + name: "quick-start" version: "${KUBERNETES_VERSION}" controlPlane: metadata: {} diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml index 61396936da61..a04442b4e078 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml @@ -5,7 +5,8 @@ metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start variables: - name: ipv6Primary value: false diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml index 7af9a1ae630b..e4109076caf2 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml @@ -5,7 +5,8 @@ metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start variables: - name: ipv6Primary value: true diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 23af0a76fda1..4b334468c1c9 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ 
b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -13,8 +13,9 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start-runtimesdk" - classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + classRef: + name: "quick-start-runtimesdk" + namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: {} From 3817bf2647b3870c2e7844f10a06d68262945765 Mon Sep 17 00:00:00 2001 From: sivchari Date: Mon, 7 Jul 2025 16:46:49 +0900 Subject: [PATCH 04/31] fix apiGroup Signed-off-by: sivchari --- test/e2e/data/infrastructure-docker/main/bases/mp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index 0b70a4f2746e..640a227351d1 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -16,7 +16,7 @@ spec: name: "${CLUSTER_NAME}-mp-0-config" clusterName: '${CLUSTER_NAME}' infrastructureRef: - apiGroup: infrastructure.cluster.x-k8s.io/v1beta2 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachinePool name: "${CLUSTER_NAME}-dmp-0" version: "${KUBERNETES_VERSION}" From 560405ae291c3cade986bbc85a7ba512f2ec719a Mon Sep 17 00:00:00 2001 From: sivchari Date: Mon, 7 Jul 2025 17:04:48 +0900 Subject: [PATCH 05/31] fix failureDomains Signed-off-by: sivchari --- .../clusterclass-quick-start-runtimesdk.yaml | 16 ++++++++-------- .../main/clusterclass-quick-start.yaml | 16 ++++++++-------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index abaabc9337a2..2c31e53d6d65 100644 --- 
a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -68,21 +68,21 @@ spec: template: spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- kind: KubeadmControlPlaneTemplate diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index acf07ff72b1f..9919f2f573b6 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -500,21 +500,21 @@ spec: InfrastructureClusterTemplate.template.annotation: "InfrastructureClusterTemplate.template.annotationValue" spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- kind: KubeadmControlPlaneTemplate From 716f0eb35ad0464729d707d7782259590a47ae07 Mon Sep 17 00:00:00 2001 From: sivchari Date: Mon, 7 Jul 2025 20:44:03 +0900 Subject: [PATCH 06/31] migrate definition to v2 specification Signed-off-by: sivchari --- .../main/bases/cluster-with-topology.yaml | 12 +++---- .../cluster-template-kcp-remediation/mhc.yaml | 4 +-- .../cluster-template-md-remediation/mhc.yaml | 2 +- .../cluster-autoscaler.yaml | 6 ++-- .../cluster-runtimesdk.yaml | 10 +++--- .../main/clusterclass-in-memory.yaml | 34 +++++++++---------- 
.../clusterclass-quick-start-runtimesdk.yaml | 30 ++++++++-------- .../main/clusterclass-quick-start.yaml | 32 ++++++++--------- 8 files changed, 64 insertions(+), 66 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 0feccda2e752..8aa1d6807b8a 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -27,8 +27,8 @@ spec: Cluster.topology.controlPlane.annotation: "Cluster.topology.controlPlane.annotationValue" # Note: this annotation is propagated to Nodes. Cluster.topology.controlPlane.annotation.node.cluster.x-k8s.io: "Cluster.topology.controlPlane.nodeAnnotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: @@ -43,8 +43,8 @@ spec: Cluster.topology.machineDeployment.annotation: "Cluster.topology.machineDeployment.annotationValue" # Note: this annotation is propagated to Nodes. 
Shortened due to name length limitations Cluster.topology.md.annotation.node.cluster.x-k8s.io: "Cluster.topology.machineDeployment.nodeAnnotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomain: fd4 @@ -63,8 +63,8 @@ spec: Cluster.topology.machinePool.label.node.cluster.x-k8s.io: "Cluster.topology.machinePool.nodeLabelValue" annotations: Cluster.topology.machinePool.annotation: "Cluster.topology.machinePool.annotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml index f6492de8a547..0b61dc77249e 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml @@ -14,8 +14,8 @@ spec: matchLabels: cluster.x-k8s.io/control-plane: "" mhc-test: "fail" - nodeStartupTimeout: 30s + nodeStartupTimeoutSeconds: 30 unhealthyConditions: - type: e2e.remediation.condition status: "False" - timeout: 10s + timeoutSeconds: 10 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml index 6881a4880c2e..57b7129ccd45 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml @@ -15,4 +15,4 @@ spec: unhealthyConditions: - type: e2e.remediation.condition status: "False" - timeout: 10s + timeoutSeconds: 10 diff --git 
a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml index a57c4745048f..70093ab87ddd 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml @@ -18,13 +18,13 @@ spec: version: "${KUBERNETES_VERSION}" controlPlane: metadata: {} - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: - class: "default-worker" name: "md-0" - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 failureDomain: fd4 metadata: annotations: @@ -33,7 +33,7 @@ spec: machinePools: - class: "default-worker" name: "mp-0" - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 failureDomains: - fd4 variables: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 4b334468c1c9..3e9182416c7f 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -19,22 +19,22 @@ spec: version: "${KUBERNETES_VERSION}" controlPlane: metadata: {} - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: - class: "default-worker" name: "md-0" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomain: fd4 machinePools: - class: "default-worker" name: "mp-0" - 
nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index 44d3ceb64880..5ef9a5b9b706 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -105,28 +105,26 @@ metadata: name: in-memory spec: controlPlane: - metadata: - annotations: machineInfrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-control-plane - ref: - apiGroup: controlplane.cluster.x-k8s.io + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: unhealthyConditions: - type: Ready - status: Unknown - timeout: 300s + status: "Unknown" + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate name: in-memory-cluster workers: @@ -134,23 +132,23 @@ spec: - class: default-worker template: bootstrap: - ref: - apiGroup: bootstrap.cluster.x-k8s.io + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: unhealthyConditions: - type: Ready - status: Unknown - timeout: 300s + status: "Unknown" + 
timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 patches: - name: test-patch external: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index 2c31e53d6d65..d87bc08af2be 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -4,20 +4,20 @@ metadata: name: quick-start-runtimesdk spec: controlPlane: - ref: - apiGroup: controlplane.cluster.x-k8s.io + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate - apiGroup: infrastructure.cluster.x-k8s.io name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster infrastructureNamingStrategy: @@ -29,13 +29,13 @@ spec: template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}" template: bootstrap: - ref: - apiGroup: bootstrap.cluster.x-k8s.io + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -44,13 +44,13 @@ spec: template: "{{ .cluster.name }}-mp-{{ .machinePool.topologyName }}-{{ .random }}" template: bootstrap: - ref: - apiGroup: bootstrap.cluster.x-k8s.io + 
templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: @@ -93,7 +93,7 @@ spec: template: spec: machineTemplate: - nodeDrainTimeout: 1s + nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: apiServer: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 9919f2f573b6..9eba97825658 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -9,24 +9,24 @@ spec: ClusterClass.controlPlane.label: "ClusterClass.controlPlane.labelValue" annotations: ClusterClass.controlPlane.annotation: "ClusterClass.controlPlane.annotationValue" - ref: - apiGroup: controlplane.cluster.x-k8s.io + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate - apiGroup: infrastructure.cluster.x-k8s.io name: quick-start-control-plane machineHealthCheck: maxUnhealthy: 100% unhealthyConditions: - type: e2e.remediation.condition status: "False" - timeout: 20s + timeoutSeconds: 20 infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -39,13 +39,13 @@ spec: annotations: ClusterClass.machineDeployment.annotation: "ClusterClass.machineDeployment.annotationValue" bootstrap: - ref: - apiGroup: bootstrap.cluster.x-k8s.io + 
templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-md-default-worker-bootstraptemplate infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: @@ -60,13 +60,13 @@ spec: annotations: ClusterClass.machinePool.annotation: "ClusterClass.machinePool.annotationValue" bootstrap: - ref: - apiGroup: bootstrap.cluster.x-k8s.io + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-mp-default-worker-bootstraptemplate infrastructure: - ref: - apiGroup: infrastructure.cluster.x-k8s.io + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: @@ -541,7 +541,7 @@ spec: ControlPlaneTemplate.machineTemplate.label: "ControlPlaneTemplate.machineTemplate.labelValue" annotations: ControlPlaneTemplate.machineTemplate.annotation: "ControlPlaneTemplate.machineTemplate.annotationValue" - nodeDrainTimeout: 1s + nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: # extraArgs must be non-empty for control plane components to enable patches from ClusterClass to work. 
From 11eec8211553675029c2525f5b1e7d004bd713e7 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 12:33:27 +0900 Subject: [PATCH 07/31] add version to topology Signed-off-by: sivchari --- .../main/cluster-template-kcp-pre-drain/cluster.yaml | 1 + .../cluster.yaml | 1 + .../cluster.yaml | 1 + .../cluster-topology-class.yaml | 3 --- .../cluster-template-topology-kcp-only/kustomization.yaml | 6 ------ 5 files changed, 3 insertions(+), 9 deletions(-) delete mode 100644 test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml index 1358b7dfc6b5..039ecd98df00 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml @@ -7,6 +7,7 @@ spec: topology: classRef: name: quick-start + version: "${KUBERNETES_VERSION}" controlPlane: metadata: annotations: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml index a04442b4e078..165eb77d021e 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml @@ -7,6 +7,7 @@ spec: topology: classRef: name: quick-start + version: "${KUBERNETES_VERSION}" variables: - name: ipv6Primary value: false diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml index e4109076caf2..9eb4e37a403b 
100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml @@ -7,6 +7,7 @@ spec: topology: classRef: name: quick-start + version: "${KUBERNETES_VERSION}" variables: - name: ipv6Primary value: true diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml deleted file mode 100644 index c9bbfd605299..000000000000 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- op: add - path: /spec/topology/class - value: "quick-start-kcp-only" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml index fdd9b64d3ad9..089a3b71e831 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml @@ -1,9 +1,3 @@ resources: - ../cluster-template-topology-no-workers -patches: -- path: cluster-topology-class.yaml - target: - group: cluster.x-k8s.io - version: v1beta2 - kind: Cluster From 3a3a900de0d6430fb7fc1c37af648356d86e8004 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 14:15:46 +0900 Subject: [PATCH 08/31] fix unknown fields Signed-off-by: sivchari --- .../infrastructure-docker/main/bases/md.yaml | 1 - .../infrastructure-docker/main/bases/mp.yaml | 1 - .../main/cluster-template-ipv6/kcp-ipv6.yaml | 6 +- .../step1/cluster-with-cp0.yaml | 4 -- .../cluster-with-kcp.yaml | 4 -- .../cluster-template-kcp-remediation/mhc.yaml | 6 +- 
.../cluster-template-md-remediation/mhc.yaml | 4 +- .../main/clusterclass-in-memory.yaml | 11 ++-- .../clusterclass-quick-start-runtimesdk.yaml | 5 +- .../main/clusterclass-quick-start.yaml | 56 ++++++++++++------- 10 files changed, 54 insertions(+), 44 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/md.yaml b/test/e2e/data/infrastructure-docker/main/bases/md.yaml index 622d7b665500..3de78c3d9e84 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/md.yaml @@ -23,7 +23,6 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- # MachineDeployment object apiVersion: cluster.x-k8s.io/v1beta2 diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index 640a227351d1..ddf4a8666329 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -44,4 +44,3 @@ metadata: name: "${CLUSTER_NAME}-mp-0-config" spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml index 328f7616d05f..f824862a2ad4 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml @@ -15,8 +15,10 @@ spec: bindPort: 6443 nodeRegistration: kubeletExtraArgs: - node-ip: "::" + name: node-ip + value: "::" joinConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + name: node-ip + value: "::" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml index 01f323159666..b8b84ed2238e 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml @@ -42,10 +42,6 @@ spec: clusterConfiguration: apiServer: certSANs: [localhost, 127.0.0.1] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
--- # cp0 Machine apiVersion: cluster.x-k8s.io/v1beta2 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml index dfe594fa573d..e66054c69622 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml @@ -57,10 +57,6 @@ spec: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
files: - path: /wait-signal.sh content: | diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml index 0b61dc77249e..653b44343125 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml @@ -1,8 +1,8 @@ --- # MachineHealthCheck object with # - a selector that targets all the machines with label cluster.x-k8s.io/control-plane="" and the mhc-test: "fail" (the label is used to trigger remediation in a controlled way - by adding CP under MHC control intentionally -) -# - nodeStartupTimeout: 30s (to force remediation on nodes still provisioning) -# - unhealthyConditions triggering remediation after 10s the e2e.remediation.condition condition is set to false (to force remediation on nodes already provisioned) +# - nodeStartupTimeoutSeconds: 30s (to force remediation on nodes still provisioning) +# - unhealthyNodeConditions triggering remediation after 10s the e2e.remediation.condition condition is set to false (to force remediation on nodes already provisioned) apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: @@ -15,7 +15,7 @@ spec: cluster.x-k8s.io/control-plane: "" mhc-test: "fail" nodeStartupTimeoutSeconds: 30 - unhealthyConditions: + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" timeoutSeconds: 10 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml index 57b7129ccd45..a1209cf6270b 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml @@ -1,7 +1,7 @@ --- # MachineHealthCheck object with # - a selector that targets 
all the machines with label e2e.remediation.label="" -# - unhealthyConditions triggering remediation after 10s the condition is set +# - unhealthyNodeConditions triggering remediation after 10s the condition is set apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: @@ -12,7 +12,7 @@ spec: selector: matchLabels: e2e.remediation.label: "" - unhealthyConditions: + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" timeoutSeconds: 10 diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index 5ef9a5b9b706..99106f9fa7fc 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -27,12 +27,14 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% joinConfiguration: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate @@ -97,7 +99,8 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass @@ -152,5 +155,5 @@ spec: patches: - name: test-patch external: - generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"scale"} + generatePatchesExtension: 
generate-patches.${EXTENSION_CONFIG_NAME:-"scale"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"scale"} diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index d87bc08af2be..cd1a73fbb1f5 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -56,8 +56,8 @@ spec: patches: - name: test-patch external: - generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} - validateExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + generatePatchesExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + validateTopologyExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 @@ -145,4 +145,3 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 9eba97825658..6b45d9dd44bb 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -20,7 +20,7 @@ spec: name: quick-start-control-plane machineHealthCheck: maxUnhealthy: 100% - unhealthyConditions: + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" timeoutSeconds: 20 @@ -50,7 +50,7 @@ spec: name: quick-start-default-worker-machinetemplate machineHealthCheck: maxUnhealthy: 100% - # We are intentionally not setting the 'unhealthyConditions' here to test that the field is optional. + # We are intentionally not setting the 'unhealthyNodeConditions' here to test that the field is optional. machinePools: - class: default-worker template: @@ -324,11 +324,15 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: "cloud-provider" + value: "external" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: "cloud-provider" + value: "external" - name: machineDeploymentExternalCloudProvider enabledIf: "{{ .externalCloudProvider }}" description: "Configures kubelet to run with an external cloud provider for machineDeployment nodes." 
@@ -342,8 +346,10 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: "cloud-provider" + value: "external" - selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -353,8 +359,10 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: "cloud-provider" + value: "external" - name: localEndpointIPv6 enabledIf: "{{ .ipv6Primary }}" description: "Configures KCP to use IPv6 for its localAPIEndpoint." @@ -447,13 +455,17 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" - valueFrom: - variable: kubeletLogLevel + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: v + valueFrom: + value: kubeletLogLevel - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/v" - valueFrom: - variable: kubeletLogLevel + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs" + value: + name: v + valueFrom: + value: kubeletLogLevel - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." @@ -559,11 +571,13 @@ spec: initConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate @@ -648,7 +662,8 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -669,4 +684,5 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' From 347af4654e31217db44e028edb51baed596c9686 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 14:36:51 +0900 Subject: [PATCH 09/31] fix kubelet extra args and naming strategy Signed-off-by: sivchari --- .../cluster-template-ignition/ignition.yaml | 36 ++++++++++++------- .../main/cluster-template-ipv6/kcp-ipv6.yaml | 8 ++--- .../main/cluster-template-ipv6/md-ipv6.yaml | 6 ++-- .../main/clusterclass-in-memory.yaml | 12 +++---- .../clusterclass-quick-start-runtimesdk.yaml | 2 +- .../main/clusterclass-quick-start.yaml | 6 ---- 6 files changed, 39 insertions(+), 31 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml index 26f66ef119a1..ab9468837953 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml @@ -10,19 +10,27 @@ spec: # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" joinConfiguration: nodeRegistration: # We have to set the criSocket 
to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" --- apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -47,7 +55,11 @@ spec: # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml index f824862a2ad4..d5849afab52a 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml @@ -15,10 +15,10 @@ spec: bindPort: 6443 nodeRegistration: kubeletExtraArgs: - name: node-ip - value: "::" + - name: node-ip + value: "::" joinConfiguration: nodeRegistration: kubeletExtraArgs: - name: node-ip - value: "::" + - name: node-ip + value: "::" diff --git 
a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml index ea1ac6716ce0..c5d1e0c24828 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml @@ -9,8 +9,10 @@ spec: initConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" joinConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index 99106f9fa7fc..abd2743258be 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -27,14 +27,14 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - name: eviction-hard - value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% joinConfiguration: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - name: eviction-hard - value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate @@ -99,8 +99,8 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - name: eviction-hard - value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass diff --git 
a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index cd1a73fbb1f5..35bcb59eb66d 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -20,7 +20,7 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster - infrastructureNamingStrategy: + namingStrategy: template: "{{ .cluster.name }}-infra-{{ .random }}" workers: machineDeployments: diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 6b45d9dd44bb..c613080745c2 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -78,12 +78,6 @@ spec: default: kindest - name: etcdImageTag required: true - # This metadata has just been added to verify that we can set metadata. 
- metadata: - labels: - testLabelKey: testLabelValue - annotations: - testAnnotationKey: testAnnotationValue schema: openAPIV3Schema: type: string From 596f8b27197f0b1df07ac40f4740fd5496f4ae82 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 16:05:04 +0900 Subject: [PATCH 10/31] fix failuredomain on mp Signed-off-by: sivchari --- .../infrastructure-docker/main/bases/cluster-with-topology.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 8aa1d6807b8a..ddee49e92ec4 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -68,7 +68,7 @@ spec: minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - name: fd4 + - fd4 variables: # We set an empty value to use the default tag kubeadm init is using. 
- name: etcdImageTag From 2ef08f9dde122f5f12519cb1754e8711b594b63e Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 16:36:28 +0900 Subject: [PATCH 11/31] fix empty ns Signed-off-by: sivchari --- .../infrastructure-docker/main/bases/cluster-with-topology.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index ddee49e92ec4..b0918597338d 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -15,7 +15,6 @@ spec: topology: classRef: name: "quick-start" - namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: From 0f4f62abebe7d14177b4dea1fae3a7a6f3bad0b1 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 17:14:19 +0900 Subject: [PATCH 12/31] upgrade CAPD template to v1beta2 Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 56 ++++++------ .../cluster-template-development-mp.yaml | 2 +- .../cluster-template-development.yaml | 2 +- .../templates/cluster-template-in-memory.yaml | 2 +- .../templates/clusterclass-in-memory.yaml | 55 ++++++------ .../templates/clusterclass-quick-start.yaml | 85 +++++++++++-------- 6 files changed, 111 insertions(+), 91 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index c613080745c2..570fb3db432a 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -318,12 +318,12 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + path: 
"/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" value: name: "cloud-provider" value: "external" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs" + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" value: name: "cloud-provider" value: "external" @@ -340,7 +340,7 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" value: name: "cloud-provider" value: "external" @@ -353,7 +353,7 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" value: name: "cloud-provider" value: "external" @@ -449,17 +449,17 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs" - value: - name: v - valueFrom: - value: kubeletLogLevel + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + valueFrom: + template: | + name: v + value: {{ .kubeletLogLevel }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs" - value: - name: v - valueFrom: - value: kubeletLogLevel + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" + valueFrom: + template: | + name: v + value: {{ .kubeletLogLevel }} - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." 
@@ -473,9 +473,11 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: {{ .kubeletLogLevel }} - selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -485,9 +487,11 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: {{ .kubeletLogLevel }} --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate @@ -565,13 +569,13 @@ spec: initConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate @@ -656,8 +660,8 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -678,5 +682,5 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - name: eviction-hard - value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' diff --git a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml index 9c42920be93c..f1b1cd74cbb5 100644 --- a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" diff --git a/test/infrastructure/docker/templates/cluster-template-development.yaml b/test/infrastructure/docker/templates/cluster-template-development.yaml index 03a4c8efa8b0..648c7dbacd65 100644 --- a/test/infrastructure/docker/templates/cluster-template-development.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" diff --git a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml index 972af42bea16..28e62ba6ff5c 100644 --- a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml +++ b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" diff --git a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml index d6053aa256c7..73061075fefc 100644 --- 
a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml +++ b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate metadata: name: in-memory-cluster @@ -8,7 +8,7 @@ spec: backend: inMemory: {} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: in-memory-control-plane @@ -27,14 +27,16 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% joinConfiguration: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-control-plane @@ -60,7 +62,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-default-worker-machinetemplate @@ -86,7 +88,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: in-memory-default-worker-bootstraptemplate @@ -97,9 +99,10 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + 
- name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: in-memory @@ -108,25 +111,26 @@ spec: metadata: annotations: machineInfrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-control-plane - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: - unhealthyConditions: + maxUnhealthy: 100% + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate name: in-memory-cluster workers: @@ -134,20 +138,21 @@ spec: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: - unhealthyConditions: + maxUnhealthy: 100% + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 diff --git a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml index a2c11924544c..1529b3608b2e 
100644 --- a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml +++ b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml @@ -1,29 +1,30 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start spec: controlPlane: - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane machineHealthCheck: - unhealthyConditions: + maxUnhealthy: 100% + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -31,34 +32,35 @@ spec: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: - unhealthyConditions: + maxUnhealthy: 100% + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 machinePools: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + 
templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: @@ -114,7 +116,7 @@ spec: enabledIf: '{{ ne .imageRepository "" }}' definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -128,7 +130,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -144,7 +146,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -158,7 +160,7 @@ spec: description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." 
definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: machineDeploymentClass: @@ -171,7 +173,7 @@ spec: template: | kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -184,7 +186,7 @@ spec: template: | kindest/node:{{ .builtin.machinePool.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -198,7 +200,7 @@ spec: description: "Adds an admission configuration for PodSecurity to the kube-apiserver." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -241,7 +243,7 @@ spec: path: /etc/kubernetes/kube-apiserver-admission-pss.yaml enabledIf: "{{ .podSecurityStandard.enabled }}" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -250,7 +252,7 @@ spec: spec: {} --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: @@ -262,11 +264,17 @@ spec: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+ nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -277,7 +285,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -288,7 +296,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -297,7 +305,7 @@ spec: spec: template: {} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate @@ -305,4 +313,7 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD 
according to the kindest/node image in use. + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' From d508182e7f6e61523d4bead11d8b2b827df54b70 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 18:42:02 +0900 Subject: [PATCH 13/31] fix extraArgs format Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 33 ++++++++++++------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 570fb3db432a..a37779fbb38a 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -381,8 +381,10 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/admission-control-config-file" - value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + value: + name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: @@ -427,17 +429,23 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + name: v + value: {{ .kubeControlPlaneLogLevel }} - op: add - path: 
"/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/-" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + name: v + value: {{ .kubeControlPlaneLogLevel }} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/-" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + name: v + value: {{ .kubeControlPlaneLogLevel }} - name: controlPlaneKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`." @@ -557,13 +565,16 @@ spec: # extraArgs must be non-empty for control plane components to enable patches from ClusterClass to work. controllerManager: extraArgs: - v: "0" + - name: v + value: "0" scheduler: extraArgs: - v: "0" + - name: v + value: "0" apiServer: extraArgs: - v: "0" + - name: v + value: "0" # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. 
certSANs: [localhost, host.docker.internal, "::", "::1", "127.0.0.1", "0.0.0.0"] initConfiguration: From a1db62f5ede81d4471728d7cfc62d6f6be621070 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 21:58:32 +0900 Subject: [PATCH 14/31] fix indent Signed-off-by: sivchari --- .../infrastructure/docker/templates/clusterclass-in-memory.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml index 73061075fefc..f56e52fb4040 100644 --- a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml +++ b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml @@ -100,7 +100,7 @@ spec: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - name: eviction-hard - value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass From df98d18e4ee660b84cbe9e7d5a4f659ded563c94 Mon Sep 17 00:00:00 2001 From: sivchari Date: Tue, 8 Jul 2025 22:10:32 +0900 Subject: [PATCH 15/31] handle v1beta1 and v1beta2 in runtime extension Signed-off-by: sivchari --- .../handlers/topologymutation/handler.go | 106 ++++++++++++++---- 1 file changed, 86 insertions(+), 20 deletions(-) diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index 8c079ca7ab00..d3a877c18537 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -33,12 +33,16 @@ import ( intstrutil "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" + bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1beta1 
"sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" + infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + infraexpv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta2" "sigs.k8s.io/cluster-api/test/infrastructure/kind" ) @@ -57,15 +61,22 @@ type ExtensionHandlers struct { // NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. func NewExtensionHandlers() *ExtensionHandlers { scheme := runtime.NewScheme() + _ = infrav1beta1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) + _ = infraexpv1beta1.AddToScheme(scheme) _ = infraexpv1.AddToScheme(scheme) + _ = bootstrapv1beta1.AddToScheme(scheme) _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1beta1.AddToScheme(scheme) _ = controlplanev1.AddToScheme(scheme) return &ExtensionHandlers{ // Add the apiGroups being handled to the decoder decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( + infrav1beta1.GroupVersion, infrav1.GroupVersion, bootstrapv1.GroupVersion, + bootstrapv1beta1.GroupVersion, + infraexpv1beta1.GroupVersion, controlplanev1.GroupVersion, ), } @@ -86,19 +97,19 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error { log := ctrl.LoggerFrom(ctx) - switch obj := obj.(type) { - case *infrav1.DockerClusterTemplate: + switch obj.(type) { + case *infrav1beta1.DockerClusterTemplate, 
*infrav1.DockerClusterTemplate: if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerClusterTemplate") return errors.Wrap(err, "error patching DockerClusterTemplate") } - case *controlplanev1.KubeadmControlPlaneTemplate: + case *controlplanev1beta1.KubeadmControlPlaneTemplate, *controlplanev1.KubeadmControlPlaneTemplate: err := patchKubeadmControlPlaneTemplate(ctx, obj, variables) if err != nil { log.Error(err, "Error patching KubeadmControlPlaneTemplate") return errors.Wrapf(err, "error patching KubeadmControlPlaneTemplate") } - case *bootstrapv1.KubeadmConfigTemplate: + case *bootstrapv1beta1.KubeadmConfigTemplate, *bootstrapv1.KubeadmConfigTemplate: // NOTE: KubeadmConfigTemplate could be linked to one or more of the existing MachineDeployment class; // the patchKubeadmConfigTemplate func shows how to implement patches only for KubeadmConfigTemplates // linked to a specific MachineDeployment class; another option is to check the holderRef value and call @@ -107,7 +118,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, "Error patching KubeadmConfigTemplate") return errors.Wrap(err, "error patching KubeadmConfigTemplate") } - case *infrav1.DockerMachineTemplate: + case *infrav1beta1.DockerMachineTemplate, *infrav1.DockerMachineTemplate: // NOTE: DockerMachineTemplate could be linked to the ControlPlane or one or more of the existing MachineDeployment class; // the patchDockerMachineTemplate func shows how to implement different patches for DockerMachineTemplate // linked to ControlPlane or for DockerMachineTemplate linked to MachineDeployment classes; another option @@ -116,7 +127,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, "Error patching DockerMachineTemplate") return errors.Wrap(err, "error patching DockerMachineTemplate") } - case *infraexpv1.DockerMachinePoolTemplate: + case 
*infraexpv1beta1.DockerMachinePoolTemplate, *infraexpv1.DockerMachinePoolTemplate: if err := patchDockerMachinePoolTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerMachinePoolTemplate") return errors.Wrap(err, "error patching DockerMachinePoolTemplate") @@ -129,7 +140,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // patchDockerClusterTemplate patches the DockerClusterTemplate. // It sets the LoadBalancer.ImageRepository if the imageRepository variable is provided. // NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. -func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav1.DockerClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerClusterTemplate(_ context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { imageRepo, err := topologymutation.GetStringVariable(templateVariables, "imageRepository") if err != nil { if topologymutation.IsNotFoundError(err) { @@ -138,7 +149,17 @@ func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav return errors.Wrap(err, "could not set DockerClusterTemplate loadBalancer imageRepository") } - dockerClusterTemplate.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + dockerClusterTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerClusterTemplate) + if ok { + dockerClusterTemplateV1Beta1.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + return nil + } + + dockerClusterTemplate, ok := obj.(*infrav1.DockerClusterTemplate) + if ok { + dockerClusterTemplate.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + return nil + } return nil } @@ -146,7 +167,7 @@ func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav // patchKubeadmControlPlaneTemplate patches the ControlPlaneTemplate. 
// It sets the RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided. // NOTE: RolloutStrategy.RollingUpdate.MaxSurge patch is not required for any special reason, it is used for testing the patch machinery itself. -func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlplanev1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchKubeadmControlPlaneTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // 1) Patch RolloutStrategy RollingUpdate MaxSurge with the value from the Cluster Topology variable. @@ -162,18 +183,35 @@ func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlp // This has to be converted to IntOrString type. kubeadmControlPlaneMaxSurgeIntOrString := intstrutil.Parse(kcpControlPlaneMaxSurge) log.Info(fmt.Sprintf("Setting KubeadmControlPlaneMaxSurge to %q", kubeadmControlPlaneMaxSurgeIntOrString.String())) - if kcpTemplate.Spec.Template.Spec.RolloutStrategy == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1.RolloutStrategy{} + + kcpTemplateV1Beta1, ok := obj.(*controlplanev1beta1.KubeadmControlPlaneTemplate) + if ok { + if kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy == nil { + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy = &controlplanev1beta1.RolloutStrategy{} + } + if kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1beta1.RollingUpdate{} + } + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString + return nil } - if kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} + + kcpTemplate, ok := 
obj.(*controlplanev1.KubeadmControlPlaneTemplate) + if ok { + if kcpTemplate.Spec.Template.Spec.RolloutStrategy == nil { + kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1.RolloutStrategy{} + } + if kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { + kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} + } + kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString } - kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString + return nil } // patchKubeadmConfigTemplate patches the ControlPlaneTemplate. -func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1.KubeadmConfigTemplate, _ map[string]apiextensionsv1.JSON) error { +func patchKubeadmConfigTemplate(_ context.Context, _ runtime.Object, _ map[string]apiextensionsv1.JSON) error { return nil } @@ -182,7 +220,7 @@ func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1.KubeadmConfigT // the DockerMachineTemplate belongs to. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as example of version aware patches. -func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infrav1.DockerMachineTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachineTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachineTemplate belongs to the ControlPlane, set the images using the ControlPlane version. 
@@ -203,7 +241,16 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting control plane custom image to %q", kindMapping.Image)) - dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + + dockerMachineTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerMachineTemplate) + if ok { + dockerMachineTemplateV1Beta1.Spec.Template.Spec.CustomImage = kindMapping.Image + } + + dockerMachineTemplate, ok := obj.(*infrav1.DockerMachineTemplate) + if ok { + dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + } // return early if we have successfully patched a control plane dockerMachineTemplate return nil } @@ -229,7 +276,16 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting MachineDeployment customImage to %q", kindMapping.Image)) - dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + + dockerMachineTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerMachineTemplate) + if ok { + dockerMachineTemplateV1Beta1.Spec.Template.Spec.CustomImage = kindMapping.Image + } + + dockerMachineTemplate, ok := obj.(*infrav1.DockerMachineTemplate) + if ok { + dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + } return nil } @@ -237,7 +293,7 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr // It sets the CustomImage to an image for the version in use by the MachinePool. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as example of version aware patches. 
-func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachinePoolTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachinePoolTemplate belongs to a MachinePool, set the images the MachinePool version. @@ -261,7 +317,17 @@ func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTempla kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting MachinePool customImage to %q", kindMapping.Image)) - dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + + dockerMachinePoolTemplateV1Beta1, ok := obj.(*infraexpv1beta1.DockerMachinePoolTemplate) + if ok { + dockerMachinePoolTemplateV1Beta1.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + } + + dockerMachinePoolTemplate, ok := obj.(*infraexpv1.DockerMachinePoolTemplate) + if ok { + dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + } + return nil } From d57afa5488b8e0a8a1e7e3b048472201a3eadd9c Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 11:10:05 +0900 Subject: [PATCH 16/31] fix failureDomains and empty metadata Signed-off-by: sivchari --- test/e2e/data/infrastructure-docker/main/bases/mp.yaml | 10 +++++----- .../cluster-autoscaler.yaml | 1 - .../cluster-runtimesdk.yaml | 2 +- test/extension/handlers/topologymutation/handler.go | 2 ++ 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index ddf4a8666329..e9bfa1b5b3f1 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -21,11 +21,11 @@ spec: name: "${CLUSTER_NAME}-dmp-0" version: "${KUBERNETES_VERSION}" 
failureDomains: - - name: fd4 - - name: fd5 - - name: fd6 - - name: fd7 - - name: fd8 + - fd4 + - fd5 + - fd6 + - fd7 + - fd8 --- # DockerMachinePool using default values referenced by the MachinePool apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml index 70093ab87ddd..4920b2622eb8 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml @@ -17,7 +17,6 @@ spec: name: "quick-start" version: "${KUBERNETES_VERSION}" controlPlane: - metadata: {} nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 3e9182416c7f..76566082d3c7 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -38,7 +38,7 @@ spec: minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - name: fd4 + - fd4 variables: - name: kubeadmControlPlaneMaxSurge value: "1" diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index d3a877c18537..111a5ca4193b 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -77,6 +77,8 @@ func NewExtensionHandlers() *ExtensionHandlers { bootstrapv1.GroupVersion, bootstrapv1beta1.GroupVersion, infraexpv1beta1.GroupVersion, + 
infraexpv1.GroupVersion, + controlplanev1beta1.GroupVersion, controlplanev1.GroupVersion, ), } From f02ff50cc7705bfc2ed97b6d513a75ec201aa19c Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 12:43:28 +0900 Subject: [PATCH 17/31] fix extraArgs format Signed-off-by: sivchari --- .../docker/templates/clusterclass-quick-start.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml index 1529b3608b2e..338f197503f0 100644 --- a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml +++ b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml @@ -206,9 +206,10 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" value: - admission-control-config-file: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: From b8faa6794070ad2201598ec29039f20f8a416291 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 12:43:35 +0900 Subject: [PATCH 18/31] delete empty metadata Signed-off-by: sivchari --- .../cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml | 1 - .../docker/templates/cluster-template-development-mp.yaml | 1 - .../docker/templates/cluster-template-development.yaml | 1 - 3 files changed, 3 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 76566082d3c7..7ce77c5a5589 100644 --- 
a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -18,7 +18,6 @@ spec: namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: - metadata: {} nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: diff --git a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml index f1b1cd74cbb5..837c8e02dc9b 100644 --- a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml @@ -13,7 +13,6 @@ spec: topology: class: quick-start controlPlane: - metadata: {} replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: - name: imageRepository diff --git a/test/infrastructure/docker/templates/cluster-template-development.yaml b/test/infrastructure/docker/templates/cluster-template-development.yaml index 648c7dbacd65..c40badf99250 100644 --- a/test/infrastructure/docker/templates/cluster-template-development.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development.yaml @@ -13,7 +13,6 @@ spec: topology: class: quick-start controlPlane: - metadata: {} replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: - name: imageRepository From 882d52a9d25f941bb1c67797c1bce7f2627bdcb7 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 14:10:53 +0900 Subject: [PATCH 19/31] delete namespace Signed-off-by: sivchari --- .../cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 7ce77c5a5589..d7cf33d15257 
100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -15,7 +15,6 @@ spec: topology: classRef: name: "quick-start-runtimesdk" - namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: nodeDeletionTimeoutSeconds: 30 From 6d8ee82724f13fb7f6b516fe9a914f7fcf7c475d Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 18:35:30 +0900 Subject: [PATCH 20/31] quote kubeletLogLevel Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index a37779fbb38a..c8358b8c7815 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -461,13 +461,13 @@ spec: valueFrom: template: | name: v - value: {{ .kubeletLogLevel }} + value: "{{ .kubeletLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | name: v - value: {{ .kubeletLogLevel }} + value: "{{ .kubeletLogLevel }}" - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." 
@@ -485,7 +485,7 @@ spec: valueFrom: template: | name: v - value: {{ .kubeletLogLevel }} + value: "{{ .kubeletLogLevel }}" - selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate @@ -499,7 +499,7 @@ spec: valueFrom: template: | name: v - value: {{ .kubeletLogLevel }} + value: "{{ .kubeletLogLevel }}" --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate From bcdaac7e6956ab8f208101afd339a46293b61ef1 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 20:09:52 +0900 Subject: [PATCH 21/31] fix parse error Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index c8358b8c7815..f44cd2f66827 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -433,19 +433,19 @@ spec: valueFrom: template: | name: v - value: {{ .kubeControlPlaneLogLevel }} + value: "{{ .kubeControlPlaneLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/-" valueFrom: template: | name: v - value: {{ .kubeControlPlaneLogLevel }} + value: "{{ .kubeControlPlaneLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/-" valueFrom: template: | name: v - value: {{ .kubeControlPlaneLogLevel }} + value: "{{ .kubeControlPlaneLogLevel }}" - name: controlPlaneKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`." 
From ee28a54e7a4d629645b5d11298f517c1e3adf7ab Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 22:22:18 +0900 Subject: [PATCH 22/31] use replace instead of add Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index f44cd2f66827..ef1960815ee2 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -428,20 +428,20 @@ spec: matchResources: controlPlane: true jsonPatches: - - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + - op: replace + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/0" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/-" + - op: replace + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/0" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/-" + - op: replace + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/0" valueFrom: template: | name: v @@ -456,14 +456,14 @@ spec: matchResources: controlPlane: true jsonPatches: - - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + - op: replace + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" valueFrom: template: | name: v value: "{{ .kubeletLogLevel }}" - - op: add - path: 
"/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" + - op: replace + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/0" valueFrom: template: | name: v @@ -480,8 +480,8 @@ spec: names: - '*-worker' jsonPatches: - - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + - op: replace + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" valueFrom: template: | name: v @@ -494,8 +494,8 @@ spec: names: - '*-worker' jsonPatches: - - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + - op: replace + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" valueFrom: template: | name: v From 9ffdbad25cc4839f16a13a5b191e3e040e13d443 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 22:38:58 +0900 Subject: [PATCH 23/31] fix review findings Signed-off-by: sivchari --- .../main/bases/cluster-with-topology.yaml | 1 + .../main/cluster-template-kcp-pre-drain/cluster.yaml | 1 - .../cluster.yaml | 1 - .../cluster.yaml | 1 - .../cluster-topology-class.yaml | 3 +++ .../cluster-template-topology-kcp-only/kustomization.yaml | 6 ++++++ .../cluster-runtimesdk.yaml | 1 + .../main/clusterclass-quick-start.yaml | 4 ---- test/extension/handlers/topologymutation/handler.go | 2 -- test/extension/handlers/topologymutation/handler_test.go | 2 +- .../testdata/clusterclass-quick-start-runtimesdk.yaml | 3 +-- .../docker/templates/clusterclass-in-memory.yaml | 2 -- .../docker/templates/clusterclass-quick-start.yaml | 2 -- 13 files changed, 13 insertions(+), 16 deletions(-) create mode 100644 test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 
b0918597338d..ddee49e92ec4 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -15,6 +15,7 @@ spec: topology: classRef: name: "quick-start" + namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml index 039ecd98df00..1358b7dfc6b5 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml @@ -7,7 +7,6 @@ spec: topology: classRef: name: quick-start - version: "${KUBERNETES_VERSION}" controlPlane: metadata: annotations: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml index 165eb77d021e..a04442b4e078 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml @@ -7,7 +7,6 @@ spec: topology: classRef: name: quick-start - version: "${KUBERNETES_VERSION}" variables: - name: ipv6Primary value: false diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml index 9eb4e37a403b..e4109076caf2 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml @@ 
-7,7 +7,6 @@ spec: topology: classRef: name: quick-start - version: "${KUBERNETES_VERSION}" variables: - name: ipv6Primary value: true diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml new file mode 100644 index 000000000000..efb625ec74d9 --- /dev/null +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml @@ -0,0 +1,3 @@ +- op: add + path: /spec/topology/classRef/name + value: "quick-start-kcp-only" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml index 089a3b71e831..f4a499e6ed8b 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/kustomization.yaml @@ -1,3 +1,9 @@ resources: - ../cluster-template-topology-no-workers +patches: +- path: cluster-topology-class.yaml + target: + group: cluster.x-k8s.io + version: v1beta1 + kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index d7cf33d15257..7ce77c5a5589 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -15,6 +15,7 @@ spec: topology: classRef: name: "quick-start-runtimesdk" + namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' version: "${KUBERNETES_VERSION}" controlPlane: nodeDeletionTimeoutSeconds: 30 diff --git 
a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index ef1960815ee2..6e77ff78ef95 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -19,7 +19,6 @@ spec: kind: DockerMachineTemplate name: quick-start-control-plane machineHealthCheck: - maxUnhealthy: 100% unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" @@ -48,9 +47,6 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate - machineHealthCheck: - maxUnhealthy: 100% - # We are intentionally not setting the 'unhealthyNodeConditions' here to test that the field is optional. machinePools: - class: default-worker template: diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index 111a5ca4193b..afe160c85448 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -76,8 +76,6 @@ func NewExtensionHandlers() *ExtensionHandlers { infrav1.GroupVersion, bootstrapv1.GroupVersion, bootstrapv1beta1.GroupVersion, - infraexpv1beta1.GroupVersion, - infraexpv1.GroupVersion, controlplanev1beta1.GroupVersion, controlplanev1.GroupVersion, ), diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go index 2f31fd1b3a47..a21866f7d422 100644 --- a/test/extension/handlers/topologymutation/handler_test.go +++ b/test/extension/handlers/topologymutation/handler_test.go @@ -349,7 +349,7 @@ func TestHandler_GeneratePatches(t *testing.T) { }, Items: []runtimehooksv1.GeneratePatchesResponseItem{ responseItem("1", `[ - {"op":"add","path":"/spec","value":{"template": {"spec":{"rolloutStrategy": {"rollingUpdate":{"maxSurge":3}}}}}} 
+{"op":"add","path":"/spec","value":{"template": {"spec":{"rolloutStrategy": {"rollingUpdate":{"maxSurge":3}}}}}} ]`), responseItem("2", `[ {"op":"add","path":"/spec/template/spec/customImage","value":"kindest/node:v1.23.0"} diff --git a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml index ec3c6b398e80..5211ddbe596a 100644 --- a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml +++ b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml @@ -66,8 +66,7 @@ spec: template: spec: failureDomains: - - - name: fd1 + - name: fd1 controlPlane: true - name: fd2 controlPlane: true diff --git a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml index f56e52fb4040..f960a620b3be 100644 --- a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml +++ b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml @@ -120,7 +120,6 @@ spec: kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: - maxUnhealthy: 100% unhealthyNodeConditions: - type: Ready status: Unknown @@ -148,7 +147,6 @@ spec: kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: - maxUnhealthy: 100% unhealthyNodeConditions: - type: Ready status: Unknown diff --git a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml index 338f197503f0..e113391a6c87 100644 --- a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml +++ b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml @@ -14,7 +14,6 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane machineHealthCheck: - maxUnhealthy: 100% 
unhealthyNodeConditions: - type: Ready status: Unknown @@ -42,7 +41,6 @@ spec: kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: - maxUnhealthy: 100% unhealthyNodeConditions: - type: Ready status: Unknown From a45a07de3a9ef330de64537af64a2e4ec32e5756 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 22:41:52 +0900 Subject: [PATCH 24/31] use add op and delete extraArgs from clusterConfiguration Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 41 +++++++------------ 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 6e77ff78ef95..94c0fa0b9e85 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -424,20 +424,20 @@ spec: matchResources: controlPlane: true jsonPatches: - - op: replace - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/0" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - - op: replace - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/0" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/-" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - - op: replace - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/0" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/-" valueFrom: template: | name: v @@ -452,14 +452,14 @@ spec: matchResources: controlPlane: true jsonPatches: - - op: replace - path: 
"/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | name: v value: "{{ .kubeletLogLevel }}" - - op: replace - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/0" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | name: v @@ -476,8 +476,8 @@ spec: names: - '*-worker' jsonPatches: - - op: replace - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" + - op: add + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | name: v @@ -490,8 +490,8 @@ spec: names: - '*-worker' jsonPatches: - - op: replace - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/0" + - op: add + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | name: v @@ -558,19 +558,6 @@ spec: nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: - # extraArgs must be non-empty for control plane components to enable patches from ClusterClass to work. - controllerManager: - extraArgs: - - name: v - value: "0" - scheduler: - extraArgs: - - name: v - value: "0" - apiServer: - extraArgs: - - name: v - value: "0" # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. 
certSANs: [localhost, host.docker.internal, "::", "::1", "127.0.0.1", "0.0.0.0"] initConfiguration: From 34ed120a5258cb113d34a302b6d2afa2b4d41369 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 22:45:59 +0900 Subject: [PATCH 25/31] revert label and value Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 94c0fa0b9e85..d60fad20166d 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -74,6 +74,12 @@ spec: default: kindest - name: etcdImageTag required: true + # This metadata has just been added to verify that we can set metadata. + metadata: + labels: + testLabelKey: testLabelValue + annotations: + testAnnotationKey: testAnnotationValue schema: openAPIV3Schema: type: string From e67ba75eecafca9b2b170c6dda672eb68305d548 Mon Sep 17 00:00:00 2001 From: sivchari Date: Wed, 9 Jul 2025 23:09:00 +0900 Subject: [PATCH 26/31] fix review findings Signed-off-by: sivchari --- .../infrastructure-docker/main/bases/cluster-with-topology.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index ddee49e92ec4..4ae1af9f9edf 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -15,7 +15,7 @@ spec: topology: classRef: name: "quick-start" - namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + namespace: '${CLUSTER_CLASS_NAMESPACE:-${NAMESPACE}}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: From a56fe63f7173d122cb55f689e3f291124bae8adb Mon Sep 17 00:00:00 2001 
From: sivchari Date: Thu, 10 Jul 2025 10:58:51 +0900 Subject: [PATCH 27/31] fix review findings Signed-off-by: sivchari --- .../cluster-runtimesdk.yaml | 2 +- .../main/clusterclass-quick-start.yaml | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index 7ce77c5a5589..878c905593fa 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -15,7 +15,7 @@ spec: topology: classRef: name: "quick-start-runtimesdk" - namespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + namespace: '${CLUSTER_CLASS_NAMESPACE:-${NAMESPACE}}' version: "${KUBERNETES_VERSION}" controlPlane: nodeDeletionTimeoutSeconds: 30 diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index d60fad20166d..1a6c22ffb8c2 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -47,6 +47,7 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate + # We are intentionally not setting the 'unhealthyConditions' here to test that the field is optional. 
machinePools: - class: default-worker template: @@ -383,10 +384,13 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer" + value: {} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" value: - name: admission-control-config-file - value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + - name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: From bd6079eae91eb5f716bff43213e4ed03668b4ef8 Mon Sep 17 00:00:00 2001 From: sivchari Date: Thu, 10 Jul 2025 14:20:57 +0900 Subject: [PATCH 28/31] add json patches Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 1a6c22ffb8c2..483557f7ba84 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -435,19 +435,25 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/-" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager" + value: {} + - op: add + path: 
"/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs" valueFrom: template: | name: v value: "{{ .kubeControlPlaneLogLevel }}" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/-" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler" + value: {} + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs" valueFrom: template: | name: v From c89d73b83fda4cd4f1ecfe53957f731ec67214e6 Mon Sep 17 00:00:00 2001 From: sivchari Date: Thu, 10 Jul 2025 16:58:53 +0900 Subject: [PATCH 29/31] convert to list from map Signed-off-by: sivchari --- .../main/clusterclass-quick-start.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 483557f7ba84..6cfb7dd1d99e 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -438,8 +438,8 @@ spec: path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" valueFrom: template: | - name: v - value: "{{ .kubeControlPlaneLogLevel }}" + - name: v + value: "{{ .kubeControlPlaneLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager" value: {} @@ -447,8 +447,8 @@ spec: path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs" valueFrom: template: | - name: v - value: "{{ .kubeControlPlaneLogLevel }}" + - name: v + value: "{{ .kubeControlPlaneLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler" value: {} @@ -456,8 +456,8 @@ spec: path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs" valueFrom: template: | - name: v - 
value: "{{ .kubeControlPlaneLogLevel }}" + - name: v + value: "{{ .kubeControlPlaneLogLevel }}" - name: controlPlaneKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`." @@ -472,14 +472,14 @@ spec: path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - name: v - value: "{{ .kubeletLogLevel }}" + - name: v + value: "{{ .kubeletLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - name: v - value: "{{ .kubeletLogLevel }}" + - name: v + value: "{{ .kubeletLogLevel }}" - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." @@ -510,8 +510,8 @@ spec: path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - name: v - value: "{{ .kubeletLogLevel }}" + - name: v + value: "{{ .kubeletLogLevel }}" --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate From 3ab5bd7785eea23593dc7e8930e75d91852c4cd1 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Thu, 10 Jul 2025 11:07:07 +0200 Subject: [PATCH 30/31] Final fixup --- .../main/clusterclass-quick-start.yaml | 15 ++++++--------- .../templates/clusterclass-quick-start.yaml | 8 ++++---- 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 6cfb7dd1d99e..92742205c4df 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -383,9 +383,6 @@ spec: matchResources: controlPlane: true 
jsonPatches: - - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer" - value: {} - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" value: @@ -423,7 +420,6 @@ spec: runtimeClasses: [] namespaces: [kube-system] path: /etc/kubernetes/kube-apiserver-admission-pss.yaml - enabledIf: '{{ semverCompare ">= v1.24-0" .builtin.controlPlane.version }}' - name: controlPlaneLogLevel enabledIf: "{{ if .kubeControlPlaneLogLevel }}true{{end}}" description: "Configures control plane components and kubelet to run at the log level specified in the variable `kubeControlPlaneLogLevel`." @@ -435,10 +431,10 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: template: | - - name: v + name: v value: "{{ .kubeControlPlaneLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager" @@ -472,13 +468,13 @@ spec: path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - - name: v + name: v value: "{{ .kubeletLogLevel }}" - op: add path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - - name: v + name: v value: "{{ .kubeletLogLevel }}" - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" @@ -510,7 +506,7 @@ spec: path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: template: | - - name: v + name: v value: "{{ .kubeletLogLevel }}" --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 @@ -574,6 +570,7 @@ spec: nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: + apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports 
are proxied. certSANs: [localhost, host.docker.internal, "::", "::1", "127.0.0.1", "0.0.0.0"] initConfiguration: diff --git a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml index e113391a6c87..6610c9ed6843 100644 --- a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml +++ b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml @@ -204,10 +204,10 @@ spec: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" value: - name: admission-control-config-file - value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + - name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: @@ -226,7 +226,7 @@ spec: plugins: - name: PodSecurity configuration: - apiVersion: pod-security.admission.config.k8s.io/v1{{ if semverCompare "< v1.25" .builtin.controlPlane.version }}beta1{{ end }} + apiVersion: pod-security.admission.config.k8s.io/v1{{ if semverCompare "< v1.25-0" .builtin.controlPlane.version }}beta1{{ end }} kind: PodSecurityConfiguration defaults: enforce: "{{ .podSecurityStandard.enforce }}" From bd8948f85ee815687e0d43f8ef796edda7ab1792 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Thu, 10 Jul 2025 11:19:36 +0200 Subject: [PATCH 31/31] Fixup templates --- .../docker/templates/cluster-template-development-mp.yaml | 3 ++- .../docker/templates/cluster-template-development.yaml | 3 ++- .../docker/templates/cluster-template-in-memory.yaml | 5 +++-- .../docker/templates/clusterclass-in-memory.yaml | 2 -- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml 
b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml index 837c8e02dc9b..5ad176fa12db 100644 --- a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml @@ -11,7 +11,8 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: quick-start + classRef: + name: quick-start controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: diff --git a/test/infrastructure/docker/templates/cluster-template-development.yaml b/test/infrastructure/docker/templates/cluster-template-development.yaml index c40badf99250..ef00269c12e0 100644 --- a/test/infrastructure/docker/templates/cluster-template-development.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development.yaml @@ -11,7 +11,8 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: quick-start + classRef: + name: quick-start controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: diff --git a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml index 28e62ba6ff5c..c2437ccdf811 100644 --- a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml +++ b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml @@ -11,8 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: in-memory - classNamespace: ${NAMESPACE} + classRef: + name: in-memory + namespace: ${NAMESPACE} version: ${KUBERNETES_VERSION} controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} diff --git a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml index f960a620b3be..2ceab8339a58 100644 --- 
a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml +++ b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml @@ -108,8 +108,6 @@ metadata: name: in-memory spec: controlPlane: - metadata: - annotations: machineInfrastructure: templateRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2