diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml index f6e60681e1d5..5dcb619935e8 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-kcp.yaml @@ -1,32 +1,32 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- # Cluster object with # - Reference to the KubeadmControlPlane object # - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -40,16 +40,16 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiGroup: controlplane.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-control-plane" @@ -65,7 +65,7 @@ spec: # KubeadmControlPlane referenced by the Cluster object with # - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" labels: @@ -75,15 +75,11 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
version: "${KUBERNETES_VERSION}" diff --git a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml index 28ed20208e86..4ae1af9f9edf 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/cluster-with-topology.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -13,8 +13,9 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start" - classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + classRef: + name: "quick-start" + namespace: '${CLUSTER_CLASS_NAMESPACE:-${NAMESPACE}}' version: "${KUBERNETES_VERSION}" controlPlane: metadata: @@ -26,8 +27,8 @@ spec: Cluster.topology.controlPlane.annotation: "Cluster.topology.controlPlane.annotationValue" # Note: this annotation is propagated to Nodes. Cluster.topology.controlPlane.annotation.node.cluster.x-k8s.io: "Cluster.topology.controlPlane.nodeAnnotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: @@ -42,8 +43,8 @@ spec: Cluster.topology.machineDeployment.annotation: "Cluster.topology.machineDeployment.annotationValue" # Note: this annotation is propagated to Nodes. Shortened due to name length limitations Cluster.topology.md.annotation.node.cluster.x-k8s.io: "Cluster.topology.machineDeployment.nodeAnnotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomain: fd4 @@ -62,12 +63,12 @@ spec: Cluster.topology.machinePool.label.node.cluster.x-k8s.io: "Cluster.topology.machinePool.nodeLabelValue" annotations: Cluster.topology.machinePool.annotation: "Cluster.topology.machinePool.annotationValue" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - fd4 + - fd4 variables: # We set an empty value to use the default tag kubeadm init is using. 
- name: etcdImageTag diff --git a/test/e2e/data/infrastructure-docker/main/bases/crs.yaml b/test/e2e/data/infrastructure-docker/main/bases/crs.yaml index b1b61237dc62..c0880053b4d2 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/crs.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/crs.yaml @@ -10,7 +10,7 @@ binaryData: --- # ClusterResourceSet object with # a selector that targets all the Cluster with label cni=${CLUSTER_NAME}-crs-0 -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: name: "${CLUSTER_NAME}-crs-0" diff --git a/test/e2e/data/infrastructure-docker/main/bases/md.yaml b/test/e2e/data/infrastructure-docker/main/bases/md.yaml index 5d42a2cf5e6a..3de78c3d9e84 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/md.yaml @@ -1,7 +1,7 @@ --- # DockerMachineTemplate referenced by the MachineDeployment and with # - extraMounts for the docker sock, thus allowing self-hosting test -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -15,7 +15,7 @@ spec: preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- # KubeadmConfigTemplate referenced by the MachineDeployment -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -23,10 +23,9 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- # MachineDeployment object -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-md-0" @@ -42,10 +41,10 @@ spec: bootstrap: configRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfigTemplate infrastructureRef: name: "${CLUSTER_NAME}-md-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachineTemplate failureDomain: fd4 diff --git a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml index 11ca197c8acd..e9bfa1b5b3f1 100644 --- a/test/e2e/data/infrastructure-docker/main/bases/mp.yaml +++ b/test/e2e/data/infrastructure-docker/main/bases/mp.yaml @@ -1,6 +1,6 @@ --- # MachinePool which references the DockerMachinePool and KubeadmConfigTemplate below -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachinePool metadata: name: "${CLUSTER_NAME}-mp-0" @@ -11,12 +11,12 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfig name: "${CLUSTER_NAME}-mp-0-config" clusterName: '${CLUSTER_NAME}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachinePool name: "${CLUSTER_NAME}-dmp-0" version: "${KUBERNETES_VERSION}" @@ -28,7 +28,7 @@ spec: - fd8 --- # DockerMachinePool using default values referenced by the MachinePool -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePool metadata: name: "${CLUSTER_NAME}-dmp-0" @@ -38,10 +38,9 @@ spec: preLoadImages: 
${DOCKER_PRELOAD_IMAGES:-[]} --- # KubeadmConfigTemplate referenced by the MachinePool -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig metadata: name: "${CLUSTER_NAME}-mp-0-config" spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml index 42cde258b69e..ab9468837953 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ignition/ignition.yaml @@ -1,5 +1,5 @@ kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: @@ -10,21 +10,29 @@ spec: # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" joinConfiguration: nodeRegistration: # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -47,7 +55,11 @@ spec: # We have to set the criSocket to containerd as kubeadm defaults to docker runtime if both containerd and docker sockets are found criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' - fail-swap-on: "false" - cgroup-root: "/kubelet" - runtime-cgroups: "/system.slice/containerd.service" + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: fail-swap-on + value: "false" + - name: cgroup-root + value: "/kubelet" + - name: runtime-cgroups + value: "/system.slice/containerd.service" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml index 4c5357ccb844..b867758b7a64 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: 
cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" @@ -11,8 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: in-memory - classNamespace: ${NAMESPACE} + classRef: + name: in-memory + namespace: ${NAMESPACE} version: ${KUBERNETES_VERSION} controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml index 0401174fbd89..1f977967b45f 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/cluster-ipv6.yaml @@ -1,5 +1,5 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -10,7 +10,7 @@ spec: pods: cidrBlocks: ['${DOCKER_POD_IPV6_CIDRS}'] --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml index 9315c5472381..d5849afab52a 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/kcp-ipv6.yaml @@ -1,6 +1,6 @@ --- kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: @@ -15,8 +15,10 @@ spec: bindPort: 6443 nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" joinConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml index 715888fdf3d6..c5d1e0c24828 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-ipv6/md-ipv6.yaml @@ -1,5 +1,5 @@ --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: "${CLUSTER_NAME}-md-0" @@ -9,8 +9,10 @@ spec: initConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" joinConfiguration: nodeRegistration: kubeletExtraArgs: - node-ip: "::" + - name: node-ip + value: "::" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml index 2b1224d2710e..b8b84ed2238e 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-adoption/step1/cluster-with-cp0.yaml @@ -1,6 +1,6 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' @@ -8,7 +8,7 @@ metadata: # Cluster object with # - No reference to 
the KubeadmControlPlane object # - the label cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -22,19 +22,19 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' --- # DockerMachine referenced by the Machine cp0 -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachine metadata: name: "${CLUSTER_NAME}-control-plane-0" spec: {} --- # KubeadmConfig referenced by the Machine cp0 -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfig metadata: name: "${CLUSTER_NAME}-control-plane-0" @@ -42,13 +42,9 @@ spec: clusterConfiguration: apiServer: certSANs: [localhost, 127.0.0.1] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- # cp0 Machine -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Machine metadata: name: "${CLUSTER_NAME}-control-plane-0" @@ -60,9 +56,9 @@ spec: bootstrap: configRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiGroup: bootstrap.cluster.x-k8s.io kind: KubeadmConfig infrastructureRef: name: "${ CLUSTER_NAME }-control-plane-0" - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerMachine diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml index 805ca5e732f5..1358b7dfc6b5 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-pre-drain/cluster.yaml @@ -1,12 +1,13 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start controlPlane: metadata: annotations: - pre-drain.delete.hook.machine.cluster.x-k8s.io/kcp-ready-check: "true" \ No newline at end of file + pre-drain.delete.hook.machine.cluster.x-k8s.io/kcp-ready-check: "true" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml index 40a6baf85385..e66054c69622 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/cluster-with-kcp.yaml @@ -1,6 +1,6 @@ --- # DockerCluster object referenced by the Cluster object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerCluster metadata: name: '${CLUSTER_NAME}' @@ -8,7 +8,7 @@ metadata: # Cluster object with # - Reference to the KubeadmControlPlane object # - the label 
cni=${CLUSTER_NAME}-crs-0, so the cluster can be selected by the ClusterResourceSet. -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -22,16 +22,16 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io kind: DockerCluster name: '${CLUSTER_NAME}' controlPlaneRef: kind: KubeadmControlPlane - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiGroup: controlplane.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" --- # DockerMachineTemplate object referenced by the KubeadmControlPlane object -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: "${CLUSTER_NAME}-control-plane" @@ -42,7 +42,7 @@ spec: --- # KubeadmControlPlane referenced by the Cluster kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: "${CLUSTER_NAME}-control-plane" spec: @@ -50,17 +50,13 @@ spec: machineTemplate: infrastructureRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiGroup: infrastructure.cluster.x-k8s.io name: "${CLUSTER_NAME}-control-plane" kubeadmConfigSpec: clusterConfiguration: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
files: - path: /wait-signal.sh content: | diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml index 39187cec0a40..653b44343125 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-remediation/mhc.yaml @@ -1,9 +1,9 @@ --- # MachineHealthCheck object with # - a selector that targets all the machines with label cluster.x-k8s.io/control-plane="" and the mhc-test: "fail" (the label is used to trigger remediation in a controlled way - by adding CP under MHC control intentionally -) -# - nodeStartupTimeout: 30s (to force remediation on nodes still provisioning) -# - unhealthyConditions triggering remediation after 10s the e2e.remediation.condition condition is set to false (to force remediation on nodes already provisioned) -apiVersion: cluster.x-k8s.io/v1beta1 +# - nodeStartupTimeoutSeconds: 30 (to force remediation on nodes still provisioning) +# - unhealthyNodeConditions triggering remediation 10s after the e2e.remediation.condition condition is set to false (to force remediation on nodes already provisioned) +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-0" @@ -14,8 +14,8 @@ spec: matchLabels: cluster.x-k8s.io/control-plane: "" mhc-test: "fail" - nodeStartupTimeout: 30s - unhealthyConditions: + nodeStartupTimeoutSeconds: 30 + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" - timeout: 10s + timeoutSeconds: 10 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml index 420dc3c07245..9985506ce769 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-kcp-scale-in/kustomization.yaml @@ -6,5 +6,5 @@ patches: - path: kcp-scale-in-variable.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml index 13968556b60a..d317bf328a99 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/md.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml index c10722590945..a1209cf6270b 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-md-remediation/mhc.yaml @@ -1,8 +1,8 @@ --- # MachineHealthCheck object with # - a selector that targets all the machines with label e2e.remediation.label="" -# - unhealthyConditions triggering remediation after 10s the condition is set -apiVersion: cluster.x-k8s.io/v1beta1 +# - unhealthyNodeConditions triggering remediation 10s after the condition is set +apiVersion: cluster.x-k8s.io/v1beta2 kind: 
MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-0" @@ -12,7 +12,7 @@ spec: selector: matchLabels: e2e.remediation.label: "" - unhealthyConditions: + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" - timeout: 10s + timeoutSeconds: 10 diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml index 36466c480d94..4920b2622eb8 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-autoscaler/cluster-autoscaler.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -13,17 +13,17 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start" + classRef: + name: "quick-start" version: "${KUBERNETES_VERSION}" controlPlane: - metadata: {} - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: - class: "default-worker" name: "md-0" - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 failureDomain: fd4 metadata: annotations: @@ -32,7 +32,7 @@ spec: machinePools: - class: "default-worker" name: "mp-0" - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 failureDomains: - fd4 variables: diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml index 4becb1d1d900..a04442b4e078 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv4-primary/cluster.yaml @@ -1,11 +1,12 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start variables: - name: ipv6Primary value: false diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml index d43dc35b4c8a..e4109076caf2 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-dualstack-ipv6-primary/cluster.yaml @@ -1,11 +1,12 @@ --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' spec: topology: - class: quick-start + classRef: + name: quick-start variables: - name: ipv6Primary value: true diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml index c9bbfd605299..efb625ec74d9 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-kcp-only/cluster-topology-class.yaml @@ -1,3 +1,3 @@ - op: add - path: 
/spec/topology/class + path: /spec/topology/classRef/name value: "quick-start-kcp-only" diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml index e2b7e414c57e..cb1ffb120cb6 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-topology-no-workers/kustomization.yaml @@ -6,10 +6,10 @@ patches: - path: disable-control-plane-taint-variable.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster - path: remove-topology-workers.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: Cluster diff --git a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml index c27d9bb62220..878c905593fa 100644 --- a/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/cluster-template-upgrades-runtimesdk/cluster-runtimesdk.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' @@ -13,31 +13,31 @@ spec: cidrBlocks: ['${DOCKER_POD_CIDRS}'] serviceDomain: '${DOCKER_SERVICE_DOMAIN}' topology: - class: "quick-start-runtimesdk" - classNamespace: '${CLUSTER_CLASS_NAMESPACE:-""}' + classRef: + name: "quick-start-runtimesdk" + namespace: '${CLUSTER_CLASS_NAMESPACE:-${NAMESPACE}}' version: "${KUBERNETES_VERSION}" controlPlane: - metadata: {} - nodeDeletionTimeout: "30s" + nodeDeletionTimeoutSeconds: 30 replicas: ${CONTROL_PLANE_MACHINE_COUNT} workers: machineDeployments: - class: "default-worker" name: "md-0" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomain: fd4 machinePools: - class: "default-worker" name: "mp-0" - nodeDeletionTimeout: "30s" - nodeVolumeDetachTimeout: "5m" + nodeDeletionTimeoutSeconds: 30 + nodeVolumeDetachTimeoutSeconds: 300 minReadySeconds: 5 replicas: ${WORKER_MACHINE_COUNT} failureDomains: - - fd4 + - fd4 variables: - name: kubeadmControlPlaneMaxSurge value: "1" diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml index fbdbf653040c..abd2743258be 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate metadata: name: in-memory-cluster @@ -8,7 +8,7 @@ spec: backend: inMemory: {} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: in-memory-control-plane @@ -27,14 +27,16 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% 
joinConfiguration: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-control-plane @@ -60,7 +62,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-default-worker-machinetemplate @@ -86,7 +88,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: in-memory-default-worker-bootstraptemplate @@ -97,36 +99,35 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: in-memory spec: controlPlane: - metadata: - annotations: machineInfrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-control-plane - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: unhealthyConditions: - type: Ready - status: Unknown - timeout: 300s + status: "Unknown" + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate name: in-memory-cluster workers: @@ -134,25 +135,25 @@ spec: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: unhealthyConditions: - type: Ready - status: Unknown - timeout: 300s + status: "Unknown" + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 patches: - name: test-patch external: - generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"scale"} + generatePatchesExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"scale"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"scale"} diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml index 90dd95d33b84..079c5d9f5c1d 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml +++ 
b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-kcp-only/kustomization.yaml @@ -5,15 +5,15 @@ patches: - path: remove-workers.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass - path: clusterclass-name.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass - path: remove-worker-patches.yaml target: group: cluster.x-k8s.io - version: v1beta1 + version: v1beta2 kind: ClusterClass diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml index 13f986f2d33d..35bcb59eb66d 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start-runtimesdk.yaml @@ -1,26 +1,26 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start-runtimesdk spec: controlPlane: - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster - infrastructureNamingStrategy: + namingStrategy: template: "{{ .cluster.name }}-infra-{{ .random }}" workers: machineDeployments: @@ -29,13 +29,13 @@ spec: template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}" template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -44,23 +44,23 @@ spec: template: "{{ .cluster.name }}-mp-{{ .machinePool.topologyName }}-{{ .random }}" template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: - name: test-patch external: - generateExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} - validateExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + generatePatchesExtension: generate-patches.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} + validateTopologyExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- -apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -68,43 +68,39 @@ spec: template: spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: template: spec: machineTemplate: - nodeDrainTimeout: 1s + nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: apiServer: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] - initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. - joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -116,7 +112,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -128,7 +124,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -141,7 +137,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate @@ -149,4 +145,3 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
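Taken together, the template hunks above apply a small set of recurring v1beta1 -> v1beta2 conversions. The following hand-written sketch condenses them side by side for reference; the resource and field values are illustrative examples distilled from this diff, not copied from any single file in it:

# failureDomains: a map keyed by name (v1beta1) becomes a list of named entries (v1beta2)
spec:
  failureDomains:
    fd1:
      controlPlane: true
# ...becomes:
spec:
  failureDomains:
  - name: fd1
    controlPlane: true

# Other recurring conversions seen throughout the hunks above:
#   object references:  apiVersion: bootstrap.cluster.x-k8s.io/v1beta1  ->  apiGroup: bootstrap.cluster.x-k8s.io
#   topology class:     class: quick-start / classNamespace: <ns>       ->  classRef: {name: quick-start, namespace: <ns>}
#   durations:          nodeDeletionTimeout: "30s"                      ->  nodeDeletionTimeoutSeconds: 30
#   MHC fields:         unhealthyConditions / timeout: 10s              ->  unhealthyNodeConditions / timeoutSeconds: 10
#   ClusterClass refs:  ref:                                            ->  templateRef:
#   kubelet args:       kubeletExtraArgs: {cloud-provider: external}    ->  kubeletExtraArgs: [{name: cloud-provider, value: external}]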
diff --git a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml index 8dc1b002892f..92742205c4df 100644 --- a/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-docker/main/clusterclass-quick-start.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start @@ -9,24 +9,23 @@ spec: ClusterClass.controlPlane.label: "ClusterClass.controlPlane.labelValue" annotations: ClusterClass.controlPlane.annotation: "ClusterClass.controlPlane.annotationValue" - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 name: quick-start-control-plane machineHealthCheck: - maxUnhealthy: 100% - unhealthyConditions: + unhealthyNodeConditions: - type: e2e.remediation.condition status: "False" - timeout: 20s + timeoutSeconds: 20 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -39,18 +38,16 @@ spec: annotations: ClusterClass.machineDeployment.annotation: "ClusterClass.machineDeployment.annotationValue" bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-md-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate - machineHealthCheck: - maxUnhealthy: 100% - # We are intentionally not setting the 'unhealthyConditions' here to test that the field is optional. + # We are intentionally not setting the 'unhealthyNodeConditions' here to test that the field is optional. machinePools: - class: default-worker template: metadata: labels: ClusterClass.machinePool.label: "ClusterClass.machinePool.labelValue" annotations: ClusterClass.machinePool.annotation: "ClusterClass.machinePool.annotationValue" bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-mp-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: @@ -166,7 +163,7 @@ spec: - name: lbImageRepository definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate matchResources: infrastructureCluster: true @@ -181,7 +178,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -197,7 +194,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -211,7 +208,7 @@ spec: description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: machineDeploymentClass: @@ -224,7 +221,7 @@ spec: template: | kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -235,7 +232,7 @@ spec: template: | kindest/node:{{ .builtin.controlPlane.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -254,7 +251,7 @@ spec: and reduce load to public registries. definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -273,7 +270,7 @@ spec: and reduce load to public registries. definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -289,7 +286,7 @@ spec: enabledIf: '{{ ne .kubeadmControlPlaneMaxSurge "" }}' definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -302,7 +299,7 @@ spec: enabledIf: "{{ not .controlPlaneTaint }}" definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -318,23 +315,27 @@ spec: description: "Configures kubelet to run with an external cloud provider for control plane nodes." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + value: + name: "cloud-provider" + value: "external" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" + value: + name: "cloud-provider" + value: "external" - name: machineDeploymentExternalCloudProvider enabledIf: "{{ .externalCloudProvider }}" description: "Configures kubelet to run with an external cloud provider for machineDeployment nodes." definitions: - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -342,10 +343,12 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + value: + name: "cloud-provider" + value: "external" - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machinePoolClass: @@ -353,14 +356,16 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/cloud-provider" - value: "external" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" + value: + name: "cloud-provider" + value: "external" - name: localEndpointIPv6 enabledIf: "{{ .ipv6Primary }}" description: "Configures KCP to use IPv6 for its localAPIEndpoint." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -373,14 +378,16 @@ spec: description: "Adds an admission configuration for PodSecurity to the kube-apiserver." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/admission-control-config-file" - value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" + value: + - name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: @@ -413,53 +420,68 @@ spec: runtimeClasses: [] namespaces: [kube-system] path: /etc/kubernetes/kube-apiserver-admission-pss.yaml - enabledIf: '{{ semverCompare ">= v1.24-0" .builtin.controlPlane.version }}' - name: controlPlaneLogLevel enabledIf: "{{ if .kubeControlPlaneLogLevel }}true{{end}}" description: "Configures control plane components and kubelet to run at the log level specified in the variable `kubeControlPlaneLogLevel`." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs/-" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + name: v + value: "{{ .kubeControlPlaneLogLevel }}" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager" + value: {} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/controllerManager/extraArgs" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + - name: v + value: "{{ .kubeControlPlaneLogLevel }}" + - op: add + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler" + value: {} - op: add - path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/scheduler/extraArgs" valueFrom: - variable: kubeControlPlaneLogLevel + template: | + - name: v + value: "{{ .kubeControlPlaneLogLevel }}" - name: controlPlaneKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures control plane kubelets to log at the level set in the variable `kubeletLogLevel`." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true jsonPatches: - op: add - path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: "{{ .kubeletLogLevel }}" - op: add - path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/kubeadmConfigSpec/initConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: "{{ .kubeletLogLevel }}" - name: workerKubeletLogLevel enabledIf: "{{ if .kubeletLogLevel }}true{{end}}" description: "Configures worker kubelets to log at the level set in the variable `kubeletLogLevel`." definitions: - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -467,11 +489,13 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: "{{ .kubeletLogLevel }}" - selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machinePoolClass: @@ -479,11 +503,13 @@ spec: - '*-worker' jsonPatches: - op: add - path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/v" + path: "/spec/template/spec/joinConfiguration/nodeRegistration/kubeletExtraArgs/-" valueFrom: - variable: kubeletLogLevel + template: | + name: v + value: "{{ .kubeletLogLevel }}" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -500,25 +526,25 @@ spec: InfrastructureClusterTemplate.template.annotation: "InfrastructureClusterTemplate.template.annotationValue" spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane labels: @@ -541,31 +567,24 @@ spec: ControlPlaneTemplate.machineTemplate.label: "ControlPlaneTemplate.machineTemplate.labelValue" annotations: ControlPlaneTemplate.machineTemplate.annotation: "ControlPlaneTemplate.machineTemplate.annotationValue" - nodeDrainTimeout: 1s + nodeDrainTimeoutSeconds: 1 kubeadmConfigSpec: clusterConfiguration: - # extraArgs must be non-empty for control plane components to enable patches from ClusterClass to work. - controllerManager: - extraArgs: - v: "0" - scheduler: - extraArgs: - v: "0" apiServer: - extraArgs: - v: "0" # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. 
certSANs: [localhost, host.docker.internal, "::", "::1", "127.0.0.1", "0.0.0.0"] initConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider patch to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -586,7 +605,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -607,7 +626,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -629,7 +648,7 @@ spec: hostPath: "/var/run/docker.sock" preLoadImages: ${DOCKER_PRELOAD_IMAGES:-[]} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-md-default-worker-bootstraptemplate @@ -648,9 +667,10 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. kubeletExtraArgs: # having a not empty kubeletExtraArgs is required for the externalCloudProvider to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-mp-default-worker-bootstraptemplate @@ -669,4 +689,5 @@ spec: joinConfiguration: nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
kubeletExtraArgs: # having a non-empty kubeletExtraArgs is required for the externalCloudProvider to work - eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index adbe34358ddd..afe160c85448 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -34,12 +34,16 @@ import ( ctrl "sigs.k8s.io/controller-runtime" bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" infraexpv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta2" "sigs.k8s.io/cluster-api/test/infrastructure/kind" ) @@ -58,15 +62,22 @@ type ExtensionHandlers struct { func NewExtensionHandlers() *ExtensionHandlers { scheme := runtime.NewScheme() _ = infrav1beta1.AddToScheme(scheme) + _ = infrav1.AddToScheme(scheme) _ = infraexpv1beta1.AddToScheme(scheme) + _ = infraexpv1.AddToScheme(scheme) _ = bootstrapv1beta1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) _ = controlplanev1beta1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) return &ExtensionHandlers{ // Add the apiGroups being handled to the decoder decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( infrav1beta1.GroupVersion, + infrav1.GroupVersion, + bootstrapv1.GroupVersion, bootstrapv1beta1.GroupVersion, controlplanev1beta1.GroupVersion, + controlplanev1.GroupVersion, ), } } @@ -86,19 +97,19 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error { log := ctrl.LoggerFrom(ctx) - switch obj := obj.(type) { - case *infrav1beta1.DockerClusterTemplate: + switch obj.(type) { + case *infrav1beta1.DockerClusterTemplate, *infrav1.DockerClusterTemplate: if err := patchDockerClusterTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerClusterTemplate") return errors.Wrap(err, "error patching DockerClusterTemplate") } - case *controlplanev1beta1.KubeadmControlPlaneTemplate: + case *controlplanev1beta1.KubeadmControlPlaneTemplate, *controlplanev1.KubeadmControlPlaneTemplate: err := patchKubeadmControlPlaneTemplate(ctx, obj, variables) if err != nil { log.Error(err, "Error patching KubeadmControlPlaneTemplate") return errors.Wrapf(err, "error patching KubeadmControlPlaneTemplate") } - case *bootstrapv1beta1.KubeadmConfigTemplate: + case *bootstrapv1beta1.KubeadmConfigTemplate, *bootstrapv1.KubeadmConfigTemplate: // NOTE: KubeadmConfigTemplate could be linked to one or more of the existing MachineDeployment class; // the
patchKubeadmConfigTemplate func shows how to implement patches only for KubeadmConfigTemplates // linked to a specific MachineDeployment class; another option is to check the holderRef value and call @@ -107,7 +118,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, "Error patching KubeadmConfigTemplate") return errors.Wrap(err, "error patching KubeadmConfigTemplate") } - case *infrav1beta1.DockerMachineTemplate: + case *infrav1beta1.DockerMachineTemplate, *infrav1.DockerMachineTemplate: // NOTE: DockerMachineTemplate could be linked to the ControlPlane or one or more of the existing MachineDeployment class; // the patchDockerMachineTemplate func shows how to implement different patches for DockerMachineTemplate // linked to ControlPlane or for DockerMachineTemplate linked to MachineDeployment classes; another option @@ -116,7 +127,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo log.Error(err, "Error patching DockerMachineTemplate") return errors.Wrap(err, "error patching DockerMachineTemplate") } - case *infraexpv1beta1.DockerMachinePoolTemplate: + case *infraexpv1beta1.DockerMachinePoolTemplate, *infraexpv1.DockerMachinePoolTemplate: if err := patchDockerMachinePoolTemplate(ctx, obj, variables); err != nil { log.Error(err, "Error patching DockerMachinePoolTemplate") return errors.Wrap(err, "error patching DockerMachinePoolTemplate") @@ -129,7 +140,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // patchDockerClusterTemplate patches the DockerClusterTemplate. // It sets the LoadBalancer.ImageRepository if the imageRepository variable is provided. // NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. -func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav1beta1.DockerClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerClusterTemplate(_ context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { imageRepo, err := topologymutation.GetStringVariable(templateVariables, "imageRepository") if err != nil { if topologymutation.IsNotFoundError(err) { @@ -138,7 +149,17 @@ func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav return errors.Wrap(err, "could not set DockerClusterTemplate loadBalancer imageRepository") } - dockerClusterTemplate.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + dockerClusterTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerClusterTemplate) + if ok { + dockerClusterTemplateV1Beta1.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + return nil + } + + dockerClusterTemplate, ok := obj.(*infrav1.DockerClusterTemplate) + if ok { + dockerClusterTemplate.Spec.Template.Spec.LoadBalancer.ImageRepository = imageRepo + return nil + } return nil } @@ -146,7 +167,7 @@ func patchDockerClusterTemplate(_ context.Context, dockerClusterTemplate *infrav // patchKubeadmControlPlaneTemplate patches the ControlPlaneTemplate. // It sets the RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided. // NOTE: RolloutStrategy.RollingUpdate.MaxSurge patch is not required for any special reason, it is used for testing the patch machinery itself. 
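
The rewrite of patchDockerClusterTemplate above is the pattern every patch helper in this file now follows: accept a runtime.Object and branch per API version, so one extension serves templates decoded from either v1beta1 or v1beta2. A compact sketch of the same idea as a type switch (a distillation, not the code from this diff, which uses sequential type assertions):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1"
	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2"
)

func setLoadBalancerImageRepository(obj runtime.Object, repo string) error {
	// Same mutation applied to whichever concrete template type was decoded.
	switch t := obj.(type) {
	case *infrav1beta1.DockerClusterTemplate:
		t.Spec.Template.Spec.LoadBalancer.ImageRepository = repo
	case *infrav1.DockerClusterTemplate:
		t.Spec.Template.Spec.LoadBalancer.ImageRepository = repo
	default:
		return fmt.Errorf("unexpected type %T", obj)
	}
	return nil
}

func main() {
	tpl := &infrav1.DockerClusterTemplate{}
	if err := setLoadBalancerImageRepository(tpl, "testImage"); err != nil {
		panic(err)
	}
	fmt.Println(tpl.Spec.Template.Spec.LoadBalancer.ImageRepository)
}

A default branch that returns an error, as sketched here, would also surface templates of an unexpected type instead of silently succeeding.
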
-func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlplanev1beta1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchKubeadmControlPlaneTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // 1) Patch RolloutStrategy RollingUpdate MaxSurge with the value from the Cluster Topology variable. @@ -162,18 +183,35 @@ func patchKubeadmControlPlaneTemplate(ctx context.Context, kcpTemplate *controlp // This has to be converted to IntOrString type. kubeadmControlPlaneMaxSurgeIntOrString := intstrutil.Parse(kcpControlPlaneMaxSurge) log.Info(fmt.Sprintf("Setting KubeadmControlPlaneMaxSurge to %q", kubeadmControlPlaneMaxSurgeIntOrString.String())) - if kcpTemplate.Spec.Template.Spec.RolloutStrategy == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1beta1.RolloutStrategy{} + + kcpTemplateV1Beta1, ok := obj.(*controlplanev1beta1.KubeadmControlPlaneTemplate) + if ok { + if kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy == nil { + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy = &controlplanev1beta1.RolloutStrategy{} + } + if kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1beta1.RollingUpdate{} + } + kcpTemplateV1Beta1.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString + return nil } - if kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { - kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1beta1.RollingUpdate{} + + kcpTemplate, ok := obj.(*controlplanev1.KubeadmControlPlaneTemplate) + if ok { + if kcpTemplate.Spec.Template.Spec.RolloutStrategy == nil { + kcpTemplate.Spec.Template.Spec.RolloutStrategy = &controlplanev1.RolloutStrategy{} + } + if kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate == nil { + kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate = &controlplanev1.RollingUpdate{} + } + kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString } - kcpTemplate.Spec.Template.Spec.RolloutStrategy.RollingUpdate.MaxSurge = &kubeadmControlPlaneMaxSurgeIntOrString + return nil } // patchKubeadmConfigTemplate patches the KubeadmConfigTemplate. -func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1beta1.KubeadmConfigTemplate, _ map[string]apiextensionsv1.JSON) error { +func patchKubeadmConfigTemplate(_ context.Context, _ runtime.Object, _ map[string]apiextensionsv1.JSON) error { return nil } @@ -182,7 +220,7 @@ func patchKubeadmConfigTemplate(_ context.Context, _ *bootstrapv1beta1.KubeadmCo // the DockerMachineTemplate belongs to. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as an example of version-aware patches. -func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infrav1beta1.DockerMachineTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachineTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachineTemplate belongs to the ControlPlane, set the images using the ControlPlane version.
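
patchKubeadmControlPlaneTemplate reads its input through the variable-lookup convention shared by all these helpers: a missing variable makes the patch a no-op, while any other lookup failure is returned to the caller. A minimal sketch of that convention (lookupMaxSurge is a hypothetical helper name, not part of the diff):

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	"sigs.k8s.io/cluster-api/exp/runtime/topologymutation"
)

func lookupMaxSurge(templateVariables map[string]apiextensionsv1.JSON) (string, bool, error) {
	value, err := topologymutation.GetStringVariable(templateVariables, "kubeadmControlPlaneMaxSurge")
	if err != nil {
		// A missing variable is tolerated: the caller simply skips the patch.
		if topologymutation.IsNotFoundError(err) {
			return "", false, nil
		}
		return "", false, err
	}
	return value, true, nil
}

func main() {
	v, ok, err := lookupMaxSurge(map[string]apiextensionsv1.JSON{
		"kubeadmControlPlaneMaxSurge": {Raw: []byte(`"3"`)},
	})
	fmt.Println(v, ok, err) // prints: 3 true <nil>
}
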
@@ -203,7 +241,16 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting control plane custom image to %q", kindMapping.Image)) - dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + + dockerMachineTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerMachineTemplate) + if ok { + dockerMachineTemplateV1Beta1.Spec.Template.Spec.CustomImage = kindMapping.Image + } + + dockerMachineTemplate, ok := obj.(*infrav1.DockerMachineTemplate) + if ok { + dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + } // return early if we have successfully patched a control plane dockerMachineTemplate return nil } @@ -229,7 +276,16 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting MachineDeployment customImage to %q", kindMapping.Image)) - dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + + dockerMachineTemplateV1Beta1, ok := obj.(*infrav1beta1.DockerMachineTemplate) + if ok { + dockerMachineTemplateV1Beta1.Spec.Template.Spec.CustomImage = kindMapping.Image + } + + dockerMachineTemplate, ok := obj.(*infrav1.DockerMachineTemplate) + if ok { + dockerMachineTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image + } return nil } @@ -237,7 +293,7 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr // It sets the CustomImage to an image for the version in use by the MachinePool. // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it // as an example of version-aware patches. -func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1beta1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { +func patchDockerMachinePoolTemplate(ctx context.Context, obj runtime.Object, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) // If the DockerMachinePoolTemplate belongs to a MachinePool, set the images using the MachinePool version. @@ -261,7 +317,17 @@ func patchDockerMachinePoolTempla kindMapping := kind.GetMapping(semVer, "") log.Info(fmt.Sprintf("Setting MachinePool customImage to %q", kindMapping.Image)) - dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + + dockerMachinePoolTemplateV1Beta1, ok := obj.(*infraexpv1beta1.DockerMachinePoolTemplate) + if ok { + dockerMachinePoolTemplateV1Beta1.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + } + + dockerMachinePoolTemplate, ok := obj.(*infraexpv1.DockerMachinePoolTemplate) + if ok { + dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image + } + return nil } diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go index e11949714b6f..a21866f7d422 100644 --- a/test/extension/handlers/topologymutation/handler_test.go +++ b/test/extension/handlers/topologymutation/handler_test.go @@ -30,11 +30,11 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" . 
"sigs.k8s.io/controller-runtime/pkg/envtest/komega" - bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" - controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" - infrav1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" - infraexpv1beta1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta2" + infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta2" ) var ( @@ -42,10 +42,10 @@ var ( ) func init() { - _ = infrav1beta1.AddToScheme(testScheme) - _ = infraexpv1beta1.AddToScheme(testScheme) - _ = controlplanev1beta1.AddToScheme(testScheme) - _ = bootstrapv1beta1.AddToScheme(testScheme) + _ = infrav1.AddToScheme(testScheme) + _ = infraexpv1.AddToScheme(testScheme) + _ = controlplanev1.AddToScheme(testScheme) + _ = bootstrapv1.AddToScheme(testScheme) } func Test_patchDockerClusterTemplate(t *testing.T) { @@ -53,29 +53,29 @@ func Test_patchDockerClusterTemplate(t *testing.T) { tests := []struct { name string - template *infrav1beta1.DockerClusterTemplate + template *infrav1.DockerClusterTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infrav1beta1.DockerClusterTemplate + expectedTemplate *infrav1.DockerClusterTemplate expectedErr bool }{ { name: "no op if imageRepository is not set", - template: &infrav1beta1.DockerClusterTemplate{}, + template: &infrav1.DockerClusterTemplate{}, variables: nil, - expectedTemplate: &infrav1beta1.DockerClusterTemplate{}, + expectedTemplate: &infrav1.DockerClusterTemplate{}, }, { name: "set LoadBalancer.ImageRepository if imageRepository is set", - template: &infrav1beta1.DockerClusterTemplate{}, + template: &infrav1.DockerClusterTemplate{}, variables: map[string]apiextensionsv1.JSON{ "imageRepository": {Raw: toJSON("testImage")}, }, - expectedTemplate: &infrav1beta1.DockerClusterTemplate{ - Spec: infrav1beta1.DockerClusterTemplateSpec{ - Template: infrav1beta1.DockerClusterTemplateResource{ - Spec: infrav1beta1.DockerClusterSpec{ - LoadBalancer: infrav1beta1.DockerLoadBalancer{ - ImageMeta: infrav1beta1.ImageMeta{ + expectedTemplate: &infrav1.DockerClusterTemplate{ + Spec: infrav1.DockerClusterTemplateSpec{ + Template: infrav1.DockerClusterTemplateResource{ + Spec: infrav1.DockerClusterSpec{ + LoadBalancer: infrav1.DockerLoadBalancer{ + ImageMeta: infrav1.ImageMeta{ ImageRepository: "testImage", }, }, @@ -101,14 +101,14 @@ func Test_patchDockerClusterTemplate(t *testing.T) { func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { tests := []struct { name string - template *controlplanev1beta1.KubeadmControlPlaneTemplate + template *controlplanev1.KubeadmControlPlaneTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *controlplanev1beta1.KubeadmControlPlaneTemplate + expectedTemplate *controlplanev1.KubeadmControlPlaneTemplate expectedErr bool }{ { name: "sets RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided", - template: &controlplanev1beta1.KubeadmControlPlaneTemplate{}, + template: &controlplanev1.KubeadmControlPlaneTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -117,12 
+117,12 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { })}, "kubeadmControlPlaneMaxSurge": {Raw: toJSON("1")}, }, - expectedTemplate: &controlplanev1beta1.KubeadmControlPlaneTemplate{ - Spec: controlplanev1beta1.KubeadmControlPlaneTemplateSpec{ - Template: controlplanev1beta1.KubeadmControlPlaneTemplateResource{ - Spec: controlplanev1beta1.KubeadmControlPlaneTemplateResourceSpec{ - RolloutStrategy: &controlplanev1beta1.RolloutStrategy{ - RollingUpdate: &controlplanev1beta1.RollingUpdate{MaxSurge: &intstr.IntOrString{IntVal: 1}}, + expectedTemplate: &controlplanev1.KubeadmControlPlaneTemplate{ + Spec: controlplanev1.KubeadmControlPlaneTemplateSpec{ + Template: controlplanev1.KubeadmControlPlaneTemplateResource{ + Spec: controlplanev1.KubeadmControlPlaneTemplateResourceSpec{ + RolloutStrategy: &controlplanev1.RolloutStrategy{ + RollingUpdate: &controlplanev1.RollingUpdate{MaxSurge: &intstr.IntOrString{IntVal: 1}}, }, }, }, @@ -150,21 +150,21 @@ func Test_patchDockerMachineTemplate(t *testing.T) { tests := []struct { name string - template *infrav1beta1.DockerMachineTemplate + template *infrav1.DockerMachineTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infrav1beta1.DockerMachineTemplate + expectedTemplate *infrav1.DockerMachineTemplate expectedErr bool }{ { name: "fails if builtin.controlPlane.version nor builtin.machineDeployment.version is not set", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: nil, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{}, + expectedTemplate: &infrav1.DockerMachineTemplate{}, expectedErr: true, }, { name: "sets customImage for templates linked to ControlPlane", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -172,10 +172,10 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{ - Spec: infrav1beta1.DockerMachineTemplateSpec{ - Template: infrav1beta1.DockerMachineTemplateResource{ - Spec: infrav1beta1.DockerMachineSpec{ + expectedTemplate: &infrav1.DockerMachineTemplate{ + Spec: infrav1.DockerMachineTemplateSpec{ + Template: infrav1.DockerMachineTemplateResource{ + Spec: infrav1.DockerMachineSpec{ CustomImage: "kindest/node:v1.23.0", }, }, @@ -184,7 +184,7 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, { name: "sets customImage for templates linked to ControlPlane for pre versions", - template: &infrav1beta1.DockerMachineTemplate{}, + template: &infrav1.DockerMachineTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -192,10 +192,10 @@ func Test_patchDockerMachineTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infrav1beta1.DockerMachineTemplate{ - Spec: infrav1beta1.DockerMachineTemplateSpec{ - Template: infrav1beta1.DockerMachineTemplateResource{ - Spec: infrav1beta1.DockerMachineSpec{ + expectedTemplate: &infrav1.DockerMachineTemplate{ + Spec: infrav1.DockerMachineTemplateSpec{ + Template: infrav1.DockerMachineTemplateResource{ + Spec: infrav1.DockerMachineSpec{ CustomImage: "kindest/node:v1.23.0-rc.0", }, }, @@ -221,21 +221,21 @@ func Test_patchDockerMachinePoolTemplate(t *testing.T) { tests := []struct { name string - template 
*infraexpv1beta1.DockerMachinePoolTemplate + template *infraexpv1.DockerMachinePoolTemplate variables map[string]apiextensionsv1.JSON - expectedTemplate *infraexpv1beta1.DockerMachinePoolTemplate + expectedTemplate *infraexpv1.DockerMachinePoolTemplate expectedErr bool }{ { name: "fails if builtin.controlPlane.version nor builtin.machinePool.version is not set", - template: &infraexpv1beta1.DockerMachinePoolTemplate{}, + template: &infraexpv1.DockerMachinePoolTemplate{}, variables: nil, - expectedTemplate: &infraexpv1beta1.DockerMachinePoolTemplate{}, + expectedTemplate: &infraexpv1.DockerMachinePoolTemplate{}, expectedErr: true, }, { name: "sets customImage for templates linked to ControlPlane", - template: &infraexpv1beta1.DockerMachinePoolTemplate{}, + template: &infraexpv1.DockerMachinePoolTemplate{}, variables: map[string]apiextensionsv1.JSON{ runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ @@ -247,11 +247,11 @@ func Test_patchDockerMachinePoolTemplate(t *testing.T) { }, })}, }, - expectedTemplate: &infraexpv1beta1.DockerMachinePoolTemplate{ - Spec: infraexpv1beta1.DockerMachinePoolTemplateSpec{ - Template: infraexpv1beta1.DockerMachinePoolTemplateResource{ - Spec: infraexpv1beta1.DockerMachinePoolSpec{ - Template: infraexpv1beta1.DockerMachinePoolMachineTemplate{ + expectedTemplate: &infraexpv1.DockerMachinePoolTemplate{ + Spec: infraexpv1.DockerMachinePoolTemplateSpec{ + Template: infraexpv1.DockerMachinePoolTemplateResource{ + Spec: infraexpv1.DockerMachinePoolSpec{ + Template: infraexpv1.DockerMachinePoolMachineTemplate{ CustomImage: "kindest/node:v1.23.0", }, }, @@ -305,28 +305,28 @@ func TestHandler_GeneratePatches(t *testing.T) { }, }), } - kubeadmControlPlaneTemplate := controlplanev1beta1.KubeadmControlPlaneTemplate{ + kubeadmControlPlaneTemplate := controlplanev1.KubeadmControlPlaneTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "KubeadmControlPlaneTemplate", - APIVersion: controlplanev1beta1.GroupVersion.String(), + APIVersion: controlplanev1.GroupVersion.String(), }, } - dockerMachineTemplate := infrav1beta1.DockerMachineTemplate{ + dockerMachineTemplate := infrav1.DockerMachineTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerMachineTemplate", - APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } - dockerMachinePoolTemplate := infraexpv1beta1.DockerMachinePoolTemplate{ + dockerMachinePoolTemplate := infraexpv1.DockerMachinePoolTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerMachinePoolTemplate", - APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } - dockerClusterTemplate := infrav1beta1.DockerClusterTemplate{ + dockerClusterTemplate := infrav1.DockerClusterTemplate{ TypeMeta: metav1.TypeMeta{ Kind: "DockerClusterTemplate", - APIVersion: infrav1beta1.GroupVersion.String(), + APIVersion: infrav1.GroupVersion.String(), }, } tests := []struct { @@ -349,7 +349,7 @@ func TestHandler_GeneratePatches(t *testing.T) { }, Items: []runtimehooksv1.GeneratePatchesResponseItem{ responseItem("1", `[ -{"op":"add","path":"/spec/template/spec/rolloutStrategy","value":{"rollingUpdate":{"maxSurge":3}}} +{"op":"add","path":"/spec","value":{"template": {"spec":{"rolloutStrategy": {"rollingUpdate":{"maxSurge":3}}}}}} ]`), responseItem("2", `[ {"op":"add","path":"/spec/template/spec/customImage","value":"kindest/node:v1.23.0"} diff --git a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml 
b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml index 114a3c3a895f..5211ddbe596a 100644 --- a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml +++ b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml @@ -5,19 +5,19 @@ metadata: spec: controlPlane: templateRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: templateRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane namingStrategy: template: "{{ .cluster.name }}-cp-{{ .random }}" infrastructure: templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -28,12 +28,12 @@ spec: template: bootstrap: templateRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machinePools: @@ -43,12 +43,12 @@ spec: template: bootstrap: templateRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: templateRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate patches: @@ -58,7 +58,7 @@ spec: validateTopologyExtension: validate-topology.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} discoverVariablesExtension: discover-variables.${EXTENSION_CONFIG_NAME:-"k8s-upgrade-with-runtimesdk"} --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -66,26 +66,26 @@ spec: template: spec: failureDomains: - fd1: + - name: fd1 controlPlane: true - fd2: + - name: fd2 controlPlane: true - fd3: + - name: fd3 controlPlane: true - fd4: + - name: fd4 controlPlane: false - fd5: + - name: fd5 controlPlane: false - fd6: + - name: fd6 controlPlane: false - fd7: + - name: fd7 controlPlane: false - fd8: + - name: fd8 controlPlane: false loadBalancer: {} --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: @@ -103,7 +103,7 @@ spec: joinConfiguration: nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
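
Templates like the ones in this testdata file are consumed by the extension through the codec built in NewExtensionHandlers. A minimal sketch of that decode path for a single v1beta2 document, assuming sigs.k8s.io/yaml for the YAML-to-JSON step, since the universal decoder consumes JSON:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"sigs.k8s.io/yaml"

	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
)

func main() {
	// Register the v1beta2 group, mirroring the scheme setup in handler.go.
	scheme := runtime.NewScheme()
	_ = controlplanev1.AddToScheme(scheme)
	decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(controlplanev1.GroupVersion)

	manifest := []byte(`apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: KubeadmControlPlaneTemplate
metadata:
  name: quick-start-control-plane`)

	jsonBytes, err := yaml.YAMLToJSON(manifest)
	if err != nil {
		panic(err)
	}
	obj, _, err := decoder.Decode(jsonBytes, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T\n", obj) // a *v1beta2.KubeadmControlPlaneTemplate
}
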
--- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -114,7 +114,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -125,7 +125,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -137,7 +137,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate diff --git a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml index 9c42920be93c..5ad176fa12db 100644 --- a/test/infrastructure/docker/templates/cluster-template-development-mp.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development-mp.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" @@ -11,9 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: quick-start + classRef: + name: quick-start controlPlane: - metadata: {} replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: - name: imageRepository diff --git a/test/infrastructure/docker/templates/cluster-template-development.yaml b/test/infrastructure/docker/templates/cluster-template-development.yaml index 03a4c8efa8b0..ef00269c12e0 100644 --- a/test/infrastructure/docker/templates/cluster-template-development.yaml +++ b/test/infrastructure/docker/templates/cluster-template-development.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" @@ -11,9 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: quick-start + classRef: + name: quick-start controlPlane: - metadata: {} replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: - name: imageRepository diff --git a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml index 972af42bea16..c2437ccdf811 100644 --- a/test/infrastructure/docker/templates/cluster-template-in-memory.yaml +++ b/test/infrastructure/docker/templates/cluster-template-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: "${CLUSTER_NAME}" @@ -11,8 +11,9 @@ spec: cidrBlocks: ${POD_CIDR:=["192.168.0.0/16"]} serviceDomain: ${SERVICE_DOMAIN:="cluster.local"} topology: - class: in-memory - classNamespace: ${NAMESPACE} + classRef: + name: in-memory + namespace: ${NAMESPACE} version: ${KUBERNETES_VERSION} controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} diff --git a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml 
b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml index d6053aa256c7..2ceab8339a58 100644 --- a/test/infrastructure/docker/templates/clusterclass-in-memory.yaml +++ b/test/infrastructure/docker/templates/clusterclass-in-memory.yaml @@ -1,4 +1,4 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate metadata: name: in-memory-cluster @@ -8,7 +8,7 @@ spec: backend: inMemory: {} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: in-memory-control-plane @@ -27,14 +27,16 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% joinConfiguration: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-control-plane @@ -60,7 +62,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate metadata: name: in-memory-default-worker-machinetemplate @@ -86,7 +88,7 @@ spec: startupDuration: "2s" startupJitter: "0.2" --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: in-memory-default-worker-bootstraptemplate @@ -97,36 +99,35 @@ spec: nodeRegistration: criSocket: unix:///var/run/containerd/containerd.sock kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + - name: eviction-hard + value: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: in-memory spec: controlPlane: - metadata: - annotations: machineInfrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-control-plane - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: in-memory-control-plane machineHealthCheck: - unhealthyConditions: + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevClusterTemplate name: in-memory-cluster workers: @@ -134,20 +135,20 @@ spec: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: in-memory-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DevMachineTemplate name: in-memory-default-worker-machinetemplate machineHealthCheck: - unhealthyConditions: + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 diff --git a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml index a2c11924544c..6610c9ed6843 100644 --- a/test/infrastructure/docker/templates/clusterclass-quick-start.yaml +++ b/test/infrastructure/docker/templates/clusterclass-quick-start.yaml @@ -1,29 +1,29 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: quick-start spec: controlPlane: - ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: quick-start-control-plane machineInfrastructure: - ref: + templateRef: kind: DockerMachineTemplate - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 name: quick-start-control-plane machineHealthCheck: - unhealthyConditions: + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate name: quick-start-cluster workers: @@ -31,34 +31,34 @@ spec: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate name: quick-start-default-worker-machinetemplate machineHealthCheck: - unhealthyConditions: + unhealthyNodeConditions: - type: Ready status: Unknown - timeout: 300s + timeoutSeconds: 300 - type: Ready status: "False" - timeout: 300s + timeoutSeconds: 300 machinePools: - class: default-worker template: bootstrap: - ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: quick-start-default-worker-bootstraptemplate infrastructure: - ref: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + templateRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate name: quick-start-default-worker-machinepooltemplate variables: @@ -114,7 +114,7 @@ spec: enabledIf: '{{ ne .imageRepository "" }}' definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -128,7 +128,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -144,7 +144,7 @@ spec: description: "Sets tag to use for the etcd image in the KubeadmControlPlane." 
definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -158,7 +158,7 @@ spec: description: "Sets the container image that is used for running dockerMachines for the controlPlane and default-worker machineDeployments." definitions: - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: machineDeploymentClass: @@ -171,7 +171,7 @@ spec: template: | kindest/node:{{ .builtin.machineDeployment.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate matchResources: machinePoolClass: @@ -184,7 +184,7 @@ spec: template: | kindest/node:{{ .builtin.machinePool.version | replace "+" "_" }} - selector: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate matchResources: controlPlane: true @@ -198,7 +198,7 @@ spec: description: "Adds an admission configuration for PodSecurity to the kube-apiserver." definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -206,7 +206,8 @@ spec: - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraArgs" value: - admission-control-config-file: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" + - name: admission-control-config-file + value: "/etc/kubernetes/kube-apiserver-admission-pss.yaml" - op: add path: "/spec/template/spec/kubeadmConfigSpec/clusterConfiguration/apiServer/extraVolumes" value: @@ -225,7 +226,7 @@ spec: plugins: - name: PodSecurity configuration: - apiVersion: pod-security.admission.config.k8s.io/v1{{ if semverCompare "< v1.25" .builtin.controlPlane.version }}beta1{{ end }} + apiVersion: pod-security.admission.config.k8s.io/v1{{ if semverCompare "< v1.25-0" .builtin.controlPlane.version }}beta1{{ end }} kind: PodSecurityConfiguration defaults: enforce: "{{ .podSecurityStandard.enforce }}" @@ -241,7 +242,7 @@ spec: path: /etc/kubernetes/kube-apiserver-admission-pss.yaml enabledIf: "{{ .podSecurityStandard.enabled }}" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerClusterTemplate metadata: name: quick-start-cluster @@ -250,7 +251,7 @@ spec: spec: {} --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: quick-start-control-plane spec: @@ -262,11 +263,17 @@ spec: # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] initConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+ kubeletExtraArgs: # having a non-empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a non-empty kubeletExtraArgs is required for the externalCloudProvider patch to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%' --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-control-plane @@ -277,7 +284,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachineTemplate metadata: name: quick-start-default-worker-machinetemplate @@ -288,7 +295,7 @@ spec: - containerPath: "/var/run/docker.sock" hostPath: "/var/run/docker.sock" --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: DockerMachinePoolTemplate metadata: name: quick-start-default-worker-machinepooltemplate @@ -297,7 +304,7 @@ spec: spec: template: {} --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: quick-start-default-worker-bootstraptemplate @@ -305,4 +312,7 @@ spec: template: spec: joinConfiguration: - nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + nodeRegistration: # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + kubeletExtraArgs: # having a non-empty kubeletExtraArgs is required for the externalCloudProvider to work + - name: eviction-hard + value: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'
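
One behavioral fix in the quick-start ClusterClass above is easy to miss: the PodSecurity admission template now compares against "< v1.25-0" instead of "< v1.25". With Masterminds-style constraints, which sprig's semverCompare uses, a plain "< v1.25" never matches pre-release versions, so a version such as v1.24.0-rc.0 would wrongly be treated as not below 1.25 and get the v1 admission config; appending the "-0" pre-release suffix opts the constraint into matching pre-releases. A minimal standalone sketch of the difference:

package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	v := semver.MustParse("1.24.0-rc.0")
	for _, raw := range []string{"< 1.25", "< 1.25-0"} {
		c, err := semver.NewConstraint(raw)
		if err != nil {
			panic(err)
		}
		// Prints: false for "< 1.25" (pre-releases are excluded), true for "< 1.25-0".
		fmt.Printf("%q satisfies %q: %v\n", v, raw, c.Check(v))
	}
}
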