diff --git a/util/deprecated/v1beta1/annotations/doc.go b/util/deprecated/v1beta1/annotations/doc.go new file mode 100644 index 000000000000..7f253eb1b62e --- /dev/null +++ b/util/deprecated/v1beta1/annotations/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package annotations implements annotation helper functions. +// +// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. +package annotations diff --git a/util/deprecated/v1beta1/annotations/helpers.go b/util/deprecated/v1beta1/annotations/helpers.go new file mode 100644 index 000000000000..92cb520fec78 --- /dev/null +++ b/util/deprecated/v1beta1/annotations/helpers.go @@ -0,0 +1,141 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "regexp" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) + +// IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. +func IsPaused(cluster *clusterv1.Cluster, o metav1.Object) bool { + if cluster.Spec.Paused { + return true + } + return HasPaused(o) +} + +// IsExternallyManaged returns true if the object has the `managed-by` annotation. +func IsExternallyManaged(o metav1.Object) bool { + return hasAnnotation(o, clusterv1.ManagedByAnnotation) +} + +// HasPaused returns true if the object has the `paused` annotation. +func HasPaused(o metav1.Object) bool { + return hasAnnotation(o, clusterv1.PausedAnnotation) +} + +// HasSkipRemediation returns true if the object has the `skip-remediation` annotation. +func HasSkipRemediation(o metav1.Object) bool { + return hasAnnotation(o, clusterv1.MachineSkipRemediationAnnotation) +} + +// HasRemediateMachine returns true if the object has the `remediate-machine` annotation. +func HasRemediateMachine(o metav1.Object) bool { + return hasAnnotation(o, clusterv1.RemediateMachineAnnotation) +} + +// HasWithPrefix returns true if at least one of the annotations has the prefix specified. 
+func HasWithPrefix(prefix string, annotations map[string]string) bool { + for key := range annotations { + if strings.HasPrefix(key, prefix) { + return true + } + } + return false +} + +// ReplicasManagedByExternalAutoscaler returns true if the standard annotation for external autoscaler is present. +func ReplicasManagedByExternalAutoscaler(o metav1.Object) bool { + return hasTruthyAnnotationValue(o, clusterv1.ReplicasManagedByAnnotation) +} + +// AddAnnotations sets the desired annotations on the object and returns true if the annotations have changed. +func AddAnnotations(o metav1.Object, desired map[string]string) bool { + if len(desired) == 0 { + return false + } + annotations := o.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + hasChanged := false + for k, v := range desired { + if cur, ok := annotations[k]; !ok || cur != v { + annotations[k] = v + hasChanged = true + } + } + o.SetAnnotations(annotations) + return hasChanged +} + +// GetManagedAnnotations filters out and returns the CAPI-managed annotations for a Machine, with an optional list of regex patterns for user-specified annotations. +func GetManagedAnnotations(m *clusterv1.Machine, additionalSyncMachineAnnotations ...*regexp.Regexp) map[string]string { + // Always sync CAPI's bookkeeping annotations + managedAnnotations := map[string]string{ + clusterv1.ClusterNameAnnotation: m.Spec.ClusterName, + clusterv1.ClusterNamespaceAnnotation: m.GetNamespace(), + clusterv1.MachineAnnotation: m.Name, + } + if owner := metav1.GetControllerOfNoCopy(m); owner != nil { + managedAnnotations[clusterv1.OwnerKindAnnotation] = owner.Kind + managedAnnotations[clusterv1.OwnerNameAnnotation] = owner.Name + } + for key, value := range m.GetAnnotations() { + // Always sync CAPI's default annotation node domain + dnsSubdomainOrName := strings.Split(key, "/")[0] + if dnsSubdomainOrName == clusterv1.ManagedNodeAnnotationDomain || strings.HasSuffix(dnsSubdomainOrName, "."+clusterv1.ManagedNodeAnnotationDomain) { + managedAnnotations[key] = value + continue + } + // Sync if the annotations matches at least one user provided regex + for _, regex := range additionalSyncMachineAnnotations { + if regex.MatchString(key) { + managedAnnotations[key] = value + break + } + } + } + return managedAnnotations +} + +// hasAnnotation returns true if the object has the specified annotation. +func hasAnnotation(o metav1.Object, annotation string) bool { + annotations := o.GetAnnotations() + if annotations == nil { + return false + } + _, ok := annotations[annotation] + return ok +} + +// hasTruthyAnnotationValue returns true if the object has an annotation with a value that is not "false". +func hasTruthyAnnotationValue(o metav1.Object, annotation string) bool { + annotations := o.GetAnnotations() + if annotations == nil { + return false + } + if val, ok := annotations[annotation]; ok { + return val != "false" + } + return false +} diff --git a/util/deprecated/v1beta1/annotations/helpers_test.go b/util/deprecated/v1beta1/annotations/helpers_test.go new file mode 100644 index 000000000000..b6f6e761d6b3 --- /dev/null +++ b/util/deprecated/v1beta1/annotations/helpers_test.go @@ -0,0 +1,419 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotations + +import ( + "fmt" + "regexp" + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/test/builder" +) + +func TestAddAnnotations(t *testing.T) { + g := NewWithT(t) + + testcases := []struct { + name string + obj metav1.Object + input map[string]string + expected map[string]string + changed bool + }{ + { + name: "should return false if no changes are made", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{ + "foo": "bar", + }, + expected: map[string]string{ + "foo": "bar", + }, + changed: false, + }, + { + name: "should do nothing if no annotations are provided", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{}, + expected: map[string]string{ + "foo": "bar", + }, + changed: false, + }, + { + name: "should do nothing if no annotations are provided and have been nil before", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: nil, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{}, + expected: nil, + changed: false, + }, + { + name: "should return true if annotations are added", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{ + "thing1": "thing2", + "buzz": "blah", + }, + expected: map[string]string{ + "foo": "bar", + "thing1": "thing2", + "buzz": "blah", + }, + changed: true, + }, + { + name: "should return true if annotations are changed", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "foo": "bar", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{ + "foo": "buzz", + }, + expected: map[string]string{ + "foo": "buzz", + }, + changed: true, + }, + { + name: "should return true if annotations are changed and have been nil before", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: nil, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + input: map[string]string{ + "foo": "buzz", + }, + expected: map[string]string{ + "foo": "buzz", + }, + changed: true, + }, + { + name: "should add annotations to an empty unstructured", + obj: &unstructured.Unstructured{}, + input: map[string]string{ + "foo": "buzz", + }, + expected: map[string]string{ + "foo": "buzz", + }, + changed: true, + }, + { + name: "should add annotations to a non empty unstructured", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "foo": "bar", + }, + }, + }, 
+ }, + input: map[string]string{ + "thing1": "thing2", + "buzz": "blah", + }, + expected: map[string]string{ + "foo": "bar", + "thing1": "thing2", + "buzz": "blah", + }, + changed: true, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(*testing.T) { + res := AddAnnotations(tc.obj, tc.input) + g.Expect(res).To(Equal(tc.changed)) + g.Expect(tc.obj.GetAnnotations()).To(Equal(tc.expected)) + }) + } +} + +func TestHasTruthyAnnotationValue(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + annotationKey string + expected bool + }{ + { + name: "annotation does not exist", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "cluster.x-k8s.io/some-other-annotation": "", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + annotationKey: "cluster.x-k8s.io/replicas-managed-by", + expected: false, + }, + { + name: "no val", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "cluster.x-k8s.io/replicas-managed-by": "", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + annotationKey: "cluster.x-k8s.io/replicas-managed-by", + expected: true, + }, + { + name: "annotation exists, true value", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "cluster.x-k8s.io/replicas-managed-by": "true", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + annotationKey: "cluster.x-k8s.io/replicas-managed-by", + expected: true, + }, + { + name: "annotation exists, random string value", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "cluster.x-k8s.io/replicas-managed-by": "foo", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + annotationKey: "cluster.x-k8s.io/replicas-managed-by", + expected: true, + }, + { + name: "annotation exists, false value", + obj: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "cluster.x-k8s.io/replicas-managed-by": "false", + }, + }, + Spec: corev1.NodeSpec{}, + Status: corev1.NodeStatus{}, + }, + annotationKey: "cluster.x-k8s.io/replicas-managed-by", + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + ret := hasTruthyAnnotationValue(tt.obj, tt.annotationKey) + if tt.expected { + g.Expect(ret).To(BeTrue()) + } else { + g.Expect(ret).To(BeFalse()) + } + }) + } +} + +func TestGetManagedAnnotations(t *testing.T) { + machineObj := newFakeMachine("default", "test-cluster") + ms := builder.MachineSet("default", "ms").Build() + ref := metav1.NewControllerRef(ms, ms.GroupVersionKind()) + + defaultAnnotations := map[string]string{ + clusterv1.ClusterNameAnnotation: machineObj.Spec.ClusterName, + clusterv1.ClusterNamespaceAnnotation: machineObj.GetNamespace(), + clusterv1.MachineAnnotation: machineObj.Name, + } + + additionalAnnotations := map[string]string{ + "foo": "bar", + "bar": "baz", + "example.test/node.cluster.x-k8s.io": "not-managed", + "gpu-node.cluster.x-k8s.io": "not-managed", + "example.test/node-restriction.kubernetes.io": "not-managed", + "gpu-node-restriction.kubernetes.io": "not-managed", + "wrong.test.foo.example.com": "", + } + + exampleRegex := regexp.MustCompile(`foo`) + defaultAndRegexAnnotations := map[string]string{} + for k, v := range defaultAnnotations { + defaultAndRegexAnnotations[k] = v + } + defaultAndRegexAnnotations["foo"] = "bar" + defaultAndRegexAnnotations["wrong.test.foo.example.com"] 
= "" + + ownerRefAnnotations := map[string]string{ + clusterv1.OwnerKindAnnotation: ms.Kind, + clusterv1.OwnerNameAnnotation: ms.Name, + } + defaultAndOwnerRefAnnotations := map[string]string{} + for k, v := range defaultAnnotations { + defaultAndOwnerRefAnnotations[k] = v + } + for k, v := range ownerRefAnnotations { + defaultAndOwnerRefAnnotations[k] = v + } + + allAnnotations := map[string]string{} + for k, v := range defaultAnnotations { + allAnnotations[k] = v + } + for k, v := range additionalAnnotations { + allAnnotations[k] = v + } + for k, v := range ownerRefAnnotations { + allAnnotations[k] = v + } + + tests := []struct { + name string + additionalSyncMachineAnnotations []*regexp.Regexp + allAnnotations map[string]string + managedAnnotations map[string]string + owned bool + }{ + { + name: "always sync default annotations", + additionalSyncMachineAnnotations: nil, + allAnnotations: allAnnotations, + managedAnnotations: defaultAnnotations, + }, + { + name: "sync additional defined annotations", + additionalSyncMachineAnnotations: []*regexp.Regexp{ + exampleRegex, + }, + allAnnotations: allAnnotations, + managedAnnotations: defaultAndRegexAnnotations, + }, + { + name: "sync all annotations", + additionalSyncMachineAnnotations: []*regexp.Regexp{ + regexp.MustCompile(`.*`), + }, + allAnnotations: allAnnotations, + managedAnnotations: allAnnotations, + }, + { + name: "sync owner annotations", + allAnnotations: allAnnotations, + managedAnnotations: defaultAndOwnerRefAnnotations, + owned: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testMachine := machineObj.DeepCopy() + + if tt.owned { + testMachine.SetOwnerReferences([]metav1.OwnerReference{*ref}) + } + + testMachine.SetAnnotations(tt.allAnnotations) + + g := NewWithT(t) + got := GetManagedAnnotations(testMachine, tt.additionalSyncMachineAnnotations...) + g.Expect(got).To(BeEquivalentTo(tt.managedAnnotations)) + }) + } +} + +func newFakeMachineSpec(namespace, clusterName string) clusterv1.MachineSpec { + return clusterv1.MachineSpec{ + ClusterName: clusterName, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha3", + Kind: "KubeadmConfigTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3", + Kind: "FakeMachineTemplate", + Name: fmt.Sprintf("%s-md-0", clusterName), + Namespace: namespace, + }, + } +} + +func newFakeMachine(namespace, clusterName string) *clusterv1.Machine { + return &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ma-annotationtest", + Namespace: namespace, + }, + Spec: newFakeMachineSpec(namespace, clusterName), + } +} diff --git a/util/deprecated/v1beta1/collections/doc.go b/util/deprecated/v1beta1/collections/doc.go new file mode 100644 index 000000000000..30ef7a74082f --- /dev/null +++ b/util/deprecated/v1beta1/collections/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package collections implements collection utilities. +// +// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. +package collections diff --git a/util/deprecated/v1beta1/collections/helpers.go b/util/deprecated/v1beta1/collections/helpers.go new file mode 100644 index 000000000000..c7ece52114c6 --- /dev/null +++ b/util/deprecated/v1beta1/collections/helpers.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections + +import ( + "context" + + "github.com/pkg/errors" + "sigs.k8s.io/controller-runtime/pkg/client" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) + +// GetFilteredMachinesForCluster returns a list of machines that can be filtered or not. +// If no filter is supplied then all machines associated with the target cluster are returned. +func GetFilteredMachinesForCluster(ctx context.Context, c client.Reader, cluster *clusterv1.Cluster, filters ...Func) (Machines, error) { + ml := &clusterv1.MachineList{} + if err := c.List( + ctx, + ml, + client.InNamespace(cluster.Namespace), + client.MatchingLabels{ + clusterv1.ClusterNameLabel: cluster.Name, + }, + ); err != nil { + return nil, errors.Wrap(err, "failed to list machines") + } + + machines := FromMachineList(ml) + return machines.Filter(filters...), nil +} diff --git a/util/deprecated/v1beta1/collections/machine_collection.go b/util/deprecated/v1beta1/collections/machine_collection.go new file mode 100644 index 000000000000..cc69cd428bf5 --- /dev/null +++ b/util/deprecated/v1beta1/collections/machine_collection.go @@ -0,0 +1,283 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Modified copy of k8s.io/apimachinery/pkg/util/sets/int64.go +// Modifications +// - int64 became *clusterv1.Machine +// - Empty type is removed +// - Sortable data type is removed in favor of util.MachinesByCreationTimestamp +// - nil checks added to account for the pointer +// - Added Filter, AnyFilter, and Oldest methods +// - Added FromMachineList initializer +// - Updated Has to also check for equality of Machines +// - Removed unused methods + +package collections + +import ( + "sort" + + "github.com/blang/semver/v4" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/version" +) + +// Machines is a set of Machines. +type Machines map[string]*clusterv1.Machine + +// MachinesByVersion sorts the list of Machine by spec.version, using their names as tie breaker. +// machines with no version are placed lower in the order. +type machinesByVersion []*clusterv1.Machine + +func (v machinesByVersion) Len() int { return len(v) } +func (v machinesByVersion) Swap(i, j int) { v[i], v[j] = v[j], v[i] } +func (v machinesByVersion) Less(i, j int) bool { + vi, _ := semver.ParseTolerant(*v[i].Spec.Version) + vj, _ := semver.ParseTolerant(*v[j].Spec.Version) + comp := version.Compare(vi, vj, version.WithBuildTags()) + if comp == 0 { + return v[i].Name < v[j].Name + } + return comp == -1 +} + +// machinesByCreationTimestamp sorts a list of Machine by creation timestamp, using their names as a tie breaker. +type machinesByCreationTimestamp []*clusterv1.Machine + +func (o machinesByCreationTimestamp) Len() int { return len(o) } +func (o machinesByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o machinesByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// machinesByDeletionTimestamp sorts a list of Machines by deletion timestamp, using their names as a tie breaker. +// Machines without DeletionTimestamp go after machines with this field set. +type machinesByDeletionTimestamp []*clusterv1.Machine + +func (o machinesByDeletionTimestamp) Len() int { return len(o) } +func (o machinesByDeletionTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o machinesByDeletionTimestamp) Less(i, j int) bool { + if o[i].DeletionTimestamp == nil && o[j].DeletionTimestamp == nil { + return o[i].Name < o[j].Name + } + + if o[i].DeletionTimestamp == nil { + return false + } + + if o[j].DeletionTimestamp == nil { + return true + } + + if o[i].DeletionTimestamp.Equal(o[j].DeletionTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].DeletionTimestamp.Before(o[j].DeletionTimestamp) +} + +// New creates an empty Machines. +func New() Machines { + return make(Machines) +} + +// FromMachines creates a Machines from a list of values. +func FromMachines(machines ...*clusterv1.Machine) Machines { + ss := make(Machines, len(machines)) + ss.Insert(machines...) + return ss +} + +// FromMachineList creates a Machines from the given MachineList. +func FromMachineList(machineList *clusterv1.MachineList) Machines { + ss := make(Machines, len(machineList.Items)) + for i := range machineList.Items { + ss.Insert(&machineList.Items[i]) + } + return ss +} + +// ToMachineList creates a MachineList from the given Machines. 
+func ToMachineList(machines Machines) clusterv1.MachineList { + ml := clusterv1.MachineList{} + for _, m := range machines { + ml.Items = append(ml.Items, *m) + } + return ml +} + +// Has return true when the collection has the given machine. +func (s Machines) Has(machine *clusterv1.Machine) bool { + for _, m := range s { + if m.Name == machine.Name && m.Namespace == machine.Namespace { + return true + } + } + return false +} + +// Insert adds items to the set. +func (s Machines) Insert(machines ...*clusterv1.Machine) { + for i := range machines { + if machines[i] != nil { + m := machines[i] + s[m.Name] = m + } + } +} + +// Difference returns a copy without machines that are in the given collection. +func (s Machines) Difference(machines Machines) Machines { + return s.Filter(func(m *clusterv1.Machine) bool { + _, found := machines[m.Name] + return !found + }) +} + +// SortedByCreationTimestamp returns the machines sorted by creation timestamp. +func (s Machines) SortedByCreationTimestamp() []*clusterv1.Machine { + res := make(machinesByCreationTimestamp, 0, len(s)) + for _, value := range s { + res = append(res, value) + } + sort.Sort(res) + return res +} + +// SortedByDeletionTimestamp returns the machines sorted by deletion timestamp. +func (s Machines) SortedByDeletionTimestamp() []*clusterv1.Machine { + res := make(machinesByDeletionTimestamp, 0, len(s)) + for _, value := range s { + res = append(res, value) + } + sort.Sort(res) + return res +} + +// UnsortedList returns the slice with contents in random order. +func (s Machines) UnsortedList() []*clusterv1.Machine { + res := make([]*clusterv1.Machine, 0, len(s)) + for _, value := range s { + res = append(res, value) + } + return res +} + +// Len returns the size of the set. +func (s Machines) Len() int { + return len(s) +} + +// newFilteredMachineCollection creates a Machines from a filtered list of values. +func newFilteredMachineCollection(filter Func, machines ...*clusterv1.Machine) Machines { + ss := make(Machines, len(machines)) + for i := range machines { + m := machines[i] + if filter(m) { + ss.Insert(m) + } + } + return ss +} + +// Filter returns a Machines containing only the Machines that match all of the given MachineFilters. +func (s Machines) Filter(filters ...Func) Machines { + return newFilteredMachineCollection(And(filters...), s.UnsortedList()...) +} + +// AnyFilter returns a Machines containing only the Machines that match any of the given MachineFilters. +func (s Machines) AnyFilter(filters ...Func) Machines { + return newFilteredMachineCollection(Or(filters...), s.UnsortedList()...) +} + +// Oldest returns the Machine with the oldest CreationTimestamp. +func (s Machines) Oldest() *clusterv1.Machine { + if len(s) == 0 { + return nil + } + return s.SortedByCreationTimestamp()[0] +} + +// Newest returns the Machine with the most recent CreationTimestamp. +func (s Machines) Newest() *clusterv1.Machine { + if len(s) == 0 { + return nil + } + return s.SortedByCreationTimestamp()[len(s)-1] +} + +// OldestDeletionTimestamp returns the Machine with the oldest DeletionTimestamp. +func (s Machines) OldestDeletionTimestamp() *clusterv1.Machine { + if len(s) == 0 { + return nil + } + return s.SortedByDeletionTimestamp()[0] +} + +// DeepCopy returns a deep copy. +func (s Machines) DeepCopy() Machines { + result := make(Machines, len(s)) + for _, m := range s { + result.Insert(m.DeepCopy()) + } + return result +} + +// ConditionGetters returns the slice with machines converted into conditions.Getter. 
+func (s Machines) ConditionGetters() []conditions.Getter { + res := make([]conditions.Getter, 0, len(s)) + for _, v := range s { + value := *v + res = append(res, &value) + } + return res +} + +// Names returns a slice of the names of each machine in the collection. +// Useful for logging and test assertions. +func (s Machines) Names() []string { + names := make([]string, 0, s.Len()) + for _, m := range s { + names = append(names, m.Name) + } + return names +} + +// SortedByVersion returns the machines sorted by version. +func (s Machines) sortedByVersion() []*clusterv1.Machine { + res := make(machinesByVersion, 0, len(s)) + for _, value := range s { + res = append(res, value) + } + sort.Sort(res) + return res +} + +// LowestVersion returns the lowest version among all the machine with +// defined versions. If no machine has a defined version it returns an +// empty string. +func (s Machines) LowestVersion() *string { + machines := s.Filter(WithVersion()) + if len(machines) == 0 { + return nil + } + m := machines.sortedByVersion()[0] + return m.Spec.Version +} diff --git a/util/deprecated/v1beta1/collections/machine_collection_test.go b/util/deprecated/v1beta1/collections/machine_collection_test.go new file mode 100644 index 000000000000..5fb0c76f350d --- /dev/null +++ b/util/deprecated/v1beta1/collections/machine_collection_test.go @@ -0,0 +1,193 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections_test + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/collections" +) + +func TestMachineCollection(t *testing.T) { + t.Run("SortedByCreationTimestamp", func(t *testing.T) { + t.Run("should return the same number of machines as are in the collection", func(t *testing.T) { + g := NewWithT(t) + collection := machines() + sortedMachines := collection.SortedByCreationTimestamp() + g.Expect(sortedMachines).To(HaveLen(len(collection))) + g.Expect(sortedMachines[0].Name).To(Equal("machine-1")) + g.Expect(sortedMachines[len(sortedMachines)-1].Name).To(Equal("machine-5")) + g.Expect(collection.Oldest().Name).To(Equal("machine-1")) + }) + }) + t.Run("SortedByDeletionTimestamp", func(t *testing.T) { + t.Run("should return the same number of machines as are in the collection", func(t *testing.T) { + g := NewWithT(t) + collection := machines() + // Adding Machines without deletionTimestamp. 
+ collection["machine-6"] = machine("machine-6") + collection["machine-7"] = machine("machine-7") + collection["machine-8"] = machine("machine-8") + + sortedMachines := collection.SortedByDeletionTimestamp() + g.Expect(sortedMachines).To(HaveLen(len(collection))) + g.Expect(sortedMachines[0].Name).To(Equal("machine-1")) + g.Expect(sortedMachines[len(sortedMachines)-1].Name).To(Equal("machine-8")) + g.Expect(collection.OldestDeletionTimestamp().Name).To(Equal("machine-1")) + }) + }) + t.Run("Difference", func(t *testing.T) { + t.Run("should return the collection with elements of the second collection removed", func(t *testing.T) { + g := NewWithT(t) + collection := machines() + c2 := collection.Filter(func(m *clusterv1.Machine) bool { + return m.Name != "machine-1" + }) + c3 := collection.Difference(c2) + // does not mutate + g.Expect(collection.Names()).To(ContainElement("machine-1")) + g.Expect(c3.Names()).To(ConsistOf("machine-1")) + }) + }) + t.Run("Names", func(t *testing.T) { + t.Run("should return a slice of names of each machine in the collection", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.New().Names()).To(BeEmpty()) + g.Expect(collections.FromMachines(machine("1"), machine("2")).Names()).To(ConsistOf("1", "2")) + }) + }) +} + +func TestMachinesLowestVersion(t *testing.T) { + tests := []struct { + name string + machines collections.Machines + expected *string + }{ + { + name: "return empty for empty machines collection", + machines: collections.New(), + expected: nil, + }, + { + name: "return empty if machines dont have version", + machines: func() collections.Machines { + machines := collections.New() + machines.Insert(&clusterv1.Machine{}) + return machines + }(), + expected: nil, + }, + { + name: "return lowest version from machines", + machines: func() collections.Machines { + machines := collections.New() + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20"), + }}) + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.19.8"), + }}) + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To(""), + }}) + return machines + }(), + expected: ptr.To("1.19.8"), + }, + { + name: "return lowest version from machines with pre release versions", + machines: func() collections.Machines { + machines := collections.New() + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20.1"), + }}) + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20.1-alpha.1"), + }}) + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To(""), + }}) + return machines + }(), + expected: ptr.To("1.20.1-alpha.1"), + }, + { + name: "return lowest version from machines with build identifier versions", + machines: func() collections.Machines { + machines := collections.New() + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-1"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20.1+xyz.2"), + }}) + machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-2"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20.1+xyz.1"), + }}) + 
machines.Insert(&clusterv1.Machine{ObjectMeta: metav1.ObjectMeta{Name: "machine-3"}, Spec: clusterv1.MachineSpec{ + Version: ptr.To(""), + }}) + return machines + }(), + expected: ptr.To("1.20.1+xyz.1"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + g.Expect(tt.machines.LowestVersion()).To(Equal(tt.expected)) + }) + } +} + +/* Helper functions to build machine objects for tests. */ + +type machineOpt func(*clusterv1.Machine) + +func withTimestamps(timestamp metav1.Time) machineOpt { + return func(m *clusterv1.Machine) { + m.CreationTimestamp = timestamp + m.DeletionTimestamp = ×tamp + } +} + +func machine(name string, opts ...machineOpt) *clusterv1.Machine { + m := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + for _, opt := range opts { + opt(m) + } + return m +} + +func machines() collections.Machines { + return collections.Machines{ + "machine-4": machine("machine-4", withTimestamps(metav1.Time{Time: time.Date(2018, 04, 02, 03, 04, 05, 06, time.UTC)})), + "machine-5": machine("machine-5", withTimestamps(metav1.Time{Time: time.Date(2018, 05, 02, 03, 04, 05, 06, time.UTC)})), + "machine-2": machine("machine-2", withTimestamps(metav1.Time{Time: time.Date(2018, 02, 02, 03, 04, 05, 06, time.UTC)})), + "machine-1": machine("machine-1", withTimestamps(metav1.Time{Time: time.Date(2018, 01, 02, 03, 04, 05, 06, time.UTC)})), + "machine-3": machine("machine-3", withTimestamps(metav1.Time{Time: time.Date(2018, 03, 02, 03, 04, 05, 06, time.UTC)})), + } +} diff --git a/util/deprecated/v1beta1/collections/machine_filters.go b/util/deprecated/v1beta1/collections/machine_filters.go new file mode 100644 index 000000000000..e61cef07414e --- /dev/null +++ b/util/deprecated/v1beta1/collections/machine_filters.go @@ -0,0 +1,310 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections + +import ( + "time" + + "github.com/blang/semver/v4" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "sigs.k8s.io/controller-runtime/pkg/client" + + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + util "sigs.k8s.io/cluster-api/util/deprecated/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" +) + +// Func is the functon definition for a filter. +type Func func(machine *clusterv1.Machine) bool + +// And returns a filter that returns true if all of the given filters returns true. +func And(filters ...Func) Func { + return func(machine *clusterv1.Machine) bool { + for _, f := range filters { + if !f(machine) { + return false + } + } + return true + } +} + +// Or returns a filter that returns true if any of the given filters returns true. 
+func Or(filters ...Func) Func { + return func(machine *clusterv1.Machine) bool { + for _, f := range filters { + if f(machine) { + return true + } + } + return false + } +} + +// Not returns a filter that returns the opposite of the given filter. +func Not(mf Func) Func { + return func(machine *clusterv1.Machine) bool { + return !mf(machine) + } +} + +// HasControllerRef is a filter that returns true if the machine has a controller ref. +func HasControllerRef(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return metav1.GetControllerOf(machine) != nil +} + +// InFailureDomains returns a filter to find all machines +// in any of the given failure domains. +func InFailureDomains(failureDomains ...*string) Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + for i := range failureDomains { + fd := failureDomains[i] + if fd == nil { + if fd == machine.Spec.FailureDomain { + return true + } + continue + } + if machine.Spec.FailureDomain == nil { + continue + } + if *fd == *machine.Spec.FailureDomain { + return true + } + } + return false + } +} + +// OwnedMachines returns a filter to find all machines owned by specified owner. +// Usage: GetFilteredMachinesForCluster(ctx, client, cluster, OwnedMachines(controlPlane)). +func OwnedMachines(owner client.Object) func(machine *clusterv1.Machine) bool { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return util.IsOwnedByObject(machine, owner) + } +} + +// ControlPlaneMachines returns a filter to find all control plane machines for a cluster, regardless of ownership. +// Usage: GetFilteredMachinesForCluster(ctx, client, cluster, ControlPlaneMachines(cluster.Name)). +func ControlPlaneMachines(clusterName string) func(machine *clusterv1.Machine) bool { + selector := ControlPlaneSelectorForCluster(clusterName) + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return selector.Matches(labels.Set(machine.Labels)) + } +} + +// AdoptableControlPlaneMachines returns a filter to find all un-controlled control plane machines. +// Usage: GetFilteredMachinesForCluster(ctx, client, cluster, AdoptableControlPlaneMachines(cluster.Name, controlPlane)). +func AdoptableControlPlaneMachines(clusterName string) func(machine *clusterv1.Machine) bool { + return And( + ControlPlaneMachines(clusterName), + Not(HasControllerRef), + ) +} + +// ActiveMachines returns a filter to find all active machines. +// Usage: GetFilteredMachinesForCluster(ctx, client, cluster, ActiveMachines). +func ActiveMachines(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return machine.DeletionTimestamp.IsZero() +} + +// HasDeletionTimestamp returns a filter to find all machines that have a deletion timestamp. +func HasDeletionTimestamp(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return !machine.DeletionTimestamp.IsZero() +} + +// IsUnhealthyAndOwnerRemediated returns a filter to find all machines that have a MachineHealthCheckSucceeded condition set to False, +// indicating a problem was detected on the machine, and the MachineOwnerRemediated condition set to False, indicating that the machine owner is +// responsible for performing remediation for the machine. 
+func IsUnhealthyAndOwnerRemediated(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) && conditions.IsFalse(machine, clusterv1.MachineOwnerRemediatedCondition) +} + +// IsUnhealthy returns a filter to find all machines that have a MachineHealthCheckSucceeded condition set to False, +// indicating a problem was detected on the machine. +func IsUnhealthy(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return conditions.IsFalse(machine, clusterv1.MachineHealthCheckSucceededCondition) +} + +// HasUnhealthyControlPlaneComponents returns a filter to find all unhealthy control plane machines that +// have any of the following control plane component conditions set to False: +// APIServerPodHealthy, ControllerManagerPodHealthy, SchedulerPodHealthy, EtcdPodHealthy & EtcdMemberHealthy (if using managed etcd). +// It is different from the HasUnhealthyCondition func which checks MachineHealthCheck conditions. +func HasUnhealthyControlPlaneComponents(isEtcdManaged bool) Func { + controlPlaneMachineHealthConditions := []clusterv1.ConditionType{ + controlplanev1.MachineAPIServerPodHealthyCondition, + controlplanev1.MachineControllerManagerPodHealthyCondition, + controlplanev1.MachineSchedulerPodHealthyCondition, + } + if isEtcdManaged { + controlPlaneMachineHealthConditions = append(controlPlaneMachineHealthConditions, + controlplanev1.MachineEtcdPodHealthyCondition, + controlplanev1.MachineEtcdMemberHealthyCondition, + ) + } + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + + // The machine without a node could be in failure status due to the kubelet config error, or still provisioning components (including etcd). + // So do not treat it as unhealthy. + + for _, condition := range controlPlaneMachineHealthConditions { + // Do not return true when the condition is not set or is set to Unknown because + // it means a transient state and can not be considered as unhealthy. + // preflightCheckCondition() can cover these two cases and skip the scaling up/down. + if conditions.IsFalse(machine, condition) { + return true + } + } + return false + } +} + +// IsReady returns a filter to find all machines with the ReadyCondition equals to True. +func IsReady() Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return conditions.IsTrue(machine, clusterv1.ReadyCondition) + } +} + +// ShouldRolloutAfter returns a filter to find all machines where +// CreationTimestamp < rolloutAfter < reconciliationTIme. +func ShouldRolloutAfter(reconciliationTime, rolloutAfter *metav1.Time) Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + if reconciliationTime == nil || rolloutAfter == nil { + return false + } + return machine.CreationTimestamp.Before(rolloutAfter) && rolloutAfter.Before(reconciliationTime) + } +} + +// ShouldRolloutBefore returns a filter to find all machine whose +// certificates will expire within the specified days. 
+func ShouldRolloutBefore(reconciliationTime *metav1.Time, rolloutBefore *controlplanev1.RolloutBefore) Func { + return func(machine *clusterv1.Machine) bool { + if rolloutBefore == nil || rolloutBefore.CertificatesExpiryDays == nil { + return false + } + if machine == nil || machine.Status.CertificatesExpiryDate == nil { + return false + } + certsExpiryTime := machine.Status.CertificatesExpiryDate.Time + return reconciliationTime.Add(time.Duration(*rolloutBefore.CertificatesExpiryDays) * 24 * time.Hour).After(certsExpiryTime) + } +} + +// HasAnnotationKey returns a filter to find all machines that have the +// specified Annotation key present. +func HasAnnotationKey(key string) Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil || machine.Annotations == nil { + return false + } + if _, ok := machine.Annotations[key]; ok { + return true + } + return false + } +} + +// ControlPlaneSelectorForCluster returns the label selector necessary to get control plane machines for a given cluster. +func ControlPlaneSelectorForCluster(clusterName string) labels.Selector { + must := func(r *labels.Requirement, err error) labels.Requirement { + if err != nil { + panic(err) + } + return *r + } + return labels.NewSelector().Add( + must(labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Equals, []string{clusterName})), + must(labels.NewRequirement(clusterv1.MachineControlPlaneLabel, selection.Exists, []string{})), + ) +} + +// MatchesKubernetesVersion returns a filter to find all machines that match a given Kubernetes version. +func MatchesKubernetesVersion(kubernetesVersion string) Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + if machine.Spec.Version == nil { + return false + } + return *machine.Spec.Version == kubernetesVersion + } +} + +// WithVersion returns a filter to find machine that have a non empty and valid version. +func WithVersion() Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + if machine.Spec.Version == nil { + return false + } + if _, err := semver.ParseTolerant(*machine.Spec.Version); err != nil { + return false + } + return true + } +} + +// HasNode returns a filter to find all machines that have a corresponding Kubernetes node. +func HasNode() Func { + return func(machine *clusterv1.Machine) bool { + if machine == nil { + return false + } + return machine.Status.NodeRef != nil + } +} diff --git a/util/deprecated/v1beta1/collections/machine_filters_test.go b/util/deprecated/v1beta1/collections/machine_filters_test.go new file mode 100644 index 000000000000..951dff53e37e --- /dev/null +++ b/util/deprecated/v1beta1/collections/machine_filters_test.go @@ -0,0 +1,562 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections_test + +import ( + "testing" + "time" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/collections" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" +) + +func falseFilter(_ *clusterv1.Machine) bool { + return false +} + +func trueFilter(_ *clusterv1.Machine) bool { + return true +} + +func TestNot(t *testing.T) { + t.Run("returns false given a machine filter that returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.Not(trueFilter)(m)).To(BeFalse()) + }) + t.Run("returns true given a machine filter that returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.Not(falseFilter)(m)).To(BeTrue()) + }) +} + +func TestAnd(t *testing.T) { + t.Run("returns true if both given machine filters return true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.And(trueFilter, trueFilter)(m)).To(BeTrue()) + }) + t.Run("returns false if either given machine filter returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.And(trueFilter, falseFilter)(m)).To(BeFalse()) + }) +} + +func TestOr(t *testing.T) { + t.Run("returns true if either given machine filters return true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.Or(trueFilter, falseFilter)(m)).To(BeTrue()) + }) + t.Run("returns false if both given machine filter returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.Or(falseFilter, falseFilter)(m)).To(BeFalse()) + }) +} + +func TestUnhealthyFilters(t *testing.T) { + t.Run("healthy machine (without HealthCheckSucceeded condition) should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.IsUnhealthy(m)).To(BeFalse()) + g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) + }) + t.Run("healthy machine (with HealthCheckSucceeded condition == True) should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) + g.Expect(collections.IsUnhealthy(m)).To(BeFalse()) + g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) + }) + t.Run("unhealthy machine NOT eligible for KCP remediation (with withHealthCheckSucceeded condition == False but without OwnerRemediated) should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) + g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeFalse()) + }) + t.Run("unhealthy machine eligible for KCP (with HealthCheckSucceeded condition == False and with OwnerRemediated) should return true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.MachineHasFailureReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, 
clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") + g.Expect(collections.IsUnhealthy(m)).To(BeTrue()) + g.Expect(collections.IsUnhealthyAndOwnerRemediated(m)).To(BeTrue()) + }) +} + +func TestHasDeletionTimestamp(t *testing.T) { + t.Run("machine with deletion timestamp returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + now := metav1.Now() + m.SetDeletionTimestamp(&now) + g.Expect(collections.HasDeletionTimestamp(m)).To(BeTrue()) + }) + t.Run("machine with nil deletion timestamp returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.HasDeletionTimestamp(m)).To(BeFalse()) + }) + t.Run("machine with zero deletion timestamp returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + zero := metav1.NewTime(time.Time{}) + m.SetDeletionTimestamp(&zero) + g.Expect(collections.HasDeletionTimestamp(m)).To(BeFalse()) + }) +} + +func TestShouldRolloutAfter(t *testing.T) { + reconciliationTime := metav1.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + t.Run("if the machine is nil it returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.ShouldRolloutAfter(&reconciliationTime, &reconciliationTime)(nil)).To(BeFalse()) + }) + t.Run("if the reconciliationTime is nil it returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.ShouldRolloutAfter(nil, &reconciliationTime)(m)).To(BeFalse()) + }) + t.Run("if the rolloutAfter is nil it returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.ShouldRolloutAfter(&reconciliationTime, nil)(m)).To(BeFalse()) + }) + t.Run("if rolloutAfter is after the reconciliation time, return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + rolloutAfter := metav1.NewTime(reconciliationTime.Add(+1 * time.Hour)) + g.Expect(collections.ShouldRolloutAfter(&reconciliationTime, &rolloutAfter)(m)).To(BeFalse()) + }) + t.Run("if rolloutAfter is before the reconciliation time and the machine was created before rolloutAfter, return true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + m.SetCreationTimestamp(metav1.NewTime(reconciliationTime.Add(-2 * time.Hour))) + rolloutAfter := metav1.NewTime(reconciliationTime.Add(-1 * time.Hour)) + g.Expect(collections.ShouldRolloutAfter(&reconciliationTime, &rolloutAfter)(m)).To(BeTrue()) + }) + t.Run("if rolloutAfter is before the reconciliation time and the machine was created after rolloutAfter, return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + m.SetCreationTimestamp(metav1.NewTime(reconciliationTime.Add(+1 * time.Hour))) + rolloutAfter := metav1.NewTime(reconciliationTime.Add(-1 * time.Hour)) + g.Expect(collections.ShouldRolloutAfter(&reconciliationTime, &rolloutAfter)(m)).To(BeFalse()) + }) +} + +func TestShouldRolloutBeforeCertificatesExpire(t *testing.T) { + reconciliationTime := &metav1.Time{Time: time.Now()} + t.Run("if rolloutBefore is nil it should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, nil)(m)).To(BeFalse()) + }) + t.Run("if rolloutBefore.certificatesExpiryDays is nil it should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, &controlplanev1.RolloutBefore{})(m)).To(BeFalse()) + }) + 
t.Run("if machine is nil it should return false", func(t *testing.T) { + g := NewWithT(t) + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(nil)).To(BeFalse()) + }) + t.Run("if the machine certificate expiry information is not available it should return false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeFalse()) + }) + t.Run("if the machine certificates are not going to expire within the expiry time it should return false", func(t *testing.T) { + g := NewWithT(t) + certificateExpiryTime := reconciliationTime.Add(60 * 24 * time.Hour) // certificates will expire in 60 days from 'now'. + m := &clusterv1.Machine{ + Status: clusterv1.MachineStatus{ + CertificatesExpiryDate: &metav1.Time{Time: certificateExpiryTime}, + }, + } + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeFalse()) + }) + t.Run("if machine certificates will expire within the expiry time then it should return true", func(t *testing.T) { + g := NewWithT(t) + certificateExpiryTime := reconciliationTime.Add(5 * 24 * time.Hour) // certificates will expire in 5 days from 'now'. + m := &clusterv1.Machine{ + Status: clusterv1.MachineStatus{ + CertificatesExpiryDate: &metav1.Time{Time: certificateExpiryTime}, + }, + } + rb := &controlplanev1.RolloutBefore{CertificatesExpiryDays: ptr.To[int32](10)} + g.Expect(collections.ShouldRolloutBefore(reconciliationTime, rb)(m)).To(BeTrue()) + }) +} + +func TestHashAnnotationKey(t *testing.T) { + t.Run("machine with specified annotation returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + m.SetAnnotations(map[string]string{"test": ""}) + g.Expect(collections.HasAnnotationKey("test")(m)).To(BeTrue()) + }) + t.Run("machine with specified annotation with non-empty value returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + m.SetAnnotations(map[string]string{"test": "blue"}) + g.Expect(collections.HasAnnotationKey("test")(m)).To(BeTrue()) + }) + t.Run("machine without specified annotation returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.HasAnnotationKey("foo")(m)).To(BeFalse()) + }) +} + +func TestInFailureDomain(t *testing.T) { + t.Run("nil machine returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.InFailureDomains(ptr.To("test"))(nil)).To(BeFalse()) + }) + t.Run("machine with given failure domain returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} + g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeTrue()) + }) + t.Run("machine with a different failure domain returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("notTest")}} + g.Expect(collections.InFailureDomains( + ptr.To("test"), + ptr.To("test2"), + ptr.To("test3"), + nil, + ptr.To("foo"))(m)).To(BeFalse()) + }) + t.Run("machine without failure domain returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.InFailureDomains(ptr.To("test"))(m)).To(BeFalse()) + }) + t.Run("machine without failure 
domain returns true, when nil used for failure domain", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.InFailureDomains(nil)(m)).To(BeTrue()) + }) + t.Run("machine with failure domain returns true, when one of multiple failure domains match", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{Spec: clusterv1.MachineSpec{FailureDomain: ptr.To("test")}} + g.Expect(collections.InFailureDomains(ptr.To("foo"), ptr.To("test"))(m)).To(BeTrue()) + }) +} + +func TestActiveMachinesInCluster(t *testing.T) { + t.Run("machine with deletion timestamp returns false", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + now := metav1.Now() + m.SetDeletionTimestamp(&now) + g.Expect(collections.ActiveMachines(m)).To(BeFalse()) + }) + t.Run("machine with nil deletion timestamp returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + g.Expect(collections.ActiveMachines(m)).To(BeTrue()) + }) + t.Run("machine with zero deletion timestamp returns true", func(t *testing.T) { + g := NewWithT(t) + m := &clusterv1.Machine{} + zero := metav1.NewTime(time.Time{}) + m.SetDeletionTimestamp(&zero) + g.Expect(collections.ActiveMachines(m)).To(BeTrue()) + }) +} + +func TestMatchesKubernetesVersion(t *testing.T) { + t.Run("nil machine returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.MatchesKubernetesVersion("some_ver")(nil)).To(BeFalse()) + }) + + t.Run("nil machine.Spec.Version returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: nil, + }, + } + g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeFalse()) + }) + + t.Run("machine.Spec.Version returns true if matches", func(t *testing.T) { + g := NewWithT(t) + kversion := "some_ver" + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: &kversion, + }, + } + g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeTrue()) + }) + + t.Run("machine.Spec.Version returns false if does not match", func(t *testing.T) { + g := NewWithT(t) + kversion := "some_ver_2" + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: &kversion, + }, + } + g.Expect(collections.MatchesKubernetesVersion("some_ver")(machine)).To(BeFalse()) + }) +} + +func TestWithVersion(t *testing.T) { + t.Run("nil machine returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.WithVersion()(nil)).To(BeFalse()) + }) + + t.Run("nil machine.Spec.Version returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: nil, + }, + } + g.Expect(collections.WithVersion()(machine)).To(BeFalse()) + }) + + t.Run("empty machine.Spec.Version returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To(""), + }, + } + g.Expect(collections.WithVersion()(machine)).To(BeFalse()) + }) + + t.Run("invalid machine.Spec.Version returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("1..20"), + }, + } + g.Expect(collections.WithVersion()(machine)).To(BeFalse()) + }) + + t.Run("valid machine.Spec.Version returns true", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + Version: ptr.To("1.20"), + }, + } + 
g.Expect(collections.WithVersion()(machine)).To(BeTrue()) + }) +} + +func TestGetFilteredMachinesForCluster(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "my-namespace", + Name: "my-cluster", + }, + } + + c := fake.NewClientBuilder(). + WithObjects(cluster, + testControlPlaneMachine("first-machine"), + testMachine("second-machine"), + testMachine("third-machine")). + Build() + + machines, err := collections.GetFilteredMachinesForCluster(ctx, c, cluster) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(3)) + + // Test that the ControlPlaneMachines filter works + machines, err = collections.GetFilteredMachinesForCluster(ctx, c, cluster, collections.ControlPlaneMachines("my-cluster")) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(1)) + + // Test that the filters use AND logic instead of OR logic + nameFilter := func(machine *clusterv1.Machine) bool { + return machine.Name == "first-machine" + } + machines, err = collections.GetFilteredMachinesForCluster(ctx, c, cluster, collections.ControlPlaneMachines("my-cluster"), nameFilter) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machines).To(HaveLen(1)) +} + +func TestHasNode(t *testing.T) { + t.Run("nil machine returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.HasNode()(nil)).To(BeFalse()) + }) + + t.Run("machine without node returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + g.Expect(collections.HasNode()(machine)).To(BeFalse()) + }) + + t.Run("machine with node returns true", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{ + Status: clusterv1.MachineStatus{NodeRef: &corev1.ObjectReference{Name: "foo"}}, + } + g.Expect(collections.HasNode()(machine)).To(BeTrue()) + }) +} + +func TestHasUnhealthyControlPlaneComponentCondition(t *testing.T) { + t.Run("nil machine returns false", func(t *testing.T) { + g := NewWithT(t) + g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(nil)).To(BeFalse()) + }) + + t.Run("machine without node returns false", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeFalse()) + }) + + t.Run("machine with all healthy controlPlane component conditions returns false when the Etcd is not managed", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + machine.Status.NodeRef = &corev1.ObjectReference{ + Name: "node1", + } + machine.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + } + g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeFalse()) + }) + + t.Run("machine with unhealthy 'APIServerPodHealthy' condition returns true when the Etcd is not managed", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + machine.Status.NodeRef = &corev1.ObjectReference{ + Name: "node1", + } + machine.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, "", +
clusterv1.ConditionSeverityWarning, ""), + } + g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeTrue()) + }) + + t.Run("machine with unhealthy etcd component conditions returns false when Etcd is not managed", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + machine.Status.NodeRef = &corev1.ObjectReference{ + Name: "node1", + } + machine.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", + clusterv1.ConditionSeverityWarning, ""), + *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", + clusterv1.ConditionSeverityWarning, ""), + } + g.Expect(collections.HasUnhealthyControlPlaneComponents(false)(machine)).To(BeFalse()) + }) + + t.Run("machine with unhealthy etcd conditions returns true when Etcd is managed", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + machine.Status.NodeRef = &corev1.ObjectReference{ + Name: "node1", + } + machine.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, "", + clusterv1.ConditionSeverityWarning, ""), + *conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, "", + clusterv1.ConditionSeverityWarning, ""), + } + g.Expect(collections.HasUnhealthyControlPlaneComponents(true)(machine)).To(BeTrue()) + }) + + t.Run("machine with all healthy controlPlane and the Etcd component conditions returns false when Etcd is managed", func(t *testing.T) { + g := NewWithT(t) + machine := &clusterv1.Machine{} + machine.Status.NodeRef = &corev1.ObjectReference{ + Name: "node1", + } + machine.Status.Conditions = clusterv1.Conditions{ + *conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition), + *conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition), + } + g.Expect(collections.HasUnhealthyControlPlaneComponents(true)(machine)).To(BeFalse()) + }) +} + +func testControlPlaneMachine(name string) *clusterv1.Machine { + owned := true + ownedRef := []metav1.OwnerReference{ + { + Kind: "KubeadmControlPlane", + Name: "my-control-plane", + Controller: &owned, + }, + } + controlPlaneMachine := testMachine(name) + controlPlaneMachine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] = "" + controlPlaneMachine.OwnerReferences = ownedRef + + return controlPlaneMachine +} + +func testMachine(name string) *clusterv1.Machine { + return &clusterv1.Machine{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "my-namespace", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "my-cluster", + }, + }, + } +} diff --git a/util/deprecated/v1beta1/collections/suite_test.go 
b/util/deprecated/v1beta1/collections/suite_test.go new file mode 100644 index 000000000000..ce34a4303646 --- /dev/null +++ b/util/deprecated/v1beta1/collections/suite_test.go @@ -0,0 +1,38 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections_test + +import ( + "os" + "testing" + + ctrl "sigs.k8s.io/controller-runtime" + + "sigs.k8s.io/cluster-api/internal/test/envtest" +) + +var ( + env *envtest.Environment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + os.Exit(envtest.Run(ctx, envtest.RunInput{ + M: m, + SetupEnv: func(e *envtest.Environment) { env = e }, + })) +} diff --git a/util/deprecated/v1beta1/predicates/cluster_predicates.go b/util/deprecated/v1beta1/predicates/cluster_predicates.go new file mode 100644 index 000000000000..0462315450a6 --- /dev/null +++ b/util/deprecated/v1beta1/predicates/cluster_predicates.go @@ -0,0 +1,385 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates + +import ( + "fmt" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" +) + +// ClusterCreateInfraReady returns a predicate that returns true for a create event when a cluster has Status.InfrastructureReady set to true; +// it returns false if the resource provided is not a Cluster. +// +// Deprecated: This predicate is deprecated and will be removed in a future version. On creation of a cluster the status will always be empty. +// Because of that the predicate would never return true for InfrastructureReady.
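+// Example use (illustrative sketch only, mirroring the Example use blocks further down in this file; "clusterToMachines" is a placeholder mapper, and the wiring is shown purely to document the historical pattern for this deprecated predicate):
+//
+//	err := controller.Watch(
+//	    source.Kind(cache, &clusterv1.Cluster{}),
+//	    handler.EnqueueRequestsFromMapFunc(clusterToMachines),
+//	    predicates.ClusterCreateInfraReady(mgr.GetScheme(), r.Log),
+//	)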
+func ClusterCreateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + log := logger.WithValues("predicate", "ClusterCreateInfraReady", "eventType", "create") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + + c, ok := e.Object.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) + return false + } + + // Only need to trigger a reconcile if the Cluster.Status.InfrastructureReady is true + if c.Status.InfrastructureReady { + log.V(6).Info("Cluster infrastructure is ready, allowing further processing") + return true + } + + log.V(4).Info("Cluster infrastructure is not ready, blocking further processing") + return false + }, + UpdateFunc: func(event.UpdateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterCreateNotPaused returns a predicate that returns true for a create event when a cluster has Spec.Paused set to false; +// it returns false if the resource provided is not a Cluster. +func ClusterCreateNotPaused(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + log := logger.WithValues("predicate", "ClusterCreateNotPaused", "eventType", "create") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + + c, ok := e.Object.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.Object)) + return false + } + + // Only need to trigger a reconcile if the Cluster.Spec.Paused is false + if !c.Spec.Paused { + log.V(6).Info("Cluster is not paused, allowing further processing") + return true + } + + log.V(4).Info("Cluster is paused, blocking further processing") + return false + }, + UpdateFunc: func(event.UpdateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true; +// it returns false if the resource provided is not a Cluster.
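+// Example use (illustrative sketch, following the Example use blocks below; "clusterToMachines" is a placeholder mapper):
+//
+//	err := controller.Watch(
+//	    source.Kind(cache, &clusterv1.Cluster{}),
+//	    handler.EnqueueRequestsFromMapFunc(clusterToMachines),
+//	    predicates.ClusterUpdateInfraReady(mgr.GetScheme(), r.Log),
+//	)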
+func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "ClusterUpdateInfraReady", "eventType", "update") + if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) + } + + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) + return false + } + + newCluster := e.ObjectNew.(*clusterv1.Cluster) + + if !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady { + log.V(6).Info("Cluster infrastructure became ready, allowing further processing") + return true + } + + log.V(4).Info("Cluster infrastructure did not become ready, blocking further processing") + return false + }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterUpdateUnpaused returns a predicate that returns true for an update event when a cluster has Spec.Paused changed from true to false; +// it returns false if the resource provided is not a Cluster. +func ClusterUpdateUnpaused(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "ClusterUpdateUnpaused", "eventType", "update") + if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) + } + + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) + return false + } + + newCluster := e.ObjectNew.(*clusterv1.Cluster) + + if oldCluster.Spec.Paused && !newCluster.Spec.Paused { + log.V(4).Info("Cluster was unpaused, allowing further processing") + return true + } + + // This predicate always works in "or" with Paused predicates + // so the logs are adjusted to not provide false negatives/verbosity at V<=5. + log.V(6).Info("Cluster was not unpaused, blocking further processing") + return false + }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterUnpaused returns a Predicate that returns true on Cluster creation events where Cluster.Spec.Paused is false +// and Update events when Cluster.Spec.Paused transitions to false. +// This implements a common requirement for many cluster-api and provider controllers (such as Cluster Infrastructure +// controllers) to resume reconciliation when the Cluster is unpaused.
+// Example use: +// +// err := controller.Watch( +// source.Kind(cache, &clusterv1.Cluster{}), +// handler.EnqueueRequestsFromMapFunc(clusterToMachines), +// predicates.ClusterUnpaused(mgr.GetScheme(), r.Log), +// ) +func ClusterUnpaused(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + log := logger.WithValues("predicate", "ClusterUnpaused") + + // Use any to ensure we process either create or update events we care about + return Any(scheme, log, ClusterCreateNotPaused(scheme, log), ClusterUpdateUnpaused(scheme, log)) +} + +// ClusterPausedTransitions returns a predicate that returns true for an update event when a cluster has Spec.Paused changed. +func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "ClusterPausedTransitions", "eventType", "update") + if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) + } + + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) + return false + } + + newCluster := e.ObjectNew.(*clusterv1.Cluster) + + if oldCluster.Spec.Paused && !newCluster.Spec.Paused { + log.V(6).Info("Cluster unpausing, allowing further processing") + return true + } + + if !oldCluster.Spec.Paused && newCluster.Spec.Paused { + log.V(6).Info("Cluster pausing, allowing further processing") + return true + } + + // This predicate always works in "or" with Paused predicates + // so the logs are adjusted to not provide false negatives/verbosity at V<=5. + log.V(6).Info("Cluster paused state was not changed, blocking further processing") + return false + }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterControlPlaneInitialized returns a Predicate that returns true on Update events +// when ControlPlaneInitializedCondition on a Cluster changes to true.
+// Example use: +// +// err := controller.Watch( +// source.Kind(cache, &clusterv1.Cluster{}), +// handler.EnqueueRequestsFromMapFunc(clusterToMachines), +// predicates.ClusterControlPlaneInitialized(mgr.GetScheme(), r.Log), +// ) +func ClusterControlPlaneInitialized(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicate", "ClusterControlPlaneInitialized", "eventType", "update") + if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) + } + + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + if !ok { + log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) + return false + } + + newCluster := e.ObjectNew.(*clusterv1.Cluster) + + if !conditions.IsTrue(oldCluster, clusterv1.ControlPlaneInitializedCondition) && + conditions.IsTrue(newCluster, clusterv1.ControlPlaneInitializedCondition) { + log.V(6).Info("Cluster ControlPlaneInitialized was set, allowing further processing") + return true + } + + log.V(6).Info("Cluster ControlPlaneInitialized hasn't changed, blocking further processing") + return false + }, + CreateFunc: func(event.CreateEvent) bool { return false }, + DeleteFunc: func(event.DeleteEvent) bool { return false }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// ClusterPausedTransitionsOrInfrastructureReady returns a Predicate that returns true on Cluster Update events where +// either Cluster.Spec.Paused transitions or Cluster.Status.InfrastructureReady transitions to true. +// This implements a common requirement for some cluster-api and provider controllers (such as Machine Infrastructure +// controllers) to resume reconciliation when the Cluster gets paused or unpaused and when the infrastructure becomes ready. +// Example use: +// +// err := controller.Watch( +// source.Kind(cache, &clusterv1.Cluster{}), +// handler.EnqueueRequestsFromMapFunc(clusterToMachines), +// predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), r.Log), +// ) +func ClusterPausedTransitionsOrInfrastructureReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + log := logger.WithValues("predicate", "ClusterPausedTransitionsOrInfrastructureReady") + + return Any(scheme, log, ClusterPausedTransitions(scheme, log), ClusterUpdateInfraReady(scheme, log)) +} + +// ClusterUnpausedAndInfrastructureReady returns a Predicate that returns true on Cluster creation events where +// both Cluster.Spec.Paused is false and Cluster.Status.InfrastructureReady is true and Update events when +// either Cluster.Spec.Paused transitions to false or Cluster.Status.InfrastructureReady transitions to true. +// This implements a common requirement for some cluster-api and provider controllers (such as Machine Infrastructure +// controllers) to resume reconciliation when the Cluster is unpaused and when the infrastructure becomes ready. +// Example use: +// +// err := controller.Watch( +// source.Kind(cache, &clusterv1.Cluster{}), +// handler.EnqueueRequestsFromMapFunc(clusterToMachines), +// predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetScheme(), r.Log), +// ) +// +// Deprecated: This predicate is deprecated and will be removed in a future version, +// use ClusterPausedTransitionsOrInfrastructureReady instead.
+func ClusterUnpausedAndInfrastructureReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + log := logger.WithValues("predicate", "ClusterUnpausedAndInfrastructureReady") + + // Only continue processing create events if both not paused and infrastructure is ready + createPredicates := All(scheme, log, ClusterCreateNotPaused(scheme, log), ClusterCreateInfraReady(scheme, log)) + + // Process update events if either Cluster is unpaused or infrastructure becomes ready + updatePredicates := Any(scheme, log, ClusterUpdateUnpaused(scheme, log), ClusterUpdateInfraReady(scheme, log)) + + // Use any to ensure we process either create or update events we care about + return Any(scheme, log, createPredicates, updatePredicates) +} + +// ClusterHasTopology returns a Predicate that returns true when cluster.Spec.Topology +// is NOT nil and false otherwise. +func ClusterHasTopology(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfTopologyManaged(scheme, logger.WithValues("predicate", "ClusterHasTopology", "eventType", "update"), e.ObjectNew) + }, + CreateFunc: func(e event.CreateEvent) bool { + return processIfTopologyManaged(scheme, logger.WithValues("predicate", "ClusterHasTopology", "eventType", "create"), e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfTopologyManaged(scheme, logger.WithValues("predicate", "ClusterHasTopology", "eventType", "delete"), e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfTopologyManaged(scheme, logger.WithValues("predicate", "ClusterHasTopology", "eventType", "generic"), e.Object) + }, + } +} + +func processIfTopologyManaged(scheme *runtime.Scheme, logger logr.Logger, object client.Object) bool { + if gvk, err := apiutil.GVKForObject(object, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(object)) + } + cluster, ok := object.(*clusterv1.Cluster) + if !ok { + logger.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", object)) + return false + } + + if cluster.Spec.Topology != nil { + logger.V(6).Info("Cluster has topology, allowing further processing") + return true + } + + logger.V(6).Info("Cluster does not have topology, blocking further processing") + return false +} + +// ClusterTopologyVersionChanged returns a Predicate that returns true when cluster.Spec.Topology.Version +// was changed. 
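+// Example use (illustrative sketch, following the Example use blocks above; "clusterToMachineDeployments" is a placeholder mapper):
+//
+//	err := controller.Watch(
+//	    source.Kind(cache, &clusterv1.Cluster{}),
+//	    handler.EnqueueRequestsFromMapFunc(clusterToMachineDeployments),
+//	    predicates.ClusterTopologyVersionChanged(mgr.GetScheme(), r.Log),
+//	)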
+func ClusterTopologyVersionChanged(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + logger := logger.WithValues("predicate", "ClusterTopologyVersionChanged", "eventType", "update") + if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) + } + + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + if !ok { + logger.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) + return false + } + + newCluster := e.ObjectNew.(*clusterv1.Cluster) + + if oldCluster.Spec.Topology == nil || newCluster.Spec.Topology == nil { + logger.V(6).Info("Cluster does not have topology, blocking further processing") + return false + } + + if oldCluster.Spec.Topology.Version != newCluster.Spec.Topology.Version { + logger.V(6).Info("Cluster topology version has changed, allowing further processing") + return true + } + + logger.V(6).Info("Cluster topology version has not changed, blocking further processing") + return false + }, + CreateFunc: func(_ event.CreateEvent) bool { + return false + }, + DeleteFunc: func(_ event.DeleteEvent) bool { + return false + }, + GenericFunc: func(_ event.GenericEvent) bool { + return false + }, + } +} diff --git a/util/deprecated/v1beta1/predicates/cluster_predicates_test.go b/util/deprecated/v1beta1/predicates/cluster_predicates_test.go new file mode 100644 index 000000000000..74658f3d2ae7 --- /dev/null +++ b/util/deprecated/v1beta1/predicates/cluster_predicates_test.go @@ -0,0 +1,100 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates_test + +import ( + "testing" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/log" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/predicates" +) + +func TestClusterControlplaneInitializedPredicate(t *testing.T) { + g := NewWithT(t) + predicate := predicates.ClusterControlPlaneInitialized(runtime.NewScheme(), logr.New(log.NullLogSink{})) + + markedFalse := clusterv1.Cluster{} + conditions.MarkFalse(&markedFalse, clusterv1.ControlPlaneInitializedCondition, clusterv1.MissingNodeRefReason, clusterv1.ConditionSeverityWarning, "") + + markedTrue := clusterv1.Cluster{} + conditions.MarkTrue(&markedTrue, clusterv1.ControlPlaneInitializedCondition) + + notMarked := clusterv1.Cluster{} + + testcases := []struct { + name string + oldCluster clusterv1.Cluster + newCluster clusterv1.Cluster + expected bool + }{ + { + name: "no conditions -> no conditions: should return false", + oldCluster: notMarked, + newCluster: notMarked, + expected: false, + }, + { + name: "no conditions -> true: should return true", + oldCluster: notMarked, + newCluster: markedTrue, + expected: true, + }, + { + name: "false -> true: should return true", + oldCluster: markedFalse, + newCluster: markedTrue, + expected: true, + }, + { + name: "no conditions -> false: should return false", + oldCluster: notMarked, + newCluster: markedFalse, + expected: false, + }, + { + name: "true -> false: should return false", + oldCluster: markedTrue, + newCluster: markedFalse, + expected: false, + }, + { + name: "true -> true: should return false", + oldCluster: markedTrue, + newCluster: markedTrue, + expected: false, + }, + } + + for i := range testcases { + tc := testcases[i] + t.Run(tc.name, func(*testing.T) { + ev := event.UpdateEvent{ + ObjectOld: &tc.oldCluster, + ObjectNew: &tc.newCluster, + } + + g.Expect(predicate.Update(ev)).To(Equal(tc.expected)) + }) + } +} diff --git a/util/deprecated/v1beta1/predicates/doc.go b/util/deprecated/v1beta1/predicates/doc.go new file mode 100644 index 000000000000..2b6388571c22 --- /dev/null +++ b/util/deprecated/v1beta1/predicates/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package predicates implements predicate utilities. +// +// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. +package predicates diff --git a/util/deprecated/v1beta1/predicates/generic_predicates.go b/util/deprecated/v1beta1/predicates/generic_predicates.go new file mode 100644 index 000000000000..c6961b0c7192 --- /dev/null +++ b/util/deprecated/v1beta1/predicates/generic_predicates.go @@ -0,0 +1,340 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicates + +import ( + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/annotations" + "sigs.k8s.io/cluster-api/util/labels" +) + +// All returns a predicate that returns true only if all given predicates return true. +func All(scheme *runtime.Scheme, logger logr.Logger, predicates ...predicate.Funcs) predicate.Funcs { + return TypedAll(scheme, logger, predicates...) +} + +// TypedAll returns a predicate that returns true only if all given predicates return true. +func TypedAll[T client.Object](scheme *runtime.Scheme, logger logr.Logger, predicates ...predicate.TypedFuncs[T]) predicate.TypedFuncs[T] { + return predicate.TypedFuncs[T]{ + UpdateFunc: func(e event.TypedUpdateEvent[T]) bool { + log := logger.WithValues("predicateAggregation", "All") + if gvk, err := apiutil.GVKForObject(e.ObjectNew, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectNew)) + } + for _, p := range predicates { + if !p.UpdateFunc(e) { + log.V(6).Info("One of the provided predicates returned false, blocking further processing") + return false + } + } + log.V(6).Info("All provided predicates returned true, allowing further processing") + return true + }, + CreateFunc: func(e event.TypedCreateEvent[T]) bool { + log := logger.WithValues("predicateAggregation", "All") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if !p.CreateFunc(e) { + log.V(6).Info("One of the provided predicates returned false, blocking further processing") + return false + } + } + log.V(6).Info("All provided predicates returned true, allowing further processing") + return true + }, + DeleteFunc: func(e event.TypedDeleteEvent[T]) bool { + log := logger.WithValues("predicateAggregation", "All") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if !p.DeleteFunc(e) { + log.V(6).Info("One of the provided predicates returned false, blocking further processing") + return false + } + } + log.V(6).Info("All provided predicates returned true, allowing further processing") + return true + }, + GenericFunc: func(e event.TypedGenericEvent[T]) bool { + log := logger.WithValues("predicateAggregation", "All") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if !p.GenericFunc(e) { + log.V(6).Info("One of the provided predicates returned false, blocking further processing") + return false + } + } + log.V(6).Info("All provided predicates returned true, allowing further processing") + return 
true + }, + } +} + +// Any returns a predicate that returns true only if any given predicate returns true. +func Any(scheme *runtime.Scheme, logger logr.Logger, predicates ...predicate.Funcs) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + log := logger.WithValues("predicateAggregation", "Any") + if gvk, err := apiutil.GVKForObject(e.ObjectNew, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectNew)) + } + for _, p := range predicates { + if p.UpdateFunc(e) { + log.V(6).Info("One of the provided predicates returned true, allowing further processing") + return true + } + } + log.V(6).Info("All of the provided predicates returned false, blocking further processing") + return false + }, + CreateFunc: func(e event.CreateEvent) bool { + log := logger.WithValues("predicateAggregation", "Any") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if p.CreateFunc(e) { + log.V(6).Info("One of the provided predicates returned true, allowing further processing") + return true + } + } + log.V(6).Info("All of the provided predicates returned false, blocking further processing") + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + log := logger.WithValues("predicateAggregation", "Any") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if p.DeleteFunc(e) { + log.V(6).Info("One of the provided predicates returned true, allowing further processing") + return true + } + } + log.V(6).Info("All of the provided predicates returned false, blocking further processing") + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + log := logger.WithValues("predicateAggregation", "Any") + if gvk, err := apiutil.GVKForObject(e.Object, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.Object)) + } + for _, p := range predicates { + if p.GenericFunc(e) { + log.V(6).Info("One of the provided predicates returned true, allowing further processing") + return true + } + } + log.V(6).Info("All of the provided predicates returned false, blocking further processing") + return false + }, + } +} + +// ResourceHasFilterLabel returns a predicate that returns true only if the provided resource contains +// a label with the WatchLabel key and the configured label value exactly. 
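+// Example use (illustrative sketch, following the ResourceNotPaused example below; r.WatchFilterValue is assumed to hold the configured label value):
+//
+//	controller, err := ctrl.NewControllerManagedBy(mgr).
+//	    For(&v1.MyType{}).
+//	    WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), r.Log, r.WatchFilterValue)).
+//	    Build(r)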
+func ResourceHasFilterLabel(scheme *runtime.Scheme, logger logr.Logger, labelValue string) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfLabelMatch(scheme, logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "update"), e.ObjectNew, labelValue) + }, + CreateFunc: func(e event.CreateEvent) bool { + return processIfLabelMatch(scheme, logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "create"), e.Object, labelValue) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfLabelMatch(scheme, logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "delete"), e.Object, labelValue) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfLabelMatch(scheme, logger.WithValues("predicate", "ResourceHasFilterLabel", "eventType", "generic"), e.Object, labelValue) + }, + } +} + +// ResourceNotPaused returns a Predicate that returns true only if the provided resource does not contain the +// paused annotation. +// This implements a common requirement for all cluster-api and provider controllers to skip reconciliation when the paused +// annotation is present for a resource. +// Example use: +// +// func (r *MyReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { +// controller, err := ctrl.NewControllerManagedBy(mgr). +// For(&v1.MyType{}). +// WithOptions(options). +// WithEventFilter(predicates.ResourceNotPaused(mgr.GetScheme(), r.Log)). +// Build(r) +// return err +// } +func ResourceNotPaused(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfNotPaused(scheme, logger.WithValues("predicate", "ResourceNotPaused", "eventType", "update"), e.ObjectNew) + }, + CreateFunc: func(e event.CreateEvent) bool { + return processIfNotPaused(scheme, logger.WithValues("predicate", "ResourceNotPaused", "eventType", "create"), e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfNotPaused(scheme, logger.WithValues("predicate", "ResourceNotPaused", "eventType", "delete"), e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfNotPaused(scheme, logger.WithValues("predicate", "ResourceNotPaused", "eventType", "generic"), e.Object) + }, + } +} + +// ResourceNotPausedAndHasFilterLabel returns a predicate that returns true only if the +// ResourceNotPaused and ResourceHasFilterLabel predicates return true. +func ResourceNotPausedAndHasFilterLabel(scheme *runtime.Scheme, logger logr.Logger, labelValue string) predicate.Funcs { + return All(scheme, logger, ResourceNotPaused(scheme, logger), ResourceHasFilterLabel(scheme, logger, labelValue)) +} + +func processIfNotPaused(scheme *runtime.Scheme, logger logr.Logger, obj client.Object) bool { + if gvk, err := apiutil.GVKForObject(obj, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(obj)) + } + if annotations.HasPaused(obj) { + logger.V(4).Info("Resource is paused, will not attempt to map resource") + return false + } + logger.V(6).Info("Resource is not paused, will attempt to map resource") + return true +} + +func processIfLabelMatch(scheme *runtime.Scheme, logger logr.Logger, obj client.Object, labelValue string) bool { + // Return early if no labelValue was set.
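+ // An empty labelValue (for example when no watch-filter value was configured) makes this filter a no-op: every resource is processed.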
+ if labelValue == "" { + return true + } + + if gvk, err := apiutil.GVKForObject(obj, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(obj)) + } + if labels.HasWatchLabel(obj, labelValue) { + logger.V(6).Info("Resource matches label, will attempt to map resource") + return true + } + logger.V(4).Info("Resource does not match label, will not attempt to map resource") + return false +} + +// ResourceIsNotExternallyManaged returns a predicate that returns true only if the resource does not contain +// the externally managed annotation. +// This implements a requirement for InfraCluster providers to be able to ignore externally managed +// cluster infrastructure. +func ResourceIsNotExternallyManaged(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfNotExternallyManaged(scheme, logger.WithValues("predicate", "ResourceIsNotExternallyManaged", "eventType", "update"), e.ObjectNew) + }, + CreateFunc: func(e event.CreateEvent) bool { + return processIfNotExternallyManaged(scheme, logger.WithValues("predicate", "ResourceIsNotExternallyManaged", "eventType", "create"), e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfNotExternallyManaged(scheme, logger.WithValues("predicate", "ResourceIsNotExternallyManaged", "eventType", "delete"), e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfNotExternallyManaged(scheme, logger.WithValues("predicate", "ResourceIsNotExternallyManaged", "eventType", "generic"), e.Object) + }, + } +} + +func processIfNotExternallyManaged(scheme *runtime.Scheme, logger logr.Logger, obj client.Object) bool { + if gvk, err := apiutil.GVKForObject(obj, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(obj)) + } + if annotations.IsExternallyManaged(obj) { + logger.V(4).Info("Resource is externally managed, will not attempt to map resource") + return false + } + logger.V(6).Info("Resource is managed, will attempt to map resource") + return true +} + +// ResourceIsTopologyOwned returns a predicate that returns true only if the resource has +// the `topology.cluster.x-k8s.io/owned` label. 
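+// Example use (illustrative sketch; r is assumed to be a reconciler with a Log field):
+//
+//	err := ctrl.NewControllerManagedBy(mgr).
+//	    For(&clusterv1.MachineSet{}).
+//	    WithEventFilter(predicates.ResourceIsTopologyOwned(mgr.GetScheme(), r.Log)).
+//	    Complete(r)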
+func ResourceIsTopologyOwned(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + return processIfTopologyOwned(scheme, logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "update"), e.ObjectNew) + }, + CreateFunc: func(e event.CreateEvent) bool { + return processIfTopologyOwned(scheme, logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "create"), e.Object) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return processIfTopologyOwned(scheme, logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "delete"), e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return processIfTopologyOwned(scheme, logger.WithValues("predicate", "ResourceIsTopologyOwned", "eventType", "generic"), e.Object) + }, + } +} + +func processIfTopologyOwned(scheme *runtime.Scheme, logger logr.Logger, obj client.Object) bool { + if gvk, err := apiutil.GVKForObject(obj, scheme); err == nil { + logger = logger.WithValues(gvk.Kind, klog.KObj(obj)) + } + if labels.IsTopologyOwned(obj) { + logger.V(6).Info("Resource is topology owned, will attempt to map resource") + return true + } + // We intentionally log this line only on level 6, because it will be very frequently + // logged for MachineDeployments and MachineSets not owned by a topology. + logger.V(6).Info("Resource is not topology owned, will not attempt to map resource") + return false +} + +// ResourceIsChanged returns a predicate that returns true only if the resource +// has changed. This predicate allows dropping resync events on additionally watched objects. +func ResourceIsChanged(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { + return TypedResourceIsChanged[client.Object](scheme, logger) +} + +// TypedResourceIsChanged returns a predicate that returns true only if the resource +// has changed. This predicate allows dropping resync events on additionally watched objects. +func TypedResourceIsChanged[T client.Object](scheme *runtime.Scheme, logger logr.Logger) predicate.TypedFuncs[T] { + log := logger.WithValues("predicate", "ResourceIsChanged") + return predicate.TypedFuncs[T]{ + UpdateFunc: func(e event.TypedUpdateEvent[T]) bool { + // Ensure we don't modify log from above. + log := log + if gvk, err := apiutil.GVKForObject(e.ObjectNew, scheme); err == nil { + log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectNew)) + } + if e.ObjectOld.GetResourceVersion() == e.ObjectNew.GetResourceVersion() { + log.WithValues("eventType", "update").V(6).Info("Resource is not changed, will not attempt to map resource") + return false + } + log.WithValues("eventType", "update").V(6).Info("Resource is changed, will attempt to map resource") + return true + }, + CreateFunc: func(event.TypedCreateEvent[T]) bool { return true }, + DeleteFunc: func(event.TypedDeleteEvent[T]) bool { return true }, + GenericFunc: func(event.TypedGenericEvent[T]) bool { return true }, + } +} diff --git a/util/deprecated/v1beta1/retry.go b/util/deprecated/v1beta1/retry.go new file mode 100644 index 000000000000..fe87875e07ba --- /dev/null +++ b/util/deprecated/v1beta1/retry.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + backoffSteps = 10 + backoffFactor = 1.25 + backoffDuration = 5 + backoffJitter = 1.0 +) + +// Retry retries a given function with exponential backoff. +func Retry(fn wait.ConditionFunc, initialBackoffSec int) error { + if initialBackoffSec <= 0 { + initialBackoffSec = backoffDuration + } + backoffConfig := wait.Backoff{ + Steps: backoffSteps, + Factor: backoffFactor, + Duration: time.Duration(initialBackoffSec) * time.Second, + Jitter: backoffJitter, + } + retryErr := wait.ExponentialBackoff(backoffConfig, fn) + if retryErr != nil { + return retryErr + } + return nil +} diff --git a/util/deprecated/v1beta1/suite_test.go b/util/deprecated/v1beta1/suite_test.go new file mode 100644 index 000000000000..ce098dff0a60 --- /dev/null +++ b/util/deprecated/v1beta1/suite_test.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) + +var ( + ctx = ctrl.SetupSignalHandler() +) + +func init() { + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) +} diff --git a/util/deprecated/v1beta1/util.go b/util/deprecated/v1beta1/util.go new file mode 100644 index 000000000000..5aad690cc1c7 --- /dev/null +++ b/util/deprecated/v1beta1/util.go @@ -0,0 +1,785 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package util implements utilities. +// +// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. Please see https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more details. 
+package util + +import ( + "context" + "encoding/json" + "fmt" + "math" + "math/rand" + "reflect" + "strings" + "time" + + "github.com/blang/semver/v4" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + k8sversion "k8s.io/apimachinery/pkg/version" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/contract" + "sigs.k8s.io/cluster-api/util/labels/format" +) + +const ( + // CharSet defines the alphanumeric set for random string generation. + CharSet = "0123456789abcdefghijklmnopqrstuvwxyz" +) + +var ( + rnd = rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec + + // ErrNoCluster is returned when the cluster + // label could not be found on the object passed in. + ErrNoCluster = fmt.Errorf("no %q label present", clusterv1.ClusterNameLabel) + + // ErrUnstructuredFieldNotFound determines that a field + // in an unstructured object could not be found. + ErrUnstructuredFieldNotFound = fmt.Errorf("field not found") +) + +// RandomString returns a random alphanumeric string. +func RandomString(n int) string { + result := make([]byte, n) + for i := range result { + result[i] = CharSet[rnd.Intn(len(CharSet))] + } + return string(result) +} + +// Ordinalize takes an int and returns the ordinalized version of it. +// E.g. 1 --> 1st, 103 --> 103rd. +func Ordinalize(n int) string { + m := map[int]string{ + 0: "th", + 1: "st", + 2: "nd", + 3: "rd", + 4: "th", + 5: "th", + 6: "th", + 7: "th", + 8: "th", + 9: "th", + } + + an := int(math.Abs(float64(n))) + if an < 10 { + return fmt.Sprintf("%d%s", n, m[an]) + } + return fmt.Sprintf("%d%s", n, m[an%10]) +} + +// IsExternalManagedControlPlane returns a bool indicating whether the control plane referenced +// in the passed Unstructured resource is an externally managed control plane such as AKS, EKS, GKE, etc. +func IsExternalManagedControlPlane(controlPlane *unstructured.Unstructured) bool { + managed, found, err := unstructured.NestedBool(controlPlane.Object, "status", "externalManagedControlPlane") + if err != nil || !found { + return false + } + return managed +} + +// GetMachineIfExists gets a machine from the API server if it exists. +func GetMachineIfExists(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) { + if c == nil { + // Being called before k8s is set up as part of control plane VM creation + return nil, nil + } + + // Machines are identified by name + machine := &clusterv1.Machine{} + err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, machine) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + return machine, nil +} + +// IsControlPlaneMachine checks whether a machine is a control plane node.
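+// Example (illustrative sketch; "machine" is a placeholder *clusterv1.Machine):
+//
+//	if util.IsControlPlaneMachine(machine) {
+//	    // control-plane-specific handling
+//	}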
+func IsControlPlaneMachine(machine *clusterv1.Machine) bool { + _, ok := machine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabel] + return ok +} + +// IsNodeReady returns true if a node is ready. +func IsNodeReady(node *corev1.Node) bool { + for _, condition := range node.Status.Conditions { + if condition.Type == corev1.NodeReady { + return condition.Status == corev1.ConditionTrue + } + } + + return false +} + +// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. +func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + if obj.Labels[clusterv1.ClusterNameLabel] == "" { + return nil, errors.WithStack(ErrNoCluster) + } + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel]) +} + +// GetOwnerCluster returns the Cluster object owning the current resource. +func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind != "Cluster" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + if gv.Group == clusterv1.GroupVersion.Group { + return GetClusterByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// GetClusterByName finds and returns a Cluster object using the specified params. +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) { + cluster := &clusterv1.Cluster{} + key := client.ObjectKey{ + Namespace: namespace, + Name: name, + } + + if err := c.Get(ctx, key, cluster); err != nil { + return nil, errors.Wrapf(err, "failed to get Cluster/%s", name) + } + + return cluster, nil +} + +// ObjectKey returns client.ObjectKey for the object. +func ObjectKey(object metav1.Object) client.ObjectKey { + return client.ObjectKey{ + Namespace: object.GetNamespace(), + Name: object.GetName(), + } +} + +// ClusterToInfrastructureMapFunc returns a handler.MapFunc that watches for +// Cluster events and returns reconciliation requests for an infrastructure provider object. +func ClusterToInfrastructureMapFunc(ctx context.Context, gvk schema.GroupVersionKind, c client.Client, providerCluster client.Object) handler.MapFunc { + log := ctrl.LoggerFrom(ctx) + return func(ctx context.Context, o client.Object) []reconcile.Request { + cluster, ok := o.(*clusterv1.Cluster) + if !ok { + return nil + } + + // Return early if the InfrastructureRef is nil. + if cluster.Spec.InfrastructureRef == nil { + return nil + } + gk := gvk.GroupKind() + // Return early if the GroupKind doesn't match what we expect.
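+ // Note: comparing GroupKind rather than the full GroupVersionKind deliberately ignores the API version, so the mapping keeps working when the infrastructure resource's apiVersion is bumped.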
+		infraGK := cluster.Spec.InfrastructureRef.GroupVersionKind().GroupKind()
+		if gk != infraGK {
+			return nil
+		}
+		providerCluster := providerCluster.DeepCopyObject().(client.Object)
+		key := types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Spec.InfrastructureRef.Name}
+
+		if err := c.Get(ctx, key, providerCluster); err != nil {
+			log.V(4).Info(fmt.Sprintf("Failed to get %T", providerCluster), "err", err)
+			return nil
+		}
+
+		if annotations.IsExternallyManaged(providerCluster) {
+			log.V(4).Info(fmt.Sprintf("%T is externally managed, skipping mapping", providerCluster))
+			return nil
+		}
+
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: cluster.Namespace,
+					Name:      cluster.Spec.InfrastructureRef.Name,
+				},
+			},
+		}
+	}
+}
+
+// GetOwnerMachine returns the Machine object owning the current resource.
+func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) {
+	for _, ref := range obj.GetOwnerReferences() {
+		gv, err := schema.ParseGroupVersion(ref.APIVersion)
+		if err != nil {
+			return nil, err
+		}
+		if ref.Kind == "Machine" && gv.Group == clusterv1.GroupVersion.Group {
+			return GetMachineByName(ctx, c, obj.Namespace, ref.Name)
+		}
+	}
+	return nil, nil
+}
+
+// GetMachineByName finds and returns a Machine object using the specified params.
+func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) {
+	m := &clusterv1.Machine{}
+	key := client.ObjectKey{Name: name, Namespace: namespace}
+	if err := c.Get(ctx, key, m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// MachineToInfrastructureMapFunc returns a handler.MapFunc that maps
+// Machine events to reconciliation requests for an infrastructure provider object.
+func MachineToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc {
+	return func(_ context.Context, o client.Object) []reconcile.Request {
+		m, ok := o.(*clusterv1.Machine)
+		if !ok {
+			return nil
+		}
+
+		gk := gvk.GroupKind()
+		// Return early if the GroupKind doesn't match what we expect.
+		infraGK := m.Spec.InfrastructureRef.GroupVersionKind().GroupKind()
+		if gk != infraGK {
+			return nil
+		}
+
+		return []reconcile.Request{
+			{
+				NamespacedName: client.ObjectKey{
+					Namespace: m.Namespace,
+					Name:      m.Spec.InfrastructureRef.Name,
+				},
+			},
+		}
+	}
+}
+
+// HasOwnerRef returns true if the OwnerReference is already in the slice. It matches based on Group, Kind and Name.
+func HasOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool {
+	return indexOwnerRef(ownerReferences, ref) > -1
+}
+
+// EnsureOwnerRef makes sure the slice contains the OwnerReference.
+// Note: EnsureOwnerRef will update the version of the OwnerReference if it exists with a different version. It will also update the UID.
+func EnsureOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) []metav1.OwnerReference {
+	idx := indexOwnerRef(ownerReferences, ref)
+	if idx == -1 {
+		return append(ownerReferences, ref)
+	}
+	ownerReferences[idx] = ref
+	return ownerReferences
+}
+
+// ReplaceOwnerRef re-parents an object from one OwnerReference to another.
+// It compares strictly based on UID to avoid reparenting across an intentional deletion: if an object is deleted
+// and re-created with the same name and namespace, the only way to tell there was an in-progress deletion
+// is by comparing the UIDs.
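+//
+// Sketch of the matching rule (values illustrative):
+//
+//	// source.GetUID() == "uid-1"
+//	refs := []metav1.OwnerReference{{Name: "old-owner", UID: "uid-1"}}
+//	refs = ReplaceOwnerRef(refs, source, metav1.OwnerReference{Name: "new-owner", UID: "uid-2"})
+//	// refs now holds only the "new-owner" reference; had no UID matched,
+//	// the new reference would have been appended instead.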
+func ReplaceOwnerRef(ownerReferences []metav1.OwnerReference, source metav1.Object, target metav1.OwnerReference) []metav1.OwnerReference {
+	fi := -1
+	for index, r := range ownerReferences {
+		if r.UID == source.GetUID() {
+			fi = index
+			ownerReferences[index] = target
+			break
+		}
+	}
+	if fi < 0 {
+		ownerReferences = append(ownerReferences, target)
+	}
+	return ownerReferences
+}
+
+// RemoveOwnerRef returns the slice of owner references after removing the supplied owner ref.
+// Note: RemoveOwnerRef ignores apiVersion and UID. It will remove the passed ownerReference where it matches Name, Group and Kind.
+func RemoveOwnerRef(ownerReferences []metav1.OwnerReference, inputRef metav1.OwnerReference) []metav1.OwnerReference {
+	if index := indexOwnerRef(ownerReferences, inputRef); index != -1 {
+		return append(ownerReferences[:index], ownerReferences[index+1:]...)
+	}
+	return ownerReferences
+}
+
+// HasExactOwnerRef returns true if the exact OwnerReference is already in the slice.
+// It matches based on APIVersion, Kind, Name, UID and Controller.
+func HasExactOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) bool {
+	for _, r := range ownerReferences {
+		if r.APIVersion == ref.APIVersion &&
+			r.Kind == ref.Kind &&
+			r.Name == ref.Name &&
+			r.UID == ref.UID &&
+			ptr.Deref(r.Controller, false) == ptr.Deref(ref.Controller, false) {
+			return true
+		}
+	}
+	return false
+}
+
+// indexOwnerRef returns the index of the owner reference in the slice if found, or -1.
+func indexOwnerRef(ownerReferences []metav1.OwnerReference, ref metav1.OwnerReference) int {
+	for index, r := range ownerReferences {
+		if referSameObject(r, ref) {
+			return index
+		}
+	}
+	return -1
+}
+
+// IsOwnedByObject returns true if any of the owner references point to the given target.
+// It matches the object based on the Group, Kind and Name.
+func IsOwnedByObject(obj metav1.Object, target client.Object) bool {
+	for _, ref := range obj.GetOwnerReferences() {
+		if refersTo(&ref, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// IsControlledBy differs from metav1.IsControlledBy. This function matches on Group, Kind and Name. The metav1.IsControlledBy function matches on UID only.
+func IsControlledBy(obj metav1.Object, owner client.Object) bool {
+	controllerRef := metav1.GetControllerOfNoCopy(obj)
+	if controllerRef == nil {
+		return false
+	}
+	return refersTo(controllerRef, owner)
+}
+
+// referSameObject returns true if a and b point to the same object based on Group, Kind and Name.
+func referSameObject(a, b metav1.OwnerReference) bool {
+	aGV, err := schema.ParseGroupVersion(a.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	bGV, err := schema.ParseGroupVersion(b.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	return aGV.Group == bGV.Group && a.Kind == b.Kind && a.Name == b.Name
+}
+
+// refersTo returns true if ref refers to obj based on Group, Kind and Name.
+func refersTo(ref *metav1.OwnerReference, obj client.Object) bool {
+	refGv, err := schema.ParseGroupVersion(ref.APIVersion)
+	if err != nil {
+		return false
+	}
+
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	return refGv.Group == gvk.Group && ref.Kind == gvk.Kind && ref.Name == obj.GetName()
+}
+
+// UnstructuredUnmarshalField is a wrapper around json and unstructured objects to decode and copy a specific field
+// value into an object.
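+//
+// For example (field path and target type assumed for illustration):
+//
+//	var replicas int32
+//	err := UnstructuredUnmarshalField(obj, &replicas, "spec", "replicas")
+//	// err == ErrUnstructuredFieldNotFound when spec.replicas is absent.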
+func UnstructuredUnmarshalField(obj *unstructured.Unstructured, v interface{}, fields ...string) error {
+	if obj == nil || obj.Object == nil {
+		return errors.Errorf("failed to unmarshal unstructured object: object is nil")
+	}
+
+	value, found, err := unstructured.NestedFieldNoCopy(obj.Object, fields...)
+	if err != nil {
+		return errors.Wrapf(err, "failed to retrieve field %q from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	if !found || value == nil {
+		return ErrUnstructuredFieldNotFound
+	}
+	valueBytes, err := json.Marshal(value)
+	if err != nil {
+		return errors.Wrapf(err, "failed to json-encode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	if err := json.Unmarshal(valueBytes, v); err != nil {
+		return errors.Wrapf(err, "failed to json-decode field %q value from %q", strings.Join(fields, "."), obj.GroupVersionKind())
+	}
+	return nil
+}
+
+// HasOwner checks if any of the references in the passed list match the given group from apiVersion and one of the given kinds.
+func HasOwner(refList []metav1.OwnerReference, apiVersion string, kinds []string) bool {
+	gv, err := schema.ParseGroupVersion(apiVersion)
+	if err != nil {
+		return false
+	}
+
+	kindMap := make(map[string]bool)
+	for _, kind := range kinds {
+		kindMap[kind] = true
+	}
+
+	for _, mr := range refList {
+		mrGroupVersion, err := schema.ParseGroupVersion(mr.APIVersion)
+		if err != nil {
+			return false
+		}
+
+		if mrGroupVersion.Group == gv.Group && kindMap[mr.Kind] {
+			return true
+		}
+	}
+
+	return false
+}
+
+// GetGVKMetadata retrieves the CustomResourceDefinition metadata for a GVK from the API server using partial object metadata.
+//
+// This function is significantly more efficient than GetCRDWithContract and should be preferred in most cases.
+func GetGVKMetadata(ctx context.Context, c client.Client, gvk schema.GroupVersionKind) (*metav1.PartialObjectMetadata, error) {
+	meta := &metav1.PartialObjectMetadata{}
+	meta.SetName(contract.CalculateCRDName(gvk.Group, gvk.Kind))
+	meta.SetGroupVersionKind(apiextensionsv1.SchemeGroupVersion.WithKind("CustomResourceDefinition"))
+	if err := c.Get(ctx, client.ObjectKeyFromObject(meta), meta); err != nil {
+		return meta, errors.Wrap(err, "failed to retrieve metadata from GVK resource")
+	}
+	return meta, nil
+}
+
+// KubeAwareAPIVersions is a sortable slice of kube-like version strings.
+//
+// Kube-like version strings start with a "v", followed by a major version,
+// optionally followed by an "alpha" or "beta" string and a minor version
+// (e.g. v1, v2beta1).
+// Versions are sorted GA before beta before alpha, and by major and minor
+// version within each group, e.g. v2, v1, v1beta2, v1beta1, v1alpha1.
+type KubeAwareAPIVersions []string
+
+func (k KubeAwareAPIVersions) Len() int      { return len(k) }
+func (k KubeAwareAPIVersions) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
+func (k KubeAwareAPIVersions) Less(i, j int) bool {
+	return k8sversion.CompareKubeAwareVersionStrings(k[i], k[j]) < 0
+}
+
+// ClusterToTypedObjectsMapper returns a mapper function that gets a cluster and lists all objects for the object passed in
+// and returns a list of requests.
+// Note: This function uses the passed in typed ObjectList and thus with the default client configuration all list calls
+// will be cached.
+// NB: The objects are required to have `clusterv1.ClusterNameLabel` applied.
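+//
+// A hypothetical wiring sketch with controller-runtime (builder chain abbreviated):
+//
+//	mapFn, err := ClusterToTypedObjectsMapper(mgr.GetClient(), &clusterv1.MachineList{}, mgr.GetScheme())
+//	// ...handle err...
+//	// then: Watches(&clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(mapFn))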
+func ClusterToTypedObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		cluster, ok := o.(*clusterv1.Cluster)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.ClusterNameLabel: cluster.Name,
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(cluster.Namespace))
+		}
+
+		// Note: We have to DeepCopy objectList into a new variable. Otherwise
+		// we have a race condition between DeepCopyObject and client.List if this
+		// mapper func is called concurrently.
+		objectList := objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// MachineDeploymentToObjectsMapper returns a mapper function that gets a MachineDeployment
+// and lists all objects for the object passed in and returns a list of requests.
+// NB: The objects are required to have `clusterv1.MachineDeploymentNameLabel` applied.
+func MachineDeploymentToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
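+	// (scheme.New returns a fresh zero value of the type registered for gvk,
+	// e.g. a *clusterv1.MachineList when ro is a MachineList; the prototype is
+	// deep-copied per event below before being passed to List.)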
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		md, ok := o.(*clusterv1.MachineDeployment)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.MachineDeploymentNameLabel: md.Name,
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(md.Namespace))
+		}
+
+		// Note: We have to DeepCopy objectList into a new variable. Otherwise
+		// we have a race condition between DeepCopyObject and client.List if this
+		// mapper func is called concurrently.
+		objectList := objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// MachineSetToObjectsMapper returns a mapper function that gets a MachineSet
+// and lists all objects for the object passed in and returns a list of requests.
+// NB: The objects are required to have `clusterv1.MachineSetNameLabel` applied.
+func MachineSetToObjectsMapper(c client.Client, ro client.ObjectList, scheme *runtime.Scheme) (handler.MapFunc, error) {
+	gvk, err := apiutil.GVKForObject(ro, scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	// Note: we create the typed ObjectList once here, so we don't have to use
+	// reflection in every execution of the actual event handler.
+	obj, err := scheme.New(gvk)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to construct object of type %s", gvk)
+	}
+	objectList, ok := obj.(client.ObjectList)
+	if !ok {
+		return nil, errors.Errorf("expected object to be a client.ObjectList, is actually %T", obj)
+	}
+
+	isNamespaced, err := isAPINamespaced(gvk, c.RESTMapper())
+	if err != nil {
+		return nil, err
+	}
+
+	return func(ctx context.Context, o client.Object) []ctrl.Request {
+		ms, ok := o.(*clusterv1.MachineSet)
+		if !ok {
+			return nil
+		}
+
+		listOpts := []client.ListOption{
+			client.MatchingLabels{
+				clusterv1.MachineSetNameLabel: format.MustFormatValue(ms.Name),
+			},
+		}
+
+		if isNamespaced {
+			listOpts = append(listOpts, client.InNamespace(ms.Namespace))
+		}
+
+		// Note: We have to DeepCopy objectList into a new variable. Otherwise
+		// we have a race condition between DeepCopyObject and client.List if this
+		// mapper func is called concurrently.
+		objectList := objectList.DeepCopyObject().(client.ObjectList)
+		if err := c.List(ctx, objectList, listOpts...); err != nil {
+			return nil
+		}
+
+		objects, err := meta.ExtractList(objectList)
+		if err != nil {
+			return nil
+		}
+
+		results := []ctrl.Request{}
+		for _, obj := range objects {
+			// Note: We don't check if the type cast succeeds as all items in a client.ObjectList
+			// are client.Objects.
+			o := obj.(client.Object)
+			results = append(results, ctrl.Request{
+				NamespacedName: client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()},
+			})
+		}
+		return results
+	}, nil
+}
+
+// isAPINamespaced returns true if the GroupVersionKind refers to a namespace-scoped resource.
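+// For example, a namespace-scoped kind such as Machine maps to true, while a
+// cluster-scoped kind such as a CustomResourceDefinition maps to false
+// (subject to what the passed RESTMapper actually knows about).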
+func isAPINamespaced(gk schema.GroupVersionKind, restmapper meta.RESTMapper) (bool, error) {
+	restMapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind})
+	if err != nil {
+		return false, fmt.Errorf("failed to get restmapping: %w", err)
+	}
+
+	switch restMapping.Scope.Name() {
+	case "":
+		return false, errors.New("scope cannot be identified, empty scope returned")
+	case meta.RESTScopeNameRoot:
+		return false, nil
+	default:
+		return true, nil
+	}
+}
+
+// ObjectReferenceToUnstructured converts an object reference to an unstructured object.
+func ObjectReferenceToUnstructured(in corev1.ObjectReference) *unstructured.Unstructured {
+	out := &unstructured.Unstructured{}
+	out.SetKind(in.Kind)
+	out.SetAPIVersion(in.APIVersion)
+	out.SetNamespace(in.Namespace)
+	out.SetName(in.Name)
+	return out
+}
+
+// IsSupportedVersionSkew returns true if a and b are no more than one minor version apart.
+func IsSupportedVersionSkew(a, b semver.Version) bool {
+	if a.Major != b.Major {
+		return false
+	}
+	if a.Minor > b.Minor {
+		return a.Minor-b.Minor == 1
+	}
+	return b.Minor-a.Minor <= 1
+}
+
+// LowestNonZeroResult compares two reconciliation results
+// and returns the one with the lowest requeue time.
+func LowestNonZeroResult(i, j ctrl.Result) ctrl.Result {
+	switch {
+	case i.IsZero():
+		return j
+	case j.IsZero():
+		return i
+	case i.Requeue:
+		return i
+	case j.Requeue:
+		return j
+	case i.RequeueAfter < j.RequeueAfter:
+		return i
+	default:
+		return j
+	}
+}
+
+// LowestNonZeroInt32 returns the lowest non-zero value of the two provided values.
+func LowestNonZeroInt32(i, j int32) int32 {
+	if i == 0 {
+		return j
+	}
+	if j == 0 {
+		return i
+	}
+	if i < j {
+		return i
+	}
+	return j
+}
+
+// IsNil returns true if the passed interface is nil, or if it holds a nil value
+// of a pointer, map, chan, slice, interface, unsafe pointer or func type.
+func IsNil(i interface{}) bool {
+	if i == nil {
+		return true
+	}
+	switch reflect.TypeOf(i).Kind() {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Slice, reflect.Interface, reflect.UnsafePointer, reflect.Func:
+		return reflect.ValueOf(i).IsValid() && reflect.ValueOf(i).IsNil()
+	}
+	return false
+}
+
+// MergeMap merges maps.
+// NOTE: In case a key exists in multiple maps, the value of the first map is preserved.
+func MergeMap(maps ...map[string]string) map[string]string {
+	m := make(map[string]string)
+	for i := len(maps) - 1; i >= 0; i-- {
+		for k, v := range maps[i] {
+			m[k] = v
+		}
+	}
+
+	// Return nil if the merged map is empty, to avoid triggering an infinite reconcile:
+	// at the JSON level, labels: {} or annotations: {} is different from the field being
+	// absent, and absence is what ends up stored in etcd given that those fields are
+	// defined as omitempty.
+	if len(m) == 0 {
+		return nil
+	}
+	return m
+}
diff --git a/util/deprecated/v1beta1/util_test.go b/util/deprecated/v1beta1/util_test.go
new file mode 100644
index 000000000000..6edc14b3f42d
--- /dev/null
+++ b/util/deprecated/v1beta1/util_test.go
@@ -0,0 +1,1063 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "fmt" + "testing" + + "github.com/blang/semver/v4" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/labels/format" +) + +func TestMachineToInfrastructureMapFunc(t *testing.T) { + g := NewWithT(t) + + testcases := []struct { + name string + input schema.GroupVersionKind + request client.Object + output []reconcile.Request + }{ + { + name: "should reconcile infra-1", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha4", + Kind: "TestMachine", + }, + request: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-1", + }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "foo.cluster.x-k8s.io/v1beta1", + Kind: "TestMachine", + Name: "infra-1", + }, + }, + }, + output: []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: metav1.NamespaceDefault, + Name: "infra-1", + }, + }, + }, + }, + { + name: "should return no matching reconcile requests", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1beta1", + Kind: "TestMachine", + }, + request: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-1", + }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "bar.cluster.x-k8s.io/v1beta1", + Kind: "TestMachine", + Name: "bar-1", + }, + }, + }, + output: nil, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(*testing.T) { + fn := MachineToInfrastructureMapFunc(tc.input) + out := fn(ctx, tc.request) + g.Expect(out).To(BeComparableTo(tc.output)) + }) + } +} + +func TestClusterToInfrastructureMapFunc(t *testing.T) { + testcases := []struct { + name string + input schema.GroupVersionKind + request *clusterv1.Cluster + infrastructure client.Object + output []reconcile.Request + }{ + { + name: "should reconcile infra-1", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha4", + Kind: "TestCluster", + }, + request: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-1", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "foo.cluster.x-k8s.io/v1beta1", + Kind: "TestCluster", + Name: "infra-1", + }, + }, + }, + infrastructure: &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "foo.cluster.x-k8s.io/v1beta1", + "kind": "TestCluster", + "metadata": map[string]interface{}{ + "namespace": metav1.NamespaceDefault, + "name": "infra-1", + }, + }}, + output: []reconcile.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: metav1.NamespaceDefault, + Name: "infra-1", + }, + }, + }, + }, + { + name: "should return no matching reconcile requests", + input: schema.GroupVersionKind{ + Group: 
"foo.cluster.x-k8s.io", + Version: "v1beta1", + Kind: "TestCluster", + }, + request: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-1", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "bar.cluster.x-k8s.io/v1beta1", + Kind: "TestCluster", + Name: "bar-1", + }, + }, + }, + output: nil, + }, + { + name: "Externally managed provider cluster is excluded", + input: schema.GroupVersionKind{ + Group: "foo.cluster.x-k8s.io", + Version: "v1alpha4", + Kind: "TestCluster", + }, + request: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: metav1.NamespaceDefault, + Name: "test-1", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "foo.cluster.x-k8s.io/v1beta1", + Kind: "TestCluster", + Name: "infra-1", + }, + }, + }, + infrastructure: &unstructured.Unstructured{Object: map[string]interface{}{ + "apiVersion": "foo.cluster.x-k8s.io/v1beta1", + "kind": "TestCluster", + "metadata": map[string]interface{}{ + "namespace": metav1.NamespaceDefault, + "name": "infra-1", + "annotations": map[string]interface{}{ + clusterv1.ManagedByAnnotation: "", + }, + }, + }}, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + clientBuilder := fake.NewClientBuilder() + if tc.infrastructure != nil { + clientBuilder.WithObjects(tc.infrastructure) + } + + // Unstructured simplifies testing but should not be used in real usage, because it will + // likely result in a duplicate cache in an unstructured projection. + referenceObject := &unstructured.Unstructured{} + referenceObject.SetAPIVersion(tc.request.Spec.InfrastructureRef.APIVersion) + referenceObject.SetKind(tc.request.Spec.InfrastructureRef.Kind) + + fn := ClusterToInfrastructureMapFunc(context.Background(), tc.input, clientBuilder.Build(), referenceObject) + out := fn(ctx, tc.request) + g.Expect(out).To(BeComparableTo(tc.output)) + }) + } +} + +func TestHasOwner(t *testing.T) { + g := NewWithT(t) + + tests := []struct { + name string + refList []metav1.OwnerReference + expected bool + }{ + { + name: "no ownership", + }, + { + name: "owned by cluster", + refList: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + expected: true, + }, + { + name: "owned by cluster from older version", + refList: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: "cluster.x-k8s.io/v1alpha2", + }, + }, + expected: true, + }, + { + name: "owned by a MachineDeployment from older version", + refList: []metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: "cluster.x-k8s.io/v1alpha2", + }, + }, + expected: true, + }, + { + name: "owned by something else", + refList: []metav1.OwnerReference{ + { + Kind: "Pod", + APIVersion: "v1", + }, + { + Kind: "Deployment", + APIVersion: "apps/v1", + }, + }, + }, + { + name: "owner by a deployment", + refList: []metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + expected: true, + }, + { + name: "right kind, wrong apiversion", + refList: []metav1.OwnerReference{ + { + Kind: "MachineDeployment", + APIVersion: "wrong/v2", + }, + }, + }, + { + name: "right apiversion, wrong kind", + refList: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(*testing.T) { + result := HasOwner( + 
test.refList, + clusterv1.GroupVersion.String(), + []string{"MachineDeployment", "Cluster"}, + ) + g.Expect(result).To(Equal(test.expected)) + }) + } +} + +type fakeMeta struct { + metav1.ObjectMeta + metav1.TypeMeta +} + +var _ runtime.Object = &fakeMeta{} + +func (*fakeMeta) DeepCopyObject() runtime.Object { + panic("not implemented") +} + +func TestIsOwnedByObject(t *testing.T) { + g := NewWithT(t) + + targetGroup := "ponies.info" + targetKind := "Rainbow" + targetName := "fri3ndsh1p" + + meta := fakeMeta{ + metav1.ObjectMeta{ + Name: targetName, + }, + metav1.TypeMeta{ + APIVersion: "ponies.info/v1", + Kind: targetKind, + }, + } + + tests := []struct { + name string + refs []metav1.OwnerReference + expected bool + }{ + { + name: "empty owner list", + }, + { + name: "single wrong name owner ref", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: "m4g1c", + }}, + }, + { + name: "single wrong group owner ref", + refs: []metav1.OwnerReference{{ + APIVersion: "dazzlings.info/v1", + Kind: "Twilight", + Name: "m4g1c", + }}, + }, + { + name: "single wrong kind owner ref", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v1", + Kind: "Twilight", + Name: "m4g1c", + }}, + }, + { + name: "single right owner ref", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: targetName, + }}, + expected: true, + }, + { + name: "single right owner ref (different version)", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v2alpha2", + Kind: targetKind, + Name: targetName, + }}, + expected: true, + }, + { + name: "multiple wrong refs", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: "m4g1c", + }, { + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: "h4rm0ny", + }}, + }, + { + name: "multiple refs one right", + refs: []metav1.OwnerReference{{ + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: "m4g1c", + }, { + APIVersion: targetGroup + "/v1", + Kind: targetKind, + Name: targetName, + }}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(*testing.T) { + pointer := &metav1.ObjectMeta{ + OwnerReferences: test.refs, + } + + g.Expect(IsOwnedByObject(pointer, &meta)).To(Equal(test.expected), "Could not find a ref to %+v in %+v", meta, test.refs) + }) + } +} + +func TestGetOwnerClusterSuccessByName(t *testing.T) { + g := NewWithT(t) + + myCluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: metav1.NamespaceDefault, + }, + } + + c := fake.NewClientBuilder(). + WithObjects(myCluster). + Build() + + objm := metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-cluster", + }, + }, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-cluster", + } + cluster, err := GetOwnerCluster(ctx, c, objm) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster).NotTo(BeNil()) + + // Make sure API version does not matter + objm.OwnerReferences[0].APIVersion = "cluster.x-k8s.io/v1alpha1234" + cluster, err = GetOwnerCluster(ctx, c, objm) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cluster).NotTo(BeNil()) +} + +func TestGetOwnerMachineSuccessByName(t *testing.T) { + g := NewWithT(t) + + myMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: metav1.NamespaceDefault, + }, + } + + c := fake.NewClientBuilder(). 
+ WithObjects(myMachine). + Build() + + objm := metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.String(), + Name: "my-machine", + }, + }, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-machine", + } + machine, err := GetOwnerMachine(ctx, c, objm) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machine).NotTo(BeNil()) +} + +func TestGetOwnerMachineSuccessByNameFromDifferentVersion(t *testing.T) { + g := NewWithT(t) + + myMachine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-machine", + Namespace: metav1.NamespaceDefault, + }, + } + + c := fake.NewClientBuilder(). + WithObjects(myMachine). + Build() + + objm := metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Machine", + APIVersion: clusterv1.GroupVersion.Group + "/v1alpha2", + Name: "my-machine", + }, + }, + Namespace: metav1.NamespaceDefault, + Name: "my-resource-owned-by-machine", + } + machine, err := GetOwnerMachine(ctx, c, objm) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(machine).NotTo(BeNil()) +} + +func TestIsExternalManagedControlPlane(t *testing.T) { + g := NewWithT(t) + + t.Run("should return true if control plane status externalManagedControlPlane is true", func(*testing.T) { + controlPlane := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "externalManagedControlPlane": true, + }, + }, + } + result := IsExternalManagedControlPlane(controlPlane) + g.Expect(result).Should(BeTrue()) + }) + + t.Run("should return false if control plane status externalManagedControlPlane is false", func(*testing.T) { + controlPlane := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "externalManagedControlPlane": false, + }, + }, + } + result := IsExternalManagedControlPlane(controlPlane) + g.Expect(result).Should(BeFalse()) + }) + + t.Run("should return false if control plane status externalManagedControlPlane is not set", func(*testing.T) { + controlPlane := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "status": map[string]interface{}{ + "someOtherStatusField": "someValue", + }, + }, + } + result := IsExternalManagedControlPlane(controlPlane) + g.Expect(result).Should(BeFalse()) + }) +} + +func TestEnsureOwnerRef(t *testing.T) { + g := NewWithT(t) + + t.Run("should set ownerRef on an empty list", func(*testing.T) { + obj := &clusterv1.Machine{} + ref := metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + } + obj.OwnerReferences = EnsureOwnerRef(obj.OwnerReferences, ref) + g.Expect(obj.OwnerReferences).Should(ContainElement(ref)) + }) + + t.Run("should not duplicate owner references", func(*testing.T) { + obj := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + }, + }, + } + ref := metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + } + obj.OwnerReferences = EnsureOwnerRef(obj.OwnerReferences, ref) + g.Expect(obj.OwnerReferences).Should(ContainElement(ref)) + g.Expect(obj.OwnerReferences).Should(HaveLen(1)) + }) + + t.Run("should update the APIVersion if duplicate", func(*testing.T) { + oldgvk := schema.GroupVersion{ + Group: clusterv1.GroupVersion.Group, + Version: "v1alpha2", + } + obj := &clusterv1.Machine{ + 
ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: oldgvk.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + }, + }, + } + ref := metav1.OwnerReference{ + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + } + obj.OwnerReferences = EnsureOwnerRef(obj.OwnerReferences, ref) + g.Expect(obj.OwnerReferences).Should(ContainElement(ref)) + g.Expect(obj.OwnerReferences).Should(HaveLen(1)) + }) +} + +func TestClusterToObjectsMapper(t *testing.T) { + g := NewWithT(t) + + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + }, + } + + table := []struct { + name string + objects []client.Object + input client.ObjectList + output []ctrl.Request + expectError bool + }{ + { + name: "should return a list of requests with labelled machines", + input: &clusterv1.MachineList{}, + objects: []client.Object{ + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine1", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test1", + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test1", + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + { + name: "should return a list of requests with labelled MachineDeployments", + input: &clusterv1.MachineDeploymentList{}, + objects: []client.Object{ + &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "md1", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test1", + }, + }, + }, + &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "md2", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test2", + }, + }, + }, + &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "md3", + Labels: map[string]string{ + clusterv1.ClusterNameLabel: "test1", + }, + }, + }, + &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "md4", + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "md1"}}, + {NamespacedName: client.ObjectKey{Name: "md3"}}, + }, + }, + } + + for _, tc := range table { + tc.objects = append(tc.objects, cluster) + + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) + + // Add tc.input gvk to the restMapper. 
+		gvk, err := apiutil.GVKForObject(tc.input, scheme)
+		g.Expect(err).ToNot(HaveOccurred())
+		restMapper.Add(gvk, meta.RESTScopeNamespace)
+
+		client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build()
+		f, err := ClusterToTypedObjectsMapper(client, tc.input, scheme)
+		g.Expect(err != nil, err).To(Equal(tc.expectError))
+		g.Expect(f(ctx, cluster)).To(ConsistOf(tc.output))
+	}
+}
+
+func TestMachineDeploymentToObjectsMapper(t *testing.T) {
+	g := NewWithT(t)
+
+	machineDeployment := &clusterv1.MachineDeployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "cluster-md-0",
+		},
+	}
+
+	table := []struct {
+		name        string
+		objects     []client.Object
+		output      []ctrl.Request
+		expectError bool
+	}{
+		{
+			name: "should return a list of requests with labelled machines",
+			objects: []client.Object{
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine1",
+						Labels: map[string]string{
+							clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(),
+						},
+					},
+				},
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine2",
+						Labels: map[string]string{
+							clusterv1.MachineDeploymentNameLabel: machineDeployment.GetName(),
+						},
+					},
+				},
+			},
+			output: []ctrl.Request{
+				{NamespacedName: client.ObjectKey{Name: "machine1"}},
+				{NamespacedName: client.ObjectKey{Name: "machine2"}},
+			},
+		},
+	}
+
+	for _, tc := range table {
+		tc.objects = append(tc.objects, machineDeployment)
+
+		scheme := runtime.NewScheme()
+		_ = clusterv1.AddToScheme(scheme)
+
+		restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion})
+
+		// Add the MachineList GVK to the restMapper.
+		gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme)
+		g.Expect(err).ToNot(HaveOccurred())
+		restMapper.Add(gvk, meta.RESTScopeNamespace)
+
+		client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build()
+		f, err := MachineDeploymentToObjectsMapper(client, &clusterv1.MachineList{}, scheme)
+		g.Expect(err != nil, err).To(Equal(tc.expectError))
+		g.Expect(f(ctx, machineDeployment)).To(ConsistOf(tc.output))
+	}
+}
+
+func TestMachineSetToObjectsMapper(t *testing.T) {
+	g := NewWithT(t)
+
+	table := []struct {
+		name        string
+		machineSet  *clusterv1.MachineSet
+		objects     []client.Object
+		output      []ctrl.Request
+		expectError bool
+	}{
+		{
+			name: "should return a list of requests with labelled machines",
+			machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster-ms-0",
+			}},
+			objects: []client.Object{
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine1",
+						Labels: map[string]string{
+							clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"),
+						},
+					},
+				},
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine2",
+						Labels: map[string]string{
+							clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0"),
+						},
+					},
+				},
+			},
+			output: []ctrl.Request{
+				{NamespacedName: client.ObjectKey{Name: "machine1"}},
+				{NamespacedName: client.ObjectKey{Name: "machine2"}},
+			},
+		},
+		{
+			name: "should return a list of requests with labelled machines when the machineset name is hashed in the label",
+			machineSet: &clusterv1.MachineSet{ObjectMeta: metav1.ObjectMeta{
+				Name: "cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name",
+			}},
+			objects: []client.Object{
+				&clusterv1.Machine{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "machine1",
+						Labels: map[string]string{
+							clusterv1.MachineSetNameLabel:
format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine2", + Labels: map[string]string{ + clusterv1.MachineSetNameLabel: format.MustFormatValue("cluster-ms-0-looooooooooooooooooooooooooooooooooooooooooooong-name"), + }, + }, + }, + }, + output: []ctrl.Request{ + {NamespacedName: client.ObjectKey{Name: "machine1"}}, + {NamespacedName: client.ObjectKey{Name: "machine2"}}, + }, + }, + } + + for _, tc := range table { + tc.objects = append(tc.objects, tc.machineSet) + + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{clusterv1.GroupVersion}) + + // Add tc.input gvk to the restMapper. + gvk, err := apiutil.GVKForObject(&clusterv1.MachineList{}, scheme) + g.Expect(err).ToNot(HaveOccurred()) + restMapper.Add(gvk, meta.RESTScopeNamespace) + + client := fake.NewClientBuilder().WithObjects(tc.objects...).WithRESTMapper(restMapper).Build() + f, err := MachineSetToObjectsMapper(client, &clusterv1.MachineList{}, scheme) + g.Expect(err != nil, err).To(Equal(tc.expectError)) + g.Expect(f(ctx, tc.machineSet)).To(ConsistOf(tc.output)) + } +} + +func TestOrdinalize(t *testing.T) { + tests := []struct { + input int + expected string + }{ + {0, "0th"}, + {1, "1st"}, + {2, "2nd"}, + {43, "43rd"}, + {5, "5th"}, + {6, "6th"}, + {207, "207th"}, + {1008, "1008th"}, + {-109, "-109th"}, + {-0, "0th"}, + } + + for _, tt := range tests { + t.Run(fmt.Sprintf("ordinalize %d", tt.input), func(t *testing.T) { + g := NewWithT(t) + g.Expect(Ordinalize(tt.input)).To(Equal(tt.expected)) + }) + } +} + +func TestIsSupportedVersionSkew(t *testing.T) { + type args struct { + a semver.Version + b semver.Version + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "same version", + args: args{ + a: semver.MustParse("1.10.0"), + b: semver.MustParse("1.10.0"), + }, + want: true, + }, + { + name: "different patch version", + args: args{ + a: semver.MustParse("1.10.0"), + b: semver.MustParse("1.10.2"), + }, + want: true, + }, + { + name: "a + 1 minor version", + args: args{ + a: semver.MustParse("1.11.0"), + b: semver.MustParse("1.10.2"), + }, + want: true, + }, + { + name: "b + 1 minor version", + args: args{ + a: semver.MustParse("1.10.0"), + b: semver.MustParse("1.11.2"), + }, + want: true, + }, + { + name: "a + 2 minor versions", + args: args{ + a: semver.MustParse("1.12.0"), + b: semver.MustParse("1.10.0"), + }, + want: false, + }, + { + name: "b + 2 minor versions", + args: args{ + a: semver.MustParse("1.10.0"), + b: semver.MustParse("1.12.0"), + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := IsSupportedVersionSkew(tt.args.a, tt.args.b); got != tt.want { + t.Errorf("IsSupportedVersionSkew() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestRemoveOwnerRef(t *testing.T) { + g := NewWithT(t) + makeOwnerRefs := func() []metav1.OwnerReference { + return []metav1.OwnerReference{ + { + APIVersion: "dazzlings.info/v1", + Kind: "Twilight", + Name: "m4g1c", + }, + { + APIVersion: "bar.cluster.x-k8s.io/v1beta1", + Kind: "TestCluster", + Name: "bar-1", + }, + } + } + + tests := []struct { + name string + toBeRemoved metav1.OwnerReference + }{ + { + name: "owner reference present", + toBeRemoved: metav1.OwnerReference{ + APIVersion: "dazzlings.info/v1", + Kind: "Twilight", + Name: "m4g1c", + }, + }, + { + name: "owner reference not present", + 
toBeRemoved: metav1.OwnerReference{ + APIVersion: "dazzlings.info/v1", + Kind: "Twilight", + Name: "abcdef", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + // Use a fresh ownerRefs slice for each test, because RemoveOwnerRef may modify the underlying array. + ownerRefs := makeOwnerRefs() + ownerRefs = RemoveOwnerRef(ownerRefs, tt.toBeRemoved) + g.Expect(HasOwnerRef(ownerRefs, tt.toBeRemoved)).NotTo(BeTrue()) + }) + } +} + +func TestUnstructuredUnmarshalField(t *testing.T) { + tests := []struct { + name string + obj *unstructured.Unstructured + v interface{} + fields []string + wantErr bool + }{ + { + "return error if object is nil", + nil, + nil, + nil, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := UnstructuredUnmarshalField(tt.obj, tt.v, tt.fields...); (err != nil) != tt.wantErr { + t.Errorf("UnstructuredUnmarshalField() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +}