Skip to content

Commit 6c3bd5a

Browse files
authored
[feat] pause implementation for CAPL controllers (#663)
This PR introduces features to support the pause contract for InfraCluster and InfraMachine types. https://cluster-api.sigs.k8s.io/developer/providers/contracts/infra-cluster#infracluster-pausing https://cluster-api.sigs.k8s.io/developer/providers/contracts/infra-machine#inframachine-pausing In addition to pausing the LinodeCluster when the owner CAPI cluster is paused, this also ensures that the referenced resource — the LinodeVPC — is paused by applying the pause annotation. The VPC controller's predicates are updated to ignore paused resources. Likewise, in addition to pausing the LinodeMachine when the owner CAPI cluster is paused, this also ensures that the referenced resource — the LinodeFirewall — is paused by applying the pause annotation. The Firewall controller's predicates are updated to ignore paused resources. Moreover, a Paused condition is ALWAYS emitted for LinodeCluster and LinodeMachine objects in accordance with the contracts.
1 parent fbef357 commit 6c3bd5a

10 files changed

+158
-23
lines changed

internal/controller/linodecluster_controller.go

Lines changed: 57 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,8 @@ import (
3232
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3333
kutil "sigs.k8s.io/cluster-api/util"
3434
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
35+
"sigs.k8s.io/cluster-api/util/patch"
36+
"sigs.k8s.io/cluster-api/util/paused"
3537
"sigs.k8s.io/cluster-api/util/predicates"
3638
ctrl "sigs.k8s.io/controller-runtime"
3739
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -116,14 +118,64 @@ func (r *LinodeClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques
116118
return r.reconcile(ctx, clusterScope, logger)
117119
}
118120

121+
func (r *LinodeClusterReconciler) reconcilePause(ctx context.Context, clusterScope *scope.ClusterScope, logger logr.Logger) error {
122+
// Pausing a cluster pauses the VPC as well.
123+
// First thing to do is handle a paused Cluster. Paused clusters shouldn't be deleted.
124+
isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, clusterScope.Client, clusterScope.Cluster, clusterScope.LinodeCluster)
125+
if err == nil && !isPaused && !conditionChanged {
126+
return nil
127+
}
128+
129+
if err != nil {
130+
return err
131+
}
132+
133+
if clusterScope.LinodeCluster.Spec.VPCRef == nil {
134+
logger.Info("Paused reconciliation is skipped due to missing VPC ref")
135+
return nil
136+
}
137+
138+
linodeVPC := infrav1alpha2.LinodeVPC{
139+
ObjectMeta: metav1.ObjectMeta{
140+
Namespace: clusterScope.LinodeCluster.Spec.VPCRef.Namespace,
141+
Name: clusterScope.LinodeCluster.Spec.VPCRef.Name,
142+
},
143+
}
144+
145+
if err := clusterScope.Client.Get(ctx, client.ObjectKeyFromObject(&linodeVPC), &linodeVPC); err != nil {
146+
return err
147+
}
148+
149+
annotations := linodeVPC.ObjectMeta.GetAnnotations()
150+
if annotations == nil {
151+
annotations = map[string]string{}
152+
}
153+
154+
if isPaused {
155+
logger.Info("CAPI cluster is paused, pausing VPC")
156+
// if we're paused, we should slap the pause annotation on our children
157+
// get the vpc & add the annotation
158+
annotations[clusterv1.PausedAnnotation] = "true"
159+
} else {
160+
// we are not paused here, but were previously paused (we can get here only if conditionChanged is true.
161+
logger.Info("CAPI cluster is no longer paused, removing pause annotation from VPC")
162+
delete(annotations, clusterv1.PausedAnnotation)
163+
}
164+
linodeVPC.SetAnnotations(annotations)
165+
vpcPatchHelper, err := patch.NewHelper(&linodeVPC, clusterScope.Client)
166+
if err != nil {
167+
return fmt.Errorf("failed to build patch helper for linode VPC object: %w", err)
168+
}
169+
return vpcPatchHelper.Patch(ctx, &linodeVPC)
170+
}
171+
119172
//nolint:cyclop // can't make it simpler with existing API
120173
func (r *LinodeClusterReconciler) reconcile(
121174
ctx context.Context,
122175
clusterScope *scope.ClusterScope,
123176
logger logr.Logger,
124177
) (res ctrl.Result, reterr error) {
125178
res = ctrl.Result{}
126-
127179
clusterScope.LinodeCluster.Status.Ready = false
128180
clusterScope.LinodeCluster.Status.FailureReason = nil
129181
clusterScope.LinodeCluster.Status.FailureMessage = util.Pointer("")
@@ -150,6 +202,10 @@ func (r *LinodeClusterReconciler) reconcile(
150202
return res, err
151203
}
152204

205+
if err := r.reconcilePause(ctx, clusterScope, logger); err != nil {
206+
return res, err
207+
}
208+
153209
// Handle deleted clusters
154210
if !clusterScope.LinodeCluster.DeletionTimestamp.IsZero() {
155211
if err := r.reconcileDelete(ctx, logger, clusterScope); err != nil {

internal/controller/linodecluster_controller_test.go

Lines changed: 29 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ import (
2727
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2828
"k8s.io/utils/ptr"
2929
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
30+
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
3031
"sigs.k8s.io/cluster-api/util/patch"
3132
"sigs.k8s.io/controller-runtime/pkg/client"
3233
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -64,7 +65,7 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
6465
ObjectMeta: metadata,
6566
Spec: infrav1alpha2.LinodeClusterSpec{
6667
Region: "us-ord",
67-
VPCRef: &corev1.ObjectReference{Name: "vpctest"},
68+
VPCRef: &corev1.ObjectReference{Name: "vpctest", Namespace: defaultNamespace},
6869
},
6970
}
7071
linodeVPC := infrav1alpha2.LinodeVPC{
@@ -123,8 +124,12 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
123124
OneOf(
124125
Path(Result("", func(ctx context.Context, mck Mock) {
125126
reconciler.Client = k8sClient
127+
// first for pause reconciliation
126128
_, err := reconciler.reconcile(ctx, cScope, mck.Logger())
127129
Expect(err).NotTo(HaveOccurred())
130+
// second for real
131+
_, err = reconciler.reconcile(ctx, cScope, mck.Logger())
132+
Expect(err).NotTo(HaveOccurred())
128133
Expect(rec.ConditionTrue(&linodeCluster, ConditionPreflightLinodeVPCReady)).To(BeFalse())
129134
})),
130135
),
@@ -135,8 +140,13 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
135140
}),
136141
Result("", func(ctx context.Context, mck Mock) {
137142
reconciler.Client = k8sClient
143+
// first reconcile is for pause
138144
_, err := reconciler.reconcile(ctx, cScope, mck.Logger())
139145
Expect(err).NotTo(HaveOccurred())
146+
147+
// second reconcile is for real
148+
_, err = reconciler.reconcile(ctx, cScope, mck.Logger())
149+
Expect(err).NotTo(HaveOccurred())
140150
Expect(rec.ConditionTrue(&linodeCluster, ConditionPreflightLinodeNBFirewallReady)).To(BeFalse())
141151
}),
142152
),
@@ -319,10 +329,11 @@ var _ = Describe("cluster-lifecycle", Ordered, Label("cluster", "cluster-lifecyc
319329
clusterKey := client.ObjectKeyFromObject(&linodeCluster)
320330
Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed())
321331
Expect(linodeCluster.Status.Ready).To(BeTrue())
322-
Expect(linodeCluster.Status.Conditions).To(HaveLen(3))
323-
Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition)))
324-
Expect(linodeCluster.Status.Conditions[1].Type).To(Equal(ConditionPreflightLinodeNBFirewallReady))
325-
Expect(linodeCluster.Status.Conditions[2].Type).To(Equal(ConditionPreflightLinodeVPCReady))
332+
Expect(linodeCluster.Status.Conditions).To(HaveLen(4))
333+
Expect(conditions.Get(&linodeCluster, clusterv1.PausedV1Beta2Condition).Status).To(Equal(metav1.ConditionFalse))
334+
Expect(conditions.Get(&linodeCluster, string(clusterv1.ReadyCondition)).Status).To(Equal(metav1.ConditionTrue))
335+
Expect(conditions.Get(&linodeCluster, ConditionPreflightLinodeNBFirewallReady)).NotTo(BeNil())
336+
Expect(conditions.Get(&linodeCluster, ConditionPreflightLinodeVPCReady)).NotTo(BeNil())
326337
By("checking NB id")
327338
Expect(linodeCluster.Spec.Network.NodeBalancerID).To(Equal(&nodebalancerID))
328339

@@ -430,12 +441,16 @@ var _ = Describe("cluster-lifecycle-dns", Ordered, Label("cluster", "cluster-lif
430441
_, err := reconciler.reconcile(ctx, cScope, logr.Logger{})
431442
Expect(err).NotTo(HaveOccurred())
432443

444+
// Once more for pause
445+
_, err = reconciler.reconcile(ctx, cScope, logr.Logger{})
446+
Expect(err).NotTo(HaveOccurred())
433447
By("checking ready conditions")
434448
clusterKey := client.ObjectKeyFromObject(&linodeCluster)
435449
Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed())
436450
Expect(linodeCluster.Status.Ready).To(BeTrue())
437-
Expect(linodeCluster.Status.Conditions).To(HaveLen(1))
438-
Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition)))
451+
Expect(linodeCluster.Status.Conditions).To(HaveLen(2))
452+
readyCond := conditions.Get(&linodeCluster, string(clusterv1.ReadyCondition))
453+
Expect(readyCond).NotTo(BeNil())
439454

440455
By("checking controlPlaneEndpoint/NB host and port")
441456
Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost))
@@ -699,12 +714,17 @@ var _ = Describe("dns-override-endpoint", Ordered, Label("cluster", "dns-overrid
699714
_, err := reconciler.reconcile(ctx, cScope, logr.Logger{})
700715
Expect(err).NotTo(HaveOccurred())
701716

717+
// once more for pause
718+
_, err = reconciler.reconcile(ctx, cScope, logr.Logger{})
719+
Expect(err).NotTo(HaveOccurred())
720+
702721
By("checking ready conditions")
703722
clusterKey := client.ObjectKeyFromObject(&linodeCluster)
704723
Expect(k8sClient.Get(ctx, clusterKey, &linodeCluster)).To(Succeed())
705724
Expect(linodeCluster.Status.Ready).To(BeTrue())
706-
Expect(linodeCluster.Status.Conditions).To(HaveLen(1))
707-
Expect(linodeCluster.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition)))
725+
Expect(linodeCluster.Status.Conditions).To(HaveLen(2))
726+
cond := conditions.Get(&linodeCluster, string(clusterv1.ReadyCondition))
727+
Expect(cond).NotTo(BeNil())
708728

709729
By("checking controlPlaneEndpoint/NB host and port")
710730
Expect(linodeCluster.Spec.ControlPlaneEndpoint.Host).To(Equal(controlPlaneEndpointHost))

internal/controller/linodefirewall_controller_test.go

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,6 @@ var _ = Describe("lifecycle", Ordered, Label("firewalls", "lifecycle"), func() {
194194
Result("success", func(ctx context.Context, mck Mock) {
195195
_, err := reconciler.reconcile(ctx, mck.Logger(), &fwScope)
196196
Expect(err).NotTo(HaveOccurred())
197-
198197
Expect(k8sClient.Get(ctx, fwObjectKey, &linodeFW)).To(Succeed())
199198
Expect(*linodeFW.Spec.FirewallID).To(Equal(1))
200199
Expect(mck.Logs()).NotTo(ContainSubstring("failed to create Firewall"))

internal/controller/linodemachine_controller.go

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,8 @@ import (
3535
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3636
kutil "sigs.k8s.io/cluster-api/util"
3737
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
38+
"sigs.k8s.io/cluster-api/util/patch"
39+
"sigs.k8s.io/cluster-api/util/paused"
3840
"sigs.k8s.io/cluster-api/util/predicates"
3941
ctrl "sigs.k8s.io/controller-runtime"
4042
"sigs.k8s.io/controller-runtime/pkg/builder"
@@ -169,6 +171,55 @@ func (r *LinodeMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
169171
return r.reconcile(ctx, log, machineScope)
170172
}
171173

174+
func (r *LinodeMachineReconciler) reconcilePause(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope) error {
175+
// Pausing a machine Pauses the firewall referred by the machine
176+
isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, machineScope.Client, machineScope.Cluster, machineScope.LinodeMachine)
177+
178+
if err == nil && !isPaused && !conditionChanged {
179+
return nil
180+
}
181+
if err != nil {
182+
return err
183+
}
184+
if machineScope.LinodeMachine.Spec.FirewallRef == nil {
185+
logger.Info("Paused reconciliation is skipped due to missing Firewall ref")
186+
return nil
187+
}
188+
189+
linodeFW := infrav1alpha2.LinodeFirewall{
190+
ObjectMeta: metav1.ObjectMeta{
191+
Namespace: machineScope.LinodeMachine.Spec.FirewallRef.Namespace,
192+
Name: machineScope.LinodeMachine.Spec.FirewallRef.Name,
193+
},
194+
}
195+
196+
if err := machineScope.Client.Get(ctx, client.ObjectKeyFromObject(&linodeFW), &linodeFW); err != nil {
197+
return err
198+
}
199+
200+
annotations := linodeFW.ObjectMeta.GetAnnotations()
201+
if annotations == nil {
202+
annotations = map[string]string{}
203+
}
204+
205+
if isPaused {
206+
logger.Info("CAPI cluster is paused, pausing Firewall too")
207+
// if we're paused, we should slap the pause annotation on our children
208+
// get the firewall & add the annotation
209+
annotations[clusterv1.PausedAnnotation] = "true"
210+
} else {
211+
// we are not paused here, but were previously paused (we can get here only if conditionChanged is true.
212+
logger.Info("CAPI cluster is no longer paused, removing pause annotation from Firewall")
213+
delete(annotations, clusterv1.PausedAnnotation)
214+
}
215+
linodeFW.SetAnnotations(annotations)
216+
fwPatchHelper, err := patch.NewHelper(&linodeFW, machineScope.Client)
217+
if err != nil {
218+
return fmt.Errorf("failed to create patch helper for firewalls: %w", err)
219+
}
220+
return fwPatchHelper.Patch(ctx, &linodeFW)
221+
}
222+
172223
func (r *LinodeMachineReconciler) reconcile(ctx context.Context, logger logr.Logger, machineScope *scope.MachineScope) (res ctrl.Result, err error) {
173224
failureReason := util.UnknownError
174225
//nolint:dupl // Code duplication is simplicity in this case.
@@ -216,6 +267,11 @@ func (r *LinodeMachineReconciler) reconcile(ctx context.Context, logger logr.Log
216267
}
217268
}
218269

270+
// Pause
271+
if err := r.reconcilePause(ctx, logger, machineScope); err != nil {
272+
return ctrl.Result{}, err
273+
}
274+
219275
// Delete
220276
if !machineScope.LinodeMachine.ObjectMeta.DeletionTimestamp.IsZero() {
221277
failureReason = util.DeleteError

internal/controller/linodemachine_controller_test.go

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1928,7 +1928,6 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup")
19281928

19291929
It("creates a instance in a PlacementGroup with a firewall", func(ctx SpecContext) {
19301930
mockLinodeClient := mock.NewMockLinodeClient(mockCtrl)
1931-
19321931
helper, err := patch.NewHelper(&linodePlacementGroup, k8sClient)
19331932
Expect(err).NotTo(HaveOccurred())
19341933

@@ -1940,7 +1939,6 @@ var _ = Describe("machine in PlacementGroup", Label("machine", "placementGroup")
19401939
})
19411940

19421941
Expect(err).NotTo(HaveOccurred())
1943-
19441942
mScope := scope.MachineScope{
19451943
Client: k8sClient,
19461944
LinodeClient: mockLinodeClient,

internal/controller/linodeobjectstoragebucket_controller.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,7 @@ func (r *LinodeObjectStorageBucketReconciler) reconcile(ctx context.Context, bSc
115115
reterr = err
116116
}
117117
}()
118+
118119
if err := r.reconcileApply(ctx, bScope); err != nil {
119120
return res, err
120121
}
@@ -180,7 +181,7 @@ func (r *LinodeObjectStorageBucketReconciler) SetupWithManager(mgr ctrl.Manager,
180181
WithOptions(options).
181182
Owns(&corev1.Secret{}).
182183
WithEventFilter(predicate.And(
183-
predicates.ResourceHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue),
184+
predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue),
184185
predicate.GenerationChangedPredicate{},
185186
)).
186187
Watches(

internal/controller/linodeobjectstoragebucket_controller_test.go

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ import (
2727
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2828
"k8s.io/apimachinery/pkg/runtime/schema"
2929
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
30+
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
3031
"sigs.k8s.io/cluster-api/util/patch"
3132
"sigs.k8s.io/controller-runtime/pkg/client"
3233
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -104,7 +105,8 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
104105
Expect(obj.Status.Ready).To(BeTrue())
105106
Expect(obj.Status.FailureMessage).To(BeNil())
106107
Expect(obj.Status.Conditions).To(HaveLen(1))
107-
Expect(obj.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition)))
108+
readyCond := conditions.Get(&obj, string(clusterv1.ReadyCondition))
109+
Expect(readyCond).NotTo(BeNil())
108110
Expect(*obj.Status.Hostname).To(Equal("hostname"))
109111
Expect(obj.Status.CreationTime).NotTo(BeNil())
110112

@@ -139,6 +141,7 @@ var _ = Describe("lifecycle", Ordered, Label("bucket", "lifecycle"), func() {
139141
Result("error", func(ctx context.Context, mck Mock) {
140142
bScope.LinodeClient = mck.LinodeClient
141143
_, err := reconciler.reconcile(ctx, &bScope)
144+
Expect(err).NotTo(BeNil())
142145
Expect(err.Error()).To(ContainSubstring("get bucket error"))
143146
}),
144147
),

internal/controller/linodeobjectstoragekey_controller.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,7 @@ func (r *LinodeObjectStorageKeyReconciler) SetupWithManager(mgr ctrl.Manager, op
312312
WithOptions(options).
313313
Owns(&corev1.Secret{}).
314314
WithEventFilter(predicate.And(
315-
predicates.ResourceHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue),
315+
predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), mgr.GetLogger(), r.WatchFilterValue),
316316
predicate.GenerationChangedPredicate{},
317317
)).
318318
Watches(

internal/controller/linodeobjectstoragekey_controller_test.go

Lines changed: 6 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ import (
3131
"k8s.io/utils/ptr"
3232
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
3333
clusteraddonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
34+
conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
3435
"sigs.k8s.io/cluster-api/util/patch"
3536
"sigs.k8s.io/controller-runtime/pkg/client"
3637
"sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -101,6 +102,7 @@ var _ = Describe("lifecycle", Ordered, Label("key", "key-lifecycle"), func() {
101102
Result("error", func(ctx context.Context, mck Mock) {
102103
keyScope.LinodeClient = mck.LinodeClient
103104
_, err := reconciler.reconcile(ctx, &keyScope)
105+
Expect(err).NotTo(BeNil())
104106
Expect(err.Error()).To(ContainSubstring("create key error"))
105107
}),
106108
),
@@ -124,7 +126,8 @@ var _ = Describe("lifecycle", Ordered, Label("key", "key-lifecycle"), func() {
124126
Expect(key.Status.Ready).To(BeTrue())
125127
Expect(key.Status.FailureMessage).To(BeNil())
126128
Expect(key.Status.Conditions).To(HaveLen(1))
127-
Expect(key.Status.Conditions[0].Type).To(Equal(string(clusterv1.ReadyCondition)))
129+
readyCond := conditions.Get(&key, string(clusterv1.ReadyCondition))
130+
Expect(readyCond).NotTo(BeNil())
128131
Expect(key.Status.CreationTime).NotTo(BeNil())
129132
Expect(*key.Status.LastKeyGeneration).To(Equal(key.Spec.KeyGeneration))
130133
Expect(*key.Status.LastKeyGeneration).To(Equal(0))
@@ -398,7 +401,6 @@ var _ = Describe("custom-secret", Label("key", "key-custom-secret"), func() {
398401
Result("generates cluster-resource-set secret with templated data", func(ctx context.Context, mck Mock) {
399402
_, err := reconciler.reconcile(ctx, &keyScope)
400403
Expect(err).NotTo(HaveOccurred())
401-
402404
var secret corev1.Secret
403405
secretKey := client.ObjectKey{Namespace: "other", Name: "cluster-resource-set-custom-secret"}
404406
Expect(k8sClient.Get(ctx, secretKey, &secret)).To(Succeed())
@@ -494,11 +496,9 @@ var _ = Describe("errors", Label("key", "key-errors"), func() {
494496
}),
495497
Result("error", func(ctx context.Context, mck Mock) {
496498
keyScope.Client = mck.K8sClient
497-
498-
patchHelper, err := patch.NewHelper(keyScope.Key, mck.K8sClient)
499+
helper, err := patch.NewHelper(keyScope.Key, mck.K8sClient)
499500
Expect(err).NotTo(HaveOccurred())
500-
keyScope.PatchHelper = patchHelper
501-
501+
keyScope.PatchHelper = helper
502502
_, err = reconciler.reconcile(ctx, &keyScope)
503503
Expect(err.Error()).To(ContainSubstring("no kind is registered"))
504504
}),
@@ -511,7 +511,6 @@ var _ = Describe("errors", Label("key", "key-errors"), func() {
511511
keyScope.Key.Spec.KeyGeneration = 1
512512
keyScope.Key.Status.LastKeyGeneration = ptr.To(keyScope.Key.Spec.KeyGeneration)
513513
keyScope.Key.Status.AccessKeyRef = ptr.To(1)
514-
515514
keyScope.LinodeClient = mck.LinodeClient
516515
keyScope.Client = mck.K8sClient
517516
err := reconciler.reconcileApply(ctx, &keyScope)

0 commit comments

Comments
 (0)