Skip to content

Commit 920f7c6

Browse files
committed
split the behavior in defer to function and add test
Signed-off-by: sivchari <shibuuuu5@gmail.com>
1 parent aed4673 commit 920f7c6

File tree

3 files changed

+87
-42
lines changed

3 files changed

+87
-42
lines changed

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 47 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -210,46 +210,7 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
210210
}
211211

212212
defer func() {
213-
// Always attempt to update status.
214-
if err := r.updateStatus(ctx, controlPlane); err != nil {
215-
if errors.Is(err, &internal.RemoteClusterConnectionError{}) {
216-
log.Error(err, "Could not connect to workload cluster to fetch status")
217-
} else {
218-
log.Error(err, "Failed to update KubeadmControlPlane status")
219-
reterr = kerrors.NewAggregate([]error{reterr, err})
220-
}
221-
}
222-
223-
r.updateV1Beta2Status(ctx, controlPlane)
224-
225-
// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
226-
patchOpts := []patch.Option{}
227-
if reterr == nil {
228-
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
229-
}
230-
if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
231-
log.Error(err, "Failed to patch KubeadmControlPlane")
232-
reterr = kerrors.NewAggregate([]error{reterr, err})
233-
}
234-
235-
// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
236-
if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
237-
// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
238-
// resync (by default 10 minutes).
239-
// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
240-
// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
241-
if !kcp.Status.Ready {
242-
res = ctrl.Result{RequeueAfter: 20 * time.Second}
243-
}
244-
245-
// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
246-
// status without waiting for a full resync (by default 10 minutes).
247-
// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
248-
// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
249-
if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
250-
res = ctrl.Result{RequeueAfter: 20 * time.Second}
251-
}
252-
}
213+
res, reterr = r.deferPatch(ctx, kcp, controlPlane, patchHelper)
253214
}()
254215

255216
if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
@@ -261,6 +222,52 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
261222
return r.reconcile(ctx, controlPlane)
262223
}
263224

225+
func (r *KubeadmControlPlaneReconciler) deferPatch(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane, patchHelper *patch.Helper) (res ctrl.Result, reterr error) {
226+
log := ctrl.LoggerFrom(ctx)
227+
// Always attempt to update status.
228+
if err := r.updateStatus(ctx, controlPlane); err != nil {
229+
var connFailure *internal.RemoteClusterConnectionError
230+
if errors.As(err, &connFailure) {
231+
log.Error(err, "Could not connect to workload cluster to fetch status")
232+
} else {
233+
log.Error(err, "Failed to update KubeadmControlPlane status")
234+
reterr = kerrors.NewAggregate([]error{reterr, err})
235+
}
236+
}
237+
238+
r.updateV1Beta2Status(ctx, controlPlane)
239+
240+
// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
241+
patchOpts := []patch.Option{}
242+
if reterr == nil {
243+
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
244+
}
245+
if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
246+
log.Error(err, "Failed to patch KubeadmControlPlane")
247+
reterr = kerrors.NewAggregate([]error{reterr, err})
248+
}
249+
250+
// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
251+
if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
252+
// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
253+
// resync (by default 10 minutes).
254+
// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
255+
// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
256+
if !kcp.Status.Ready {
257+
res = ctrl.Result{RequeueAfter: 20 * time.Second}
258+
}
259+
260+
// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
261+
// status without waiting for a full resync (by default 10 minutes).
262+
// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
263+
// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
264+
if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
265+
res = ctrl.Result{RequeueAfter: 20 * time.Second}
266+
}
267+
}
268+
return
269+
}
270+
264271
// initControlPlaneScope initializes the control plane scope; this includes also checking for orphan machines and
265272
// adopt them if necessary.
266273
// The func also returns a boolean indicating if adoptableMachine have been found and processed, but this doesn't imply those machines

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2822,6 +2822,45 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition
28222822
}
28232823
}
28242824

2825+
func TestKubeadmControlPlaneReconciler_deferPatch(t *testing.T) {
2826+
g := NewWithT(t)
2827+
2828+
ns, err := env.CreateNamespace(ctx, "test-reconcile-update-status")
2829+
g.Expect(err).ToNot(HaveOccurred())
2830+
2831+
cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
2832+
g.Expect(env.Create(ctx, cluster)).To(Succeed())
2833+
g.Expect(env.Create(ctx, kcp)).To(Succeed())
2834+
defer func(do ...client.Object) {
2835+
g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
2836+
}(kcp, ns)
2837+
2838+
r := &KubeadmControlPlaneReconciler{
2839+
Client: env,
2840+
SecretCachingClient: secretCachingClient,
2841+
managementCluster: &fakeManagementCluster{
2842+
WorkloadErr: &internal.RemoteClusterConnectionError{
2843+
Name: util.ObjectKey(cluster).String(),
2844+
Err: errors.New("connection error"),
2845+
},
2846+
},
2847+
}
2848+
2849+
controlPlane, _, err := r.initControlPlaneScope(ctx, cluster, kcp)
2850+
g.Expect(err).ToNot(HaveOccurred())
2851+
2852+
patchHelper, err := patch.NewHelper(kcp, env)
2853+
g.Expect(err).ToNot(HaveOccurred())
2854+
2855+
result, err := r.deferPatch(ctx, kcp, controlPlane, patchHelper)
2856+
// RemoteClusterConnectionError should only be logged, not returned as an error
2857+
g.Expect(err).NotTo(HaveOccurred())
2858+
g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: 20 * time.Second}))
2859+
2860+
// Cleanup the created cluster and wait for it to be deleted
2861+
g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed())
2862+
}
2863+
28252864
type fakeClusterCache struct {
28262865
clustercache.ClusterCache
28272866
lastProbeSuccessTime time.Time

controlplane/kubeadm/internal/controllers/status.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ import (
2323
"strings"
2424
"time"
2525

26-
"github.com/pkg/errors"
2726
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2827
"k8s.io/apimachinery/pkg/util/sets"
2928
"k8s.io/utils/ptr"
@@ -94,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro
9493

9594
workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
9695
if err != nil {
97-
return errors.Wrap(err, "failed to create remote cluster client")
96+
return fmt.Errorf("failed to create remote cluster client: %w", err)
9897
}
9998
status, err := workloadCluster.ClusterStatus(ctx)
10099
if err != nil {

0 commit comments

Comments (0)