Commit 955b195

use std errors package
Signed-off-by: sivchari <shibuuuu5@gmail.com>
1 parent 155e660 commit 955b195

14 files changed, +205 −68 lines changed

bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go

Lines changed: 0 additions & 5 deletions

@@ -194,11 +194,6 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 	// Lookup the cluster the config owner is associated with
 	cluster, err := util.GetClusterByName(ctx, r.Client, configOwner.GetNamespace(), configOwner.ClusterName())
 	if err != nil {
-		if errors.Cause(err) == util.ErrNoCluster {
-			log.Info(fmt.Sprintf("%s does not belong to a cluster yet, waiting until it's part of a cluster", configOwner.GetKind()))
-			return ctrl.Result{}, nil
-		}
-
 		if apierrors.IsNotFound(err) {
 			log.Info("Cluster does not exist yet, waiting until it is created")
 			return ctrl.Result{}, nil
cmd/clusterctl/client/cluster/template.go

Lines changed: 4 additions & 2 deletions

@@ -303,10 +303,12 @@ func getGitHubClient(ctx context.Context, configVariablesClient config.Variables
 	return github.NewClient(authenticatingHTTPClient), nil
 }
 
+var errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
+
 // handleGithubErr wraps error messages.
 func handleGithubErr(err error, message string, args ...interface{}) error {
 	if _, ok := err.(*github.RateLimitError); ok {
-		return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
+		return errRateLimit
 	}
-	return errors.Wrapf(err, message, args...)
+	return fmt.Errorf("%s: %w", fmt.Sprintf(message, args...), err)
 }
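Two things happen above: the rate-limit message becomes the errRateLimit sentinel, which callers can now detect with errors.Is, and errors.Wrapf from pkg/errors is replaced by fmt.Errorf with the %w verb. The two wrapping forms render the same "message: cause" string and both keep the cause retrievable. A small sketch of the equivalence, with illustrative values:

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	cause := errors.New("boom")

	// pkg/errors form (before this commit).
	a := pkgerrors.Wrapf(cause, "failed to fetch %q", "v1.0.0")
	// Standard-library form (after this commit).
	b := fmt.Errorf("%s: %w", fmt.Sprintf("failed to fetch %q", "v1.0.0"), cause)

	fmt.Println(a.Error() == b.Error()) // true: both render `failed to fetch "v1.0.0": boom`
	fmt.Println(errors.Is(b, cause))    // true: %w preserves the chain
	fmt.Println(errors.Is(a, cause))    // also true: pkg/errors wrappers implement Unwrap (v0.9.0+)
}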

cmd/clusterctl/client/cluster/template_test.go

Lines changed: 38 additions & 0 deletions

@@ -29,6 +29,7 @@ import (
 
 	"github.com/google/go-github/v53/github"
 	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
@@ -517,6 +518,43 @@ func Test_templateClient_GetFromURL(t *testing.T) {
 	}
 }
 
+func Test_handleGithubErr(t *testing.T) {
+	tests := []struct {
+		name    string
+		err     error
+		message string
+		args    []any
+		want    error
+	}{
+		{
+			name:    "Return error",
+			err:     errors.New("error"),
+			message: "message %s and %s",
+			args:    []any{"arg1", "arg2"},
+			want:    fmt.Errorf("message arg1 and arg2: %w", errors.New("error")),
+		},
+		{
+			name: "Return RateLimitError",
+			err: &github.RateLimitError{
+				Response: &http.Response{
+					StatusCode: http.StatusForbidden,
+				},
+			},
+			message: "",
+			args:    nil,
+			want:    errRateLimit,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			got := handleGithubErr(tt.err, tt.message, tt.args...)
+			g.Expect(got.Error()).To(Equal(tt.want.Error()))
+		})
+	}
+}
+
 func mustParseURL(rawURL string) *url.URL {
 	rURL, err := url.Parse(rawURL)
 	if err != nil {
cmd/clusterctl/client/repository/repository_github.go

Lines changed: 13 additions & 11 deletions

@@ -50,7 +50,8 @@ const (
 )
 
 var (
-	errNotFound = errors.New("404 Not Found")
+	errNotFound  = errors.New("404 Not Found")
+	errRateLimit = errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
 
 	// Caches used to limit the number of GitHub API calls.
 
@@ -319,7 +320,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) {
 		if listReleasesErr != nil {
 			retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
 			// Return immediately if we are rate limited.
-			if _, ok := listReleasesErr.(*github.RateLimitError); ok {
+			if errors.Is(retryError, errRateLimit) {
 				return false, retryError
 			}
 			return false, nil
@@ -334,7 +335,7 @@ func (g *gitHubRepository) getVersions(ctx context.Context) ([]string, error) {
 		if listReleasesErr != nil {
 			retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
 			// Return immediately if we are rate limited.
-			if _, ok := listReleasesErr.(*github.RateLimitError); ok {
+			if errors.Is(retryError, errRateLimit) {
 				return false, retryError
 			}
 			return false, nil
@@ -384,7 +385,7 @@ func (g *gitHubRepository) getReleaseByTag(ctx context.Context, tag string) (*gi
 			return false, retryError
 		}
 		// Return immediately if we are rate limited.
-		if _, ok := getReleasesErr.(*github.RateLimitError); ok {
+		if errors.Is(retryError, errRateLimit) {
 			return false, retryError
 		}
 		return false, nil
@@ -466,7 +467,7 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release
 		if downloadReleaseError != nil {
 			retryError = g.handleGithubErr(downloadReleaseError, "failed to download file %q from %q release", *release.TagName, fileName)
 			// Return immediately if we are rate limited.
-			if _, ok := downloadReleaseError.(*github.RateLimitError); ok {
+			if errors.Is(retryError, errRateLimit) {
 				return false, retryError
 			}
 			return false, nil
@@ -500,12 +501,13 @@ func (g *gitHubRepository) downloadFilesFromRelease(ctx context.Context, release
 // handleGithubErr wraps error messages.
 func (g *gitHubRepository) handleGithubErr(err error, message string, args ...interface{}) error {
 	if _, ok := err.(*github.RateLimitError); ok {
-		return errors.New("rate limit for github api has been reached. Please wait one hour or get a personal API token and assign it to the GITHUB_TOKEN environment variable")
+		return errRateLimit
 	}
-	if ghErr, ok := err.(*github.ErrorResponse); ok {
-		if ghErr.Response.StatusCode == http.StatusNotFound {
-			return errNotFound
-		}
+
+	var ghErr *github.ErrorResponse
+	if errors.As(err, &ghErr) && ghErr.Response.StatusCode == http.StatusNotFound {
+		return errNotFound
 	}
-	return errors.Wrapf(err, message, args...)
+
+	return fmt.Errorf("%s: %w", fmt.Sprintf(message, args...), err)
 }
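Two substitutions are at work here. The retry loops now probe the already-normalized retryError with errors.Is(retryError, errRateLimit), which holds exactly when handleGithubErr returned the sentinel, instead of re-asserting the type of the raw GitHub error. And handleGithubErr itself trades a direct type assertion for errors.As, which also finds a *github.ErrorResponse buried inside a wrapped chain. A sketch of why errors.As is the more robust form (the wrapping below is hypothetical):

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/google/go-github/v53/github"
)

func main() {
	ghErr := &github.ErrorResponse{
		Response: &http.Response{StatusCode: http.StatusNotFound},
	}
	// Suppose a caller annotated the typed error before it reached us.
	wrapped := fmt.Errorf("fetching release: %w", ghErr)

	// A plain type assertion only inspects the outermost value.
	_, ok := wrapped.(*github.ErrorResponse)
	fmt.Println(ok) // false

	// errors.As walks the chain and fills target on the first match.
	var target *github.ErrorResponse
	if errors.As(wrapped, &target) {
		fmt.Println(target.Response.StatusCode) // 404
	}
}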

cmd/clusterctl/client/repository/repository_github_test.go

Lines changed: 52 additions & 0 deletions

@@ -26,6 +26,7 @@ import (
 
 	"github.com/google/go-github/v53/github"
 	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
 	"k8s.io/utils/ptr"
 
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
@@ -1108,3 +1109,54 @@ func Test_gitHubRepository_releaseNotFound(t *testing.T) {
 		})
 	}
 }
+
+func Test_handleGithubErr(t *testing.T) {
+	tests := []struct {
+		name    string
+		err     error
+		message string
+		args    []any
+		want    error
+	}{
+		{
+			name:    "Return error",
+			err:     errors.New("error"),
+			message: "message %s and %s",
+			args:    []any{"arg1", "arg2"},
+			want:    fmt.Errorf("message arg1 and arg2: %w", errors.New("error")),
+		},
+		{
+			name: "Return RateLimitError",
+			err: &github.RateLimitError{
+				Response: &http.Response{
+					StatusCode: http.StatusForbidden,
+				},
+			},
+			message: "",
+			args:    nil,
+			want:    errRateLimit,
+		},
+		{
+			name: "Return ErrorResponse",
+			err: &github.ErrorResponse{
+				Response: &http.Response{
+					StatusCode: http.StatusNotFound,
+				},
+			},
+			message: "",
+			args:    nil,
+			want:    errNotFound,
+		},
+	}
+
+	gRepo := &gitHubRepository{}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			got := gRepo.handleGithubErr(tt.err, tt.message, tt.args...)
+			g.Expect(got.Error()).To(Equal(tt.want.Error()))
+		})
+	}
+}

controlplane/kubeadm/internal/controllers/controller.go

Lines changed: 51 additions & 40 deletions

@@ -213,56 +213,67 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
 	}
 
 	defer func() {
-		// Always attempt to update status.
-		if err := r.updateStatus(ctx, controlPlane); err != nil {
-			var connFailure *internal.RemoteClusterConnectionError
-			if errors.As(err, &connFailure) {
-				log.Error(err, "Could not connect to workload cluster to fetch status")
-			} else {
-				log.Error(err, "Failed to update KubeadmControlPlane status")
-				reterr = kerrors.NewAggregate([]error{reterr, err})
-			}
-		}
+		res, reterr = r.deferPatch(ctx, kcp, controlPlane, patchHelper)
+	}()
+
+	if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
+		// Handle deletion reconciliation loop.
+		return r.reconcileDelete(ctx, controlPlane)
+	}
 
-		r.updateV1Beta2Status(ctx, controlPlane)
+	// Handle normal reconciliation loop.
+	return r.reconcile(ctx, controlPlane)
+}
 
-		// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
-		patchOpts := []patch.Option{}
-		if reterr == nil {
-			patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
-		}
-		if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
-			log.Error(err, "Failed to patch KubeadmControlPlane")
+func (r *KubeadmControlPlaneReconciler) deferPatch(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane, patchHelper *patch.Helper) (ctrl.Result, error) {
+	var (
+		res    ctrl.Result
+		reterr error
+	)
+	log := ctrl.LoggerFrom(ctx)
+	// Always attempt to update status.
+	if err := r.updateStatus(ctx, controlPlane); err != nil {
+		var connFailure *internal.RemoteClusterConnectionError
+		if errors.As(err, &connFailure) {
+			log.Error(err, "Could not connect to workload cluster to fetch status")
+		} else {
+			log.Error(err, "Failed to update KubeadmControlPlane status")
 			reterr = kerrors.NewAggregate([]error{reterr, err})
 		}
+	}
 
-		// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
-		if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
-			// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
-			// resync (by default 10 minutes).
-			// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
-			// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
-			if !kcp.Status.Ready {
-				res = ctrl.Result{RequeueAfter: 20 * time.Second}
-			}
+	r.updateV1Beta2Status(ctx, controlPlane)
 
-			// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
-			// status without waiting for a full resync (by default 10 minutes).
-			// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
-			// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
-			if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
-				res = ctrl.Result{RequeueAfter: 20 * time.Second}
-			}
+	// Always attempt to Patch the KubeadmControlPlane object and status after each reconciliation.
+	patchOpts := []patch.Option{}
+	if reterr == nil {
+		patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
+	}
+	if err := patchKubeadmControlPlane(ctx, patchHelper, kcp, patchOpts...); err != nil {
+		log.Error(err, "Failed to patch KubeadmControlPlane")
+		reterr = kerrors.NewAggregate([]error{reterr, err})
+	}
+
+	// Only requeue if there is no error, Requeue or RequeueAfter and the object does not have a deletion timestamp.
+	if reterr == nil && res.IsZero() && kcp.ObjectMeta.DeletionTimestamp.IsZero() {
+		// Make KCP requeue in case node status is not ready, so we can check for node status without waiting for a full
+		// resync (by default 10 minutes).
+		// The alternative solution would be to watch the control plane nodes in the Cluster - similar to how the
+		// MachineSet and MachineHealthCheck controllers watch the nodes under their control.
+		if !kcp.Status.Ready {
+			res = ctrl.Result{RequeueAfter: 20 * time.Second}
 		}
-	}()
 
-	if !kcp.ObjectMeta.DeletionTimestamp.IsZero() {
-		// Handle deletion reconciliation loop.
-		return r.reconcileDelete(ctx, controlPlane)
+		// Make KCP requeue if ControlPlaneComponentsHealthyCondition is false so we can check for control plane component
+		// status without waiting for a full resync (by default 10 minutes).
+		// Otherwise this condition can lead to a delay in provisioning MachineDeployments when MachineSet preflight checks are enabled.
+		// The alternative solution to this requeue would be watching the relevant pods inside each workload cluster which would be very expensive.
+		if conditions.IsFalse(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition) {
+			res = ctrl.Result{RequeueAfter: 20 * time.Second}
+		}
 	}
 
-	// Handle normal reconciliation loop.
-	return r.reconcile(ctx, controlPlane)
+	return res, reterr
 }
 
 // initControlPlaneScope initializes the control plane scope; this includes also checking for orphan machines and
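The refactor keeps the patch-on-exit behavior because Reconcile's named results res and reterr are reassigned inside the defer after the normal or deletion path has already returned; whatever deferPatch computes becomes the function's final result. A minimal sketch of that named-return/defer pattern, with all names illustrative:

package main

import "fmt"

type result struct{ requeue bool }

// finish stands in for deferPatch: it sees the in-flight result and error,
// performs exit-time work (status update, patch), and may adjust both.
func finish(res result, err error) (result, error) {
	if err == nil && !res.requeue {
		res.requeue = true // e.g. requeue while status is not yet ready
	}
	return res, err
}

// reconcile mirrors the Reconcile shape: named returns let the deferred
// call observe and overwrite the values the function is about to return.
func reconcile() (res result, reterr error) {
	defer func() {
		res, reterr = finish(res, reterr)
	}()

	return result{}, nil // the defer runs after this and rewrites res
}

func main() {
	fmt.Println(reconcile()) // {true} <nil>
}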

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 39 additions & 0 deletions

@@ -2823,6 +2823,45 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition
 	}
 }
 
+func TestKubeadmControlPlaneReconciler_deferPatch(t *testing.T) {
+	g := NewWithT(t)
+
+	ns, err := env.CreateNamespace(ctx, "test-reconcile-update-status")
+	g.Expect(err).ToNot(HaveOccurred())
+
+	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
+	g.Expect(env.Create(ctx, cluster)).To(Succeed())
+	g.Expect(env.Create(ctx, kcp)).To(Succeed())
+	defer func(do ...client.Object) {
+		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
+	}(kcp, ns)
+
+	r := &KubeadmControlPlaneReconciler{
+		Client:              env,
+		SecretCachingClient: secretCachingClient,
+		managementCluster: &fakeManagementCluster{
+			WorkloadErr: &internal.RemoteClusterConnectionError{
+				Name: util.ObjectKey(cluster).String(),
+				Err:  errors.New("connection error"),
+			},
+		},
+	}
+
+	controlPlane, _, err := r.initControlPlaneScope(ctx, cluster, kcp)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	patchHelper, err := patch.NewHelper(kcp, env)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	result, err := r.deferPatch(ctx, kcp, controlPlane, patchHelper)
+	// Should bump RemoteClusterConnectionError
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: 20 * time.Second}))
+
+	// calling reconcile should return error
+	g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed())
+}
+
 type fakeClusterCache struct {
 	clustercache.ClusterCache
 	lastProbeSuccessTime time.Time

controlplane/kubeadm/internal/controllers/status.go

Lines changed: 1 addition & 2 deletions

@@ -23,7 +23,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/utils/ptr"
@@ -94,7 +93,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, contro
 
 	workloadCluster, err := controlPlane.GetWorkloadCluster(ctx)
 	if err != nil {
-		return errors.Wrap(err, "failed to create remote cluster client")
+		return fmt.Errorf("failed to create remote cluster client: %w", err)
 	}
 	status, err := workloadCluster.ClusterStatus(ctx)
 	if err != nil {
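Replacing errors.Wrap with fmt.Errorf and %w keeps the underlying failure inspectable: the deferPatch path shown earlier still recognizes the typed *internal.RemoteClusterConnectionError with errors.As even through this wrap. A self-contained sketch using a stand-in type:

package main

import (
	"errors"
	"fmt"
)

// connError stands in for a typed failure such as
// internal.RemoteClusterConnectionError (illustrative).
type connError struct{ name string }

func (e *connError) Error() string { return "cannot connect to " + e.name }

func updateStatus() error {
	err := &connError{name: "workload-cluster"} // pretend GetWorkloadCluster failed
	// Like errors.Wrap, fmt.Errorf with %w keeps the cause attached.
	return fmt.Errorf("failed to create remote cluster client: %w", err)
}

func main() {
	err := updateStatus()
	var ce *connError
	// The typed cause survives the wrap, so errors.As still finds it.
	fmt.Println(errors.As(err, &ce), err)
}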

exp/internal/controllers/machinepool_controller_noderef.go

Lines changed: 1 addition & 1 deletion

@@ -92,7 +92,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, s *scope)
 	// Get the Node references.
 	nodeRefsResult, err := r.getNodeReferences(ctx, mp.Spec.ProviderIDList, mp.Spec.MinReadySeconds, s.nodeRefMap)
 	if err != nil {
-		if err == errNoAvailableNodes {
+		if errors.Is(err, errNoAvailableNodes) {
 			log.Info("Cannot assign NodeRefs to MachinePool, no matching Nodes")
 			// No need to requeue here. Nodes emit an event that triggers reconciliation.
 			return ctrl.Result{}, nil

exp/internal/controllers/machinepool_controller_phases.go

Lines changed: 1 addition & 1 deletion

@@ -300,7 +300,7 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, s *
 	// Get and set Status.Replicas from the infrastructure provider.
 	err = util.UnstructuredUnmarshalField(infraConfig, &mp.Status.Replicas, "status", "replicas")
 	if err != nil {
-		if err != util.ErrUnstructuredFieldNotFound {
+		if !errors.Is(err, util.ErrUnstructuredFieldNotFound) {
 			return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve replicas from infrastructure provider for MachinePool %q in namespace %q", mp.Name, mp.Namespace)
 		}
 	}
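Note that the unchanged context line still calls errors.Wrapf, so the errors identifier in this file remains github.com/pkg/errors. The new errors.Is call works anyway because pkg/errors v0.9.0+ forwards Is, As, and Unwrap to the standard library, which is what lets this migration proceed file by file. A quick sketch:

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errFieldNotFound = errors.New("field not found") // illustrative sentinel

func main() {
	wrapped := errors.Wrap(errFieldNotFound, "reading status.replicas")
	// pkg/errors re-exports errors.Is (v0.9.0+), so sentinel checks behave
	// the same whether the chain was built with Wrap or fmt.Errorf+%w.
	fmt.Println(errors.Is(wrapped, errFieldNotFound)) // true
}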
