diff --git a/internal/cmd/controller/reconciler/schedule_controller.go b/internal/cmd/controller/reconciler/schedule_controller.go
index add0d34687..84d707a6f8 100644
--- a/internal/cmd/controller/reconciler/schedule_controller.go
+++ b/internal/cmd/controller/reconciler/schedule_controller.go
@@ -265,16 +265,19 @@ func setClusterScheduled(ctx context.Context, c client.Client, name, namespace s
 		return fmt.Errorf("%w, getting cluster: %w", fleetutil.ErrRetryable, err)
 	}
 
-	// if the value is already the expected one, avoid the update
-	if cluster.Status.Scheduled == scheduled {
+	// if the values are already the expected ones, avoid the update
+	if cluster.Status.Scheduled == scheduled && !cluster.Status.ActiveSchedule {
 		return nil
 	}
 
 	old := cluster.DeepCopy()
 	cluster.Status.Scheduled = scheduled
-	if !scheduled {
-		cluster.Status.ActiveSchedule = false
-	}
+
+	// This function is called either because we're updating a
+	// Schedule or because we're creating it.
+	// In both cases ActiveSchedule should be false, as a Schedule
+	// always begins in OffSchedule mode until the first start call is executed.
+	cluster.Status.ActiveSchedule = false
 
 	return updateClusterStatus(ctx, c, old, cluster)
 }
@@ -411,12 +414,9 @@ func setClustersScheduled(ctx context.Context, c client.Client, clusters []strin
 }
 
 func updateScheduledClusters(ctx context.Context, scheduler quartz.Scheduler, c client.Client, clustersNew []string, clustersOld []string, namespace string) error {
-	// first look for clusters that are not scheduled yet and flag them as scheduled
 	for _, cluster := range clustersNew {
-		if !slices.Contains(clustersOld, cluster) {
-			if err := setClusterScheduled(ctx, c, cluster, namespace, true); err != nil {
-				return err
-			}
+		if err := setClusterScheduled(ctx, c, cluster, namespace, true); err != nil {
+			return err
 		}
 	}
 
diff --git a/internal/cmd/controller/reconciler/schedule_controller_test.go b/internal/cmd/controller/reconciler/schedule_controller_test.go
index 47b704abd0..06e72ce0a6 100644
--- a/internal/cmd/controller/reconciler/schedule_controller_test.go
+++ b/internal/cmd/controller/reconciler/schedule_controller_test.go
@@ -19,6 +19,12 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
 
+type expected struct {
+	scheduledJob         bool
+	statusScheduled      bool
+	statusActiveSchedule bool
+}
+
 var _ = Describe("ScheduleReconciler", func() {
 	var (
 		ctx context.Context
@@ -271,7 +277,7 @@ var _ = Describe("ScheduleReconciler", func() {
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      "test-cluster2",
 				Namespace: "default",
-				Labels:    map[string]string{"env": "test"},
+				Labels:    map[string]string{"env": "test", "foo": "bar"},
 			},
 		}
 
@@ -305,10 +311,36 @@ var _ = Describe("ScheduleReconciler", func() {
 
 			// clusters 1, 2 and 3 should be scheduled in the quartz.Scheduler and also
 			// be flagged as Scheduled
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster", "default", true, true)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster2", "default", true, true)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster3", "default", true, true)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster4", "default", false, false)
+			checkState(scheduler, k8sclient, "test-cluster", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: false})
+			checkState(scheduler, k8sclient, "test-cluster2", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: false})
+			checkState(scheduler, k8sclient, "test-cluster3", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: false})
+			checkState(scheduler, k8sclient, "test-cluster4", "default",
+				expected{scheduledJob: false, statusScheduled: false, statusActiveSchedule: false})
+
+			// force the start of the schedule (so it sets .Status.ActiveSchedule=true)
+			jobKey := scheduleKey(schedule)
+			job, err := scheduler.GetScheduledJob(jobKey)
+			Expect(err).NotTo(HaveOccurred())
+
+			cronDurationJob, ok := job.JobDetail().Job().(*CronDurationJob)
+			Expect(ok).To(BeTrue())
+
+			// Manually trigger start
+			err = cronDurationJob.executeStart(ctx)
+			Expect(err).NotTo(HaveOccurred())
+
+			// check now that the clusters have the expected values, especially Status.ActiveSchedule
+			checkState(scheduler, k8sclient, "test-cluster", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: true})
+			checkState(scheduler, k8sclient, "test-cluster2", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: true})
+			checkState(scheduler, k8sclient, "test-cluster3", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: true})
+			checkState(scheduler, k8sclient, "test-cluster4", "default",
+				expected{scheduledJob: false, statusScheduled: false, statusActiveSchedule: false})
 
 			// update the schedule, now it only looks for the label foo=bar
 			scheduleUpdated := &fleet.Schedule{}
@@ -326,27 +358,34 @@ var _ = Describe("ScheduleReconciler", func() {
 			_, err = reconciler.Reconcile(ctx, req)
 			Expect(err).NotTo(HaveOccurred())
 
-			// now only 1 cluster should be flagged as scheduled and be in the scheduled jobs list
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster", "default", false, false)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster2", "default", false, false)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster3", "default", false, false)
-			checkClusterIsScheduled(scheduler, k8sclient, "test-cluster4", "default", true, true)
+			// clusters 2 and 4 should still be targeted.
+			checkState(scheduler, k8sclient, "test-cluster", "default",
+				expected{scheduledJob: false, statusScheduled: false, statusActiveSchedule: false})
+			// cluster 2 had Status.ActiveSchedule set to true, but because we updated the Schedule
+			// it should be back to false.
+			checkState(scheduler, k8sclient, "test-cluster2", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: false})
+			checkState(scheduler, k8sclient, "test-cluster3", "default",
+				expected{scheduledJob: false, statusScheduled: false, statusActiveSchedule: false})
+			checkState(scheduler, k8sclient, "test-cluster4", "default",
+				expected{scheduledJob: true, statusScheduled: true, statusActiveSchedule: false})
 		})
 	})
 })
 
-func checkClusterIsScheduled(
+func checkState(
 	scheduler quartz.Scheduler,
 	k8sclient client.Client,
 	cluster, namespace string,
-	scheduledExpected, flaggedAsScheduledExpected bool) {
+	expectedState expected) {
 	isScheduled, err := isClusterScheduled(scheduler, cluster, namespace)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(isScheduled).To(Equal(scheduledExpected))
+	Expect(isScheduled).To(Equal(expectedState.scheduledJob))
 
 	key := client.ObjectKey{Name: cluster, Namespace: namespace}
 	clusterObj := &fleet.Cluster{}
 	err = k8sclient.Get(context.Background(), key, clusterObj)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(clusterObj.Status.Scheduled).To(Equal(flaggedAsScheduledExpected))
+	Expect(clusterObj.Status.Scheduled).To(Equal(expectedState.statusScheduled))
+	Expect(clusterObj.Status.ActiveSchedule).To(Equal(expectedState.statusActiveSchedule))
 }
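
For reference, a minimal runnable sketch of the status lifecycle this change enforces. The types and hooks below are illustrative stand-ins, not Fleet's actual Cluster type or the real reconciler/CronDurationJob wiring:

package main

import "fmt"

// Illustrative stand-in for Cluster.Status; only the two fields
// touched by this change are modeled.
type clusterStatus struct {
	Scheduled      bool // cluster is targeted by a Schedule
	ActiveSchedule bool // the schedule window is currently open
}

// onScheduleReconcile mirrors the new setClusterScheduled behavior:
// whether the Schedule is being created or updated, ActiveSchedule is
// unconditionally reset, because a Schedule always begins in
// OffSchedule mode until its first start call executes.
func onScheduleReconcile(s *clusterStatus, targeted bool) {
	s.Scheduled = targeted
	s.ActiveSchedule = false
}

// onStart stands in for CronDurationJob.executeStart opening the window.
func onStart(s *clusterStatus) {
	s.ActiveSchedule = true
}

func main() {
	var s clusterStatus
	onScheduleReconcile(&s, true) // Schedule created: Scheduled=true, ActiveSchedule=false
	onStart(&s)                   // start fires: ActiveSchedule=true
	onScheduleReconcile(&s, true) // Schedule updated: back to OffSchedule
	fmt.Printf("%+v\n", s)        // prints {Scheduled:true ActiveSchedule:false}
}

The invariant this demonstrates, and what the updated test asserts for test-cluster2: any reconcile of a Schedule, create or update, drops targeted clusters back to OffSchedule until the job's next start call runs.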