Skip to content

Commit 1dfafb3

Browse files
pbasovs3rj1k
authored and committed
Allow API Loadbalancer Health Monitor configuration
1 parent 3fc544b commit 1dfafb3

File tree

7 files changed

+216
-11
lines changed

7 files changed

+216
-11
lines changed

api/v1beta1/types.go

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -879,6 +879,35 @@ type APIServerLoadBalancer struct {
879879
// Flavor is the flavor name that will be used to create the APIServerLoadBalancer Spec.
880880
//+optional
881881
Flavor optional.String `json:"flavor,omitempty"`
882+
883+
// Monitor contains configuration for the load balancer health monitor.
884+
//+optional
885+
Monitor *APIServerLoadBalancerMonitor `json:"monitor,omitempty"`
886+
}
887+
888+
// APIServerLoadBalancerMonitor contains configuration for the load balancer
// health monitor attached to the API server load balancer pool. All fields are
// optional; when a field is unset the controller applies the stated default.
type APIServerLoadBalancerMonitor struct {
	// Delay is the time in seconds between sending probes to members. Default is 10.
	//+optional
	//+kubebuilder:validation:Minimum=1
	Delay *int `json:"delay,omitempty"`

	// Timeout is the maximum time in seconds for a monitor to wait for a
	// connection to be established before it times out. Default is 5.
	// NOTE(review): Octavia requires timeout <= delay — not enforced by these
	// markers; verify against the API before relying on arbitrary combinations.
	//+optional
	//+kubebuilder:validation:Minimum=1
	Timeout *int `json:"timeout,omitempty"`

	// MaxRetries is the number of successful checks before changing the
	// operating status of the member to ONLINE. Default is 5.
	//+optional
	//+kubebuilder:validation:Minimum=1
	//+kubebuilder:validation:Maximum=10
	MaxRetries *int `json:"maxRetries,omitempty"`

	// MaxRetriesDown is the number of allowed check failures before changing
	// the operating status of the member to ERROR. Default is 3.
	//+optional
	//+kubebuilder:validation:Minimum=1
	//+kubebuilder:validation:Maximum=10
	MaxRetriesDown *int `json:"maxRetriesDown,omitempty"`
}
883912

884913
func (s *APIServerLoadBalancer) IsZero() bool {

config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclusters.yaml

Lines changed: 30 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

config/crd/bases/infrastructure.cluster.x-k8s.io_openstackclustertemplates.yaml

Lines changed: 30 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pkg/clients/loadbalancer.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,7 @@ type LbClient interface {
5454
DeletePoolMember(poolID string, lbMemberID string) error
5555
CreateMonitor(opts monitors.CreateOptsBuilder) (*monitors.Monitor, error)
5656
ListMonitors(opts monitors.ListOptsBuilder) ([]monitors.Monitor, error)
57+
UpdateMonitor(id string, opts monitors.UpdateOptsBuilder) (*monitors.Monitor, error)
5758
DeleteMonitor(id string) error
5859
ListLoadBalancerProviders() ([]providers.Provider, error)
5960
ListOctaviaVersions() ([]apiversions.APIVersion, error)
@@ -239,6 +240,15 @@ func (l lbClient) ListMonitors(opts monitors.ListOptsBuilder) ([]monitors.Monito
239240
return monitors.ExtractMonitors(allPages)
240241
}
241242

243+
// UpdateMonitor updates the load balancer health monitor identified by id
// with the given options, and returns the updated monitor. The request is
// recorded against the "loadbalancer_healthmonitor"/"update" Prometheus
// metric via ObserveRequest, matching the other lbClient methods.
func (l lbClient) UpdateMonitor(id string, opts monitors.UpdateOptsBuilder) (*monitors.Monitor, error) {
	mc := metrics.NewMetricPrometheusContext("loadbalancer_healthmonitor", "update")
	monitor, err := monitors.Update(context.TODO(), l.serviceClient, id, opts).Extract()
	if mc.ObserveRequest(err) != nil {
		return nil, err
	}
	return monitor, nil
}
251+
242252
func (l lbClient) DeleteMonitor(id string) error {
243253
mc := metrics.NewMetricPrometheusContext("loadbalancer_healthmonitor", "delete")
244254
err := monitors.Delete(context.TODO(), l.serviceClient, id).ExtractErr()

pkg/clients/mock/loadbalancer.go

Lines changed: 15 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pkg/cloud/services/loadbalancer/loadbalancer.go

Lines changed: 96 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -406,8 +406,7 @@ func (s *Service) reconcileAPILoadBalancerListener(lb *loadbalancers.LoadBalance
406406
if err != nil {
407407
return err
408408
}
409-
410-
if err := s.getOrCreateMonitor(openStackCluster, lbPortObjectsName, pool.ID, lb.ID); err != nil {
409+
if err := s.getOrUpdateMonitor(openStackCluster, lbPortObjectsName, pool.ID, lb.ID); err != nil {
411410
return err
412411
}
413412

@@ -532,13 +531,83 @@ func (s *Service) getOrCreatePool(openStackCluster *infrav1.OpenStackCluster, po
532531
return pool, nil
533532
}
534533

535-
func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster, monitorName, poolID, lbID string) error {
534+
func (s *Service) getOrUpdateMonitor(openStackCluster *infrav1.OpenStackCluster, monitorName, poolID, lbID string) error {
536535
monitor, err := s.checkIfMonitorExists(monitorName)
537536
if err != nil {
538537
return err
539538
}
540539

540+
monitorConfig := openStackCluster.Spec.APIServerLoadBalancer.Monitor
541+
542+
// Default values for monitor
543+
const (
544+
defaultDelay = 10
545+
defaultTimeout = 5
546+
defaultMaxRetries = 5
547+
defaultMaxRetriesDown = 3
548+
)
549+
541550
if monitor != nil {
551+
needsUpdate := false
552+
monitorUpdateOpts := monitors.UpdateOpts{}
553+
554+
if (monitorConfig == nil || monitorConfig.Delay == nil) && monitor.Delay != defaultDelay {
555+
s.scope.Logger().Info("Monitor delay needs update to default", "current", monitor.Delay, "default", defaultDelay)
556+
monitorUpdateOpts.Delay = defaultDelay
557+
needsUpdate = true
558+
} else if monitorConfig != nil && monitorConfig.Delay != nil && monitor.Delay != *monitorConfig.Delay {
559+
s.scope.Logger().Info("Monitor delay needs update", "current", monitor.Delay, "desired", *monitorConfig.Delay)
560+
monitorUpdateOpts.Delay = *monitorConfig.Delay
561+
needsUpdate = true
562+
}
563+
564+
if (monitorConfig == nil || monitorConfig.Timeout == nil) && monitor.Timeout != defaultTimeout {
565+
s.scope.Logger().Info("Monitor timeout needs update to default", "current", monitor.Timeout, "default", defaultTimeout)
566+
monitorUpdateOpts.Timeout = defaultTimeout
567+
needsUpdate = true
568+
} else if monitorConfig != nil && monitorConfig.Timeout != nil && monitor.Timeout != *monitorConfig.Timeout {
569+
s.scope.Logger().Info("Monitor timeout needs update", "current", monitor.Timeout, "desired", *monitorConfig.Timeout)
570+
monitorUpdateOpts.Timeout = *monitorConfig.Timeout
571+
needsUpdate = true
572+
}
573+
574+
if (monitorConfig == nil || monitorConfig.MaxRetries == nil) && monitor.MaxRetries != defaultMaxRetries {
575+
s.scope.Logger().Info("Monitor maxRetries needs update to default", "current", monitor.MaxRetries, "default", defaultMaxRetries)
576+
monitorUpdateOpts.MaxRetries = defaultMaxRetries
577+
needsUpdate = true
578+
} else if monitorConfig != nil && monitorConfig.MaxRetries != nil && monitor.MaxRetries != *monitorConfig.MaxRetries {
579+
s.scope.Logger().Info("Monitor maxRetries needs update", "current", monitor.MaxRetries, "desired", *monitorConfig.MaxRetries)
580+
monitorUpdateOpts.MaxRetries = *monitorConfig.MaxRetries
581+
needsUpdate = true
582+
}
583+
584+
if (monitorConfig == nil || monitorConfig.MaxRetriesDown == nil) && monitor.MaxRetriesDown != defaultMaxRetriesDown {
585+
s.scope.Logger().Info("Monitor maxRetriesDown needs update to default", "current", monitor.MaxRetriesDown, "default", defaultMaxRetriesDown)
586+
monitorUpdateOpts.MaxRetriesDown = defaultMaxRetriesDown
587+
needsUpdate = true
588+
} else if monitorConfig != nil && monitorConfig.MaxRetriesDown != nil && monitor.MaxRetriesDown != *monitorConfig.MaxRetriesDown {
589+
s.scope.Logger().Info("Monitor maxRetriesDown needs update", "current", monitor.MaxRetriesDown, "desired", *monitorConfig.MaxRetriesDown)
590+
monitorUpdateOpts.MaxRetriesDown = *monitorConfig.MaxRetriesDown
591+
needsUpdate = true
592+
}
593+
594+
if needsUpdate {
595+
s.scope.Logger().Info("Updating load balancer monitor", "loadBalancerID", lbID, "name", monitorName, "monitorID", monitor.ID)
596+
597+
updatedMonitor, err := s.loadbalancerClient.UpdateMonitor(monitor.ID, monitorUpdateOpts)
598+
if err != nil {
599+
record.Warnf(openStackCluster, "FailedUpdateMonitor", "Failed to update monitor %s with id %s: %v", monitorName, monitor.ID, err)
600+
return err
601+
}
602+
603+
if _, err = s.waitForLoadBalancerActive(lbID); err != nil {
604+
record.Warnf(openStackCluster, "FailedUpdateMonitor", "Failed to update monitor %s with id %s: wait for load balancer active %s: %v", monitorName, monitor.ID, lbID, err)
605+
return err
606+
}
607+
608+
record.Eventf(openStackCluster, "SuccessfulUpdateMonitor", "Updated monitor %s with id %s", monitorName, updatedMonitor.ID)
609+
}
610+
542611
return nil
543612
}
544613

@@ -548,13 +617,29 @@ func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster,
548617
Name: monitorName,
549618
PoolID: poolID,
550619
Type: "TCP",
551-
Delay: 10,
552-
MaxRetries: 5,
553-
MaxRetriesDown: 3,
554-
Timeout: 5,
620+
Delay: defaultDelay,
621+
Timeout: defaultTimeout,
622+
MaxRetries: defaultMaxRetries,
623+
MaxRetriesDown: defaultMaxRetriesDown,
624+
}
625+
626+
if monitorConfig != nil {
627+
if monitorConfig.Delay != nil {
628+
monitorCreateOpts.Delay = *monitorConfig.Delay
629+
}
630+
if monitorConfig.MaxRetries != nil {
631+
monitorCreateOpts.MaxRetries = *monitorConfig.MaxRetries
632+
}
633+
if monitorConfig.MaxRetriesDown != nil {
634+
monitorCreateOpts.MaxRetriesDown = *monitorConfig.MaxRetriesDown
635+
}
636+
if monitorConfig.Timeout != nil {
637+
monitorCreateOpts.Timeout = *monitorConfig.Timeout
638+
}
555639
}
556-
monitor, err = s.loadbalancerClient.CreateMonitor(monitorCreateOpts)
557-
// Skip creating monitor if it is not supported by Octavia provider
640+
641+
newMonitor, err := s.loadbalancerClient.CreateMonitor(monitorCreateOpts)
642+
558643
if capoerrors.IsNotImplementedError(err) {
559644
record.Warnf(openStackCluster, "SkippedCreateMonitor", "Health Monitor is not created as it's not implemented with the current Octavia provider.")
560645
return nil
@@ -566,11 +651,11 @@ func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster,
566651
}
567652

568653
if _, err = s.waitForLoadBalancerActive(lbID); err != nil {
569-
record.Warnf(openStackCluster, "FailedCreateMonitor", "Failed to create monitor %s with id %s: wait for load balancer active %s: %v", monitorName, monitor.ID, lbID, err)
654+
record.Warnf(openStackCluster, "FailedCreateMonitor", "Failed to create monitor %s with id %s: wait for load balancer active %s: %v", monitorName, newMonitor.ID, lbID, err)
570655
return err
571656
}
572657

573-
record.Eventf(openStackCluster, "SuccessfulCreateMonitor", "Created monitor %s with id %s", monitorName, monitor.ID)
658+
record.Eventf(openStackCluster, "SuccessfulCreateMonitor", "Created monitor %s with id %s", monitorName, newMonitor.ID)
574659
return nil
575660
}
576661

pkg/webhooks/openstackcluster_webhook.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,12 @@ func (*openStackClusterWebhook) ValidateUpdate(_ context.Context, oldObjRaw, new
145145
newObj.Spec.APIServerLoadBalancer.AllowedCIDRs = []string{}
146146
}
147147

148+
// Allow changes on APIServerLB monitors
149+
if newObj.Spec.APIServerLoadBalancer != nil && oldObj.Spec.APIServerLoadBalancer != nil {
150+
oldObj.Spec.APIServerLoadBalancer.Monitor = &infrav1.APIServerLoadBalancerMonitor{}
151+
newObj.Spec.APIServerLoadBalancer.Monitor = &infrav1.APIServerLoadBalancerMonitor{}
152+
}
153+
148154
// Allow changes to the availability zones.
149155
oldObj.Spec.ControlPlaneAvailabilityZones = []string{}
150156
newObj.Spec.ControlPlaneAvailabilityZones = []string{}

0 commit comments

Comments
 (0)