Add topologySpreadConstraints configuration to pod spec. #2530
base: master
Changes from all commits
263242a
63f8657
89e2405
8c9840c
ad4dd00
f52d9bc
a40ba6e
3a8fc1b
@@ -111,6 +111,7 @@ var OperatorConfigCRDResourceColumns = []apiextv1.CustomResourceColumnDefinition

var min0 = 0.0
var min1 = 1.0
var minLength1 int64 = 1
var minDisable = -1.0

// PostgresCRDResourceValidation to check applied manifest parameters

@@ -895,6 +896,34 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
				},
			},
		},
		"topologySpreadConstraints": {
			Type:     "array",
			Nullable: true,
			Items: &apiextv1.JSONSchemaPropsOrArray{
				Schema: &apiextv1.JSONSchemaProps{
					Type: "object",
					Properties: map[string]apiextv1.JSONSchemaProps{
						"maxSkew": {
							Type:    "integer",
							Format:  "int32",
							Minimum: &min1,
						},
						"topologyKey": {
							Type:      "string",
							MinLength: &minLength1,
						},
						"whenUnsatisfiable": {
							Type: "string",
							Enum: []apiextv1.JSON{
								{Raw: []byte(`"DoNotSchedule"`)},
								{Raw: []byte(`"ScheduleAnyway"`)},
							},
						},
					},
					Required: []string{"maxSkew", "topologyKey", "whenUnsatisfiable"},
				},
			},
		},
Review comment: same here with the
"useLoadBalancer": { | ||
Type: "boolean", | ||
Description: "deprecated", | ||
|
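For orientation, here is a minimal sketch (not part of the diff) of a constraint value that would pass the validation added above: maxSkew at least 1, a non-empty topologyKey, and whenUnsatisfiable restricted to the two enum values. The topology key, cluster name, and labels are illustrative assumptions.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A constraint that satisfies the CRD schema added in this PR.
	constraint := v1.TopologySpreadConstraint{
		MaxSkew:           1,                                  // schema requires >= 1
		TopologyKey:       "topology.kubernetes.io/zone",      // schema requires a non-empty string
		WhenUnsatisfiable: v1.DoNotSchedule,                   // or v1.ScheduleAnyway
		LabelSelector: &metav1.LabelSelector{ // illustrative; see note below
			MatchLabels: map[string]string{"cluster-name": "acid-minimal-cluster"},
		},
	}
	fmt.Printf("%+v\n", constraint)
}
```

Note that in the PR itself the label selector is not user-supplied: the operator injects it from the cluster labels in generateTopologySpreadConstraints (see the k8sres.go hunk further down).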
@@ -499,6 +499,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
		needsRollUpdate = true
		reasons = append(reasons, "new statefulset's pod affinity does not match the current one")
	}
	if !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.TopologySpreadConstraints, statefulSet.Spec.Template.Spec.TopologySpreadConstraints) {
		needsReplace = true
		needsRollUpdate = true
		reasons = append(reasons, "new statefulset's pod topologySpreadConstraints does not match the current one")
	}

Review comment: does this really need to trigger a rolling update of pods executed by the operator? Won't K8s take care of it once the statefulset is replaced?
Reply: I see, but is this wrong too?
Reply: hm, good point. Maybe we can leave it as is for now. With a rolling update we make sure pods immediately adhere to the new constraints.
	if len(c.Statefulset.Spec.Template.Spec.Tolerations) != len(statefulSet.Spec.Template.Spec.Tolerations) {
		needsReplace = true
		needsRollUpdate = true
@@ -603,6 +603,13 @@ func generatePodAntiAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuring
	return podAntiAffinity
}

func generateTopologySpreadConstraints(labels labels.Set, topologySpreadConstraints []v1.TopologySpreadConstraint) []v1.TopologySpreadConstraint {
	// index into the slice so the assignment modifies the stored constraints,
	// not a per-iteration copy of the loop variable
	for i := range topologySpreadConstraints {
		topologySpreadConstraints[i].LabelSelector = &metav1.LabelSelector{MatchLabels: labels}
	}
	return topologySpreadConstraints
}
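A minimal test-style sketch (not from the PR) of how this helper could be exercised, assuming it sits next to the helper in the cluster package; the labels and constraint values are illustrative assumptions.

```go
package cluster

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func TestGenerateTopologySpreadConstraints(t *testing.T) {
	// Labels the operator would normally derive for the cluster (assumed values).
	clusterLabels := labels.Set{"application": "spilo", "cluster-name": "acid-minimal-cluster"}

	// A user-supplied constraint from the manifest, without a label selector.
	in := []v1.TopologySpreadConstraint{
		{MaxSkew: 1, TopologyKey: "topology.kubernetes.io/zone", WhenUnsatisfiable: v1.DoNotSchedule},
	}

	out := generateTopologySpreadConstraints(clusterLabels, in)

	// The helper should stamp the cluster's label selector onto every constraint.
	if out[0].LabelSelector == nil || out[0].LabelSelector.MatchLabels["cluster-name"] != "acid-minimal-cluster" {
		t.Errorf("expected the cluster label selector to be set on the constraint, got %+v", out[0].LabelSelector)
	}
}
```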

func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration {
	// allow to override tolerations by postgresql manifest
	if len(*tolerationsSpec) > 0 {
@@ -808,6 +815,7 @@ func (c *Cluster) generatePodTemplate(
	initContainers []v1.Container,
	sidecarContainers []v1.Container,
	sharePgSocketWithSidecars *bool,
	topologySpreadConstraintsSpec []v1.TopologySpreadConstraint,
	tolerationsSpec *[]v1.Toleration,
	spiloRunAsUser *int64,
	spiloRunAsGroup *int64,

@@ -877,6 +885,10 @@ func (c *Cluster) generatePodTemplate(
		podSpec.PriorityClassName = priorityClassName
	}

	if len(topologySpreadConstraintsSpec) > 0 {
		podSpec.TopologySpreadConstraints = generateTopologySpreadConstraints(labels, topologySpreadConstraintsSpec)
	}

Review comment on lines +888 to +891: you only do a range loop inside the function, so this len check is not necessary.
Reply: I updated it.
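The content of the suggested change is not shown above; presumably it amounts to dropping the guard, roughly like this sketch (an assumption, not the actual suggestion):

```go
// generateTopologySpreadConstraints only ranges over the slice, so calling it
// with an empty spec is a harmless no-op and the len() guard can be dropped.
podSpec.TopologySpreadConstraints = generateTopologySpreadConstraints(labels, topologySpreadConstraintsSpec)
```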
	if sharePgSocketWithSidecars != nil && *sharePgSocketWithSidecars {
		addVarRunVolume(&podSpec)
	}

@@ -1468,6 +1480,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	initContainers,
	sidecarContainers,
	c.OpConfig.SharePgSocketWithSidecars,
	spec.TopologySpreadConstraints,
	&tolerationSpec,
	effectiveRunAsUser,
	effectiveRunAsGroup,

@@ -2346,6 +2359,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {

	tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)

	topologySpreadConstraintsSpec := generateTopologySpreadConstraints(labels, spec.TopologySpreadConstraints)

	// re-use the method that generates DB pod templates
	if podTemplate, err = c.generatePodTemplate(
		c.Namespace,

@@ -2355,6 +2370,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
	[]v1.Container{},
	[]v1.Container{},
	util.False(),
	topologySpreadConstraintsSpec,
	&tolerationsSpec,
	nil,
	nil,
Review comment: I would expect the e2e test to patch the Postgresql manifest and add topologySpreadConstraints, then check that the pods spread evenly. But you're only patching the nodes here?
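For illustration, a hedged sketch of the kind of step the reviewer describes: patch the postgresql custom resource with a spread constraint and then verify the pods spread across zones. It is shown here with client-go for brevity (the project's e2e suite may use a different client); the cluster name, namespace, and topology key are assumptions.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// The postgresql custom resource managed by the operator.
	gvr := schema.GroupVersionResource{Group: "acid.zalan.do", Version: "v1", Resource: "postgresqls"}

	// JSON merge patch adding one zone-spread constraint to the cluster spec.
	patch := []byte(`{"spec":{"topologySpreadConstraints":[{"maxSkew":1,"topologyKey":"topology.kubernetes.io/zone","whenUnsatisfiable":"DoNotSchedule"}]}}`)

	_, err = client.Resource(gvr).Namespace("default").Patch(
		context.Background(), "acid-minimal-cluster", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("patched postgresql manifest; next, assert that pods land on nodes in different zones")
}
```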