
Commit 71f75a2

CRD migration: add option for storage version migration via status
1 parent 819a296 commit 71f75a2

8 files changed: 88 additions, 53 deletions

bootstrap/kubeadm/main.go

Lines changed: 1 addition & 1 deletion
@@ -365,7 +365,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
         // Note: The kubebuilder RBAC markers above has to be kept in sync
         // with the CRDs that should be migrated by this provider.
         Config: map[client.Object]crdmigrator.ByObjectConfig{
-            &bootstrapv1.KubeadmConfig{}:         {UseCache: true},
+            &bootstrapv1.KubeadmConfig{}:         {UseCache: true, UseStatusForStorageVersionMigration: true},
             &bootstrapv1.KubeadmConfigTemplate{}: {UseCache: false},
         },
         // The CRDMigrator is run with only concurrency 1 to ensure we don't overwhelm the apiserver by patching a
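For providers adopting this change, it boils down to flipping the new flag per object in the migrator config. Below is a minimal sketch of such a config for the kubeadm bootstrap provider, assuming the Cluster API import paths shown; the surrounding CRDMigrator and manager wiring from setupReconcilers is omitted.

```go
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"

	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/crdmigrator"
)

// providerCRDMigratorConfig sketches which CRs a provider migrates and how.
// CRs that are typically guarded by webhooks opt into the /status-based migration so a
// failing webhook cannot block the no-op update; template CRs keep the default path.
func providerCRDMigratorConfig() map[client.Object]crdmigrator.ByObjectConfig {
	return map[client.Object]crdmigrator.ByObjectConfig{
		&bootstrapv1.KubeadmConfig{}:         {UseCache: true, UseStatusForStorageVersionMigration: true},
		&bootstrapv1.KubeadmConfigTemplate{}: {UseCache: false},
	}
}
```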

config/rbac/role.yaml

Lines changed: 7 additions & 0 deletions
@@ -159,6 +159,13 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ipam.cluster.x-k8s.io
+  resources:
+  - ipaddressclaims/status
+  verbs:
+  - patch
+  - update
 - apiGroups:
   - runtime.cluster.x-k8s.io
   resources:

controllers/crdmigrator/crd_migrator.go

Lines changed: 16 additions & 10 deletions
@@ -102,6 +102,12 @@ type ByObjectConfig struct {
     // Note: If this is enabled, we will use the corresponding Go type of the object for Get & List calls to avoid
     // creating additional informers for UnstructuredList/PartialObjectMetadataList.
     UseCache bool
+
+    // UseStatusForStorageVersionMigration configures if the storage version migration for this CRD should
+    // be triggered via the status endpoint instead of an update on the CRs directly (which is the default).
+    // As mutating and validating webhooks are usually not configured on the status subresource this can help to
+    // avoid mutating & validation webhook errors that would block the no-op updates and thus the storage migration.
+    UseStatusForStorageVersionMigration bool
 }
 
 func (r *CRDMigrator) SetupWithManager(ctx context.Context, mgr ctrl.Manager, controllerOptions controller.Options) error {
@@ -231,7 +237,7 @@ func (r *CRDMigrator) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R
 
     // If phase should be run and .status.storedVersions != [storageVersion], run storage version migration.
     if r.crdMigrationPhasesToRun.Has(StorageVersionMigrationPhase) && storageVersionMigrationRequired(crd, storageVersion) {
-        if err := r.reconcileStorageVersionMigration(ctx, crd, customResourceObjects, storageVersion); err != nil {
+        if err := r.reconcileStorageVersionMigration(ctx, crd, migrationConfig, customResourceObjects, storageVersion); err != nil {
             return ctrl.Result{}, err
         }
 
@@ -253,18 +259,13 @@ func (r *CRDMigrator) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R
 }
 
 func storageVersionForCRD(crd *apiextensionsv1.CustomResourceDefinition) (string, error) {
-    var storageVersion string
     for _, v := range crd.Spec.Versions {
         if v.Storage {
-            storageVersion = v.Name
-            break
+            return v.Name, nil
         }
     }
-    if storageVersion == "" {
-        return "", errors.Errorf("could not find storage version for CustomResourceDefinition %s", crd.Name)
-    }
 
-    return storageVersion, nil
+    return "", errors.Errorf("could not find storage version for CustomResourceDefinition %s", crd.Name)
 }
 
 func storageVersionMigrationRequired(crd *apiextensionsv1.CustomResourceDefinition, storageVersion string) bool {
@@ -361,7 +362,7 @@ func listObjectsFromAPIReader(ctx context.Context, c client.Reader, objectList c
     return objs, nil
 }
 
-func (r *CRDMigrator) reconcileStorageVersionMigration(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, customResourceObjects []client.Object, storageVersion string) error {
+func (r *CRDMigrator) reconcileStorageVersionMigration(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, migrationConfig ByObjectConfig, customResourceObjects []client.Object, storageVersion string) error {
     if len(customResourceObjects) == 0 {
         return nil
     }
@@ -407,7 +408,12 @@ func (r *CRDMigrator) reconcileStorageVersionMigration(ctx context.Context, crd
         u.SetResourceVersion(obj.GetResourceVersion())
 
         log.V(4).Info("Migrating to new storage version", gvk.Kind, klog.KObj(u))
-        err := r.Client.Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator"))
+        var err error
+        if migrationConfig.UseStatusForStorageVersionMigration {
+            err = r.Client.Status().Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator"))
+        } else {
+            err = r.Client.Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator"))
+        }
         // If we got a NotFound error, the object no longer exists so no need to update it.
         // If we got a Conflict error, another client wrote the object already so no need to update it.
         if err != nil && !apierrors.IsNotFound(err) && !apierrors.IsConflict(err) {
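The migration itself is a no-op server-side apply: the apiserver rewrites each CR in its current storage version without changing the object's content, and with this commit the patch can optionally target the /status subresource to bypass webhooks on the main resource. The sketch below isolates that patch call in a standalone helper, assuming a controller-runtime client and an already-known GVK, key and resourceVersion; the helper name is illustrative, not part of the commit.

```go
package migration

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// migrateToStorageVersion issues a no-op server-side apply patch so the apiserver
// rewrites the object in its current storage version. When useStatus is true the
// patch is sent to the /status subresource, which mutating and validating webhooks
// on the main resource usually do not intercept.
func migrateToStorageVersion(ctx context.Context, c client.Client, gvk schema.GroupVersionKind, key client.ObjectKey, resourceVersion string, useStatus bool) error {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(gvk)
	u.SetNamespace(key.Namespace)
	u.SetName(key.Name)
	// Setting the resourceVersion turns concurrent writes into Conflict errors,
	// which can safely be ignored: another client already rewrote the object.
	u.SetResourceVersion(resourceVersion)

	var err error
	if useStatus {
		err = c.Status().Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator"))
	} else {
		err = c.Patch(ctx, u, client.Apply, client.FieldOwner("crdmigrator"))
	}
	// NotFound: the object is gone; Conflict: someone else updated it. Both are fine.
	if err != nil && !apierrors.IsNotFound(err) && !apierrors.IsConflict(err) {
		return err
	}
	return nil
}
```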

controllers/crdmigrator/crd_migrator_test.go

Lines changed: 47 additions & 27 deletions
@@ -63,43 +63,63 @@ func TestReconcile(t *testing.T) {
     crdObjectKey := client.ObjectKey{Name: crdName}
 
     tests := []struct {
-        name                   string
-        skipCRDMigrationPhases []Phase
-        useCache               bool
+        name                                string
+        skipCRDMigrationPhases              []Phase
+        useCache                            bool
+        useStatusForStorageVersionMigration bool
     }{
         {
-            name:                   "run both StorageVersionMigration and CleanupManagedFields with cache",
-            skipCRDMigrationPhases: nil,
-            useCache:               true,
+            name:                                "run both StorageVersionMigration and CleanupManagedFields with cache",
+            skipCRDMigrationPhases:              nil,
+            useCache:                            true,
+            useStatusForStorageVersionMigration: false,
         },
         {
-            name:                   "run both StorageVersionMigration and CleanupManagedFields without cache",
-            skipCRDMigrationPhases: nil,
-            useCache:               false,
+            name:                                "run both StorageVersionMigration and CleanupManagedFields without cache",
+            skipCRDMigrationPhases:              nil,
+            useCache:                            false,
+            useStatusForStorageVersionMigration: false,
         },
         {
-            name:                   "run only CleanupManagedFields with cache",
-            skipCRDMigrationPhases: []Phase{StorageVersionMigrationPhase},
-            useCache:               true,
+            name:                                "run both StorageVersionMigration and CleanupManagedFields with cache (using status)",
+            skipCRDMigrationPhases:              nil,
+            useCache:                            true,
+            useStatusForStorageVersionMigration: true,
         },
         {
-            name:                   "run only CleanupManagedFields without cache",
-            skipCRDMigrationPhases: []Phase{StorageVersionMigrationPhase},
-            useCache:               false,
+            name:                                "run both StorageVersionMigration and CleanupManagedFields without cache (using status)",
+            skipCRDMigrationPhases:              nil,
+            useCache:                            false,
+            useStatusForStorageVersionMigration: true,
         },
         {
-            name:                   "run only StorageVersionMigration with cache",
-            skipCRDMigrationPhases: []Phase{CleanupManagedFieldsPhase},
-            useCache:               true,
+            name:                                "run only CleanupManagedFields with cache",
+            skipCRDMigrationPhases:              []Phase{StorageVersionMigrationPhase},
+            useCache:                            true,
+            useStatusForStorageVersionMigration: false,
         },
         {
-            name:                   "run only StorageVersionMigration without cache",
-            skipCRDMigrationPhases: []Phase{CleanupManagedFieldsPhase},
-            useCache:               false,
+            name:                                "run only CleanupManagedFields without cache",
+            skipCRDMigrationPhases:              []Phase{StorageVersionMigrationPhase},
+            useCache:                            false,
+            useStatusForStorageVersionMigration: false,
         },
         {
-            name:                   "skip all",
-            skipCRDMigrationPhases: []Phase{StorageVersionMigrationPhase, CleanupManagedFieldsPhase},
+            name:                                "run only StorageVersionMigration with cache",
+            skipCRDMigrationPhases:              []Phase{CleanupManagedFieldsPhase},
+            useCache:                            true,
+            useStatusForStorageVersionMigration: false,
+        },
+        {
+            name:                                "run only StorageVersionMigration without cache",
+            skipCRDMigrationPhases:              []Phase{CleanupManagedFieldsPhase},
+            useCache:                            false,
+            useStatusForStorageVersionMigration: false,
+        },
+        {
+            name:                                "skip all",
+            skipCRDMigrationPhases:              []Phase{StorageVersionMigrationPhase, CleanupManagedFieldsPhase},
+            useStatusForStorageVersionMigration: false,
         },
     }
     for _, tt := range tests {
@@ -127,19 +147,19 @@ func TestReconcile(t *testing.T) {
         // Create manager for all steps.
         skipCRDMigrationPhases := sets.Set[Phase]{}.Insert(tt.skipCRDMigrationPhases...)
         managerT1, err := createManagerWithCRDMigrator(tt.skipCRDMigrationPhases, map[client.Object]ByObjectConfig{
-            &t1v1beta1.TestCluster{}: {UseCache: tt.useCache},
+            &t1v1beta1.TestCluster{}: {UseCache: tt.useCache, UseStatusForStorageVersionMigration: tt.useStatusForStorageVersionMigration},
         }, t1v1beta1.AddToScheme)
         g.Expect(err).ToNot(HaveOccurred())
         managerT2, err := createManagerWithCRDMigrator(tt.skipCRDMigrationPhases, map[client.Object]ByObjectConfig{
-            &t2v1beta2.TestCluster{}: {UseCache: tt.useCache},
+            &t2v1beta2.TestCluster{}: {UseCache: tt.useCache, UseStatusForStorageVersionMigration: tt.useStatusForStorageVersionMigration},
         }, t2v1beta2.AddToScheme)
         g.Expect(err).ToNot(HaveOccurred())
         managerT3, err := createManagerWithCRDMigrator(tt.skipCRDMigrationPhases, map[client.Object]ByObjectConfig{
-            &t3v1beta2.TestCluster{}: {UseCache: tt.useCache},
+            &t3v1beta2.TestCluster{}: {UseCache: tt.useCache, UseStatusForStorageVersionMigration: tt.useStatusForStorageVersionMigration},
         }, t3v1beta2.AddToScheme)
         g.Expect(err).ToNot(HaveOccurred())
         managerT4, err := createManagerWithCRDMigrator(tt.skipCRDMigrationPhases, map[client.Object]ByObjectConfig{
-            &t4v1beta2.TestCluster{}: {UseCache: tt.useCache},
+            &t4v1beta2.TestCluster{}: {UseCache: tt.useCache, UseStatusForStorageVersionMigration: tt.useStatusForStorageVersionMigration},
         }, t4v1beta2.AddToScheme)
         g.Expect(err).ToNot(HaveOccurred())

controlplane/kubeadm/main.go

Lines changed: 1 addition & 1 deletion
@@ -408,7 +408,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
         // Note: The kubebuilder RBAC markers above has to be kept in sync
         // with the CRDs that should be migrated by this provider.
         Config: map[client.Object]crdmigrator.ByObjectConfig{
-            &controlplanev1.KubeadmControlPlane{}:         {UseCache: true},
+            &controlplanev1.KubeadmControlPlane{}:         {UseCache: true, UseStatusForStorageVersionMigration: true},
             &controlplanev1.KubeadmControlPlaneTemplate{}: {UseCache: false},
         },
         // The CRDMigrator is run with only concurrency 1 to ensure we don't overwhelm the apiserver by patching a

docs/book/src/developer/providers/migrations/v1.9-to-v1.10.md

Lines changed: 1 addition & 0 deletions
@@ -39,3 +39,4 @@ maintainers of providers and consumers of our Go API.
 - Note: The CRD migrator will add the `crd-migration.cluster.x-k8s.io/observed-generation` annotation on the CRD object,
   please ensure that if these CRD objects are deployed with a tool like kapp / Argo / Flux the annotation is not continuously removed.
 - For all CRs that should be migrated by the `CRDMigrator`: verbs: `get;list;watch;patch;update`
+- For all CRs with `UseStatusForStorageVersionMigration: true` verbs: `update;patch` on their `/status` resource (e.g. `ipaddressclaims/status`)
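In practice that extra RBAC is declared next to the existing CR markers in the provider's main.go. Below is a sketch of the corresponding kubebuilder markers, mirroring the ones this commit adds to core Cluster API further down; the ipam.cluster.x-k8s.io group and resources are just the example from this commit, substitute your provider's CRs.

```go
// RBAC the CRDMigrator already needs on the CRs themselves:
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses;ipaddressclaims,verbs=get;list;watch;patch;update

// Additional RBAC when UseStatusForStorageVersionMigration is enabled for a CR,
// because the no-op apply patch is then sent to the /status subresource:
// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims/status,verbs=patch;update
```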

main.go

Lines changed: 11 additions & 10 deletions
@@ -286,6 +286,7 @@ func InitFlags(fs *pflag.FlagSet) {
 // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions;customresourcedefinitions/status,verbs=update;patch,resourceNames=clusterclasses.cluster.x-k8s.io;clusterresourcesetbindings.addons.cluster.x-k8s.io;clusterresourcesets.addons.cluster.x-k8s.io;clusters.cluster.x-k8s.io;extensionconfigs.runtime.cluster.x-k8s.io;ipaddressclaims.ipam.cluster.x-k8s.io;ipaddresses.ipam.cluster.x-k8s.io;machinedeployments.cluster.x-k8s.io;machinedrainrules.cluster.x-k8s.io;machinehealthchecks.cluster.x-k8s.io;machinepools.cluster.x-k8s.io;machines.cluster.x-k8s.io;machinesets.cluster.x-k8s.io
 // ADD CR RBAC for CRD Migrator.
 // +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddresses;ipaddressclaims,verbs=get;list;watch;patch;update
+// +kubebuilder:rbac:groups=ipam.cluster.x-k8s.io,resources=ipaddressclaims/status,verbs=patch;update
 // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedrainrules,verbs=get;list;watch;patch;update
 
 func main() {
@@ -491,24 +492,24 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager, watchNamespaces map
     // with the CRDs that should be migrated by this provider.
     crdMigratorConfig := map[client.Object]crdmigrator.ByObjectConfig{
         &addonsv1.ClusterResourceSetBinding{}: {UseCache: true},
-        &addonsv1.ClusterResourceSet{}:        {UseCache: true},
-        &clusterv1.Cluster{}:                  {UseCache: true},
-        &clusterv1.MachineDeployment{}:        {UseCache: true},
+        &addonsv1.ClusterResourceSet{}:        {UseCache: true, UseStatusForStorageVersionMigration: true},
+        &clusterv1.Cluster{}:                  {UseCache: true, UseStatusForStorageVersionMigration: true},
+        &clusterv1.MachineDeployment{}:        {UseCache: true, UseStatusForStorageVersionMigration: true},
         &clusterv1.MachineDrainRule{}:         {UseCache: true},
-        &clusterv1.MachineHealthCheck{}:       {UseCache: true},
-        &clusterv1.Machine{}:                  {UseCache: true},
-        &clusterv1.MachineSet{}:               {UseCache: true},
+        &clusterv1.MachineHealthCheck{}:       {UseCache: true, UseStatusForStorageVersionMigration: true},
+        &clusterv1.Machine{}:                  {UseCache: true, UseStatusForStorageVersionMigration: true},
+        &clusterv1.MachineSet{}:               {UseCache: true, UseStatusForStorageVersionMigration: true},
         &ipamv1.IPAddress{}:                   {UseCache: false},
-        &ipamv1.IPAddressClaim{}:              {UseCache: false},
+        &ipamv1.IPAddressClaim{}:              {UseCache: false, UseStatusForStorageVersionMigration: true},
     }
     if feature.Gates.Enabled(feature.ClusterTopology) {
-        crdMigratorConfig[&clusterv1.ClusterClass{}] = crdmigrator.ByObjectConfig{UseCache: true}
+        crdMigratorConfig[&clusterv1.ClusterClass{}] = crdmigrator.ByObjectConfig{UseCache: true, UseStatusForStorageVersionMigration: true}
     }
     if feature.Gates.Enabled(feature.RuntimeSDK) {
-        crdMigratorConfig[&runtimev1.ExtensionConfig{}] = crdmigrator.ByObjectConfig{UseCache: true}
+        crdMigratorConfig[&runtimev1.ExtensionConfig{}] = crdmigrator.ByObjectConfig{UseCache: true, UseStatusForStorageVersionMigration: true}
     }
     if feature.Gates.Enabled(feature.MachinePool) {
-        crdMigratorConfig[&expv1.MachinePool{}] = crdmigrator.ByObjectConfig{UseCache: true}
+        crdMigratorConfig[&expv1.MachinePool{}] = crdmigrator.ByObjectConfig{UseCache: true, UseStatusForStorageVersionMigration: true}
     }
     crdMigratorSkipPhases := []crdmigrator.Phase{}
     for _, p := range skipCRDMigrationPhases {

test/infrastructure/docker/main.go

Lines changed: 4 additions & 4 deletions
@@ -383,13 +383,13 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
     // Note: The kubebuilder RBAC markers above has to be kept in sync
     // with the CRDs that should be migrated by this provider.
     crdMigratorConfig := map[client.Object]crdmigrator.ByObjectConfig{
-        &infrav1.DockerCluster{}:         {UseCache: true},
+        &infrav1.DockerCluster{}:         {UseCache: true, UseStatusForStorageVersionMigration: true},
         &infrav1.DockerClusterTemplate{}: {UseCache: false},
-        &infrav1.DockerMachine{}:         {UseCache: true},
+        &infrav1.DockerMachine{}:         {UseCache: true, UseStatusForStorageVersionMigration: true},
         &infrav1.DockerMachineTemplate{}: {UseCache: false},
-        &infrav1.DevCluster{}:            {UseCache: true},
+        &infrav1.DevCluster{}:            {UseCache: true, UseStatusForStorageVersionMigration: true},
         &infrav1.DevClusterTemplate{}:    {UseCache: false},
-        &infrav1.DevMachine{}:            {UseCache: true},
+        &infrav1.DevMachine{}:            {UseCache: true, UseStatusForStorageVersionMigration: true},
         &infrav1.DevMachineTemplate{}:    {UseCache: false},
     }
     if feature.Gates.Enabled(feature.MachinePool) {
