Skip to content

Commit 120c5ec

Browse files
committed
feat: enable filtered secret watch with feature flag
Signed-off-by: Anish Ramasekar <anish.ramasekar@gmail.com>
1 parent 6ece961 commit 120c5ec

File tree

7 files changed

+235
-65
lines changed

7 files changed

+235
-65
lines changed

cmd/secrets-store-csi-driver/main.go

Lines changed: 28 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,6 @@ var (
5252
logFormatJSON = flag.Bool("log-format-json", false, "set log formatter to json")
5353
providerVolumePath = flag.String("provider-volume", "/etc/kubernetes/secrets-store-csi-providers", "Volume path for provider")
5454
// this will be removed in a future release
55-
_ = flag.String("min-provider-version", "", "[DEPRECATED] set minimum supported provider versions with current driver")
5655
metricsAddr = flag.String("metrics-addr", ":8095", "The address the metric endpoint binds to")
5756
// grpcSupportedProviders is a ; separated string that can contain a list of providers. The reason it's a string is to allow scenarios
5857
// where the driver is being used with 2 providers, one which supports grpc and other using binary for provider.
@@ -62,6 +61,12 @@ var (
6261
enableProfile = flag.Bool("enable-pprof", false, "enable pprof profiling")
6362
profilePort = flag.Int("pprof-port", 6065, "port for pprof profiling")
6463

64+
// enable filtered watch for secrets. The filtering is done on the csi driver label: secrets-store.csi.k8s.io/managed=true
65+
// this label is set for all Kubernetes secrets created by the CSI driver. For Kubernetes secrets used to provide credentials
66+
// for use with the CSI driver, set the label by running: kubectl label secret secrets-store-creds secrets-store.csi.k8s.io/managed=true
67+
// This feature flag will be enabled by default after n+2 releases, giving users time to label all their existing credential secrets.
68+
filteredWatchSecret = flag.Bool("filtered-watch-secret", false, "enable filtered watch for secrets with label secrets-store.csi.k8s.io/managed=true")
69+
6570
scheme = runtime.NewScheme()
6671
)
6772

@@ -90,6 +95,9 @@ func main() {
9095
klog.ErrorS(http.ListenAndServe(addr, nil), "unable to start profiling server")
9196
}()
9297
}
98+
if *filteredWatchSecret {
99+
klog.Infof("Filtered watch for secret based on secrets-store.csi.k8s.io/managed=true label enabled")
100+
}
93101

94102
// initialize metrics exporter before creating measurements
95103
err := metrics.InitMetricsExporter()
@@ -100,18 +108,29 @@ func main() {
100108
cfg := ctrl.GetConfigOrDie()
101109
cfg.UserAgent = version.GetUserAgent("controller")
102110

111+
// this enables filtered watch of pods based on the node name
112+
// only pods running on the same node as the csi driver will be cached
113+
fieldSelectorByResource := map[string]string{
114+
"pods": fields.OneTermEqualSelector("spec.nodeName", *nodeID).String(),
115+
}
116+
// this enables filtered watch of secretproviderclasspodstatuses based on the internal node label
117+
// internal.secrets-store.csi.k8s.io/node-name=<node name> added by csi driver
118+
labelSelectorByResource := map[string]string{
119+
"secretproviderclasspodstatuses": fmt.Sprintf("%s=%s", v1alpha1.InternalNodeLabel, *nodeID),
120+
}
121+
// this enables filtered watch of secrets based on the label (secrets-store.csi.k8s.io/managed=true)
122+
// added to the secrets created by the CSI driver
123+
if *filteredWatchSecret {
124+
labelSelectorByResource["secrets"] = fmt.Sprintf("%s=true", controllers.SecretManagedLabel)
125+
}
126+
103127
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
104128
Scheme: scheme,
105129
MetricsBindAddress: *metricsAddr,
106130
LeaderElection: false,
107131
NewCache: cache.Builder(cache.Options{
108-
FieldSelectorByResource: map[string]string{
109-
"pods": fields.OneTermEqualSelector("spec.nodeName", *nodeID).String(),
110-
},
111-
LabelSelectorByResource: map[string]string{
112-
"secretproviderclasspodstatuses": fmt.Sprintf("%s=%s", v1alpha1.InternalNodeLabel, *nodeID),
113-
"secrets": "secrets-store.csi.k8s.io/managed=true",
114-
},
132+
FieldSelectorByResource: fieldSelectorByResource,
133+
LabelSelectorByResource: labelSelectorByResource,
115134
}),
116135
})
117136
if err != nil {
@@ -161,7 +180,7 @@ func main() {
161180
}()
162181

163182
if *enableSecretRotation {
164-
rec, err := rotation.NewReconciler(scheme, *providerVolumePath, *nodeID, *rotationPollInterval, providerClients)
183+
rec, err := rotation.NewReconciler(scheme, *providerVolumePath, *nodeID, *rotationPollInterval, providerClients, *filteredWatchSecret)
165184
if err != nil {
166185
klog.Fatalf("failed to initialize rotation reconciler, error: %+v", err)
167186
}

controllers/secretproviderclasspodstatus_controller.go

Lines changed: 51 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@ import (
2424
"time"
2525

2626
"k8s.io/client-go/kubernetes"
27+
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
2728
"sigs.k8s.io/controller-runtime/pkg/event"
2829
"sigs.k8s.io/controller-runtime/pkg/predicate"
2930

@@ -40,7 +41,6 @@ import (
4041

4142
ctrl "sigs.k8s.io/controller-runtime"
4243
"sigs.k8s.io/controller-runtime/pkg/client"
43-
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
4444

4545
corev1 "k8s.io/api/core/v1"
4646
v1 "k8s.io/api/core/v1"
@@ -54,7 +54,7 @@ import (
5454
)
5555

5656
const (
57-
secretManagedLabel = "secrets-store.csi.k8s.io/managed"
57+
SecretManagedLabel = "secrets-store.csi.k8s.io/managed"
5858
secretCreationFailedReason = "FailedToCreateSecret"
5959
)
6060

@@ -110,7 +110,7 @@ func (r *SecretProviderClassPodStatusReconciler) Patcher(ctx context.Context) er
110110

111111
spcPodStatusList := &v1alpha1.SecretProviderClassPodStatusList{}
112112
spcMap := make(map[string]v1alpha1.SecretProviderClass)
113-
secretOwnerMap := make(map[types.NamespacedName][]*v1alpha1.SecretProviderClassPodStatus)
113+
secretOwnerMap := make(map[types.NamespacedName][]metav1.OwnerReference)
114114
// get a list of all spc pod status that belong to the node
115115
err := r.reader.List(ctx, spcPodStatusList, r.ListOptionsLabelSelector())
116116
if err != nil {
@@ -121,21 +121,56 @@ func (r *SecretProviderClassPodStatusReconciler) Patcher(ctx context.Context) er
121121
for i := range spcPodStatuses {
122122
spcName := spcPodStatuses[i].Status.SecretProviderClassName
123123
spc := &v1alpha1.SecretProviderClass{}
124+
namespace := spcPodStatuses[i].Namespace
125+
124126
if val, exists := spcMap[spcPodStatuses[i].Namespace+"/"+spcName]; exists {
125127
spc = &val
126128
} else {
127-
if err := r.reader.Get(ctx, client.ObjectKey{Namespace: spcPodStatuses[i].Namespace, Name: spcName}, spc); err != nil {
129+
if err := r.reader.Get(ctx, client.ObjectKey{Namespace: namespace, Name: spcName}, spc); err != nil {
128130
return fmt.Errorf("failed to get spc %s, err: %+v", spcName, err)
129131
}
130-
spcMap[spcPodStatuses[i].Namespace+"/"+spcName] = *spc
132+
spcMap[namespace+"/"+spcName] = *spc
133+
}
134+
// get the pod and check if the pod has an owner reference
135+
pod := &v1.Pod{}
136+
err = r.reader.Get(ctx, client.ObjectKey{Namespace: namespace, Name: spcPodStatuses[i].Status.PodName}, pod)
137+
if err != nil {
138+
return fmt.Errorf("failed to fetch pod during patching, err: %+v", err)
131139
}
140+
var ownerRefs []metav1.OwnerReference
141+
for _, ownerRef := range pod.GetOwnerReferences() {
142+
ownerRefs = append(ownerRefs, metav1.OwnerReference{
143+
APIVersion: ownerRef.APIVersion,
144+
Kind: ownerRef.Kind,
145+
UID: ownerRef.UID,
146+
Name: ownerRef.Name,
147+
})
148+
}
149+
// If a pod has no owner references, then it's a static pod and
150+
// doesn't belong to a replicaset. In this case, use the spcps as
151+
// owner reference, just as we do today
152+
if len(ownerRefs) == 0 {
153+
// Create a new owner ref.
154+
gvk, err := apiutil.GVKForObject(&spcPodStatuses[i], r.scheme)
155+
if err != nil {
156+
return err
157+
}
158+
ref := metav1.OwnerReference{
159+
APIVersion: gvk.GroupVersion().String(),
160+
Kind: gvk.Kind,
161+
UID: spcPodStatuses[i].GetUID(),
162+
Name: spcPodStatuses[i].GetName(),
163+
}
164+
ownerRefs = append(ownerRefs, ref)
165+
}
166+
132167
for _, secret := range spc.Spec.SecretObjects {
133168
key := types.NamespacedName{Name: secret.SecretName, Namespace: spcPodStatuses[i].Namespace}
134169
val, exists := secretOwnerMap[key]
135170
if exists {
136-
secretOwnerMap[key] = append(val, &spcPodStatuses[i])
171+
secretOwnerMap[key] = append(val, ownerRefs...)
137172
} else {
138-
secretOwnerMap[key] = []*v1alpha1.SecretProviderClassPodStatus{&spcPodStatuses[i]}
173+
secretOwnerMap[key] = ownerRefs
139174
}
140175
}
141176
}
@@ -282,7 +317,7 @@ func (r *SecretProviderClassPodStatusReconciler) Reconcile(ctx context.Context,
282317
// Set secrets-store.csi.k8s.io/managed=true label on the secret that's created and managed
283318
// by the secrets-store-csi-driver. This label will be used to perform a filtered list watch
284319
// only on secrets created and managed by the driver
285-
labelsMap[secretManagedLabel] = "true"
320+
labelsMap[SecretManagedLabel] = "true"
286321

287322
createFn := func() (bool, error) {
288323
if err := r.createK8sSecret(ctx, secretName, req.Namespace, datamap, labelsMap, secretType); err != nil {
@@ -384,7 +419,7 @@ func (r *SecretProviderClassPodStatusReconciler) createK8sSecret(ctx context.Con
384419
}
385420

386421
// patchSecretWithOwnerRef patches the secret owner reference with the spc pod status
387-
func (r *SecretProviderClassPodStatusReconciler) patchSecretWithOwnerRef(ctx context.Context, name, namespace string, spcPodStatus ...*v1alpha1.SecretProviderClassPodStatus) error {
422+
func (r *SecretProviderClassPodStatusReconciler) patchSecretWithOwnerRef(ctx context.Context, name, namespace string, ownerRefs ...metav1.OwnerReference) error {
388423
secret := &corev1.Secret{}
389424
secretKey := types.NamespacedName{
390425
Namespace: namespace,
@@ -401,23 +436,23 @@ func (r *SecretProviderClassPodStatusReconciler) patchSecretWithOwnerRef(ctx con
401436
patch := client.MergeFromWithOptions(secret.DeepCopy(), client.MergeFromWithOptimisticLock{})
402437
needsPatch := false
403438

439+
secretOwnerRefs := secret.GetOwnerReferences()
404440
secretOwnerMap := make(map[string]types.UID)
405-
for _, or := range secret.GetOwnerReferences() {
441+
for _, or := range secretOwnerRefs {
406442
secretOwnerMap[or.Name] = or.UID
407443
}
408444

409-
for i := range spcPodStatus {
410-
if _, exists := secretOwnerMap[spcPodStatus[i].Name]; exists {
445+
for i := range ownerRefs {
446+
if _, exists := secretOwnerMap[ownerRefs[i].Name]; exists {
411447
continue
412448
}
413449
needsPatch = true
414-
err := controllerutil.SetOwnerReference(spcPodStatus[i], secret, r.scheme)
415-
if err != nil {
416-
return err
417-
}
450+
klog.Infof("Adding %s/%s as owner ref for %s/%s", ownerRefs[i].APIVersion, ownerRefs[i].Name, namespace, name)
451+
secretOwnerRefs = append(secretOwnerRefs, ownerRefs[i])
418452
}
419453

420454
if needsPatch {
455+
secret.SetOwnerReferences(secretOwnerRefs)
421456
return r.writer.Patch(ctx, secret, patch)
422457
}
423458
return nil

0 commit comments

Comments
 (0)