
Commit 59a15da

jan-g authored and derekbit committed
e2e: confirm the cleanup of PVs with legacy affinity attributes
This applies a small refactor to the e2e tests to ensure that the newer provisioner can site helper pods correctly to clean up PVs with "legacy" affinity constraints. The kind cluster itself is reconfigured so that every node has `metadata.name` != `metadata.labels["kubernetes.io/hostname"]`, since the assumption that the two match does not hold for many cloud providers.
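The distinction matters because a PV written by an older release pins its node affinity to the kubernetes.io/hostname label, not to the node name. A minimal client-go sketch of the lookup a provisioner has to perform when the two differ (illustrative only; the function name and error handling are not from this commit):

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
	)

	// resolveNodeByHostname maps the hostname recorded in a PV's node affinity
	// to the node's actual name. On clusters where the two coincide, treating
	// the affinity value as a node name happens to work; on many cloud
	// providers it does not, and a cleanup helper pod pinned to that "name"
	// would target a nonexistent node.
	func resolveNodeByHostname(ctx context.Context, c kubernetes.Interface, hostname string) (string, error) {
		nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{
			LabelSelector: "kubernetes.io/hostname=" + hostname,
		})
		if err != nil {
			return "", err
		}
		if len(nodes.Items) != 1 {
			return "", fmt.Errorf("want exactly one node labelled with hostname %q, got %d", hostname, len(nodes.Items))
		}
		return nodes.Items[0].Name, nil
	}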
1 parent ea957ee commit 59a15da

File tree

test/pod_test.go
test/testdata/kind-cluster.yaml
test/testdata/pod-with-node-affinity/patch.yaml
test/testdata/pv-with-legacy-affinity/kustomization.yaml
test/testdata/pv-with-legacy-affinity/pv.yaml
test/util.go

6 files changed: +86 -21 lines changed

test/pod_test.go

Lines changed: 32 additions & 19 deletions
@@ -82,38 +82,38 @@ func TestPVCTestSuite(t *testing.T) {
 func (p *PodTestSuite) TestPodWithHostPathVolume() {
 	p.kustomizeDir = "pod"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolume() {
 	p.kustomizeDir = "pod-with-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolumeDefault() {
 	p.kustomizeDir = "pod-with-default-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithNodeAffinity() {
 	p.kustomizeDir = "pod-with-node-affinity"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithRWOPVolume() {
 	p.kustomizeDir = "pod-with-rwop-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithSecurityContext() {
 	p.kustomizeDir = "pod-with-security-context"
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
-	runTest(p, []string{p.config.IMAGE}, "podscheduled", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("podscheduled"), hostPathVolumeType)
 
 	cmd := fmt.Sprintf(`kubectl get pod -l %s=%s -o=jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}'`, LabelKey, LabelValue)
 
@@ -142,22 +142,33 @@ loop:
 func (p *PodTestSuite) TestPodWithSubpath() {
 	p.kustomizeDir = "pod-with-subpath"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) xxTestPodWithMultipleStorageClasses() {
 	p.kustomizeDir = "multiple-storage-classes"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithCustomPathPatternStorageClasses() {
 	p.kustomizeDir = "custom-path-pattern"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
-func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string) {
+func (p *PodTestSuite) TestPodWithLegacyAffinityConstraint() {
+	// The helper pod should be correctly scheduled
+	p.kustomizeDir = "pv-with-legacy-affinity"
+
+	runTest(p, []string{p.config.IMAGE}, "kubectl wait pv pvc-to-clean-up --for delete --timeout=120s", "")
+}
+
+func waitCondition(waitCondition string) string {
+	return fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition)
+}
+
+func runTest(p *PodTestSuite, images []string, waitCmd, volumeType string) {
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
 	var cmds []string
@@ -171,7 +182,7 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
 		cmds,
 		fmt.Sprintf("kustomize edit add label %s:%s -f", LabelKey, LabelValue),
 		"kustomize build | kubectl apply -f -",
-		fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition),
+		waitCmd,
 	)
 
 	for _, cmd := range cmds {
@@ -188,13 +199,15 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string)
 		}
 	}
 
-	typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
-	c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
-	typeCheckOutput, err := c.CombinedOutput()
-	if err != nil {
-		p.FailNow("", "failed to check volume type: %v", err)
-	}
-	if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
-		p.FailNow("volume Type not correct")
+	if volumeType != "" {
+		typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
+		c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
+		typeCheckOutput, err := c.CombinedOutput()
+		if err != nil {
+			p.FailNow("", "failed to check volume type: %v", err)
+		}
+		if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
+			p.FailNow("volume Type not correct")
+		}
 	}
 }
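The shape of the refactor is visible in the call sites above: runTest now takes a complete wait command rather than a bare condition name. The pre-existing tests wrap their condition with the new waitCondition helper, while the legacy-affinity test supplies a raw kubectl wait on the PV and passes an empty volumeType so the .spec type check is skipped:

	// Condition-based wait, as used by the existing pod tests.
	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)

	// Raw wait command: block until the pre-created PV has been deleted.
	runTest(p, []string{p.config.IMAGE}, "kubectl wait pv pvc-to-clean-up --for delete --timeout=120s", "")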

test/testdata/kind-cluster.yaml

Lines changed: 4 additions & 0 deletions
@@ -3,4 +3,8 @@ kind: Cluster
 nodes:
 - role: control-plane
 - role: worker
+  labels:
+    kubernetes.io/hostname: kind-worker1.hostname
 - role: worker
+  labels:
+    kubernetes.io/hostname: kind-worker2.hostname
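kind names these workers kind-worker and kind-worker2, so with the labels above, metadata.name and the kubernetes.io/hostname label now differ on every worker node. One way to eyeball the divergence by hand (an illustrative command, not part of the commit):

	kubectl get nodes -o custom-columns='NAME:.metadata.name,HOSTNAME:.metadata.labels.kubernetes\.io/hostname'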

test/testdata/pod-with-node-affinity/patch.yaml

Lines changed: 1 addition & 1 deletion
@@ -11,4 +11,4 @@ spec:
       - key: kubernetes.io/hostname
         operator: In
         values:
-        - kind-worker
+        - kind-worker1.hostname
test/testdata/pv-with-legacy-affinity/kustomization.yaml

Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../../../deploy
+- pv.yaml
+commonLabels:
+  app: local-path-provisioner
+images:
+- name: rancher/local-path-provisioner
+  newTag: dev
test/testdata/pv-with-legacy-affinity/pv.yaml

Lines changed: 38 additions & 0 deletions

@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    local.path.provisioner/selected-node: kind-worker
+    pv.kubernetes.io/provisioned-by: rancher.io/local-path
+  finalizers:
+  - kubernetes.io/pv-protection
+  labels:
+    test/avoid-cleanup: "true"
+  name: pvc-to-clean-up
+spec:
+  accessModes:
+  - ReadWriteOnce
+  capacity:
+    storage: 100Mi
+  hostPath:
+    path: /opt/local-path-provisioner/default/local-path-pvc
+    type: DirectoryOrCreate
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: kubernetes.io/hostname
+          operator: In
+          values:
+          - kind-worker1.hostname
+  claimRef:
+    apiVersion: v1
+    kind: PersistentVolumeClaim
+    name: no-such-pvc
+    namespace: default
+    # The PVC "definitely doesn't exist any more"
+    resourceVersion: "1"
+    uid: 12345678-1234-5678-9abc-123456789abc
+  persistentVolumeReclaimPolicy: Delete
+  storageClassName: local-path-custom-path-pattern
+  volumeMode: Filesystem
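The fixture mimics a PV written by an older provisioner: the selected-node annotation records a node name (kind-worker), while the node affinity pins the hostname label (kind-worker1.hostname). Because the claimRef points at a PVC that no longer exists and the reclaim policy is Delete, the provisioner must release the PV by siting a cleanup helper pod on the node that carries that label; only then does the PV disappear. The test asserts exactly that, using the wait command shown in pod_test.go above:

	kubectl wait pv pvc-to-clean-up --for delete --timeout=120s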

test/util.go

Lines changed: 1 addition & 1 deletion
@@ -78,7 +78,7 @@ func testdataFile(fields ...string) string {
 func deleteKustomizeDeployment(t *testing.T, kustomizeDir string, envs []string) error {
 	_, err := runCmd(
 		t,
-		"kustomize build | kubectl delete --timeout=180s -f -",
+		"kustomize build | kubectl delete --timeout=180s -f - -l 'test/avoid-cleanup!=true'",
 		testdataFile(kustomizeDir),
 		envs,
 		nil,
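The added -l 'test/avoid-cleanup!=true' selector makes the generic teardown skip any resource labelled test/avoid-cleanup=true (a != selector also matches resources that lack the label entirely), so the pre-created PV is left to be deleted by the provisioner under test rather than by kubectl during cleanup.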
