
Commit d7bb304

🌱 Test BYO certificates (#10681)
* Test BYO certificates

  This adds an integration test for a BYO CA certificate. It is practically a copy of
  TestReconcileInitializeControlPlane, but with a custom CA and a check to verify that
  the generated kubeconfig really uses this CA.

  Signed-off-by: Lennart Jern <lennart.jern@est.tech>

* Addressing comments

  Signed-off-by: Muhammad Adil Ghaffar <muhammad.adil.ghaffar@est.tech>

* Use individual cache options for test suites

  Signed-off-by: Lennart Jern <lennart.jern@est.tech>

---------

Signed-off-by: Lennart Jern <lennart.jern@est.tech>
Signed-off-by: Muhammad Adil Ghaffar <muhammad.adil.ghaffar@est.tech>
Co-authored-by: Muhammad Adil Ghaffar <muhammad.adil.ghaffar@est.tech>
1 parent d9f3257 commit d7bb304
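
In practice, "bring your own" CA means the "<cluster-name>-ca" Secret already exists before KubeadmControlPlane reconciles the cluster, so the controller reuses it instead of generating a new CA; the test below simulates exactly that. A minimal sketch of such a setup, not part of this commit — the helper name and the client variable are illustrative, while the constants are the cluster-api ones the test itself uses (util/secret and api/v1beta1):

package byocaexample

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/secret"
)

// CreateUserCA creates the "<clusterName>-ca" Secret up front, so the control
// plane controller finds an existing CA and does not generate its own.
// caCert and caKey are the PEM-encoded CA certificate and private key.
func CreateUserCA(ctx context.Context, c client.Client, clusterName, namespace string, caCert, caKey []byte) error {
	caSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName + "-ca",
			Namespace: namespace,
			Labels: map[string]string{
				clusterv1.ClusterNameLabel: clusterName,
			},
		},
		Data: map[string][]byte{
			secret.TLSCrtDataName: caCert,
			secret.TLSKeyDataName: caKey,
		},
		Type: clusterv1.ClusterSecretType,
	}
	return c.Create(ctx, caSecret)
}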

File tree: 4 files changed, +276 -2 lines

controlplane/kubeadm/internal/controllers/controller_test.go

Lines changed: 242 additions & 0 deletions
@@ -24,6 +24,7 @@ import (
 	"crypto/x509/pkix"
 	"fmt"
 	"math/big"
+	"path"
 	"sync"
 	"testing"
 	"time"
@@ -37,6 +38,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -1406,6 +1408,246 @@ kubernetesVersion: metav1.16.1
 	}, 30*time.Second).Should(Succeed())
 }

+func TestReconcileInitializeControlPlane_withUserCA(t *testing.T) {
+	setup := func(t *testing.T, g *WithT) *corev1.Namespace {
+		t.Helper()
+
+		t.Log("Creating the namespace")
+		ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane")
+		g.Expect(err).ToNot(HaveOccurred())
+
+		return ns
+	}
+
+	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
+		t.Helper()
+
+		t.Log("Deleting the namespace")
+		g.Expect(env.Delete(ctx, ns)).To(Succeed())
+	}
+
+	g := NewWithT(t)
+	namespace := setup(t, g)
+	defer teardown(t, g, namespace)
+
+	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: namespace.Name})
+	cluster.Spec = clusterv1.ClusterSpec{
+		ControlPlaneEndpoint: clusterv1.APIEndpoint{
+			Host: "test.local",
+			Port: 9999,
+		},
+	}
+
+	caCertificate := &secret.Certificate{
+		Purpose:  secret.ClusterCA,
+		CertFile: path.Join(secret.DefaultCertificatesDir, "ca.crt"),
+		KeyFile:  path.Join(secret.DefaultCertificatesDir, "ca.key"),
+	}
+	// The certificate is user provided so no owner references should be added.
+	g.Expect(caCertificate.Generate()).To(Succeed())
+	certSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace.Name,
+			Name:      cluster.Name + "-ca",
+			Labels: map[string]string{
+				clusterv1.ClusterNameLabel: cluster.Name,
+			},
+		},
+		Data: map[string][]byte{
+			secret.TLSKeyDataName: caCertificate.KeyPair.Key,
+			secret.TLSCrtDataName: caCertificate.KeyPair.Cert,
+		},
+		Type: clusterv1.ClusterSecretType,
+	}
+
+	g.Expect(env.Create(ctx, cluster)).To(Succeed())
+	patchHelper, err := patch.NewHelper(cluster, env)
+	g.Expect(err).ToNot(HaveOccurred())
+	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
+	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
+
+	g.Expect(env.CreateAndWait(ctx, certSecret)).To(Succeed())
+
+	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"kind":       "GenericInfrastructureMachineTemplate",
+			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
+			"metadata": map[string]interface{}{
+				"name":      "infra-foo",
+				"namespace": cluster.Namespace,
+			},
+			"spec": map[string]interface{}{
+				"template": map[string]interface{}{
+					"spec": map[string]interface{}{
+						"hello": "world",
+					},
+				},
+			},
+		},
+	}
+	g.Expect(env.CreateAndWait(ctx, genericInfrastructureMachineTemplate)).To(Succeed())
+
+	kcp := &controlplanev1.KubeadmControlPlane{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: cluster.Namespace,
+			Name:      "foo",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					Kind:       "Cluster",
+					APIVersion: clusterv1.GroupVersion.String(),
+					Name:       cluster.Name,
+					UID:        cluster.UID,
+				},
+			},
+		},
+		Spec: controlplanev1.KubeadmControlPlaneSpec{
+			Replicas: nil,
+			Version:  "v1.16.6",
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Kind:       genericInfrastructureMachineTemplate.GetKind(),
+					APIVersion: genericInfrastructureMachineTemplate.GetAPIVersion(),
+					Name:       genericInfrastructureMachineTemplate.GetName(),
+					Namespace:  cluster.Namespace,
+				},
+			},
+			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
+		},
+	}
+	g.Expect(env.Create(ctx, kcp)).To(Succeed())
+
+	corednsCM := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "coredns",
+			Namespace: namespace.Name,
+		},
+		Data: map[string]string{
+			"Corefile": "original-core-file",
+		},
+	}
+	g.Expect(env.Create(ctx, corednsCM)).To(Succeed())
+
+	kubeadmCM := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "kubeadm-config",
+			Namespace: namespace.Name,
+		},
+		Data: map[string]string{
+			"ClusterConfiguration": `apiServer:
+dns:
+  type: CoreDNS
+imageRepository: registry.k8s.io
+kind: ClusterConfiguration
+kubernetesVersion: metav1.16.1`,
+		},
+	}
+	g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed())
+
+	corednsDepl := &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "coredns",
+			Namespace: namespace.Name,
+		},
+		Spec: appsv1.DeploymentSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"coredns": "",
+				},
+			},
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "coredns",
+					Labels: map[string]string{
+						"coredns": "",
+					},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{{
+						Name:  "coredns",
+						Image: "registry.k8s.io/coredns:1.6.2",
+					}},
+				},
+			},
+		},
+	}
+	g.Expect(env.Create(ctx, corednsDepl)).To(Succeed())
+
+	r := &KubeadmControlPlaneReconciler{
+		Client:              env,
+		SecretCachingClient: secretCachingClient,
+		recorder:            record.NewFakeRecorder(32),
+		managementCluster: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: &fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
+		managementClusterUncached: &fakeManagementCluster{
+			Management: &internal.Management{Client: env},
+			Workload: &fakeWorkloadCluster{
+				Workload: &internal.Workload{
+					Client: env,
+				},
+				Status: internal.ClusterStatus{},
+			},
+		},
+		ssaCache: ssa.NewCache("test-controller"),
+	}
+
+	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+	g.Expect(err).ToNot(HaveOccurred())
+	// this first requeue is to add finalizer
+	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
+	g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
+	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+	g.Eventually(func(g Gomega) {
+		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed())
+		// Expect the referenced infrastructure template to have a Cluster Owner Reference.
+		g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed())
+		g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{
+			APIVersion: clusterv1.GroupVersion.String(),
+			Kind:       "Cluster",
+			Name:       cluster.Name,
+			UID:        cluster.UID,
+		}))
+
+		// Always expect that the Finalizer is set on the passed in resource
+		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
+
+		g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
+		g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1))
+		g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue())
+
+		// Verify that the kubeconfig is using the custom CA
+		kBytes, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster))
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(kBytes).NotTo(BeEmpty())
+		k, err := clientcmd.Load(kBytes)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(k).NotTo(BeNil())
+		g.Expect(k.Clusters[cluster.Name]).NotTo(BeNil())
+		g.Expect(k.Clusters[cluster.Name].CertificateAuthorityData).To(Equal(caCertificate.KeyPair.Cert))
+
+		machineList := &clusterv1.MachineList{}
+		g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
+		g.Expect(machineList.Items).To(HaveLen(1))
+
+		machine := machineList.Items[0]
+		g.Expect(machine.Name).To(HavePrefix(kcp.Name))
+		// Newly cloned infra objects should have the infraref annotation.
+		infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName()))
+		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String()))
+	}, 30*time.Second).Should(Succeed())
+}
+
 func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
 	setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
 		t.Helper()
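
The new assertions at the end of the test decode the generated kubeconfig with clientcmd and compare its embedded CA bundle against the user-provided certificate. Outside the Gomega/envtest machinery the same check looks roughly like this — a sketch only, with placeholder file names standing in for kubeconfig.FromSecret and caCertificate.KeyPair.Cert:

package main

import (
	"bytes"
	"fmt"
	"os"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfigBytes, err := os.ReadFile("kubeconfig") // placeholder; in the test: kubeconfig.FromSecret(...)
	if err != nil {
		panic(err)
	}
	expectedCA, err := os.ReadFile("ca.crt") // placeholder; in the test: caCertificate.KeyPair.Cert
	if err != nil {
		panic(err)
	}

	cfg, err := clientcmd.Load(kubeconfigBytes)
	if err != nil {
		panic(err)
	}
	for name, c := range cfg.Clusters {
		// CertificateAuthorityData holds the CA bundle embedded in the kubeconfig.
		fmt.Printf("cluster %q uses the provided CA: %v\n", name, bytes.Equal(c.CertificateAuthorityData, expectedCA))
	}
}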

controlplane/kubeadm/internal/controllers/suite_test.go

Lines changed: 15 additions & 0 deletions
@@ -23,9 +23,13 @@ import (
 	"testing"

 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"

+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/internal/test/envtest"
 )

@@ -48,8 +52,19 @@ func TestMain(m *testing.M) {
 			panic(fmt.Sprintf("unable to create secretCachingClient: %v", err))
 		}
 	}
+	req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
+	clusterSecretCacheSelector := labels.NewSelector().Add(*req)
 	os.Exit(envtest.Run(ctx, envtest.RunInput{
 		M: m,
+		ManagerCacheOptions: cache.Options{
+			ByObject: map[client.Object]cache.ByObject{
+				// Only cache Secrets with the cluster name label.
+				// This is similar to the real world.
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
+		},
 		ManagerUncachedObjs: []client.Object{
 			&corev1.ConfigMap{},
 			&corev1.Secret{},
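
The selector built in TestMain above means the manager cache holds only Secrets that carry the cluster-name label, mirroring how the real managers are configured. A small standalone sketch of what that selector matches — the label key is written out literally here; in the suite it comes from clusterv1.ClusterNameLabel:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Same construction as in TestMain above: "the cluster-name label must exist".
	req, err := labels.NewRequirement("cluster.x-k8s.io/cluster-name", selection.Exists, nil)
	if err != nil {
		panic(err)
	}
	sel := labels.NewSelector().Add(*req)

	// A Secret labeled by Cluster API (e.g. the "<cluster>-ca" Secret) is cached ...
	fmt.Println(sel.Matches(labels.Set{"cluster.x-k8s.io/cluster-name": "foo"})) // true
	// ... while unrelated Secrets stay out of the manager cache.
	fmt.Println(sel.Matches(labels.Set{"app": "coredns"})) // false
}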

exp/addons/internal/controllers/suite_test.go

Lines changed: 14 additions & 0 deletions
@@ -25,12 +25,15 @@ import (

 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"

+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/api/v1beta1/index"
 	"sigs.k8s.io/cluster-api/controllers/clustercache"
 	"sigs.k8s.io/cluster-api/controllers/remote"
@@ -113,9 +116,20 @@ func TestMain(m *testing.M) {
 		}
 	}

+	req, _ := labels.NewRequirement(clusterv1.ClusterNameLabel, selection.Exists, nil)
+	clusterSecretCacheSelector := labels.NewSelector().Add(*req)
 	os.Exit(envtest.Run(ctx, envtest.RunInput{
 		M: m,
 		SetupEnv: func(e *envtest.Environment) { env = e },
+		ManagerCacheOptions: cache.Options{
+			ByObject: map[client.Object]cache.ByObject{
+				// Only cache Secrets with the cluster name label.
+				// This is similar to the real world.
+				&corev1.Secret{}: {
+					Label: clusterSecretCacheSelector,
+				},
+			},
+		},
 		ManagerUncachedObjs: []client.Object{
 			&corev1.ConfigMap{},
 			&corev1.Secret{},

internal/test/envtest/environment.go

Lines changed: 5 additions & 2 deletions
@@ -46,6 +46,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/config"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
@@ -122,6 +123,7 @@ func init() {
 type RunInput struct {
 	M                   *testing.M
 	ManagerUncachedObjs []client.Object
+	ManagerCacheOptions cache.Options
 	SetupIndexes        func(ctx context.Context, mgr ctrl.Manager)
 	SetupReconcilers    func(ctx context.Context, mgr ctrl.Manager)
 	SetupEnv            func(e *Environment)
@@ -147,7 +149,7 @@ func Run(ctx context.Context, input RunInput) int {
 	}

 	// Bootstrapping test environment
-	env := newEnvironment(input.ManagerUncachedObjs...)
+	env := newEnvironment(input.ManagerCacheOptions, input.ManagerUncachedObjs...)

 	if input.SetupIndexes != nil {
 		input.SetupIndexes(ctx, env.Manager)
@@ -229,7 +231,7 @@ type Environment struct {
 //
 // This function should be called only once for each package you're running tests within,
 // usually the environment is initialized in a suite_test.go file within a `BeforeSuite` ginkgo block.
-func newEnvironment(uncachedObjs ...client.Object) *Environment {
+func newEnvironment(managerCacheOptions cache.Options, uncachedObjs ...client.Object) *Environment {
 	// Get the root of the current file to use in CRD paths.
 	_, filename, _, _ := goruntime.Caller(0) //nolint:dogsled
 	root := path.Join(path.Dir(filename), "..", "..", "..")
@@ -310,6 +312,7 @@ func newEnvironment(uncachedObjs ...client.Object) *Environment {
 				Host: host,
 			},
 		),
+		Cache: managerCacheOptions,
 	}

 	mgr, err := ctrl.NewManager(env.Config, options)
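
One note on this plumbing: ManagerCacheOptions is optional. A suite that leaves it unset passes the zero value of cache.Options through newEnvironment into the manager options, and controller-runtime then falls back to its default cache (no per-object selectors), so existing suites keep their previous behavior. A minimal sketch of that zero-value path, with an illustrative function name:

package envtestexample

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
)

// zeroValueCacheOptions illustrates the backward-compatible default: an unset
// ManagerCacheOptions is just cache.Options{}, which leaves the manager cache
// with controller-runtime's defaults (all watched objects, no label selectors).
func zeroValueCacheOptions() ctrl.Options {
	var managerCacheOptions cache.Options // zero value, as for suites that do not set the field
	return ctrl.Options{
		Cache: managerCacheOptions,
	}
}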
