diff --git a/CHANGELOG.md b/CHANGELOG.md
index ec758d49..b4b1eb15 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -37,6 +37,7 @@ All notable changes to this project will be documented in this file.
 
 - test: ZooKeeper 3.9.2 removed ([#853]).
 - Support for Kafka 3.7.1 and 3.8.0 removed ([#860]).
+- Remove the `-nodeport` discovery ConfigMap ([#868]).
 
 [#840]: https://github.com/stackabletech/kafka-operator/pull/840
 [#844]: https://github.com/stackabletech/kafka-operator/pull/844
@@ -48,6 +49,7 @@ All notable changes to this project will be documented in this file.
 [#860]: https://github.com/stackabletech/kafka-operator/pull/860
 [#861]: https://github.com/stackabletech/kafka-operator/pull/861
 [#862]: https://github.com/stackabletech/kafka-operator/pull/862
+[#868]: https://github.com/stackabletech/kafka-operator/pull/868
 
 ## [25.3.0] - 2025-03-21
 
diff --git a/docs/modules/kafka/pages/reference/discovery.adoc b/docs/modules/kafka/pages/reference/discovery.adoc
index fe21ced5..eb30da5b 100644
--- a/docs/modules/kafka/pages/reference/discovery.adoc
+++ b/docs/modules/kafka/pages/reference/discovery.adoc
@@ -9,8 +9,6 @@ The Stackable Operator for Apache Kafka publishes a xref:concepts:service_discov
 The bundle includes a thrift connection string to access the Kafka broker service. This string may be used by other operators or tools to configure their products with access to Kafka.
 This is limited to internal cluster access.
 
-NOTE: The operator also creates a deprecated secondary discovery ConfigMap named `\{clusterName\}-nodeport`. In 24.7 and older, this ConfigMap was used to access the Kafka installation from outside the Kubernetes cluster. In 24.11, this was replaced by xref:usage-guide/listenerclass.adoc[Listener-based exposition], and the `-nodeport` ConfigMap was made equivalent to the primary one. This behaviour is deprecated as of 25.3, and will be removed in the next release. Any existing uses of the `-nodeport` ConfigMap should be migrated to the primary. See https://github.com/stackabletech/kafka-operator/issues/765[the deprecation issue] for more details.
-
 == Example
 
 Given the following Kafka cluster:
diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs
index d48d6213..7a6d01b4 100644
--- a/rust/operator-binary/src/discovery.rs
+++ b/rust/operator-binary/src/discovery.rs
@@ -55,77 +55,33 @@ pub enum Error {
     },
 }
 
-/// Builds discovery [`ConfigMap`]s for connecting to a [`v1alpha1::KafkaCluster`] for all expected
-/// scenarios.
-pub async fn build_discovery_configmaps(
+/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain
+/// [`v1alpha1::KafkaCluster`].
+pub fn build_discovery_configmap(
     kafka: &v1alpha1::KafkaCluster,
     owner: &impl Resource<DynamicType = ()>,
     resolved_product_image: &ResolvedProductImage,
     kafka_security: &KafkaTlsSecurity,
     listeners: &[listener::v1alpha1::Listener],
-) -> Result<Vec<ConfigMap>, Error> {
-    let name = owner.name_unchecked();
+) -> Result<ConfigMap, Error> {
     let port_name = if kafka_security.has_kerberos_enabled() {
         kafka_security.bootstrap_port_name()
     } else {
         kafka_security.client_port_name()
     };
-    Ok(vec![
-        build_discovery_configmap(
-            kafka,
-            owner,
-            resolved_product_image,
-            &name,
-            listener_hosts(listeners, port_name)?,
-        )?,
-        {
-            let mut nodeport = build_discovery_configmap(
-                kafka,
-                owner,
-                resolved_product_image,
-                &format!("{name}-nodeport"),
-                listener_hosts(listeners, port_name)?,
-            )?;
-            nodeport
-                .metadata
-                .annotations
-                .get_or_insert_with(Default::default)
-                .insert(
-                    "stackable.tech/deprecated".to_string(),
-                    format!(
-                        "Deprecated in 25.3, and scheduled for removal in the next version. \
-                        Use {name:?} instead. \
-                        See https://github.com/stackabletech/kafka-operator/issues/765 for more."
-                    ),
-                );
-            nodeport
-        },
-    ])
-}
-
-/// Build a discovery [`ConfigMap`] containing information about how to connect to a certain
-/// [`v1alpha1::KafkaCluster`].
-///
-/// `hosts` will usually come from [`listener_hosts`].
-fn build_discovery_configmap(
-    kafka: &v1alpha1::KafkaCluster,
-    owner: &impl Resource<DynamicType = ()>,
-    resolved_product_image: &ResolvedProductImage,
-    name: &str,
-    hosts: impl IntoIterator<Item = (impl Into<String>, u16)>,
-) -> Result<ConfigMap, Error> {
     // Write a list of bootstrap servers in the format that Kafka clients:
     // "{host1}:{port1},{host2:port2},..."
-    let bootstrap_servers = hosts
+    let bootstrap_servers = listener_hosts(listeners, port_name)?
         .into_iter()
-        .map(|(host, port)| format!("{}:{}", host.into(), port))
+        .map(|(host, port)| format!("{}:{}", host, port))
        .collect::<Vec<_>>()
        .join(",");
 
     ConfigMapBuilder::new()
         .metadata(
             ObjectMetaBuilder::new()
                 .name_and_namespace(kafka)
-                .name(name)
+                .name(owner.name_unchecked())
                 .ownerreference_from_resource(owner, None, Some(true))
                 .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu {
                     kafka: ObjectRef::from_obj(kafka),
diff --git a/rust/operator-binary/src/kafka_controller.rs b/rust/operator-binary/src/kafka_controller.rs
index 8c020db2..5c9f0378 100644
--- a/rust/operator-binary/src/kafka_controller.rs
+++ b/rust/operator-binary/src/kafka_controller.rs
@@ -84,7 +84,7 @@ use crate::{
         security::KafkaTlsSecurity,
         v1alpha1,
     },
-    discovery::{self, build_discovery_configmaps},
+    discovery::{self, build_discovery_configmap},
     kerberos::{self, add_kerberos_pod_config},
     operations::{
         graceful_shutdown::{add_graceful_shutdown_config, graceful_shutdown_config_properties},
@@ -596,21 +596,19 @@ pub async fn reconcile_kafka(
             .context(FailedToCreatePdbSnafu)?;
     }
 
-    for discovery_cm in build_discovery_configmaps(
+    let discovery_cm = build_discovery_configmap(
         kafka,
         kafka,
         &resolved_product_image,
         &kafka_security,
         &bootstrap_listeners,
     )
-    .await
-    .context(BuildDiscoveryConfigSnafu)?
-    {
-        cluster_resources
-            .add(client, discovery_cm)
-            .await
-            .context(ApplyDiscoveryConfigSnafu)?;
-    }
+    .context(BuildDiscoveryConfigSnafu)?;
+
+    cluster_resources
+        .add(client, discovery_cm)
+        .await
+        .context(ApplyDiscoveryConfigSnafu)?;
 
     let cluster_operation_cond_builder =
         ClusterOperationsConditionBuilder::new(&kafka.spec.cluster_operation);
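
For reviewers who want to sanity-check the discovery data: after this change a single ConfigMap carries the bootstrap servers as a comma-separated `host:port` list, as described by the comment in `build_discovery_configmap`. Below is a minimal, self-contained sketch of that string format only; the helper name and the hostnames are made up for illustration and are not part of this patch.

```rust
// Illustrative only: mirrors the bootstrap_servers construction in
// build_discovery_configmap (join "host:port" pairs with commas).
fn format_bootstrap_servers(hosts: &[(&str, u16)]) -> String {
    hosts
        .iter()
        .map(|(host, port)| format!("{host}:{port}"))
        .collect::<Vec<_>>()
        .join(",")
}

fn main() {
    // Two hypothetical listener addresses produce one comma-separated bootstrap string.
    let servers = format_bootstrap_servers(&[
        ("simple-kafka-broker-default-0.example.com", 9093),
        ("simple-kafka-broker-default-1.example.com", 9093),
    ]);
    assert_eq!(
        servers,
        "simple-kafka-broker-default-0.example.com:9093,simple-kafka-broker-default-1.example.com:9093"
    );
    println!("{servers}");
}
```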