From 05e5c16cc7e90a14a95b77d6fff8d9fff93f8815 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 26 Mar 2025 14:09:21 +0100 Subject: [PATCH 01/22] output listener refs --- deploy/helm/hbase-operator/crds/crds.yaml | 38 ++- .../helm/hbase-operator/templates/roles.yaml | 6 + rust/operator-binary/src/crd/mod.rs | 257 +++++++++++++++--- rust/operator-binary/src/discovery.rs | 33 ++- rust/operator-binary/src/hbase_controller.rs | 150 +++++----- 5 files changed, 349 insertions(+), 135 deletions(-) diff --git a/deploy/helm/hbase-operator/crds/crds.yaml b/deploy/helm/hbase-operator/crds/crds.yaml index 6294fe40..5669bed5 100644 --- a/deploy/helm/hbase-operator/crds/crds.yaml +++ b/deploy/helm/hbase-operator/crds/crds.yaml @@ -73,20 +73,6 @@ spec: hdfsConfigMapName: description: Name of the [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery) for an HDFS cluster. type: string - listenerClass: - default: cluster-internal - description: |- - This field controls which type of Service the Operator creates for this HbaseCluster: - - * cluster-internal: Use a ClusterIP service - - * external-unstable: Use a NodePort service - - This is a temporary solution with the goal to keep yaml manifests forward compatible. In the future, this setting will control which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change. - enum: - - cluster-internal - - external-unstable - type: string vectorAggregatorConfigMapName: description: Name of the Vector aggregator [discovery ConfigMap](https://docs.stackable.tech/home/nightly/concepts/service_discovery). It must contain the key `ADDRESS` with the address of the Vector aggregator. Follow the [logging tutorial](https://docs.stackable.tech/home/nightly/tutorials/logging-vector-aggregator) to learn how to configure log aggregation with Vector. nullable: true @@ -210,6 +196,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} @@ -460,6 +450,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} @@ -691,6 +685,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} @@ -969,6 +967,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. 
All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} @@ -1228,6 +1230,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} @@ -1478,6 +1484,10 @@ spec: hbaseRootdir: nullable: true type: string + listenerClass: + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + nullable: true + type: string logging: default: containers: {} diff --git a/deploy/helm/hbase-operator/templates/roles.yaml b/deploy/helm/hbase-operator/templates/roles.yaml index f13d450b..7bc43bdb 100644 --- a/deploy/helm/hbase-operator/templates/roles.yaml +++ b/deploy/helm/hbase-operator/templates/roles.yaml @@ -77,6 +77,12 @@ rules: verbs: - create - patch + - apiGroups: + - listeners.stackable.tech + resources: + - listeners + verbs: + - get - apiGroups: - {{ include "operator.name" . }}.stackable.tech resources: diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index fd7a5aee..b67cf23f 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -1,5 +1,10 @@ -use std::collections::{BTreeMap, HashMap}; +use std::{ + borrow::Cow, + collections::{BTreeMap, HashMap}, + num::TryFromIntError, +}; +use futures::future::try_join_all; use product_config::types::PropertyNameKind; use security::AuthenticationConfig; use serde::{Deserialize, Serialize}; @@ -9,6 +14,7 @@ use stackable_operator::{ commons::{ affinity::StackableAffinity, cluster_operation::ClusterOperation, + listener::Listener, product_image_selection::ProductImage, resources::{ CpuLimitsFragment, MemoryLimitsFragment, NoRuntimeLimits, NoRuntimeLimitsFragment, @@ -20,7 +26,7 @@ use stackable_operator::{ merge::{Atomic, Merge}, }, k8s_openapi::{ - api::core::v1::{EnvVar, PodTemplateSpec}, + api::core::v1::{EnvVar, Pod, PodTemplateSpec}, apimachinery::pkg::api::resource::Quantity, DeepMerge, }, @@ -31,6 +37,7 @@ use stackable_operator::{ schemars::{self, JsonSchema}, status::condition::{ClusterCondition, HasStatusCondition}, time::Duration, + utils::cluster_info::KubernetesClusterInfo, }; use stackable_versioned::versioned; use strum::{Display, EnumIter, EnumString}; @@ -81,6 +88,10 @@ pub const HBASE_REST_UI_PORT: u16 = 8085; // Newer versions use the same port as the UI because Hbase provides it's own metrics API pub const METRICS_PORT: u16 = 9100; +pub const DEFAULT_LISTENER_CLASS: &str = "cluster-internal"; +pub const LISTENER_VOLUME_NAME: &str = "listener"; +pub const LISTENER_VOLUME_DIR: &str = "/stackable/listener"; + const DEFAULT_REGION_MOVER_TIMEOUT: Duration = Duration::from_minutes_unchecked(59); const DEFAULT_REGION_MOVER_DELTA_TO_SHUTDOWN: Duration = Duration::from_minutes_unchecked(1); @@ -106,6 +117,29 @@ pub enum Error { #[snafu(display("incompatible merge types"))] IncompatibleMergeTypes, + + #[snafu(display("object has no associated namespace"))] + NoNamespace, + + #[snafu(display("unable to get {listener} (for {pod})"))] + 
GetPodListener { + source: stackable_operator::client::Error, + listener: ObjectRef, + pod: ObjectRef, + }, + + #[snafu(display("{listener} (for {pod}) has no address"))] + PodListenerHasNoAddress { + listener: ObjectRef, + pod: ObjectRef, + }, + + #[snafu(display("port {port} ({port_name:?}) is out of bounds, must be within {range:?}", range = 0..=u16::MAX))] + PortOutOfBounds { + source: TryFromIntError, + port_name: String, + port: i32, + }, } #[versioned(version(name = "v1alpha1"))] @@ -175,18 +209,6 @@ pub mod versioned { /// for a ZooKeeper cluster. pub zookeeper_config_map_name: String, - /// This field controls which type of Service the Operator creates for this HbaseCluster: - /// - /// * cluster-internal: Use a ClusterIP service - /// - /// * external-unstable: Use a NodePort service - /// - /// This is a temporary solution with the goal to keep yaml manifests forward compatible. - /// In the future, this setting will control which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) - /// will be used to expose the service, and ListenerClass names will stay the same, allowing for a non-breaking change. - #[serde(default)] - pub listener_class: CurrentlySupportedListenerClasses, - /// Settings related to user [authentication](DOCS_BASE_URL_PLACEHOLDER/usage-guide/security). pub authentication: Option, @@ -547,6 +569,173 @@ impl v1alpha1::HbaseCluster { } .to_string() } + + pub fn rolegroup_ref( + &self, + role_name: impl Into, + group_name: impl Into, + ) -> RoleGroupRef { + RoleGroupRef { + cluster: ObjectRef::from_obj(self), + role: role_name.into(), + role_group: group_name.into(), + } + } + + pub fn rolegroup_ref_and_replicas( + &self, + role: &HbaseRole, + ) -> Vec<(RoleGroupRef, u16)> { + match role { + HbaseRole::Master => self + .spec + .masters + .iter() + .flat_map(|role| &role.role_groups) + // Order rolegroups consistently, to avoid spurious downstream rewrites + .collect::>() + .into_iter() + .map(|(rolegroup_name, role_group)| { + ( + self.rolegroup_ref(HbaseRole::Master.to_string(), rolegroup_name), + role_group.replicas.unwrap_or_default(), + ) + }) + .collect(), + HbaseRole::RegionServer => self + .spec + .region_servers + .iter() + .flat_map(|role| &role.role_groups) + // Order rolegroups consistently, to avoid spurious downstream rewrites + .collect::>() + .into_iter() + .map(|(rolegroup_name, role_group)| { + ( + self.rolegroup_ref(HbaseRole::RegionServer.to_string(), rolegroup_name), + role_group.replicas.unwrap_or_default(), + ) + }) + .collect(), + HbaseRole::RestServer => self + .spec + .rest_servers + .iter() + .flat_map(|role| &role.role_groups) + // Order rolegroups consistently, to avoid spurious downstream rewrites + .collect::>() + .into_iter() + .map(|(rolegroup_name, role_group)| { + ( + self.rolegroup_ref(HbaseRole::RestServer.to_string(), rolegroup_name), + role_group.replicas.unwrap_or_default(), + ) + }) + .collect(), + } + } + + pub fn pod_refs( + &self, + role: &HbaseRole, + hbase_version: &str, + ) -> Result, Error> { + let ns = self.metadata.namespace.clone().context(NoNamespaceSnafu)?; + let rolegroup_ref_and_replicas = self.rolegroup_ref_and_replicas(role); + + Ok(rolegroup_ref_and_replicas + .iter() + .flat_map(|(rolegroup_ref, replicas)| { + let ns = ns.clone(); + (0..*replicas).map(move |i| HbasePodRef { + namespace: ns.clone(), + role_group_service_name: rolegroup_ref.object_name(), + pod_name: format!("{}-{}", rolegroup_ref.object_name(), i), + ports: self + .ports(role, hbase_version) + .iter() + .map(|(n, 
p)| (n.clone(), *p)) + .collect(), + fqdn_override: None, + }) + }) + .collect()) + } + + pub async fn listener_refs( + &self, + client: &stackable_operator::client::Client, + role: &HbaseRole, + hbase_version: &str, + ) -> Result, Error> { + let pod_refs = self.pod_refs(role, hbase_version)?; + try_join_all(pod_refs.into_iter().map(|pod_ref| async { + let listener_name = format!("{}-{LISTENER_VOLUME_NAME}", pod_ref.pod_name); + let listener_ref = + || ObjectRef::::new(&listener_name).within(&pod_ref.namespace); + let pod_obj_ref = + || ObjectRef::::new(&pod_ref.pod_name).within(&pod_ref.namespace); + let listener = client + .get::(&listener_name, &pod_ref.namespace) + .await + .context(GetPodListenerSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + let listener_address = listener + .status + .and_then(|s| s.ingress_addresses?.into_iter().next()) + .context(PodListenerHasNoAddressSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + Ok(HbasePodRef { + fqdn_override: Some(listener_address.address), + ports: listener_address + .ports + .into_iter() + .map(|(port_name, port)| { + let port = u16::try_from(port).context(PortOutOfBoundsSnafu { + port_name: &port_name, + port, + })?; + Ok((port_name, port)) + }) + .collect::>()?, + ..pod_ref + }) + })) + .await + } +} + +/// Reference to a single `Pod` that is a component of a [`HbaseCluster`] +/// +/// Used for service discovery. +#[derive(Debug)] +pub struct HbasePodRef { + pub namespace: String, + pub role_group_service_name: String, + pub pod_name: String, + pub fqdn_override: Option, + pub ports: HashMap, +} + +impl HbasePodRef { + pub fn fqdn(&self, cluster_info: &KubernetesClusterInfo) -> Cow { + self.fqdn_override.as_deref().map_or_else( + || { + Cow::Owned(format!( + "{pod_name}.{role_group_service_name}.{namespace}.svc.{cluster_domain}", + pod_name = self.pod_name, + role_group_service_name = self.role_group_service_name, + namespace = self.namespace, + cluster_domain = cluster_info.cluster_domain, + )) + }, + Cow::Borrowed, + ) + } } pub fn merged_env(rolegroup_config: Option<&BTreeMap>) -> Vec { @@ -565,27 +754,6 @@ pub fn merged_env(rolegroup_config: Option<&BTreeMap>) -> Vec String { - match self { - CurrentlySupportedListenerClasses::ClusterInternal => "ClusterIP".to_string(), - CurrentlySupportedListenerClasses::ExternalUnstable => "NodePort".to_string(), - } - } -} - #[derive(Clone, Debug, Deserialize, Eq, Hash, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct KerberosConfig { @@ -710,6 +878,7 @@ impl HbaseRole { affinity: get_affinity(cluster_name, self, hdfs_discovery_cm_name), graceful_shutdown_timeout: Some(graceful_shutdown_timeout), requested_secret_lifetime: Some(requested_secret_lifetime), + listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), } } @@ -810,6 +979,7 @@ impl AnyConfigFragment { cli_opts: None, }, requested_secret_lifetime: Some(HbaseRole::DEFAULT_REGION_SECRET_LIFETIME), + listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), }) } HbaseRole::RestServer => AnyConfigFragment::RestServer(HbaseConfigFragment { @@ -821,6 +991,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_REST_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, ), requested_secret_lifetime: Some(HbaseRole::DEFAULT_REST_SECRET_LIFETIME), + listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), }), HbaseRole::Master => AnyConfigFragment::Master(HbaseConfigFragment { hbase_rootdir: None, @@ -831,6 +1002,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_MASTER_GRACEFUL_SHUTDOWN_TIMEOUT, ), 
requested_secret_lifetime: Some(HbaseRole::DEFAULT_MASTER_SECRET_LIFETIME), + listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), }), } } @@ -908,6 +1080,10 @@ pub struct HbaseConfig { /// Please note that this can be shortened by the `maxCertificateLifetime` setting on the SecretClass issuing the TLS certificate. #[fragment_attrs(serde(default))] pub requested_secret_lifetime: Option, + + /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. + /// All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + pub listener_class: String, } impl Configuration for HbaseConfigFragment { @@ -1061,6 +1237,10 @@ pub struct RegionServerConfig { /// The operator will compute a timeout period for the region move that will not exceed the graceful shutdown timeout. #[fragment_attrs(serde(default))] pub region_mover: RegionMover, + + /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. + /// All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + pub listener_class: String, } impl Configuration for RegionServerConfigFragment { @@ -1181,6 +1361,13 @@ impl AnyServiceConfig { AnyServiceConfig::RestServer(config) => config.requested_secret_lifetime, } } + pub fn listener_class(&self) -> String { + match self { + AnyServiceConfig::Master(config) => config.listener_class.clone(), + AnyServiceConfig::RegionServer(config) => config.listener_class.clone(), + AnyServiceConfig::RestServer(config) => config.listener_class.clone(), + } + } /// Returns command line arguments to pass on to the region mover tool. /// The following arguments are excluded because they are already part of the diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 3b55ac8f..c6a5977a 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -6,12 +6,12 @@ use stackable_operator::{ builder::{configmap::ConfigMapBuilder, meta::ObjectMetaBuilder}, commons::product_image_selection::ResolvedProductImage, k8s_openapi::api::core::v1::ConfigMap, - kube::runtime::reflector::ObjectRef, + kube::{runtime::reflector::ObjectRef, ResourceExt}, utils::cluster_info::KubernetesClusterInfo, }; use crate::{ - crd::{v1alpha1, HbaseRole, HBASE_SITE_XML}, + crd::{v1alpha1, HbasePodRef, HbaseRole, HBASE_SITE_XML}, hbase_controller::build_recommended_labels, kerberos::{self, kerberos_discovery_config_properties}, zookeeper::ZookeeperConnectionInformation, @@ -84,3 +84,32 @@ pub fn build_discovery_configmap( .build() .context(BuildConfigMapSnafu) } + +pub fn build_endpoint_configmap( + hbase: &v1alpha1::HbaseCluster, + resolved_product_image: &ResolvedProductImage, + _role_podrefs: &[HbasePodRef], +) -> Result { + let name = hbase.name_unchecked(); + ConfigMapBuilder::new() + .metadata( + ObjectMetaBuilder::new() + .name_and_namespace(hbase) + .name(format!("{name}-endpoint")) + .ownerreference_from_resource(hbase, None, Some(true)) + .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { + hbase: ObjectRef::from_obj(hbase), + })? + .with_recommended_labels(build_recommended_labels( + hbase, + &resolved_product_image.app_version_label, + &HbaseRole::RegionServer.to_string(), + "discovery", + )) + .context(ObjectMetaSnafu)? 
+ .build(), + ) + .add_data("XXX", "YYY") + .build() + .context(BuildConfigMapSnafu) +} diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 6bfdfab6..9b4861af 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -61,7 +61,7 @@ use stackable_operator::{ time::Duration, utils::cluster_info::KubernetesClusterInfo, }; -use strum::{EnumDiscriminants, IntoStaticStr, ParseError}; +use strum::{EnumDiscriminants, IntoEnumIterator, IntoStaticStr, ParseError}; use crate::{ config::jvm::{ @@ -69,11 +69,12 @@ use crate::{ construct_role_specific_non_heap_jvm_args, }, crd::{ - merged_env, v1alpha1, AnyServiceConfig, Container, HbaseClusterStatus, HbaseRole, APP_NAME, - HBASE_ENV_SH, HBASE_REST_PORT_NAME_HTTP, HBASE_REST_PORT_NAME_HTTPS, HBASE_SITE_XML, - JVM_SECURITY_PROPERTIES_FILE, SSL_CLIENT_XML, SSL_SERVER_XML, + merged_env, v1alpha1, AnyServiceConfig, Container, HbaseClusterStatus, HbasePodRef, + HbaseRole, APP_NAME, HBASE_ENV_SH, HBASE_REST_PORT_NAME_HTTP, HBASE_REST_PORT_NAME_HTTPS, + HBASE_SITE_XML, JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME, + SSL_CLIENT_XML, SSL_SERVER_XML, }, - discovery::build_discovery_configmap, + discovery::{build_discovery_configmap, build_endpoint_configmap}, kerberos::{ self, add_kerberos_pod_config, kerberos_config_properties, kerberos_ssl_client_settings, kerberos_ssl_server_settings, @@ -316,6 +317,14 @@ pub enum Error { #[snafu(display("failed to construct JVM arguments"))] ConstructJvmArgument { source: crate::config::jvm::Error }, + + #[snafu(display("failed to build Labels"))] + LabelBuild { + source: stackable_operator::kvp::LabelError, + }, + + #[snafu(display("cannot collect discovery configuration"))] + CollectDiscoveryConfig { source: crate::crd::Error }, } type Result = std::result::Result; @@ -383,26 +392,6 @@ pub async fn reconcile_hbase( ) .context(CreateClusterResourcesSnafu)?; - let region_server_role_service = - build_region_server_role_service(hbase, &resolved_product_image)?; - cluster_resources - .add(client, region_server_role_service) - .await - .context(ApplyRoleServiceSnafu)?; - - // discovery config map - let discovery_cm = build_discovery_configmap( - hbase, - &client.kubernetes_cluster_info, - &zookeeper_connection_information, - &resolved_product_image, - ) - .context(BuildDiscoveryConfigMapSnafu)?; - cluster_resources - .add(client, discovery_cm) - .await - .context(ApplyDiscoveryConfigMapSnafu)?; - let (rbac_sa, rbac_rolebinding) = build_rbac_resources( hbase, APP_NAME, @@ -493,6 +482,40 @@ pub async fn reconcile_hbase( } } + let mut listener_refs = Vec::::new(); + + for role in HbaseRole::iter() { + listener_refs.extend( + hbase + .listener_refs(client, &role, &resolved_product_image.product_version) + .await + .context(CollectDiscoveryConfigSnafu)?, + ); + } + + tracing::info!("Listener references: {:#?}", listener_refs); + + let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, &listener_refs) + .context(BuildDiscoveryConfigMapSnafu)?; + cluster_resources + .add(client, endpoint_cm) + .await + .context(ApplyDiscoveryConfigMapSnafu)?; + + // Discovery CM will fail to build until the rest of the cluster has been deployed, so do it last + // so that failure won't inhibit the rest of the cluster from booting up. 
+ let discovery_cm = build_discovery_configmap( + hbase, + &client.kubernetes_cluster_info, + &zookeeper_connection_information, + &resolved_product_image, + ) + .context(BuildDiscoveryConfigMapSnafu)?; + cluster_resources + .add(client, discovery_cm) + .await + .context(ApplyDiscoveryConfigMapSnafu)?; + let cluster_operation_cond_builder = ClusterOperationsConditionBuilder::new(&hbase.spec.cluster_operation); @@ -512,59 +535,6 @@ pub async fn reconcile_hbase( Ok(Action::await_change()) } -/// The server-role service is the primary endpoint that should be used by clients that do not perform internal load balancing, -/// including targets outside of the cluster. -pub fn build_region_server_role_service( - hbase: &v1alpha1::HbaseCluster, - resolved_product_image: &ResolvedProductImage, -) -> Result { - let role = HbaseRole::RegionServer; - let role_name = role.to_string(); - let role_svc_name = hbase - .server_role_service_name() - .context(GlobalServiceNameNotFoundSnafu)?; - let ports = hbase - .ports(&role, &resolved_product_image.product_version) - .into_iter() - .map(|(name, value)| ServicePort { - name: Some(name), - port: i32::from(value), - protocol: Some("TCP".to_string()), - ..ServicePort::default() - }) - .collect(); - - let metadata = ObjectMetaBuilder::new() - .name_and_namespace(hbase) - .name(&role_svc_name) - .ownerreference_from_resource(hbase, None, Some(true)) - .context(ObjectMissingMetadataForOwnerRefSnafu)? - .with_recommended_labels(build_recommended_labels( - hbase, - &resolved_product_image.app_version_label, - &role_name, - "global", - )) - .context(ObjectMetaSnafu)? - .build(); - - let service_selector_labels = - Labels::role_selector(hbase, APP_NAME, &role_name).context(BuildLabelSnafu)?; - - let service_spec = ServiceSpec { - type_: Some(hbase.spec.cluster_config.listener_class.k8s_service_type()), - ports: Some(ports), - selector: Some(service_selector_labels.into()), - ..ServiceSpec::default() - }; - - Ok(Service { - metadata, - spec: Some(service_spec), - status: None, - }) -} - /// The rolegroup [`ConfigMap`] configures the rolegroup based on the configuration given by the administrator #[allow(clippy::too_many_arguments)] fn build_rolegroup_config_map( @@ -901,6 +871,8 @@ fn build_rolegroup_statefulset( .context(AddVolumeMountSnafu)? .add_volume_mount("log", STACKABLE_LOG_DIR) .context(AddVolumeMountSnafu)? + .add_volume_mount(LISTENER_VOLUME_NAME, LISTENER_VOLUME_DIR) + .context(AddVolumeMountSnafu)? .add_container_ports(ports) .resources(merged_config.resources().clone().into()) .startup_probe(startup_probe) @@ -909,13 +881,17 @@ fn build_rolegroup_statefulset( let mut pod_builder = PodBuilder::new(); + let recommended_object_labels = build_recommended_labels( + hbase, + hbase_version, + &rolegroup_ref.role, + &rolegroup_ref.role_group, + ); + let recommended_labels = + Labels::recommended(recommended_object_labels.clone()).context(LabelBuildSnafu)?; + let pb_metadata = ObjectMetaBuilder::new() - .with_recommended_labels(build_recommended_labels( - hbase, - hbase_version, - &rolegroup_ref.role, - &rolegroup_ref.role_group, - )) + .with_recommended_labels(recommended_object_labels) .context(ObjectMetaSnafu)? .build(); @@ -948,6 +924,12 @@ fn build_rolegroup_statefulset( )), ) .context(AddVolumeSnafu)? + .add_listener_volume_by_listener_class( + LISTENER_VOLUME_NAME, + &merged_config.listener_class(), + &recommended_labels, + ) + .context(AddVolumeSnafu)? 
.service_account_name(service_account.name_any()) .security_context( PodSecurityContextBuilder::new() From 776e273871017b825c51c9d3f14d8c9ff7297ec3 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 26 Mar 2025 16:37:19 +0100 Subject: [PATCH 02/22] add listener refs to config map --- rust/operator-binary/src/crd/mod.rs | 2 +- rust/operator-binary/src/discovery.rs | 66 +++++++++++++------- rust/operator-binary/src/hbase_controller.rs | 12 ++-- 3 files changed, 53 insertions(+), 27 deletions(-) diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index b67cf23f..ca3768fe 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -561,7 +561,7 @@ impl v1alpha1::HbaseCluster { } /// Name of the port used by the Web UI, which depends on HTTPS usage - fn ui_port_name(&self) -> String { + pub fn ui_port_name(&self) -> String { if self.has_https_enabled() { HBASE_UI_PORT_NAME_HTTPS } else { diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index c6a5977a..3d42aaba 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -88,28 +88,50 @@ pub fn build_discovery_configmap( pub fn build_endpoint_configmap( hbase: &v1alpha1::HbaseCluster, resolved_product_image: &ResolvedProductImage, - _role_podrefs: &[HbasePodRef], + role_podrefs: BTreeMap>, ) -> Result { let name = hbase.name_unchecked(); - ConfigMapBuilder::new() - .metadata( - ObjectMetaBuilder::new() - .name_and_namespace(hbase) - .name(format!("{name}-endpoint")) - .ownerreference_from_resource(hbase, None, Some(true)) - .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { - hbase: ObjectRef::from_obj(hbase), - })? - .with_recommended_labels(build_recommended_labels( - hbase, - &resolved_product_image.app_version_label, - &HbaseRole::RegionServer.to_string(), - "discovery", - )) - .context(ObjectMetaSnafu)? - .build(), - ) - .add_data("XXX", "YYY") - .build() - .context(BuildConfigMapSnafu) + let mut cm = ConfigMapBuilder::new(); + + let cmm = cm.metadata( + ObjectMetaBuilder::new() + .name_and_namespace(hbase) + .name(format!("{name}-ui-endpoints")) + .ownerreference_from_resource(hbase, None, Some(true)) + .with_context(|_| ObjectMissingMetadataForOwnerRefSnafu { + hbase: ObjectRef::from_obj(hbase), + })? + .with_recommended_labels(build_recommended_labels( + hbase, + &resolved_product_image.app_version_label, + "hbase-ui", + "discovery", + )) + .context(ObjectMetaSnafu)? + .build(), + ); + + for role_podref in role_podrefs { + let role_name = role_podref.0; + for podref in role_podref.1 { + if let HbasePodRef { + fqdn_override: Some(fqdn_override), + ports, + .. 
+ } = podref + { + if let Some(ui_port) = ports.get(&hbase.ui_port_name()) { + cmm.add_data( + format!("hbase.{role_name}.ui"), + format!("{fqdn_override}:{ui_port}"), + ); + // the UI endpoint for one replica per role + // is enough for the config map + break; + } + } + } + } + + cm.build().context(BuildConfigMapSnafu) } diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 9b4861af..5fc64ec7 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -482,10 +482,11 @@ pub async fn reconcile_hbase( } } - let mut listener_refs = Vec::::new(); + let mut listener_refs: BTreeMap> = BTreeMap::new(); for role in HbaseRole::iter() { - listener_refs.extend( + listener_refs.insert( + role.to_string(), hbase .listener_refs(client, &role, &resolved_product_image.product_version) .await @@ -493,9 +494,12 @@ pub async fn reconcile_hbase( ); } - tracing::info!("Listener references: {:#?}", listener_refs); + tracing::info!( + "Listener references written to the ConfigMap: {:?}", + listener_refs + ); - let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, &listener_refs) + let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, listener_refs) .context(BuildDiscoveryConfigMapSnafu)?; cluster_resources .add(client, endpoint_cm) From 967239dd2110a3507cb2ce8aad41c71d68e67de8 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 26 Mar 2025 17:14:19 +0100 Subject: [PATCH 03/22] changelog --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4651df61..69b8e828 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,12 @@ ## [Unreleased] +### Added + +- Added listener support for HBase ([#639]). 
+ +[#639]: https://github.com/stackabletech/hbase-operator/pull/639 + ## [25.3.0] - 2025-03-21 ### Added From 4eb4e358251712f2e6627c269927148117945058 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 26 Mar 2025 17:14:58 +0100 Subject: [PATCH 04/22] changed logging statement --- rust/operator-binary/src/hbase_controller.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 5fc64ec7..2e353f9d 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -495,8 +495,8 @@ pub async fn reconcile_hbase( } tracing::info!( - "Listener references written to the ConfigMap: {:?}", - listener_refs + ?listener_refs, + "Listener references written to the ConfigMap" ); let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, listener_refs) From 96593c7f1d6ced31942c811ef6e19de2a4a2045c Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Thu, 27 Mar 2025 12:13:39 +0100 Subject: [PATCH 05/22] integration test --- .../kuttl/external-access/00-assert.yaml.j2 | 10 +++ ...tor-aggregator-discovery-configmap.yaml.j2 | 9 +++ .../kuttl/external-access/00-patch-ns.yaml.j2 | 9 +++ .../kuttl/external-access/00-rbac.yaml.j2 | 29 +++++++++ .../kuttl/external-access/01-assert.yaml | 12 ++++ .../01-install-zookeeper.yaml.j2 | 29 +++++++++ .../kuttl/external-access/02-assert.yaml | 28 ++++++++ .../external-access/02-install-hdfs.yaml.j2 | 39 +++++++++++ .../kuttl/external-access/03-assert.yaml | 28 ++++++++ .../external-access/03-install-hbase.yaml.j2 | 47 ++++++++++++++ .../external-access/30-access-hbase.txt.j2 | 65 +++++++++++++++++++ .../external-access/30-access-hbase.yaml | 6 ++ .../kuttl/external-access/30-assert.yaml | 11 ++++ tests/test-definition.yaml | 6 ++ 14 files changed, 328 insertions(+) create mode 100644 tests/templates/kuttl/external-access/00-assert.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/00-rbac.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/01-assert.yaml create mode 100644 tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/02-assert.yaml create mode 100644 tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/03-assert.yaml create mode 100644 tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 create mode 100644 tests/templates/kuttl/external-access/30-access-hbase.txt.j2 create mode 100644 tests/templates/kuttl/external-access/30-access-hbase.yaml create mode 100644 tests/templates/kuttl/external-access/30-assert.yaml diff --git a/tests/templates/kuttl/external-access/00-assert.yaml.j2 b/tests/templates/kuttl/external-access/00-assert.yaml.j2 new file mode 100644 index 00000000..50b1d4c3 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-assert.yaml.j2 @@ -0,0 +1,10 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 
b/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 new file mode 100644 index 00000000..2d6a0df5 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-install-vector-aggregator-discovery-configmap.yaml.j2 @@ -0,0 +1,9 @@ +{% if lookup('env', 'VECTOR_AGGREGATOR') %} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: vector-aggregator-discovery +data: + ADDRESS: {{ lookup('env', 'VECTOR_AGGREGATOR') }} +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 b/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 new file mode 100644 index 00000000..67185acf --- /dev/null +++ b/tests/templates/kuttl/external-access/00-patch-ns.yaml.j2 @@ -0,0 +1,9 @@ +{% if test_scenario['values']['openshift'] == 'true' %} +# see https://github.com/stackabletech/issues/issues/566 +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl patch namespace $NAMESPACE -p '{"metadata":{"labels":{"pod-security.kubernetes.io/enforce":"privileged"}}}' + timeout: 120 +{% endif %} diff --git a/tests/templates/kuttl/external-access/00-rbac.yaml.j2 b/tests/templates/kuttl/external-access/00-rbac.yaml.j2 new file mode 100644 index 00000000..7ee61d23 --- /dev/null +++ b/tests/templates/kuttl/external-access/00-rbac.yaml.j2 @@ -0,0 +1,29 @@ +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: test-role +rules: +{% if test_scenario['values']['openshift'] == "true" %} + - apiGroups: ["security.openshift.io"] + resources: ["securitycontextconstraints"] + resourceNames: ["privileged"] + verbs: ["use"] +{% endif %} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-sa +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: test-rb +subjects: + - kind: ServiceAccount + name: test-sa +roleRef: + kind: Role + name: test-role + apiGroup: rbac.authorization.k8s.io diff --git a/tests/templates/kuttl/external-access/01-assert.yaml b/tests/templates/kuttl/external-access/01-assert.yaml new file mode 100644 index 00000000..e0766c49 --- /dev/null +++ b/tests/templates/kuttl/external-access/01-assert.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-zk-server-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 b/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 new file mode 100644 index 00000000..0a331d50 --- /dev/null +++ b/tests/templates/kuttl/external-access/01-install-zookeeper.yaml.j2 @@ -0,0 +1,29 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: test-zk +spec: + image: + productVersion: "{{ test_scenario['values']['zookeeper-latest'] }}" + pullPolicy: IfNotPresent +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + clusterConfig: + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + servers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: test-znode +spec: + clusterRef: + name: test-zk diff --git a/tests/templates/kuttl/external-access/02-assert.yaml b/tests/templates/kuttl/external-access/02-assert.yaml new file mode 100644 index 00000000..99b25f8e --- /dev/null +++ 
b/tests/templates/kuttl/external-access/02-assert.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-namenode-default +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-journalnode-default +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hdfs-datanode-default +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 b/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 new file mode 100644 index 00000000..f9194a60 --- /dev/null +++ b/tests/templates/kuttl/external-access/02-install-hdfs.yaml.j2 @@ -0,0 +1,39 @@ +--- +apiVersion: hdfs.stackable.tech/v1alpha1 +kind: HdfsCluster +metadata: + name: test-hdfs +spec: + image: + productVersion: "{{ test_scenario['values']['hdfs-latest'] }}" + pullPolicy: IfNotPresent + clusterConfig: + dfsReplication: 1 + zookeeperConfigMapName: test-znode +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + nameNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 2 + dataNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 + journalNodes: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + roleGroups: + default: + replicas: 1 diff --git a/tests/templates/kuttl/external-access/03-assert.yaml b/tests/templates/kuttl/external-access/03-assert.yaml new file mode 100644 index 00000000..3c34faad --- /dev/null +++ b/tests/templates/kuttl/external-access/03-assert.yaml @@ -0,0 +1,28 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-master-default +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-regionserver-default +status: + readyReplicas: 2 + replicas: 2 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-restserver-default +status: + readyReplicas: 2 + replicas: 2 diff --git a/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 new file mode 100644 index 00000000..6caef467 --- /dev/null +++ b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 @@ -0,0 +1,47 @@ +--- +apiVersion: hbase.stackable.tech/v1alpha1 +kind: HbaseCluster +metadata: + name: test-hbase +spec: + image: +{% if test_scenario['values']['hbase'].find(",") > 0 %} + custom: "{{ test_scenario['values']['hbase'].split(',')[1] }}" + productVersion: "{{ test_scenario['values']['hbase'].split(',')[0] }}" +{% else %} + productVersion: "{{ test_scenario['values']['hbase'] }}" +{% endif %} + pullPolicy: IfNotPresent + clusterConfig: + hdfsConfigMapName: test-hdfs-namenode-default + zookeeperConfigMapName: test-znode +{% if lookup('env', 'VECTOR_AGGREGATOR') %} + vectorAggregatorConfigMapName: vector-aggregator-discovery +{% endif %} + masters: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} 
+ listenerClass: external-stable + roleGroups: + default: + replicas: 2 + regionServers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-unstable + roleGroups: + default: + replicas: 2 + restServers: + config: + gracefulShutdownTimeout: 1m + logging: + enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: cluster-internal + roleGroups: + default: + replicas: 2 diff --git a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 new file mode 100644 index 00000000..42f1fa9b --- /dev/null +++ b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 @@ -0,0 +1,65 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: access-hbase +spec: + template: + spec: + serviceAccountName: test-sa + containers: + - name: access-hbase +{% if test_scenario['values']['hbase'].find(",") > 0 %} + image: "{{ test_scenario['values']['hbase'].split(',')[1] }}" +{% else %} + image: oci.stackable.tech/sdp/hbase:{{ test_scenario['values']['hbase'] }}-stackable0.0.0-dev +{% endif %} + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - /tmp/script/script.sh + env: + - name: MASTER_UI + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.master.ui + - name: REGIONSERVER_UI + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.regionserver.ui + - name: RESTSERVER_UI + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.restserver.ui + volumeMounts: + - name: script + mountPath: /tmp/script + volumes: + - name: script + configMap: + name: access-hbase-script + securityContext: + fsGroup: 1000 + runAsGroup: 1000 + runAsUser: 1000 + restartPolicy: OnFailure +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: access-hbase-script +data: + script.sh: | + set -euxo pipefail + + echo "Attempting to reach master at $MASTER_UI..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $MASTER_UI | grep 200 + echo "Attempting to reach region-server at $REGIONSERVER_UI..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $REGIONSERVER_UI | grep 200 + echo "Attempting to reach rest-server at $RESTSERVER_UI..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $RESTSERVER_UI | grep 200 + + echo "All tests successful!" 
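For reference, the Job above resolves its UI endpoints from the `test-hbase-ui-endpoints` ConfigMap produced by `build_endpoint_configmap`, where each value is written as `<listener-address>:<ui-port>`. A minimal sketch of that ConfigMap as the test consumes it (the addresses and ports below are placeholders; the real values come from each pod's Listener status):

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-hbase-ui-endpoints
data:
  # one entry per role; values are <listener-address>:<ui-port>
  hbase.master.ui: "172.18.0.3:16010"        # placeholder address and port
  hbase.regionserver.ui: "172.18.0.4:16030"  # placeholder address and port
  hbase.restserver.ui: "172.18.0.5:8085"     # placeholder address and port
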
diff --git a/tests/templates/kuttl/external-access/30-access-hbase.yaml b/tests/templates/kuttl/external-access/30-access-hbase.yaml new file mode 100644 index 00000000..353e30ee --- /dev/null +++ b/tests/templates/kuttl/external-access/30-access-hbase.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + # We need to replace $NAMESPACE (by KUTTL) + - script: envsubst '$NAMESPACE' < 30-access-hbase.txt | kubectl apply -n $NAMESPACE -f - diff --git a/tests/templates/kuttl/external-access/30-assert.yaml b/tests/templates/kuttl/external-access/30-assert.yaml new file mode 100644 index 00000000..763b8a40 --- /dev/null +++ b/tests/templates/kuttl/external-access/30-assert.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +timeout: 600 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: access-hbase +status: + succeeded: 1 diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index ba37dcd1..b7afd697 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -142,6 +142,12 @@ tests: - hdfs-latest - zookeeper-latest - openshift + - name: external-access + dimensions: + - hbase + - hdfs-latest + - zookeeper-latest + - openshift suites: - name: nightly patch: From ae1654a911dc3a8e9175209f1a70ab6b23713bc3 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Thu, 27 Mar 2025 13:15:53 +0100 Subject: [PATCH 06/22] changed listener class designation --- deploy/helm/hbase-operator/crds/crds.yaml | 12 ++++++------ rust/operator-binary/src/crd/mod.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/deploy/helm/hbase-operator/crds/crds.yaml b/deploy/helm/hbase-operator/crds/crds.yaml index 5669bed5..591eaca9 100644 --- a/deploy/helm/hbase-operator/crds/crds.yaml +++ b/deploy/helm/hbase-operator/crds/crds.yaml @@ -197,7 +197,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: @@ -451,7 +451,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: @@ -686,7 +686,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. 
+ description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: @@ -968,7 +968,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: @@ -1231,7 +1231,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: @@ -1485,7 +1485,7 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. nullable: true type: string logging: diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index ca3768fe..4386c08d 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -1082,7 +1082,7 @@ pub struct HbaseConfig { pub requested_secret_lifetime: Option, /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. - /// All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + /// All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. pub listener_class: String, } @@ -1239,7 +1239,7 @@ pub struct RegionServerConfig { pub region_mover: RegionMover, /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. - /// All roles should have a stable ListenerClass, such as `cluster-internal` or `external-stable`. + /// All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. 
pub listener_class: String, } From 53ea8bca2ae20094be024def5cb65c631e5fa959 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Thu, 27 Mar 2025 15:18:00 +0100 Subject: [PATCH 07/22] update listener class docs --- .../pages/usage-guide/listenerclass.adoc | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc index 1f6d48b8..3bbcb3b0 100644 --- a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc +++ b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc @@ -1,18 +1,20 @@ = Service exposition with ListenerClasses +:description: Configure HBase service exposure using ListenerClasses to control internal and external access for all roles. -Apache HBase offers an API. -The operator deploys a service called `` (where `` is the name of the HbaseCluster) through which HBase can be reached. - -This service can have either the `cluster-internal` or `external-unstable` type. -`external-stable` is not supported for HBase at the moment. -Read more about the types in the xref:concepts:service-exposition.adoc[service exposition] documentation at platform level. - -This is how the listener class is configured: +The operator deploys a xref:listener-operator:listener.adoc[Listener] for each Master, Regionserver and Restserver pod. +They all default to only being accessible from within the Kubernetes cluster, but this can be changed by setting `.spec.{masters,regionServers,restServers}.config.listenerClass`: [source,yaml] ---- spec: - clusterConfig: - listenerClass: cluster-internal # <1> + masters: + config: + listenerClass: external-unstable # <1> + regionServers: + config: + listenerClass: external-unstable + restServers: + config: + listenerClass: external-unstable ---- -<1> The default `cluster-internal` setting. 
+<1> Specify one of `external-stable`, `external-unstable`, `cluster-internal` (the default setting is `cluster-internal`) From f8cc1c7a3c8358f8b64be8b1c0f416e8aaa8898d Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Fri, 28 Mar 2025 16:56:10 +0100 Subject: [PATCH 08/22] working tests --- rust/operator-binary/src/discovery.rs | 9 ++-- rust/operator-binary/src/hbase_controller.rs | 47 +++++++++++-------- .../external-access/30-access-hbase.txt.j2 | 46 +++++++++++++----- .../kuttl/kerberos/30-install-hbase.yaml.j2 | 4 +- .../kuttl/opa/30-install-hbase.yaml.j2 | 1 - .../kuttl/smoke/30-install-hbase.yaml.j2 | 4 +- 6 files changed, 73 insertions(+), 38 deletions(-) diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 3d42aaba..14de3ac7 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -113,6 +113,9 @@ pub fn build_endpoint_configmap( for role_podref in role_podrefs { let role_name = role_podref.0; + // podrefs are written into the collection by replica index + // and can be retrieved in the same order + let mut i = 0; for podref in role_podref.1 { if let HbasePodRef { fqdn_override: Some(fqdn_override), @@ -122,12 +125,10 @@ pub fn build_endpoint_configmap( { if let Some(ui_port) = ports.get(&hbase.ui_port_name()) { cmm.add_data( - format!("hbase.{role_name}.ui"), + format!("hbase.{role_name}-{i}.ui"), format!("{fqdn_override}:{ui_port}"), ); - // the UI endpoint for one replica per role - // is enough for the config map - break; + i += 1; } } } diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 2e353f9d..c6b2aafb 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -484,27 +484,36 @@ pub async fn reconcile_hbase( let mut listener_refs: BTreeMap> = BTreeMap::new(); - for role in HbaseRole::iter() { - listener_refs.insert( - role.to_string(), - hbase - .listener_refs(client, &role, &resolved_product_image.product_version) - .await - .context(CollectDiscoveryConfigSnafu)?, + // TODO if the listeners are persisted then they will still be present in the + // cluster and can all be found. Or move this up into the role loop above and + // only process those rolegroups where the listener class requires persistence. + if hbase.spec.cluster_operation.reconciliation_paused || hbase.spec.cluster_operation.stopped { + tracing::info!( + "Cluster is in a transitional state so do not attempt to collect listener + information that will only be active once cluster has exited transitional state." 
+ ); + } else { + for role in HbaseRole::iter() { + listener_refs.insert( + role.to_string(), + hbase + .listener_refs(client, &role, &resolved_product_image.product_version) + .await + .context(CollectDiscoveryConfigSnafu)?, + ); + } + tracing::info!( + "Listener references written to the ConfigMap {:#?}", + listener_refs ); - } - - tracing::info!( - ?listener_refs, - "Listener references written to the ConfigMap" - ); - let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, listener_refs) - .context(BuildDiscoveryConfigMapSnafu)?; - cluster_resources - .add(client, endpoint_cm) - .await - .context(ApplyDiscoveryConfigMapSnafu)?; + let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, listener_refs) + .context(BuildDiscoveryConfigMapSnafu)?; + cluster_resources + .add(client, endpoint_cm) + .await + .context(ApplyDiscoveryConfigMapSnafu)?; + } // Discovery CM will fail to build until the rest of the cluster has been deployed, so do it last // so that failure won't inhibit the rest of the cluster from booting up. diff --git a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 index 42f1fa9b..79769e66 100644 --- a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 +++ b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 @@ -19,21 +19,36 @@ spec: - /bin/bash - /tmp/script/script.sh env: - - name: MASTER_UI + - name: MASTER_UI_0 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.master.ui - - name: REGIONSERVER_UI + key: hbase.master-0.ui + - name: MASTER_UI_1 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.regionserver.ui - - name: RESTSERVER_UI + key: hbase.master-1.ui + - name: REGIONSERVER_UI_0 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.restserver.ui + key: hbase.regionserver-0.ui + - name: REGIONSERVER_UI_1 + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.regionserver-1.ui + - name: RESTSERVER_UI_0 + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.restserver-0.ui + - name: RESTSERVER_UI_1 + valueFrom: + configMapKeyRef: + name: test-hbase-ui-endpoints + key: hbase.restserver-1.ui volumeMounts: - name: script mountPath: /tmp/script @@ -55,11 +70,18 @@ data: script.sh: | set -euxo pipefail - echo "Attempting to reach master at $MASTER_UI..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $MASTER_UI | grep 200 - echo "Attempting to reach region-server at $REGIONSERVER_UI..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $REGIONSERVER_UI | grep 200 - echo "Attempting to reach rest-server at $RESTSERVER_UI..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" $RESTSERVER_UI | grep 200 + echo "Attempting to reach master at $MASTER_UI_0..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_0}" | grep 200 + echo "Attempting to reach region-server at $REGIONSERVER_UI_0..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${REGIONSERVER_UI_0}" | grep 200 + echo "Attempting to reach rest-server at $RESTSERVER_UI_0..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${RESTSERVER_UI_0}" | grep 200 + + echo "Attempting to reach master at $MASTER_UI_1..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_1}" | grep 200 + echo "Attempting to reach region-server at $REGIONSERVER_UI_1..." 
+ curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${REGIONSERVER_UI_1}" | grep 200 + echo "Attempting to reach rest-server at $RESTSERVER_UI_1..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${RESTSERVER_UI_1}" | grep 200 echo "All tests successful!" diff --git a/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 b/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 index 28766a48..a21046d1 100644 --- a/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/kerberos/30-install-hbase.yaml.j2 @@ -28,7 +28,6 @@ commands: clusterConfig: hdfsConfigMapName: hdfs zookeeperConfigMapName: hbase-znode - listenerClass: {{ test_scenario['values']['listener-class'] }} authentication: tlsSecretClass: tls kerberos: @@ -41,6 +40,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} resources: memory: limit: 1536Mi @@ -52,6 +52,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 2 @@ -60,6 +61,7 @@ commands: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 b/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 index 92fda28c..b2d01a8f 100644 --- a/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/opa/30-install-hbase.yaml.j2 @@ -51,7 +51,6 @@ commands: clusterConfig: hdfsConfigMapName: hdfs zookeeperConfigMapName: hbase-znode - listenerClass: 'cluster-internal' authentication: tlsSecretClass: tls kerberos: diff --git a/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 b/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 index 53e9a98e..7535a3e8 100644 --- a/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/smoke/30-install-hbase.yaml.j2 @@ -15,7 +15,6 @@ spec: clusterConfig: hdfsConfigMapName: test-hdfs zookeeperConfigMapName: test-znode - listenerClass: {{ test_scenario['values']['listener-class'] }} {% if lookup('env', 'VECTOR_AGGREGATOR') %} vectorAggregatorConfigMapName: vector-aggregator-discovery {% endif %} @@ -23,6 +22,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: configOverrides: @@ -34,6 +34,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: configOverrides: @@ -45,6 +46,7 @@ spec: config: logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: {{ test_scenario['values']['listener-class'] }} resources: memory: limit: 1Gi From 7a2756b4796a399fc6c8a0012e7932d898d2796a Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Mon, 31 Mar 2025 13:55:11 +0200 Subject: [PATCH 09/22] wip: experimental command --- rust/operator-binary/src/hbase_controller.rs | 32 +++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index c6b2aafb..4bde8b94 
100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -8,6 +8,7 @@ use std::{ }; use const_format::concatcp; +use indoc::formatdoc; use product_config::{ types::PropertyNameKind, writer::{to_hadoop_xml, to_java_properties_string, PropertiesWriterError}, @@ -861,14 +862,24 @@ fn build_rolegroup_statefulset( }, ]); + let role_name = hbase_role.cli_role_name(); let mut hbase_container = ContainerBuilder::new("hbase").expect("ContainerBuilder not created"); hbase_container .image_from_product_image(resolved_product_image) - .command(vec!["/stackable/hbase/bin/hbase-entrypoint.sh".to_string()]) - .args(vec![ - hbase_role.cli_role_name(), - hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, - hbase.service_port(hbase_role).to_string(), + .command(command()) + .args(vec![formatdoc!{" + {export_address} + {export_port} + {export_hostname} + {entrypoint} {role} {domain} {port}", + export_address = format!("export HBASE_CONF_hbase_{role_name}_hostname=$(cat /stackable/listener/default-address/address);").to_string(), + export_port = format!("export HBASE_CONF_hbase_{role_name}_port=$(cat /stackable/listener/default-address/ports/ui-http);").to_string(), + export_hostname = "export HOSTNAME=$(cat /stackable/listener/default-address/address);".to_string(), + entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), + role = role_name, + domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, + port = hbase.service_port(hbase_role).to_string(), + } ]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to. @@ -1059,6 +1070,17 @@ fn build_rolegroup_statefulset( }) } +/// Returns the container command. +fn command() -> Vec { + vec![ + "/bin/bash".to_string(), + "-x".to_string(), + "-euo".to_string(), + "pipefail".to_string(), + "-c".to_string(), + ] +} + fn write_hbase_env_sh<'a, T>(properties: T) -> String where T: Iterator, From 2affa65d09cb84fd6b673f89c6057767c76a4df0 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Mon, 31 Mar 2025 13:58:31 +0200 Subject: [PATCH 10/22] cleanup start command --- rust/operator-binary/src/hbase_controller.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 4bde8b94..8fc11e77 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -867,20 +867,13 @@ fn build_rolegroup_statefulset( hbase_container .image_from_product_image(resolved_product_image) .command(command()) - .args(vec![formatdoc!{" - {export_address} - {export_port} - {export_hostname} + .args(vec![formatdoc! {" {entrypoint} {role} {domain} {port}", - export_address = format!("export HBASE_CONF_hbase_{role_name}_hostname=$(cat /stackable/listener/default-address/address);").to_string(), - export_port = format!("export HBASE_CONF_hbase_{role_name}_port=$(cat /stackable/listener/default-address/ports/ui-http);").to_string(), - export_hostname = "export HOSTNAME=$(cat /stackable/listener/default-address/address);".to_string(), entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), role = role_name, domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, port = hbase.service_port(hbase_role).to_string(), - } - ]) + }]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to. 
.add_env_var( From 652999e8db97ac13bdbf6e03789592b27a422dfc Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Mon, 31 Mar 2025 17:31:12 +0200 Subject: [PATCH 11/22] added hostname/port to hbase-env.sh --- rust/operator-binary/src/hbase_controller.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 8fc11e77..081127b5 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -1136,17 +1136,22 @@ fn build_hbase_env_sh( let role_specific_non_heap_jvm_args = construct_role_specific_non_heap_jvm_args(hbase, hbase_role, role_group, product_version) .context(ConstructJvmArgumentSnafu)?; + let port_name = &hbase.ui_port_name(); match hbase_role { HbaseRole::Master => { result.insert( "HBASE_MASTER_OPTS".to_string(), - role_specific_non_heap_jvm_args, + format!( + "{role_specific_non_heap_jvm_args} -Dhbase.master.hostname=$(cat {LISTENER_VOLUME_DIR}/default-address/address) -Dhbase.master.port=$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})" + ) ); } HbaseRole::RegionServer => { result.insert( "HBASE_REGIONSERVER_OPTS".to_string(), - role_specific_non_heap_jvm_args, + format!( + "{role_specific_non_heap_jvm_args} -Dhbase.regionserver.hostname=$(cat {LISTENER_VOLUME_DIR}/default-address/address) -Dhbase.regionserver.port=$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})" + ) ); } HbaseRole::RestServer => { From c8a26447c44fa8641b3e8b72cf7382e86259dd0d Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 1 Apr 2025 12:33:35 +0200 Subject: [PATCH 12/22] wip: will move startup logic to hbase-entrypoint.sh --- rust/operator-binary/src/hbase_controller.rs | 59 ++++++++++++++++++-- 1 file changed, 53 insertions(+), 6 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 081127b5..4c9d0253 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -584,6 +584,39 @@ fn build_rolegroup_config_map( hbase_site_config .extend(hbase_opa_config.map_or(vec![], |config| config.hbase_site_config())); + match hbase_role { + HbaseRole::Master => { + hbase_site_config.insert( + "hbase.master.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.master.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ) + } + HbaseRole::RegionServer => { + hbase_site_config.insert( + "hbase.regionserver.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.regionserver.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ) + } + HbaseRole::RestServer => { + hbase_site_config.insert( + "hbase.rest.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.rest.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ) + } + }; + // configOverride come last hbase_site_config.extend(config.clone()); hbase_site_xml = to_hadoop_xml( @@ -868,7 +901,15 @@ fn build_rolegroup_statefulset( .image_from_product_image(resolved_product_image) .command(command()) .args(vec![formatdoc! 
{" + {wait} + {list} + {update_host} + {update_port} {entrypoint} {role} {domain} {port}", + wait = "until [ -f /stackable/conf/hbase-site.xml ]; do sleep 1; done;".to_string(), + list = "ls -al /stackable/conf/hbase-site.xml".to_string(), + update_host = "sed -i 's|\\${HBASE_SERVICE_HOST}|${HBASE_SERVICE_HOST}|g' /stackable/conf/hbase-site.xml".to_string(), + update_port = "sed -i 's|\\${HBASE_SERVICE_PORT}|${HBASE_SERVICE_PORT}|g' /stackable/conf/hbase-site.xml".to_string(), entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), role = role_name, domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, @@ -1137,21 +1178,27 @@ fn build_hbase_env_sh( construct_role_specific_non_heap_jvm_args(hbase, hbase_role, role_group, product_version) .context(ConstructJvmArgumentSnafu)?; let port_name = &hbase.ui_port_name(); + + result.insert( + "HBASE_SERVICE_HOST".to_owned(), + format!("$(cat {LISTENER_VOLUME_DIR}/default-address/address)"), + ); + result.insert( + "HBASE_SERVICE_PORT".to_owned(), + format!("$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})"), + ); + match hbase_role { HbaseRole::Master => { result.insert( "HBASE_MASTER_OPTS".to_string(), - format!( - "{role_specific_non_heap_jvm_args} -Dhbase.master.hostname=$(cat {LISTENER_VOLUME_DIR}/default-address/address) -Dhbase.master.port=$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})" - ) + role_specific_non_heap_jvm_args, ); } HbaseRole::RegionServer => { result.insert( "HBASE_REGIONSERVER_OPTS".to_string(), - format!( - "{role_specific_non_heap_jvm_args} -Dhbase.regionserver.hostname=$(cat {LISTENER_VOLUME_DIR}/default-address/address) -Dhbase.regionserver.port=$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})" - ) + role_specific_non_heap_jvm_args, ); } HbaseRole::RestServer => { From dd793c063b9f9f35568e5f6825bce874f0b30acd Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 1 Apr 2025 17:39:28 +0200 Subject: [PATCH 13/22] cleaned up/reverted previous wip --- rust/operator-binary/src/hbase_controller.rs | 21 ++------------------ 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 4c9d0253..212e7e49 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -901,19 +901,12 @@ fn build_rolegroup_statefulset( .image_from_product_image(resolved_product_image) .command(command()) .args(vec![formatdoc! {" - {wait} - {list} - {update_host} - {update_port} - {entrypoint} {role} {domain} {port}", - wait = "until [ -f /stackable/conf/hbase-site.xml ]; do sleep 1; done;".to_string(), - list = "ls -al /stackable/conf/hbase-site.xml".to_string(), - update_host = "sed -i 's|\\${HBASE_SERVICE_HOST}|${HBASE_SERVICE_HOST}|g' /stackable/conf/hbase-site.xml".to_string(), - update_port = "sed -i 's|\\${HBASE_SERVICE_PORT}|${HBASE_SERVICE_PORT}|g' /stackable/conf/hbase-site.xml".to_string(), + {entrypoint} {role} {domain} {port} {port_name}", entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), role = role_name, domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, port = hbase.service_port(hbase_role).to_string(), + port_name = hbase.ui_port_name(), }]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to. 
@@ -1177,16 +1170,6 @@ fn build_hbase_env_sh( let role_specific_non_heap_jvm_args = construct_role_specific_non_heap_jvm_args(hbase, hbase_role, role_group, product_version) .context(ConstructJvmArgumentSnafu)?; - let port_name = &hbase.ui_port_name(); - - result.insert( - "HBASE_SERVICE_HOST".to_owned(), - format!("$(cat {LISTENER_VOLUME_DIR}/default-address/address)"), - ); - result.insert( - "HBASE_SERVICE_PORT".to_owned(), - format!("$(cat {LISTENER_VOLUME_DIR}/default-address/ports/{port_name})"), - ); match hbase_role { HbaseRole::Master => { From bd1db17df56b7fd6f97d61eee081aee6c27b1e36 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 1 Apr 2025 17:41:23 +0200 Subject: [PATCH 14/22] cleaned up/reverted previous wip II --- rust/operator-binary/src/hbase_controller.rs | 33 -------------------- 1 file changed, 33 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 212e7e49..a4347c60 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -584,39 +584,6 @@ fn build_rolegroup_config_map( hbase_site_config .extend(hbase_opa_config.map_or(vec![], |config| config.hbase_site_config())); - match hbase_role { - HbaseRole::Master => { - hbase_site_config.insert( - "hbase.master.hostname".to_string(), - "${HBASE_SERVICE_HOST}".to_string(), - ); - hbase_site_config.insert( - "hbase.master.port".to_string(), - "${HBASE_SERVICE_PORT}".to_string(), - ) - } - HbaseRole::RegionServer => { - hbase_site_config.insert( - "hbase.regionserver.hostname".to_string(), - "${HBASE_SERVICE_HOST}".to_string(), - ); - hbase_site_config.insert( - "hbase.regionserver.port".to_string(), - "${HBASE_SERVICE_PORT}".to_string(), - ) - } - HbaseRole::RestServer => { - hbase_site_config.insert( - "hbase.rest.hostname".to_string(), - "${HBASE_SERVICE_HOST}".to_string(), - ); - hbase_site_config.insert( - "hbase.rest.port".to_string(), - "${HBASE_SERVICE_PORT}".to_string(), - ) - } - }; - // configOverride come last hbase_site_config.extend(config.clone()); hbase_site_xml = to_hadoop_xml( From 80adf71c25ba13bda45594fff9b019fe81ada50a Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 1 Apr 2025 18:03:29 +0200 Subject: [PATCH 15/22] correct callout --- rust/operator-binary/src/hbase_controller.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index a4347c60..c9934e21 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -868,12 +868,11 @@ fn build_rolegroup_statefulset( .image_from_product_image(resolved_product_image) .command(command()) .args(vec![formatdoc! {" - {entrypoint} {role} {domain} {port} {port_name}", + {entrypoint} {role} {domain} {port}", entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), role = role_name, domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, port = hbase.service_port(hbase_role).to_string(), - port_name = hbase.ui_port_name(), }]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to. 
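Patches 09 through 15 converge on a simple contract: the operator only passes the role name, the service domain and the service port to hbase-entrypoint.sh, and reading the externally reachable address out of the mounted listener volume is left to the entrypoint itself (see the subject of patch 12). The entrypoint script ships with the HBase container image rather than with this repository, so the following bash sketch only illustrates the assumed listener handling, reusing the HBASE_CONF_hbase_<role>_* export pattern and the /stackable/listener/default-address layout from patch 09; the real script may look different.

    #!/usr/bin/env bash
    # Sketch only: assumed listener handling inside hbase-entrypoint.sh (not part of this repo).
    set -euo pipefail

    role="$1"    # master, regionserver or restserver, as produced by cli_role_name()
    domain="$2"  # rolegroup service domain passed in by the operator
    port="$3"    # service port passed in by the operator

    listener_dir="/stackable/listener/default-address"
    if [ -f "${listener_dir}/address" ]; then
      # Advertise the address and UI port published by the listener-operator
      # instead of the pod-internal hostname.
      export "HBASE_CONF_hbase_${role}_hostname=$(cat "${listener_dir}/address")"
      export "HBASE_CONF_hbase_${role}_port=$(cat "${listener_dir}/ports/ui-http")"
    fi

    # ...the real entrypoint then renders hbase-site.xml/hbase-env.sh and starts
    # the requested role using "${domain}" and "${port}".
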
From aee4fc26c85894a360bb8be9542a7a41055ac8a2 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 2 Apr 2025 16:27:28 +0200 Subject: [PATCH 16/22] use pvcs for externally-reachable endpoints --- deploy/helm/hbase-operator/crds/crds.yaml | 36 ++++- rust/operator-binary/src/crd/mod.rs | 132 +++++++++++------- rust/operator-binary/src/hbase_controller.rs | 98 ++++++++----- .../03-install-hbase.yaml.j2 | 3 + .../cluster-operation/10-pause-hbase.yaml.j2 | 3 + .../cluster-operation/20-stop-hbase.yaml.j2 | 3 + .../30-restart-hbase.yaml.j2 | 3 + .../external-access/03-install-hbase.yaml.j2 | 6 +- .../external-access/30-access-hbase.txt.j2 | 6 +- tests/test-definition.yaml | 1 + 10 files changed, 199 insertions(+), 92 deletions(-) diff --git a/deploy/helm/hbase-operator/crds/crds.yaml b/deploy/helm/hbase-operator/crds/crds.yaml index 591eaca9..e8182efb 100644 --- a/deploy/helm/hbase-operator/crds/crds.yaml +++ b/deploy/helm/hbase-operator/crds/crds.yaml @@ -197,7 +197,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: @@ -451,7 +455,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: @@ -686,7 +694,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: @@ -968,7 +980,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. 
+ enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: @@ -1231,7 +1247,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: @@ -1485,7 +1505,11 @@ spec: nullable: true type: string listenerClass: - description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. + description: This field controls which [ListenerClass](https://docs.stackable.tech/home/nightly/listener-operator/listenerclass.html) is used to expose this rolegroup. + enum: + - cluster-internal + - external-unstable + - external-stable nullable: true type: string logging: diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 4386c08d..5e92e2d2 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -88,7 +88,8 @@ pub const HBASE_REST_UI_PORT: u16 = 8085; // Newer versions use the same port as the UI because Hbase provides it's own metrics API pub const METRICS_PORT: u16 = 9100; -pub const DEFAULT_LISTENER_CLASS: &str = "cluster-internal"; +pub const DEFAULT_LISTENER_CLASS: SupportedListenerClasses = + SupportedListenerClasses::ClusterInternal; pub const LISTENER_VOLUME_NAME: &str = "listener"; pub const LISTENER_VOLUME_DIR: &str = "/stackable/listener"; @@ -666,46 +667,54 @@ impl v1alpha1::HbaseCluster { &self, client: &stackable_operator::client::Client, role: &HbaseRole, + merged_config: &AnyServiceConfig, hbase_version: &str, ) -> Result, Error> { - let pod_refs = self.pod_refs(role, hbase_version)?; - try_join_all(pod_refs.into_iter().map(|pod_ref| async { - let listener_name = format!("{}-{LISTENER_VOLUME_NAME}", pod_ref.pod_name); - let listener_ref = - || ObjectRef::::new(&listener_name).within(&pod_ref.namespace); - let pod_obj_ref = - || ObjectRef::::new(&pod_ref.pod_name).within(&pod_ref.namespace); - let listener = client - .get::(&listener_name, &pod_ref.namespace) - .await - .context(GetPodListenerSnafu { - listener: listener_ref(), - pod: pod_obj_ref(), - })?; - let listener_address = listener - .status - .and_then(|s| s.ingress_addresses?.into_iter().next()) - .context(PodListenerHasNoAddressSnafu { - listener: listener_ref(), - pod: pod_obj_ref(), - })?; - Ok(HbasePodRef { - fqdn_override: Some(listener_address.address), - ports: listener_address - .ports - .into_iter() - .map(|(port_name, port)| { - let port = u16::try_from(port).context(PortOutOfBoundsSnafu { - port_name: &port_name, - port, - })?; - Ok((port_name, port)) - }) - .collect::>()?, - ..pod_ref - }) - })) - .await + // only externally-reachable listeners are relevant + if merged_config.listener_class().discoverable() { + let pod_refs = self.pod_refs(role, hbase_version)?; + try_join_all(pod_refs.into_iter().map(|pod_ref| async { + // N.B. 
use the naming convention for persistent listener volumes as we + // have specified above that we only want externally-reachable endpoints. + let listener_name = format!("{LISTENER_VOLUME_NAME}-{}", pod_ref.pod_name); + let listener_ref = + || ObjectRef::::new(&listener_name).within(&pod_ref.namespace); + let pod_obj_ref = + || ObjectRef::::new(&pod_ref.pod_name).within(&pod_ref.namespace); + let listener = client + .get::(&listener_name, &pod_ref.namespace) + .await + .context(GetPodListenerSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + let listener_address = listener + .status + .and_then(|s| s.ingress_addresses?.into_iter().next()) + .context(PodListenerHasNoAddressSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + Ok(HbasePodRef { + fqdn_override: Some(listener_address.address), + ports: listener_address + .ports + .into_iter() + .map(|(port_name, port)| { + let port = u16::try_from(port).context(PortOutOfBoundsSnafu { + port_name: &port_name, + port, + })?; + Ok((port_name, port)) + }) + .collect::>()?, + ..pod_ref + }) + })) + .await + } else { + Ok(vec![]) + } } } @@ -878,7 +887,7 @@ impl HbaseRole { affinity: get_affinity(cluster_name, self, hdfs_discovery_cm_name), graceful_shutdown_timeout: Some(graceful_shutdown_timeout), requested_secret_lifetime: Some(requested_secret_lifetime), - listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), + listener_class: Some(DEFAULT_LISTENER_CLASS), } } @@ -979,7 +988,7 @@ impl AnyConfigFragment { cli_opts: None, }, requested_secret_lifetime: Some(HbaseRole::DEFAULT_REGION_SECRET_LIFETIME), - listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), + listener_class: Some(DEFAULT_LISTENER_CLASS), }) } HbaseRole::RestServer => AnyConfigFragment::RestServer(HbaseConfigFragment { @@ -991,7 +1000,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_REST_SERVER_GRACEFUL_SHUTDOWN_TIMEOUT, ), requested_secret_lifetime: Some(HbaseRole::DEFAULT_REST_SECRET_LIFETIME), - listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), + listener_class: Some(DEFAULT_LISTENER_CLASS), }), HbaseRole::Master => AnyConfigFragment::Master(HbaseConfigFragment { hbase_rootdir: None, @@ -1002,7 +1011,7 @@ impl AnyConfigFragment { HbaseRole::DEFAULT_MASTER_GRACEFUL_SHUTDOWN_TIMEOUT, ), requested_secret_lifetime: Some(HbaseRole::DEFAULT_MASTER_SECRET_LIFETIME), - listener_class: Some(DEFAULT_LISTENER_CLASS.to_string()), + listener_class: Some(DEFAULT_LISTENER_CLASS), }), } } @@ -1082,8 +1091,7 @@ pub struct HbaseConfig { pub requested_secret_lifetime: Option, /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. - /// All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. - pub listener_class: String, + pub listener_class: SupportedListenerClasses, } impl Configuration for HbaseConfigFragment { @@ -1239,8 +1247,7 @@ pub struct RegionServerConfig { pub region_mover: RegionMover, /// This field controls which [ListenerClass](DOCS_BASE_URL_PLACEHOLDER/listener-operator/listenerclass.html) is used to expose this rolegroup. - /// All roles should have a direct ListenerClass, such as `cluster-internal` or `external-unstable`. 
- pub listener_class: String, + pub listener_class: SupportedListenerClasses, } impl Configuration for RegionServerConfigFragment { @@ -1312,6 +1319,35 @@ impl Configuration for RegionServerConfigFragment { } } +#[derive(Clone, Debug, Default, Display, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] +#[serde(rename_all = "PascalCase")] +pub enum SupportedListenerClasses { + #[default] + #[serde(rename = "cluster-internal")] + #[strum(serialize = "cluster-internal")] + ClusterInternal, + + #[serde(rename = "external-unstable")] + #[strum(serialize = "external-unstable")] + ExternalUnstable, + + #[serde(rename = "external-stable")] + #[strum(serialize = "external-stable")] + ExternalStable, +} + +impl Atomic for SupportedListenerClasses {} + +impl SupportedListenerClasses { + pub fn discoverable(&self) -> bool { + match self { + SupportedListenerClasses::ClusterInternal => false, + SupportedListenerClasses::ExternalUnstable => true, + SupportedListenerClasses::ExternalStable => true, + } + } +} + #[derive(Clone, Debug, Default, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct HbaseClusterStatus { @@ -1361,7 +1397,7 @@ impl AnyServiceConfig { AnyServiceConfig::RestServer(config) => config.requested_secret_lifetime, } } - pub fn listener_class(&self) -> String { + pub fn listener_class(&self) -> SupportedListenerClasses { match self { AnyServiceConfig::Master(config) => config.listener_class.clone(), AnyServiceConfig::RegionServer(config) => config.listener_class.clone(), diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index c9934e21..e875187d 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -21,8 +21,14 @@ use stackable_operator::{ configmap::ConfigMapBuilder, meta::ObjectMetaBuilder, pod::{ - container::ContainerBuilder, resources::ResourceRequirementsBuilder, - security::PodSecurityContextBuilder, PodBuilder, + container::ContainerBuilder, + resources::ResourceRequirementsBuilder, + security::PodSecurityContextBuilder, + volume::{ + ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError, + ListenerReference, + }, + PodBuilder, }, }, cluster_resources::{ClusterResourceApplyStrategy, ClusterResources}, @@ -62,7 +68,7 @@ use stackable_operator::{ time::Duration, utils::cluster_info::KubernetesClusterInfo, }; -use strum::{EnumDiscriminants, IntoEnumIterator, IntoStaticStr, ParseError}; +use strum::{EnumDiscriminants, IntoStaticStr, ParseError}; use crate::{ config::jvm::{ @@ -326,6 +332,11 @@ pub enum Error { #[snafu(display("cannot collect discovery configuration"))] CollectDiscoveryConfig { source: crate::crd::Error }, + + #[snafu(display("failed to build listener volume"))] + BuildListenerVolume { + source: ListenerOperatorVolumeSourceBuilderError, + }, } type Result = std::result::Result; @@ -411,6 +422,7 @@ pub async fn reconcile_hbase( .context(ApplyRoleBindingSnafu)?; let mut ss_cond_builder = StatefulSetConditionBuilder::default(); + let mut listener_refs: BTreeMap> = BTreeMap::new(); for (role_name, group_config) in validated_config.iter() { let hbase_role = HbaseRole::from_str(role_name).context(UnidentifiedHbaseRoleSnafu { @@ -470,6 +482,30 @@ pub async fn reconcile_hbase( rolegroup: rolegroup.clone(), })?, ); + // if the replicas are changed at the same time as the reconciliation + // being paused, it may be possible to have listeners that are *expected* + // (according to their replica 
number) but which are not yet created, so + // deactivate this action in such cases. + if hbase.spec.cluster_operation.reconciliation_paused + || hbase.spec.cluster_operation.stopped + { + tracing::info!( + "Cluster is in a transitional state so do not attempt to collect listener information that will only be active once cluster has returned to a non-transitional state." + ); + } else { + listener_refs.insert( + hbase_role.to_string(), + hbase + .listener_refs( + client, + &hbase_role, + &merged_config, + &resolved_product_image.product_version, + ) + .await + .context(CollectDiscoveryConfigSnafu)?, + ); + } } let role_config = hbase.role_config(&hbase_role); @@ -483,31 +519,12 @@ pub async fn reconcile_hbase( } } - let mut listener_refs: BTreeMap> = BTreeMap::new(); - - // TODO if the listeners are persisted then they will still be present in the - // cluster and can all be found. Or move this up into the role loop above and - // only process those rolegroups where the listener class requires persistence. - if hbase.spec.cluster_operation.reconciliation_paused || hbase.spec.cluster_operation.stopped { - tracing::info!( - "Cluster is in a transitional state so do not attempt to collect listener - information that will only be active once cluster has exited transitional state." - ); - } else { - for role in HbaseRole::iter() { - listener_refs.insert( - role.to_string(), - hbase - .listener_refs(client, &role, &resolved_product_image.product_version) - .await - .context(CollectDiscoveryConfigSnafu)?, - ); - } - tracing::info!( - "Listener references written to the ConfigMap {:#?}", - listener_refs - ); + tracing::debug!( + "Listener references prepared for the ConfigMap {:#?}", + listener_refs + ); + if !listener_refs.is_empty() { let endpoint_cm = build_endpoint_configmap(hbase, &resolved_product_image, listener_refs) .context(BuildDiscoveryConfigMapSnafu)?; cluster_resources @@ -941,12 +958,6 @@ fn build_rolegroup_statefulset( )), ) .context(AddVolumeSnafu)? - .add_listener_volume_by_listener_class( - LISTENER_VOLUME_NAME, - &merged_config.listener_class(), - &recommended_labels, - ) - .context(AddVolumeSnafu)? .service_account_name(service_account.name_any()) .security_context( PodSecurityContextBuilder::new() @@ -956,6 +967,26 @@ fn build_rolegroup_statefulset( .build(), ); + let pvcs = if merged_config.listener_class().discoverable() { + let pvc = ListenerOperatorVolumeSourceBuilder::new( + &ListenerReference::ListenerClass(merged_config.listener_class().to_string()), + &recommended_labels, + ) + .context(BuildListenerVolumeSnafu)? 
+ .build_pvc(LISTENER_VOLUME_NAME.to_string()) + .context(BuildListenerVolumeSnafu)?; + Some(vec![pvc]) + } else { + pod_builder + .add_listener_volume_by_listener_class( + LISTENER_VOLUME_NAME, + &merged_config.listener_class().to_string(), + &recommended_labels, + ) + .context(AddVolumeSnafu)?; + None + }; + if let Some(ContainerLogConfig { choice: Some(ContainerLogConfigChoice::Custom(CustomContainerLogConfig { @@ -1053,6 +1084,7 @@ fn build_rolegroup_statefulset( }, service_name: rolegroup_ref.object_name(), template: pod_template, + volume_claim_templates: pvcs, ..StatefulSetSpec::default() }; diff --git a/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 index 4732f740..6989c892 100644 --- a/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/03-install-hbase.yaml.j2 @@ -29,6 +29,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -37,6 +38,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -45,6 +47,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 index ffc8c7c0..0431cf88 100644 --- a/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/10-pause-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 2 # ignored because reconciliation is paused diff --git a/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 b/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 index 0f4c5665..8bc4007f 100644 --- a/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/20-stop-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 # set to 0 by the operator because cluster is stopped diff --git a/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 
b/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 index 388110b2..9a29aff5 100644 --- a/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 +++ b/tests/templates/kuttl/cluster-operation/30-restart-hbase.yaml.j2 @@ -32,6 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -40,6 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 @@ -48,6 +50,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} + listenerClass: external-stable roleGroups: default: replicas: 1 diff --git a/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 index 6caef467..30dd61f4 100644 --- a/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 @@ -23,7 +23,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: external-stable + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 2 @@ -32,7 +32,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: external-unstable + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 2 @@ -41,7 +41,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: cluster-internal + listenerClass: {{ test_scenario['values']['listener-class'] }} roleGroups: default: replicas: 2 diff --git a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 index 79769e66..666a15a9 100644 --- a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 +++ b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 @@ -18,6 +18,7 @@ spec: command: - /bin/bash - /tmp/script/script.sh +{% if test_scenario['values']['listener-class'] == 'external-unstable' %} env: - name: MASTER_UI_0 valueFrom: @@ -49,6 +50,7 @@ spec: configMapKeyRef: name: test-hbase-ui-endpoints key: hbase.restserver-1.ui +{% endif %} volumeMounts: - name: script mountPath: /tmp/script @@ -69,7 +71,7 @@ metadata: data: script.sh: | set -euxo pipefail - +{% if test_scenario['values']['listener-class'] == 'external-unstable' %} echo "Attempting to reach master at $MASTER_UI_0..." curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_0}" | grep 200 echo "Attempting to reach region-server at $REGIONSERVER_UI_0..." @@ -83,5 +85,5 @@ data: curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${REGIONSERVER_UI_1}" | grep 200 echo "Attempting to reach rest-server at $RESTSERVER_UI_1..." curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${RESTSERVER_UI_1}" | grep 200 - +{% endif %} echo "All tests successful!" 
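For reference outside the kuttl test, the published endpoints can also be checked by hand. This is a minimal sketch that assumes the ConfigMap name (test-hbase-ui-endpoints) and the hbase.<role>-<n>.ui keys produced for the test cluster above exist in the current namespace; note that the dots inside the key names must be escaped in the kubectl JSONPath expression.

    # Read one published endpoint from the discovery ConfigMap and probe it,
    # mirroring the curl check used in the test script.
    endpoint="$(kubectl get configmap test-hbase-ui-endpoints \
      -o jsonpath='{.data.hbase\.master-0\.ui}')"
    echo "Probing the HBase master UI at ${endpoint}..."
    curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${endpoint}" | grep 200
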
diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index b7afd697..cfcb5510 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -148,6 +148,7 @@ tests: - hdfs-latest - zookeeper-latest - openshift + - listener-class suites: - name: nightly patch: From bf969a083506cd21b4fa5e6f86df2d05da105a61 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 2 Apr 2025 16:33:33 +0200 Subject: [PATCH 17/22] added comment --- rust/operator-binary/src/hbase_controller.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index e875187d..6879b78c 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -967,6 +967,7 @@ fn build_rolegroup_statefulset( .build(), ); + // externally-reachable listener endpoints should use a pvc volume... let pvcs = if merged_config.listener_class().discoverable() { let pvc = ListenerOperatorVolumeSourceBuilder::new( &ListenerReference::ListenerClass(merged_config.listener_class().to_string()), @@ -977,6 +978,7 @@ fn build_rolegroup_statefulset( .context(BuildListenerVolumeSnafu)?; Some(vec![pvc]) } else { + // ...whereas others will use ephemeral volumes pod_builder .add_listener_volume_by_listener_class( LISTENER_VOLUME_NAME, From 90efe2bf1cbdd043450f5a341ba559d2e609cced Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Thu, 3 Apr 2025 10:59:04 +0200 Subject: [PATCH 18/22] docs --- .../pages/usage-guide/listenerclass.adoc | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc index 3bbcb3b0..4c19e5e7 100644 --- a/docs/modules/hbase/pages/usage-guide/listenerclass.adoc +++ b/docs/modules/hbase/pages/usage-guide/listenerclass.adoc @@ -17,4 +17,21 @@ spec: config: listenerClass: external-unstable ---- -<1> Specify one of `external-stable`, `external-unstable`, `cluster-internal` (the default setting is `cluster-internal`) +<1> Specify one of `external-stable`, `external-unstable`, `cluster-internal` (the default setting is `cluster-internal`). +This can be set separately for all three roles. + +Externally-reachable endpoints (i.e. where listener-class = `external-unstable` or `external-unstable`) are written to a ConfigMap called `-ui-endpoints`, listing each rolegroup by replica: + +[source,yaml] +---- +apiVersion: v1 +data: + hbase.master-0.ui: 172.19.0.3:32353 + hbase.master-1.ui: 172.19.0.5:31817 + hbase.regionserver-0.ui: 172.19.0.3:31719 + hbase.regionserver-1.ui: 172.19.0.5:30626 + hbase.restserver-0.ui: 172.19.0.3:31790 + hbase.restserver-1.ui: 172.19.0.5:32292 +kind: ConfigMap +... 
+---- From 831399543078757dac3b37491003b98b8e395b13 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 8 Apr 2025 17:33:25 +0200 Subject: [PATCH 19/22] resolve listener-class across role-groups/role --- rust/operator-binary/src/crd/mod.rs | 178 +++++++++++++----- rust/operator-binary/src/discovery.rs | 8 +- rust/operator-binary/src/hbase_controller.rs | 44 ++--- .../kuttl/external-access/03-assert.yaml | 24 ++- .../external-access/03-install-hbase.yaml.j2 | 18 +- .../external-access/30-access-hbase.txt.j2 | 34 +--- tests/test-definition.yaml | 1 - 7 files changed, 198 insertions(+), 109 deletions(-) diff --git a/rust/operator-binary/src/crd/mod.rs b/rust/operator-binary/src/crd/mod.rs index 14d81c60..20da6bfe 100644 --- a/rust/operator-binary/src/crd/mod.rs +++ b/rust/operator-binary/src/crd/mod.rs @@ -583,6 +583,12 @@ impl v1alpha1::HbaseCluster { } } + /// Returns rolegroup and replica information for a specific role. + /// We can't pass through the merged config for a particular role-group + /// here as we need more than the config. As this will be called by role, + /// the merged listener-class is called so that only role-group information + /// for externally-reachable services (based on their listener class) are + /// included in the collection. pub fn rolegroup_ref_and_replicas( &self, role: &HbaseRole, @@ -596,6 +602,9 @@ impl v1alpha1::HbaseCluster { // Order rolegroups consistently, to avoid spurious downstream rewrites .collect::>() .into_iter() + .filter(|(rolegroup_name, _)| { + self.resolved_listener_class_discoverable(role, rolegroup_name) + }) .map(|(rolegroup_name, role_group)| { ( self.rolegroup_ref(HbaseRole::Master.to_string(), rolegroup_name), @@ -611,6 +620,9 @@ impl v1alpha1::HbaseCluster { // Order rolegroups consistently, to avoid spurious downstream rewrites .collect::>() .into_iter() + .filter(|(rolegroup_name, _)| { + self.resolved_listener_class_discoverable(role, rolegroup_name) + }) .map(|(rolegroup_name, role_group)| { ( self.rolegroup_ref(HbaseRole::RegionServer.to_string(), rolegroup_name), @@ -626,6 +638,9 @@ impl v1alpha1::HbaseCluster { // Order rolegroups consistently, to avoid spurious downstream rewrites .collect::>() .into_iter() + .filter(|(rolegroup_name, _)| { + self.resolved_listener_class_discoverable(role, rolegroup_name) + }) .map(|(rolegroup_name, role_group)| { ( self.rolegroup_ref(HbaseRole::RestServer.to_string(), rolegroup_name), @@ -636,6 +651,19 @@ impl v1alpha1::HbaseCluster { } } + fn resolved_listener_class_discoverable( + &self, + role: &HbaseRole, + rolegroup_name: &&String, + ) -> bool { + let listener_class = self.merged_listener_class(role, rolegroup_name); + if let Some(listener_class) = listener_class { + listener_class.discoverable() + } else { + false + } + } + pub fn pod_refs( &self, role: &HbaseRole, @@ -667,53 +695,113 @@ impl v1alpha1::HbaseCluster { &self, client: &stackable_operator::client::Client, role: &HbaseRole, - merged_config: &AnyServiceConfig, hbase_version: &str, ) -> Result, Error> { - // only externally-reachable listeners are relevant - if merged_config.listener_class().discoverable() { - let pod_refs = self.pod_refs(role, hbase_version)?; - try_join_all(pod_refs.into_iter().map(|pod_ref| async { - // N.B. use the naming convention for persistent listener volumes as we - // have specified above that we only want externally-reachable endpoints. 
- let listener_name = format!("{LISTENER_VOLUME_NAME}-{}", pod_ref.pod_name); - let listener_ref = - || ObjectRef::::new(&listener_name).within(&pod_ref.namespace); - let pod_obj_ref = - || ObjectRef::::new(&pod_ref.pod_name).within(&pod_ref.namespace); - let listener = client - .get::(&listener_name, &pod_ref.namespace) - .await - .context(GetPodListenerSnafu { - listener: listener_ref(), - pod: pod_obj_ref(), - })?; - let listener_address = listener - .status - .and_then(|s| s.ingress_addresses?.into_iter().next()) - .context(PodListenerHasNoAddressSnafu { - listener: listener_ref(), - pod: pod_obj_ref(), - })?; - Ok(HbasePodRef { - fqdn_override: Some(listener_address.address), - ports: listener_address - .ports - .into_iter() - .map(|(port_name, port)| { - let port = u16::try_from(port).context(PortOutOfBoundsSnafu { - port_name: &port_name, - port, - })?; - Ok((port_name, port)) - }) - .collect::>()?, - ..pod_ref - }) - })) - .await - } else { - Ok(vec![]) + let pod_refs = self.pod_refs(role, hbase_version)?; + try_join_all(pod_refs.into_iter().map(|pod_ref| async { + // N.B. use the naming convention for persistent listener volumes as we + // have specified above that we only want externally-reachable endpoints. + let listener_name = format!("{LISTENER_VOLUME_NAME}-{}", pod_ref.pod_name); + let listener_ref = + || ObjectRef::::new(&listener_name).within(&pod_ref.namespace); + let pod_obj_ref = + || ObjectRef::::new(&pod_ref.pod_name).within(&pod_ref.namespace); + let listener = client + .get::(&listener_name, &pod_ref.namespace) + .await + .context(GetPodListenerSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + let listener_address = listener + .status + .and_then(|s| s.ingress_addresses?.into_iter().next()) + .context(PodListenerHasNoAddressSnafu { + listener: listener_ref(), + pod: pod_obj_ref(), + })?; + Ok(HbasePodRef { + fqdn_override: Some(listener_address.address), + ports: listener_address + .ports + .into_iter() + .map(|(port_name, port)| { + let port = u16::try_from(port).context(PortOutOfBoundsSnafu { + port_name: &port_name, + port, + })?; + Ok((port_name, port)) + }) + .collect::>()?, + ..pod_ref + }) + })) + .await + } + + pub fn merged_listener_class( + &self, + role: &HbaseRole, + rolegroup_name: &String, + ) -> Option { + match role { + HbaseRole::Master => { + if let Some(masters) = self.spec.masters.as_ref() { + let conf_defaults = Some(SupportedListenerClasses::ClusterInternal); + let mut conf_role = masters.config.config.listener_class.to_owned(); + let mut conf_rolegroup = masters + .role_groups + .get(rolegroup_name) + .map(|rg| rg.config.config.listener_class.clone()) + .unwrap_or_default(); + + conf_role.merge(&conf_defaults); + conf_rolegroup.merge(&conf_role); + + tracing::debug!("Merged listener-class: {:?} for {role}", conf_rolegroup); + conf_rolegroup + } else { + None + } + } + HbaseRole::RegionServer => { + if let Some(region_servers) = self.spec.region_servers.as_ref() { + let conf_defaults = Some(SupportedListenerClasses::ClusterInternal); + let mut conf_role = region_servers.config.config.listener_class.to_owned(); + let mut conf_rolegroup = region_servers + .role_groups + .get(rolegroup_name) + .map(|rg| rg.config.config.listener_class.clone()) + .unwrap_or_default(); + + conf_role.merge(&conf_defaults); + conf_rolegroup.merge(&conf_role); + + tracing::debug!("Merged listener-class: {:?} for {role}", conf_rolegroup); + conf_rolegroup + } else { + None + } + } + HbaseRole::RestServer => { + if let Some(rest_servers) = 
self.spec.rest_servers.as_ref() { + let conf_defaults = Some(SupportedListenerClasses::ClusterInternal); + let mut conf_role = rest_servers.config.config.listener_class.to_owned(); + let mut conf_rolegroup = rest_servers + .role_groups + .get(rolegroup_name) + .map(|rg| rg.config.config.listener_class.clone()) + .unwrap_or_default(); + + conf_role.merge(&conf_defaults); + conf_rolegroup.merge(&conf_role); + + tracing::debug!("Merged listener-class: {:?} for {role}", conf_rolegroup); + conf_rolegroup + } else { + None + } + } } } } diff --git a/rust/operator-binary/src/discovery.rs b/rust/operator-binary/src/discovery.rs index 5d5470a2..a708591d 100644 --- a/rust/operator-binary/src/discovery.rs +++ b/rust/operator-binary/src/discovery.rs @@ -112,23 +112,19 @@ pub fn build_endpoint_configmap( ); for role_podref in role_podrefs { - let role_name = role_podref.0; - // podrefs are written into the collection by replica index - // and can be retrieved in the same order - let mut i = 0; for podref in role_podref.1 { if let HbasePodRef { fqdn_override: Some(fqdn_override), ports, + pod_name, .. } = podref { if let Some(ui_port) = ports.get(&hbase.ui_port_name()) { cmm.add_data( - format!("hbase.{role_name}-{i}.ui"), + format!("{pod_name}.http"), format!("{fqdn_override}:{ui_port}"), ); - i += 1; } } } diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index c71c547f..71af72e4 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -482,30 +482,6 @@ pub async fn reconcile_hbase( rolegroup: rolegroup.clone(), })?, ); - // if the replicas are changed at the same time as the reconciliation - // being paused, it may be possible to have listeners that are *expected* - // (according to their replica number) but which are not yet created, so - // deactivate this action in such cases. - if hbase.spec.cluster_operation.reconciliation_paused - || hbase.spec.cluster_operation.stopped - { - tracing::info!( - "Cluster is in a transitional state so do not attempt to collect listener information that will only be active once cluster has returned to a non-transitional state." - ); - } else { - listener_refs.insert( - hbase_role.to_string(), - hbase - .listener_refs( - client, - &hbase_role, - &merged_config, - &resolved_product_image.product_version, - ) - .await - .context(CollectDiscoveryConfigSnafu)?, - ); - } } let role_config = hbase.role_config(&hbase_role); @@ -517,6 +493,26 @@ pub async fn reconcile_hbase( .await .context(FailedToCreatePdbSnafu)?; } + + // if the replicas are changed at the same time as the reconciliation + // being paused, it may be possible to have listeners that are *expected* + // (according to their replica number) but which are not yet created, so + // deactivate this action in such cases. + if hbase.spec.cluster_operation.reconciliation_paused + || hbase.spec.cluster_operation.stopped + { + tracing::info!( + "Cluster is in a transitional state so do not attempt to collect listener information that will only be active once cluster has returned to a non-transitional state." 
+ ); + } else { + listener_refs.insert( + hbase_role.to_string(), + hbase + .listener_refs(client, &hbase_role, &resolved_product_image.product_version) + .await + .context(CollectDiscoveryConfigSnafu)?, + ); + } } tracing::debug!( diff --git a/tests/templates/kuttl/external-access/03-assert.yaml b/tests/templates/kuttl/external-access/03-assert.yaml index 3c34faad..b9964df8 100644 --- a/tests/templates/kuttl/external-access/03-assert.yaml +++ b/tests/templates/kuttl/external-access/03-assert.yaml @@ -16,13 +16,29 @@ kind: StatefulSet metadata: name: test-hbase-regionserver-default status: - readyReplicas: 2 - replicas: 2 + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-regionserver-cluster-internal +status: + readyReplicas: 1 + replicas: 1 --- apiVersion: apps/v1 kind: StatefulSet metadata: name: test-hbase-restserver-default status: - readyReplicas: 2 - replicas: 2 + readyReplicas: 1 + replicas: 1 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-hbase-restserver-external-unstable +status: + readyReplicas: 1 + replicas: 1 diff --git a/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 index 30dd61f4..249d1643 100644 --- a/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 +++ b/tests/templates/kuttl/external-access/03-install-hbase.yaml.j2 @@ -23,7 +23,7 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: {{ test_scenario['values']['listener-class'] }} + listenerClass: external-unstable roleGroups: default: replicas: 2 @@ -32,16 +32,24 @@ spec: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: {{ test_scenario['values']['listener-class'] }} + listenerClass: external-unstable roleGroups: default: - replicas: 2 + replicas: 1 + cluster-internal: + replicas: 1 + config: + listenerClass: cluster-internal restServers: config: gracefulShutdownTimeout: 1m logging: enableVectorAgent: {{ lookup('env', 'VECTOR_AGGREGATOR') | length > 0 }} - listenerClass: {{ test_scenario['values']['listener-class'] }} + listenerClass: cluster-internal roleGroups: default: - replicas: 2 + replicas: 1 + external-unstable: + replicas: 1 + config: + listenerClass: external-unstable diff --git a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 index 666a15a9..ab749159 100644 --- a/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 +++ b/tests/templates/kuttl/external-access/30-access-hbase.txt.j2 @@ -18,39 +18,27 @@ spec: command: - /bin/bash - /tmp/script/script.sh -{% if test_scenario['values']['listener-class'] == 'external-unstable' %} env: - name: MASTER_UI_0 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.master-0.ui + key: test-hbase-master-default-0.http - name: MASTER_UI_1 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.master-1.ui + key: test-hbase-master-default-0.http - name: REGIONSERVER_UI_0 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.regionserver-0.ui - - name: REGIONSERVER_UI_1 - valueFrom: - configMapKeyRef: - name: test-hbase-ui-endpoints - key: hbase.regionserver-1.ui + key: test-hbase-regionserver-default-0.http - name: RESTSERVER_UI_0 valueFrom: configMapKeyRef: name: test-hbase-ui-endpoints - key: hbase.restserver-0.ui - 
- name: RESTSERVER_UI_1 - valueFrom: - configMapKeyRef: - name: test-hbase-ui-endpoints - key: hbase.restserver-1.ui -{% endif %} + key: test-hbase-restserver-external-unstable-0.http volumeMounts: - name: script mountPath: /tmp/script @@ -71,19 +59,17 @@ metadata: data: script.sh: | set -euxo pipefail -{% if test_scenario['values']['listener-class'] == 'external-unstable' %} + echo "Attempting to reach master at $MASTER_UI_0..." curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_0}" | grep 200 + + echo "Attempting to reach master at $MASTER_UI_1..." + curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_1}" | grep 200 + echo "Attempting to reach region-server at $REGIONSERVER_UI_0..." curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${REGIONSERVER_UI_0}" | grep 200 + echo "Attempting to reach rest-server at $RESTSERVER_UI_0..." curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${RESTSERVER_UI_0}" | grep 200 - echo "Attempting to reach master at $MASTER_UI_1..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${MASTER_UI_1}" | grep 200 - echo "Attempting to reach region-server at $REGIONSERVER_UI_1..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${REGIONSERVER_UI_1}" | grep 200 - echo "Attempting to reach rest-server at $RESTSERVER_UI_1..." - curl --retry 0 -f -s -o /dev/null -w "%{http_code}" "${RESTSERVER_UI_1}" | grep 200 -{% endif %} echo "All tests successful!" diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml index cfcb5510..b7afd697 100644 --- a/tests/test-definition.yaml +++ b/tests/test-definition.yaml @@ -148,7 +148,6 @@ tests: - hdfs-latest - zookeeper-latest - openshift - - listener-class suites: - name: nightly patch: From fb8efde951c2d6ef47b6221c07d6926e5e5a4f19 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Tue, 8 Apr 2025 17:40:45 +0200 Subject: [PATCH 20/22] regenerate nix --- Cargo.nix | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.nix b/Cargo.nix index cec128e4..96b15a64 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -7178,10 +7178,10 @@ rec { }; "ring" = rec { crateName = "ring"; - version = "0.17.11"; + version = "0.17.14"; edition = "2021"; - links = "ring_core_0_17_11_"; - sha256 = "0wzyhdbf71ndd14kkpyj2a6nvczvli2mndzv2al7r26k4yp4jlys"; + links = "ring_core_0_17_14_"; + sha256 = "1dw32gv19ccq4hsx3ribhpdzri1vnrlcfqb2vj41xn4l49n9ws54"; dependencies = [ { name = "cfg-if"; From 5ddbff2a2fb4d20ccbc27f8b59c090e0ad463726 Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 9 Apr 2025 09:53:31 +0200 Subject: [PATCH 21/22] updated tokio and nix packages --- Cargo.lock | 4 +- Cargo.nix | 909 ++++++++++++++++++++++++++--------------------- nix/sources.json | 20 +- 3 files changed, 524 insertions(+), 409 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4bdbff9d..6311aa49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2924,9 +2924,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", diff --git a/Cargo.nix b/Cargo.nix index 96b15a64..d2055673 100644 --- a/Cargo.nix +++ b/Cargo.nix @@ -5,6 +5,7 @@ { nixpkgs ? , pkgs ? import nixpkgs { config = {}; } +, fetchurl ? pkgs.fetchurl , lib ? pkgs.lib , stdenv ? pkgs.stdenv , buildRustCrateForPkgs ? 
pkgs: pkgs.buildRustCrate @@ -61,6 +62,8 @@ rec { }; }; + + # A derivation that joins the outputs of all workspace members together. allWorkspaceMembers = pkgs.symlinkJoin { name = "all-workspace-members"; @@ -971,14 +974,14 @@ rec { { name = "tokio"; packageId = "tokio"; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "time" "rt" "macros" "sync" "rt-multi-thread" ]; } { name = "tokio"; packageId = "tokio"; usesDefaultFeatures = false; - target = {target, features}: ("wasm32" == target."arch" or null); + target = { target, features }: ("wasm32" == target."arch" or null); features = [ "macros" "rt" "sync" ]; } ]; @@ -1649,7 +1652,7 @@ rec { name = "libc"; packageId = "libc"; usesDefaultFeatures = false; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "aarch64-linux-android"); + target = { target, features }: (target.name == "aarch64-linux-android"); } { name = "libc"; @@ -7098,33 +7101,33 @@ rec { name = "futures-util"; packageId = "futures-util"; usesDefaultFeatures = false; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "std" "alloc" ]; } { name = "hyper"; packageId = "hyper"; usesDefaultFeatures = false; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "http1" "http2" "client" "server" ]; } { name = "hyper-util"; packageId = "hyper-util"; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "http1" "http2" "client" "client-legacy" "server-auto" "tokio" ]; } { name = "serde"; packageId = "serde"; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "derive" ]; } { name = "tokio"; packageId = "tokio"; usesDefaultFeatures = false; - target = {target, features}: (!("wasm32" == target."arch" or null)); + target = { target, features }: (!("wasm32" == target."arch" or null)); features = [ "macros" "rt-multi-thread" ]; } { @@ -7136,7 +7139,7 @@ rec { { name = "wasm-bindgen"; packageId = "wasm-bindgen"; - target = {target, features}: ("wasm32" == target."arch" or null); + target = { target, features }: ("wasm32" == target."arch" or null); features = [ "serde-serialize" ]; } ]; @@ -7227,7 +7230,7 @@ rec { name = "libc"; packageId = "libc"; usesDefaultFeatures = false; - target = {target, features}: ((target."unix" or false) || (target."windows" or false) || ("wasi" == target."os" or null)); + target = { target, features }: ((target."unix" or false) || (target."windows" or false) || ("wasi" == target."os" or null)); } ]; features = { @@ -9449,9 +9452,9 @@ rec { }; "tokio" = rec { crateName = "tokio"; - version = "1.43.0"; + version = "1.44.2"; edition = "2021"; - sha256 = "17pdm49ihlhfw3rpxix3kdh2ppl1yv7nwp1kxazi5r1xz97zlq9x"; + sha256 = "0j4w3qvlcqzgbxlnap0czvspqj6x461vyk1sbqcf97g4rci8if76"; authors = [ "Tokio Contributors " ]; @@ -9516,17 +9519,17 @@ rec { { name = "libc"; packageId = "libc"; - target = {target, features}: (target."unix" or false); + target = { target, features }: (target."unix" or false); } { name = "socket2"; packageId = "socket2"; - target = {target, features}: (!(builtins.elem "wasm" target."family")); + target = { target, 
features }: (!(builtins.elem "wasm" target."family")); } { name = "windows-sys"; packageId = "windows-sys 0.52.0"; - target = {target, features}: (target."windows" or false); + target = { target, features }: (target."windows" or false); features = [ "Win32_Foundation" "Win32_Security_Authorization" ]; } ]; @@ -11679,12 +11682,12 @@ rec { { name = "winapi-i686-pc-windows-gnu"; packageId = "winapi-i686-pc-windows-gnu"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "i686-pc-windows-gnu"); + target = { target, features }: (target.name == "i686-pc-windows-gnu"); } { name = "winapi-x86_64-pc-windows-gnu"; packageId = "winapi-x86_64-pc-windows-gnu"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "x86_64-pc-windows-gnu"); + target = { target, features }: (target.name == "x86_64-pc-windows-gnu"); } ]; features = { @@ -13016,7 +13019,7 @@ rec { { name = "windows_aarch64_gnullvm"; packageId = "windows_aarch64_gnullvm 0.52.6"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "aarch64-pc-windows-gnullvm"); + target = { target, features }: (target.name == "aarch64-pc-windows-gnullvm"); } { name = "windows_aarch64_msvc"; @@ -13031,7 +13034,7 @@ rec { { name = "windows_i686_gnullvm"; packageId = "windows_i686_gnullvm 0.52.6"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "i686-pc-windows-gnullvm"); + target = { target, features }: (target.name == "i686-pc-windows-gnullvm"); } { name = "windows_i686_msvc"; @@ -13046,7 +13049,7 @@ rec { { name = "windows_x86_64_gnullvm"; packageId = "windows_x86_64_gnullvm 0.52.6"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "x86_64-pc-windows-gnullvm"); + target = { target, features }: (target.name == "x86_64-pc-windows-gnullvm"); } { name = "windows_x86_64_msvc"; @@ -13069,7 +13072,7 @@ rec { { name = "windows_aarch64_gnullvm"; packageId = "windows_aarch64_gnullvm 0.53.0"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "aarch64-pc-windows-gnullvm"); + target = { target, features }: (target.name == "aarch64-pc-windows-gnullvm"); } { name = "windows_aarch64_msvc"; @@ -13084,7 +13087,7 @@ rec { { name = "windows_i686_gnullvm"; packageId = "windows_i686_gnullvm 0.53.0"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "i686-pc-windows-gnullvm"); + target = { target, features }: (target.name == "i686-pc-windows-gnullvm"); } { name = "windows_i686_msvc"; @@ -13099,7 +13102,7 @@ rec { { name = "windows_x86_64_gnullvm"; packageId = "windows_x86_64_gnullvm 0.53.0"; - target = { target, features }: (stdenv.hostPlatform.rust.rustcTarget == "x86_64-pc-windows-gnullvm"); + target = { target, features }: (target.name == "x86_64-pc-windows-gnullvm"); } { name = "windows_x86_64_msvc"; @@ -13671,10 +13674,13 @@ rec { # crate2nix/default.nix (excerpt start) # - /* Target (platform) data for conditional dependencies. + /* + Target (platform) data for conditional dependencies. This corresponds roughly to what buildRustCrate is setting. 
*/ makeDefaultTarget = platform: { + name = platform.rust.rustcTarget; + unix = platform.isUnix; windows = platform.isWindows; fuchsia = true; @@ -13683,30 +13689,70 @@ rec { inherit (platform.rust.platform) arch os - vendor; + vendor + ; family = platform.rust.platform.target-family; env = "gnu"; - endian = - if platform.parsed.cpu.significantByte.name == "littleEndian" - then "little" else "big"; + endian = if platform.parsed.cpu.significantByte.name == "littleEndian" then "little" else "big"; pointer_width = toString platform.parsed.cpu.bits; debug_assertions = false; }; - /* Filters common temp files and build files. */ + registryUrl = + { registries + , url + , crate + , version + , sha256 + , + }: + let + dl = registries.${url}.dl; + tmpl = [ + "{crate}" + "{version}" + "{prefix}" + "{lowerprefix}" + "{sha256-checksum}" + ]; + in + with lib.strings; + if lib.lists.any (i: hasInfix "{}" dl) tmpl then + let + prefix = + if builtins.stringLength crate == 1 then + "1" + else if builtins.stringLength crate == 2 then + "2" + else + "${builtins.substring 0 2 crate}/${builtins.substring 2 (builtins.stringLength crate - 2) crate}"; + in + builtins.replaceStrings tmpl [ + crate + version + prefix + (lib.strings.toLower prefix) + sha256 + ] + else + "${dl}/${crate}/${version}/download"; + + # Filters common temp files and build files. # TODO(pkolloch): Substitute with gitignore filter - sourceFilter = name: type: + sourceFilter = + name: type: let baseName = builtins.baseNameOf (builtins.toString name); in - ! ( + !( # Filter out git baseName == ".gitignore" || (type == "directory" && baseName == ".git") # Filter out build results || ( - type == "directory" && ( + type == "directory" + && ( baseName == "target" || baseName == "_site" || baseName == ".sass-cache" @@ -13716,16 +13762,11 @@ rec { ) # Filter out nix-build result symlinks - || ( - type == "symlink" && lib.hasPrefix "result" baseName - ) + || (type == "symlink" && lib.hasPrefix "result" baseName) # Filter out IDE config - || ( - type == "directory" && ( - baseName == ".idea" || baseName == ".vscode" - ) - ) || lib.hasSuffix ".iml" baseName + || (type == "directory" && (baseName == ".idea" || baseName == ".vscode")) + || lib.hasSuffix ".iml" baseName # Filter out nix build files || baseName == "Cargo.nix" @@ -13739,90 +13780,100 @@ rec { || baseName == "tests.nix" ); - /* Returns a crate which depends on successful test execution + /* + Returns a crate which depends on successful test execution of crate given as the second argument. testCrateFlags: list of flags to pass to the test exectuable testInputs: list of packages that should be available during test execution */ - crateWithTest = { crate, testCrate, testCrateFlags, testInputs, testPreRun, testPostRun }: - assert builtins.typeOf testCrateFlags == "list"; - assert builtins.typeOf testInputs == "list"; - assert builtins.typeOf testPreRun == "string"; - assert builtins.typeOf testPostRun == "string"; - let - # override the `crate` so that it will build and execute tests instead of - # building the actual lib and bin targets We just have to pass `--test` - # to rustc and it will do the right thing. We execute the tests and copy - # their log and the test executables to $out for later inspection. 
- test = - let - drv = testCrate.override - ( - _: { - buildTests = true; - } + crateWithTest = + { crate + , testCrate + , testCrateFlags + , testInputs + , testPreRun + , testPostRun + , + }: + assert builtins.typeOf testCrateFlags == "list"; + assert builtins.typeOf testInputs == "list"; + assert builtins.typeOf testPreRun == "string"; + assert builtins.typeOf testPostRun == "string"; + let + # override the `crate` so that it will build and execute tests instead of + # building the actual lib and bin targets We just have to pass `--test` + # to rustc and it will do the right thing. We execute the tests and copy + # their log and the test executables to $out for later inspection. + test = + let + drv = testCrate.override (_: { + buildTests = true; + }); + # If the user hasn't set any pre/post commands, we don't want to + # insert empty lines. This means that any existing users of crate2nix + # don't get a spurious rebuild unless they set these explicitly. + testCommand = pkgs.lib.concatStringsSep "\n" ( + pkgs.lib.filter (s: s != "") [ + testPreRun + "$f $testCrateFlags 2>&1 | tee -a $out" + testPostRun + ] ); - # If the user hasn't set any pre/post commands, we don't want to - # insert empty lines. This means that any existing users of crate2nix - # don't get a spurious rebuild unless they set these explicitly. - testCommand = pkgs.lib.concatStringsSep "\n" - (pkgs.lib.filter (s: s != "") [ - testPreRun - "$f $testCrateFlags 2>&1 | tee -a $out" - testPostRun - ]); - in - pkgs.stdenvNoCC.mkDerivation { - name = "run-tests-${testCrate.name}"; - - inherit (crate) src; - - inherit testCrateFlags; - - buildInputs = testInputs; - - buildPhase = '' - set -e - export RUST_BACKTRACE=1 - - # build outputs - testRoot=target/debug - mkdir -p $testRoot - - # executables of the crate - # we copy to prevent std::env::current_exe() to resolve to a store location - for i in ${crate}/bin/*; do - cp "$i" "$testRoot" - done - chmod +w -R . - - # test harness executables are suffixed with a hash, like cargo does - # this allows to prevent name collision with the main - # executables of the crate - hash=$(basename $out) - for file in ${drv}/tests/*; do - f=$testRoot/$(basename $file)-$hash - cp $file $f - ${testCommand} - done - ''; - }; - in - pkgs.runCommand "${crate.name}-linked" - { - inherit (crate) outputs crateName; - passthru = (crate.passthru or { }) // { - inherit test; - }; - } - (lib.optionalString (stdenv.buildPlatform.canExecute stdenv.hostPlatform) '' - echo tested by ${test} - '' + '' - ${lib.concatMapStringsSep "\n" (output: "ln -s ${crate.${output}} ${"$"}${output}") crate.outputs} - ''); - - /* A restricted overridable version of builtRustCratesWithFeatures. */ + in + pkgs.stdenvNoCC.mkDerivation { + name = "run-tests-${testCrate.name}"; + + inherit (crate) src; + + inherit testCrateFlags; + + buildInputs = testInputs; + + buildPhase = '' + set -e + export RUST_BACKTRACE=1 + + # build outputs + testRoot=target/debug + mkdir -p $testRoot + + # executables of the crate + # we copy to prevent std::env::current_exe() to resolve to a store location + for i in ${crate}/bin/*; do + cp "$i" "$testRoot" + done + chmod +w -R . 
+ + # test harness executables are suffixed with a hash, like cargo does + # this allows to prevent name collision with the main + # executables of the crate + hash=$(basename $out) + for file in ${drv}/tests/*; do + f=$testRoot/$(basename $file)-$hash + cp $file $f + ${testCommand} + done + ''; + }; + in + pkgs.runCommand "${crate.name}-linked" + { + inherit (crate) outputs crateName; + passthru = (crate.passthru or { }) // { + inherit test; + }; + } + ( + lib.optionalString (stdenv.buildPlatform.canExecute stdenv.hostPlatform) '' + echo tested by ${test} + '' + + '' + ${lib.concatMapStringsSep "\n" (output: "ln -s ${crate.${output}} ${"$"}${output}") crate.outputs} + '' + ); + + # A restricted overridable version of builtRustCratesWithFeatures. buildRustCrateWithFeatures = { packageId , features ? rootFeatures @@ -13831,10 +13882,11 @@ rec { , runTests ? false , testCrateFlags ? [ ] , testInputs ? [ ] - # Any command to run immediatelly before a test is executed. - , testPreRun ? "" - # Any command run immediatelly after a test is executed. - , testPostRun ? "" + , # Any command to run immediatelly before a test is executed. + testPreRun ? "" + , # Any command run immediatelly after a test is executed. + testPostRun ? "" + , }: lib.makeOverridable ( @@ -13845,17 +13897,19 @@ rec { , testInputs , testPreRun , testPostRun + , }: let buildRustCrateForPkgsFuncOverriden = - if buildRustCrateForPkgsFunc != null - then buildRustCrateForPkgsFunc + if buildRustCrateForPkgsFunc != null then + buildRustCrateForPkgsFunc else ( - if crateOverrides == pkgs.defaultCrateOverrides - then buildRustCrateForPkgs + if crateOverrides == pkgs.defaultCrateOverrides then + buildRustCrateForPkgs else - pkgs: (buildRustCrateForPkgs pkgs).override { + pkgs: + (buildRustCrateForPkgs pkgs).override { defaultCrateOverrides = crateOverrides; } ); @@ -13877,15 +13931,32 @@ rec { { crate = drv; testCrate = testDrv; - inherit testCrateFlags testInputs testPreRun testPostRun; + inherit + testCrateFlags + testInputs + testPreRun + testPostRun + ; } - else drv; + else + drv; in derivation ) - { inherit features crateOverrides runTests testCrateFlags testInputs testPreRun testPostRun; }; + { + inherit + features + crateOverrides + runTests + testCrateFlags + testInputs + testPreRun + testPostRun + ; + }; - /* Returns an attr set with packageId mapped to the result of buildRustCrateForPkgsFunc + /* + Returns an attr set with packageId mapped to the result of buildRustCrateForPkgsFunc for the corresponding crate. */ builtRustCratesWithFeatures = @@ -13895,7 +13966,8 @@ rec { , buildRustCrateForPkgsFunc , runTests , makeTarget ? makeDefaultTarget - } @ args: + , + }@args: assert (builtins.isAttrs crateConfigs); assert (builtins.isString packageId); assert (builtins.isList features); @@ -13903,55 +13975,61 @@ rec { assert (builtins.isBool runTests); let rootPackageId = packageId; - mergedFeatures = mergePackageFeatures - ( - args // { - inherit rootPackageId; - target = makeTarget stdenv.hostPlatform // { test = runTests; }; - } - ); + mergedFeatures = mergePackageFeatures ( + args + // { + inherit rootPackageId; + target = makeTarget stdenv.hostPlatform // { + test = runTests; + }; + } + ); # Memoize built packages so that reappearing packages are only built once. 
builtByPackageIdByPkgs = mkBuiltByPackageIdByPkgs pkgs; - mkBuiltByPackageIdByPkgs = pkgs: + mkBuiltByPackageIdByPkgs = + pkgs: let self = { - crates = lib.mapAttrs (packageId: value: buildByPackageIdForPkgsImpl self pkgs packageId) crateConfigs; - target = makeTarget stdenv.hostPlatform; + crates = lib.mapAttrs + ( + packageId: value: buildByPackageIdForPkgsImpl self pkgs packageId + ) + crateConfigs; + target = makeTarget pkgs.stdenv.hostPlatform; build = mkBuiltByPackageIdByPkgs pkgs.buildPackages; }; in self; - buildByPackageIdForPkgsImpl = self: pkgs: packageId: + buildByPackageIdForPkgsImpl = + self: pkgs: packageId: let features = mergedFeatures."${packageId}" or [ ]; crateConfig' = crateConfigs."${packageId}"; - crateConfig = - builtins.removeAttrs crateConfig' [ "resolvedDefaultFeatures" "devDependencies" ]; - devDependencies = - lib.optionals - (runTests && packageId == rootPackageId) - (crateConfig'.devDependencies or [ ]); - dependencies = - dependencyDerivations { - inherit features; - inherit (self) target; - buildByPackageId = depPackageId: - # proc_macro crates must be compiled for the build architecture - if crateConfigs.${depPackageId}.procMacro or false - then self.build.crates.${depPackageId} - else self.crates.${depPackageId}; - dependencies = - (crateConfig.dependencies or [ ]) - ++ devDependencies; - }; - buildDependencies = - dependencyDerivations { - inherit features; - inherit (self.build) target; - buildByPackageId = depPackageId: - self.build.crates.${depPackageId}; - dependencies = crateConfig.buildDependencies or [ ]; - }; + crateConfig = builtins.removeAttrs crateConfig' [ + "resolvedDefaultFeatures" + "devDependencies" + ]; + devDependencies = lib.optionals (runTests && packageId == rootPackageId) ( + crateConfig'.devDependencies or [ ] + ); + dependencies = dependencyDerivations { + inherit features; + inherit (self) target; + buildByPackageId = + depPackageId: + # proc_macro crates must be compiled for the build architecture + if crateConfigs.${depPackageId}.procMacro or false then + self.build.crates.${depPackageId} + else + self.crates.${depPackageId}; + dependencies = (crateConfig.dependencies or [ ]) ++ devDependencies; + }; + buildDependencies = dependencyDerivations { + inherit features; + inherit (self.build) target; + buildByPackageId = depPackageId: self.build.crates.${depPackageId}; + dependencies = crateConfig.buildDependencies or [ ]; + }; dependenciesWithRenames = let buildDeps = filterEnabledDependencies { @@ -13976,45 +14054,54 @@ rec { # } crateRenames = let - grouped = - lib.groupBy - (dependency: dependency.name) - dependenciesWithRenames; - versionAndRename = dep: + grouped = lib.groupBy (dependency: dependency.name) dependenciesWithRenames; + versionAndRename = + dep: let package = crateConfigs."${dep.packageId}"; in - { inherit (dep) rename; inherit (package) version; }; + { + inherit (dep) rename; + inherit (package) version; + }; in lib.mapAttrs (name: builtins.map versionAndRename) grouped; in - buildRustCrateForPkgsFunc pkgs - ( - crateConfig // { - src = crateConfig.src or ( - pkgs.fetchurl rec { - name = "${crateConfig.crateName}-${crateConfig.version}.tar.gz"; - # https://www.pietroalbini.org/blog/downloading-crates-io/ - # Not rate-limited, CDN URL. - url = "https://static.crates.io/crates/${crateConfig.crateName}/${crateConfig.crateName}-${crateConfig.version}.crate"; - sha256 = - assert (lib.assertMsg (crateConfig ? 
sha256) "Missing sha256 for ${name}"); - crateConfig.sha256; - } - ); - extraRustcOpts = lib.lists.optional (targetFeatures != [ ]) "-C target-feature=${lib.concatMapStringsSep "," (x: "+${x}") targetFeatures}"; - inherit features dependencies buildDependencies crateRenames release; - } - ); + buildRustCrateForPkgsFunc pkgs ( + crateConfig + // { + src = + crateConfig.src or (pkgs.fetchurl rec { + name = "${crateConfig.crateName}-${crateConfig.version}.tar.gz"; + # https://www.pietroalbini.org/blog/downloading-crates-io/ + # Not rate-limited, CDN URL. + url = "https://static.crates.io/crates/${crateConfig.crateName}/${crateConfig.crateName}-${crateConfig.version}.crate"; + sha256 = + assert (lib.assertMsg (crateConfig ? sha256) "Missing sha256 for ${name}"); + crateConfig.sha256; + }); + extraRustcOpts = + lib.lists.optional (targetFeatures != [ ]) + "-C target-feature=${lib.concatMapStringsSep "," (x: "+${x}") targetFeatures}"; + inherit + features + dependencies + buildDependencies + crateRenames + release + ; + } + ); in builtByPackageIdByPkgs; - /* Returns the actual derivations for the given dependencies. */ + # Returns the actual derivations for the given dependencies. dependencyDerivations = { buildByPackageId , features , dependencies , target + , }: assert (builtins.isList features); assert (builtins.isList dependencies); @@ -14027,52 +14114,59 @@ rec { in map depDerivation enabledDependencies; - /* Returns a sanitized version of val with all values substituted that cannot + /* + Returns a sanitized version of val with all values substituted that cannot be serialized as JSON. */ - sanitizeForJson = val: - if builtins.isAttrs val - then lib.mapAttrs (n: sanitizeForJson) val - else if builtins.isList val - then builtins.map sanitizeForJson val - else if builtins.isFunction val - then "function" - else val; - - /* Returns various tools to debug a crate. */ - debugCrate = { packageId, target ? makeDefaultTarget stdenv.hostPlatform }: - assert (builtins.isString packageId); - let - debug = rec { - # The built tree as passed to buildRustCrate. - buildTree = buildRustCrateWithFeatures { - buildRustCrateForPkgsFunc = _: lib.id; - inherit packageId; - }; - sanitizedBuildTree = sanitizeForJson buildTree; - dependencyTree = sanitizeForJson - ( - buildRustCrateWithFeatures { - buildRustCrateForPkgsFunc = _: crate: { - "01_crateName" = crate.crateName or false; - "02_features" = crate.features or [ ]; - "03_dependencies" = crate.dependencies or [ ]; - }; - inherit packageId; - } - ); - mergedPackageFeatures = mergePackageFeatures { - features = rootFeatures; - inherit packageId target; - }; - diffedDefaultPackageFeatures = diffDefaultPackageFeatures { - inherit packageId target; + sanitizeForJson = + val: + if builtins.isAttrs val then + lib.mapAttrs (n: sanitizeForJson) val + else if builtins.isList val then + builtins.map sanitizeForJson val + else if builtins.isFunction val then + "function" + else + val; + + # Returns various tools to debug a crate. + debugCrate = + { packageId + , target ? makeDefaultTarget stdenv.hostPlatform + , + }: + assert (builtins.isString packageId); + let + debug = rec { + # The built tree as passed to buildRustCrate. 
+ buildTree = buildRustCrateWithFeatures { + buildRustCrateForPkgsFunc = _: lib.id; + inherit packageId; + }; + sanitizedBuildTree = sanitizeForJson buildTree; + dependencyTree = sanitizeForJson (buildRustCrateWithFeatures { + buildRustCrateForPkgsFunc = _: crate: { + "01_crateName" = crate.crateName or false; + "02_features" = crate.features or [ ]; + "03_dependencies" = crate.dependencies or [ ]; + }; + inherit packageId; + }); + mergedPackageFeatures = mergePackageFeatures { + features = rootFeatures; + inherit packageId target; + }; + diffedDefaultPackageFeatures = diffDefaultPackageFeatures { + inherit packageId target; + }; }; + in + { + internal = debug; }; - in - { internal = debug; }; - /* Returns differences between cargo default features and crate2nix default + /* + Returns differences between cargo default features and crate2nix default features. This is useful for verifying the feature resolution in crate2nix. @@ -14081,22 +14175,26 @@ rec { { crateConfigs ? crates , packageId , target + , }: assert (builtins.isAttrs crateConfigs); let prefixValues = prefix: lib.mapAttrs (n: v: { "${prefix}" = v; }); - mergedFeatures = - prefixValues - "crate2nix" - (mergePackageFeatures { inherit crateConfigs packageId target; features = [ "default" ]; }); + mergedFeatures = prefixValues "crate2nix" (mergePackageFeatures { + inherit crateConfigs packageId target; + features = [ "default" ]; + }); configs = prefixValues "cargo" crateConfigs; - combined = lib.foldAttrs (a: b: a // b) { } [ mergedFeatures configs ]; - onlyInCargo = - builtins.attrNames - (lib.filterAttrs (n: v: !(v ? "crate2nix") && (v ? "cargo")) combined); - onlyInCrate2Nix = - builtins.attrNames - (lib.filterAttrs (n: v: (v ? "crate2nix") && !(v ? "cargo")) combined); + combined = lib.foldAttrs (a: b: a // b) { } [ + mergedFeatures + configs + ]; + onlyInCargo = builtins.attrNames ( + lib.filterAttrs (n: v: !(v ? "crate2nix") && (v ? "cargo")) combined + ); + onlyInCrate2Nix = builtins.attrNames ( + lib.filterAttrs (n: v: (v ? "crate2nix") && !(v ? "cargo")) combined + ); differentFeatures = lib.filterAttrs ( n: v: @@ -14110,7 +14208,8 @@ rec { inherit onlyInCargo onlyInCrate2Nix differentFeatures; }; - /* Returns an attrset mapping packageId to the list of enabled features. + /* + Returns an attrset mapping packageId to the list of enabled features. If multiple paths to a dependency enable different features, the corresponding feature sets are merged. Features in rust are additive. @@ -14123,10 +14222,10 @@ rec { , dependencyPath ? [ crates.${packageId}.crateName ] , featuresByPackageId ? { } , target - # Adds devDependencies to the crate with rootPackageId. - , runTests ? false + , # Adds devDependencies to the crate with rootPackageId. + runTests ? false , ... 
- } @ args: + }@args: assert (builtins.isAttrs crateConfigs); assert (builtins.isString packageId); assert (builtins.isString rootPackageId); @@ -14139,84 +14238,93 @@ rec { crateConfig = crateConfigs."${packageId}" or (builtins.throw "Package not found: ${packageId}"); expandedFeatures = expandFeatures (crateConfig.features or { }) features; enabledFeatures = enableFeatures (crateConfig.dependencies or [ ]) expandedFeatures; - depWithResolvedFeatures = dependency: + depWithResolvedFeatures = + dependency: let inherit (dependency) packageId; features = dependencyFeatures enabledFeatures dependency; in - { inherit packageId features; }; - resolveDependencies = cache: path: dependencies: - assert (builtins.isAttrs cache); - assert (builtins.isList dependencies); - let - enabledDependencies = filterEnabledDependencies { - inherit dependencies target; - features = enabledFeatures; - }; - directDependencies = map depWithResolvedFeatures enabledDependencies; - foldOverCache = op: lib.foldl op cache directDependencies; - in - foldOverCache - ( - cache: { packageId, features }: - let - cacheFeatures = cache.${packageId} or [ ]; - combinedFeatures = sortedUnique (cacheFeatures ++ features); - in - if cache ? ${packageId} && cache.${packageId} == combinedFeatures - then cache - else - mergePackageFeatures { - features = combinedFeatures; - featuresByPackageId = cache; - inherit crateConfigs packageId target runTests rootPackageId; - } + { + inherit packageId features; + }; + resolveDependencies = + cache: path: dependencies: + assert (builtins.isAttrs cache); + assert (builtins.isList dependencies); + let + enabledDependencies = filterEnabledDependencies { + inherit dependencies target; + features = enabledFeatures; + }; + directDependencies = map depWithResolvedFeatures enabledDependencies; + foldOverCache = op: lib.foldl op cache directDependencies; + in + foldOverCache ( + cache: + { packageId, features }: + let + cacheFeatures = cache.${packageId} or [ ]; + combinedFeatures = sortedUnique (cacheFeatures ++ features); + in + if cache ? ${packageId} && cache.${packageId} == combinedFeatures then + cache + else + mergePackageFeatures { + features = combinedFeatures; + featuresByPackageId = cache; + inherit + crateConfigs + packageId + target + runTests + rootPackageId + ; + } ); cacheWithSelf = let cacheFeatures = featuresByPackageId.${packageId} or [ ]; combinedFeatures = sortedUnique (cacheFeatures ++ enabledFeatures); in - featuresByPackageId // { + featuresByPackageId + // { "${packageId}" = combinedFeatures; }; - cacheWithDependencies = - resolveDependencies cacheWithSelf "dep" - ( - crateConfig.dependencies or [ ] - ++ lib.optionals - (runTests && packageId == rootPackageId) - (crateConfig.devDependencies or [ ]) - ); - cacheWithAll = - resolveDependencies - cacheWithDependencies "build" - (crateConfig.buildDependencies or [ ]); + cacheWithDependencies = resolveDependencies cacheWithSelf "dep" ( + crateConfig.dependencies or [ ] + ++ lib.optionals (runTests && packageId == rootPackageId) (crateConfig.devDependencies or [ ]) + ); + cacheWithAll = resolveDependencies cacheWithDependencies "build" ( + crateConfig.buildDependencies or [ ] + ); in cacheWithAll; - /* Returns the enabled dependencies given the enabled features. */ - filterEnabledDependencies = { dependencies, features, target }: - assert (builtins.isList dependencies); - assert (builtins.isList features); - assert (builtins.isAttrs target); + # Returns the enabled dependencies given the enabled features. 
+ filterEnabledDependencies = + { dependencies + , features + , target + , + }: + assert (builtins.isList dependencies); + assert (builtins.isList features); + assert (builtins.isAttrs target); - lib.filter - ( - dep: - let - targetFunc = dep.target or (features: true); - in - targetFunc { inherit features target; } - && ( - !(dep.optional or false) - || builtins.any (doesFeatureEnableDependency dep) features + lib.filter + ( + dep: + let + targetFunc = dep.target or (features: true); + in + targetFunc { inherit features target; } + && (!(dep.optional or false) || builtins.any (doesFeatureEnableDependency dep) features) ) - ) - dependencies; + dependencies; - /* Returns whether the given feature should enable the given dependency. */ - doesFeatureEnableDependency = dependency: feature: + # Returns whether the given feature should enable the given dependency. + doesFeatureEnableDependency = + dependency: feature: let name = dependency.rename or dependency.name; prefix = "${name}/"; @@ -14225,109 +14333,116 @@ rec { in feature == name || feature == "dep:" + name || startsWithPrefix; - /* Returns the expanded features for the given inputFeatures by applying the + /* + Returns the expanded features for the given inputFeatures by applying the rules in featureMap. featureMap is an attribute set which maps feature names to lists of further feature names to enable in case this feature is selected. */ - expandFeatures = featureMap: inputFeatures: - assert (builtins.isAttrs featureMap); - assert (builtins.isList inputFeatures); - let - expandFeaturesNoCycle = oldSeen: inputFeatures: - if inputFeatures != [ ] - then - let - # The feature we're currently expanding. - feature = builtins.head inputFeatures; - # All the features we've seen/expanded so far, including the one - # we're currently processing. - seen = oldSeen // { ${feature} = 1; }; - # Expand the feature but be careful to not re-introduce a feature - # that we've already seen: this can easily cause a cycle, see issue - # #209. - enables = builtins.filter (f: !(seen ? "${f}")) (featureMap."${feature}" or [ ]); - in - [ feature ] ++ (expandFeaturesNoCycle seen (builtins.tail inputFeatures ++ enables)) - # No more features left, nothing to expand to. - else [ ]; - outFeatures = expandFeaturesNoCycle { } inputFeatures; - in - sortedUnique outFeatures; + expandFeatures = + featureMap: inputFeatures: + assert (builtins.isAttrs featureMap); + assert (builtins.isList inputFeatures); + let + expandFeaturesNoCycle = + oldSeen: inputFeatures: + if inputFeatures != [ ] then + let + # The feature we're currently expanding. + feature = builtins.head inputFeatures; + # All the features we've seen/expanded so far, including the one + # we're currently processing. + seen = oldSeen // { + ${feature} = 1; + }; + # Expand the feature but be careful to not re-introduce a feature + # that we've already seen: this can easily cause a cycle, see issue + # #209. + enables = builtins.filter (f: !(seen ? "${f}")) (featureMap."${feature}" or [ ]); + in + [ feature ] ++ (expandFeaturesNoCycle seen (builtins.tail inputFeatures ++ enables)) + # No more features left, nothing to expand to. + else + [ ]; + outFeatures = expandFeaturesNoCycle { } inputFeatures; + in + sortedUnique outFeatures; - /* This function adds optional dependencies as features if they are enabled + /* + This function adds optional dependencies as features if they are enabled indirectly by dependency features. 
This function mimics Cargo's behavior described in a note at: https://doc.rust-lang.org/nightly/cargo/reference/features.html#dependency-features */ - enableFeatures = dependencies: features: - assert (builtins.isList features); - assert (builtins.isList dependencies); - let - additionalFeatures = lib.concatMap - ( - dependency: - assert (builtins.isAttrs dependency); - let - enabled = builtins.any (doesFeatureEnableDependency dependency) features; - in - if (dependency.optional or false) && enabled - then [ (dependency.rename or dependency.name) ] - else [ ] - ) - dependencies; - in - sortedUnique (features ++ additionalFeatures); + enableFeatures = + dependencies: features: + assert (builtins.isList features); + assert (builtins.isList dependencies); + let + additionalFeatures = lib.concatMap + ( + dependency: + assert (builtins.isAttrs dependency); + let + enabled = builtins.any (doesFeatureEnableDependency dependency) features; + in + if (dependency.optional or false) && enabled then + [ (dependency.rename or dependency.name) ] + else + [ ] + ) + dependencies; + in + sortedUnique (features ++ additionalFeatures); /* Returns the actual features for the given dependency. features: The features of the crate that refers this dependency. */ - dependencyFeatures = features: dependency: - assert (builtins.isList features); - assert (builtins.isAttrs dependency); - let - defaultOrNil = - if dependency.usesDefaultFeatures or true - then [ "default" ] - else [ ]; - explicitFeatures = dependency.features or [ ]; - additionalDependencyFeatures = - let - name = dependency.rename or dependency.name; - stripPrefixMatch = prefix: s: - if lib.hasPrefix prefix s - then lib.removePrefix prefix s - else null; - extractFeature = feature: lib.findFirst - (f: f != null) - null - (map (prefix: stripPrefixMatch prefix feature) [ - (name + "/") - (name + "?/") - ]); - dependencyFeatures = lib.filter (f: f != null) (map extractFeature features); - in - dependencyFeatures; - in - defaultOrNil ++ explicitFeatures ++ additionalDependencyFeatures; - - /* Sorts and removes duplicates from a list of strings. */ - sortedUnique = features: - assert (builtins.isList features); - assert (builtins.all builtins.isString features); - let - outFeaturesSet = lib.foldl (set: feature: set // { "${feature}" = 1; }) { } features; - outFeaturesUnique = builtins.attrNames outFeaturesSet; - in - builtins.sort (a: b: a < b) outFeaturesUnique; + dependencyFeatures = + features: dependency: + assert (builtins.isList features); + assert (builtins.isAttrs dependency); + let + defaultOrNil = if dependency.usesDefaultFeatures or true then [ "default" ] else [ ]; + explicitFeatures = dependency.features or [ ]; + additionalDependencyFeatures = + let + name = dependency.rename or dependency.name; + stripPrefixMatch = prefix: s: if lib.hasPrefix prefix s then lib.removePrefix prefix s else null; + extractFeature = + feature: + lib.findFirst (f: f != null) null ( + map (prefix: stripPrefixMatch prefix feature) [ + (name + "/") + (name + "?/") + ] + ); + dependencyFeatures = lib.filter (f: f != null) (map extractFeature features); + in + dependencyFeatures; + in + defaultOrNil ++ explicitFeatures ++ additionalDependencyFeatures; - deprecationWarning = message: value: - if strictDeprecation - then builtins.throw "strictDeprecation enabled, aborting: ${message}" - else builtins.trace message value; + # Sorts and removes duplicates from a list of strings. 
+ sortedUnique = + features: + assert (builtins.isList features); + assert (builtins.all builtins.isString features); + let + outFeaturesSet = lib.foldl (set: feature: set // { "${feature}" = 1; }) { } features; + outFeaturesUnique = builtins.attrNames outFeaturesSet; + in + builtins.sort (a: b: a < b) outFeaturesUnique; + + deprecationWarning = + message: value: + if strictDeprecation then + builtins.throw "strictDeprecation enabled, aborting: ${message}" + else + builtins.trace message value; # # crate2nix/default.nix (excerpt end) diff --git a/nix/sources.json b/nix/sources.json index 9a26de8f..5aee1c9d 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -1,14 +1,14 @@ { "beku.py": { - "branch": "main", + "branch": "0.0.10", "description": "Test suite expander for Stackable Kuttl tests.", "homepage": null, "owner": "stackabletech", "repo": "beku.py", - "rev": "1ebc9e7b70fb8ee11dfb569ae45b3bcd63666d0e", - "sha256": "1zg24h5wdis7cysa08r8vvbw2rpyx6fgv148i1lg54dwd3sa0h0d", + "rev": "fc75202a38529a4ac6776dd8a5dfee278d927f58", + "sha256": "152yary0p11h87yabv74jnwkghsal7lx16az0qlzrzdrs6n5v8id", "type": "tarball", - "url": "https://github.com/stackabletech/beku.py/archive/1ebc9e7b70fb8ee11dfb569ae45b3bcd63666d0e.tar.gz", + "url": "https://github.com/stackabletech/beku.py/archive/fc75202a38529a4ac6776dd8a5dfee278d927f58.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "crate2nix": { @@ -17,10 +17,10 @@ "homepage": "", "owner": "kolloch", "repo": "crate2nix", - "rev": "236f6addfd452a48be805819e3216af79e988fd5", - "sha256": "1cnq84c1bhhbn3blm31scrqsxw2bl1w67v6gpav01m0s2509klf5", + "rev": "be31feae9a82c225c0fd1bdf978565dc452a483a", + "sha256": "14d0ymlrwk7dynv35qcw4xn0dylfpwjmf6f8znflbk2l6fk23l12", "type": "tarball", - "url": "https://github.com/kolloch/crate2nix/archive/236f6addfd452a48be805819e3216af79e988fd5.tar.gz", + "url": "https://github.com/kolloch/crate2nix/archive/be31feae9a82c225c0fd1bdf978565dc452a483a.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, "nixpkgs": { @@ -29,10 +29,10 @@ "homepage": "", "owner": "NixOS", "repo": "nixpkgs", - "rev": "aa6ae0afa6adeb5c202a168e51eda1d3da571117", - "sha256": "1kbg6limdl7f21vr36g7qlrimm8lxr97b6kvxkz91yfdffn942p9", + "rev": "b2b0718004cc9a5bca610326de0a82e6ea75920b", + "sha256": "0aqrxx1w40aqicjhg2057bpyrrbsx6mnii5dp5klpm4labfg2iwi", "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/aa6ae0afa6adeb5c202a168e51eda1d3da571117.tar.gz", + "url": "https://github.com/NixOS/nixpkgs/archive/b2b0718004cc9a5bca610326de0a82e6ea75920b.tar.gz", "url_template": "https://github.com///archive/.tar.gz" } } From 68ab9cc200a546bb281b568461c63ad89d845a4c Mon Sep 17 00:00:00 2001 From: Andrew Kenworthy Date: Wed, 23 Apr 2025 15:24:07 +0200 Subject: [PATCH 22/22] added listener-relevant config settings --- rust/operator-binary/src/hbase_controller.rs | 27 +++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/rust/operator-binary/src/hbase_controller.rs b/rust/operator-binary/src/hbase_controller.rs index 71af72e4..83586eb7 100644 --- a/rust/operator-binary/src/hbase_controller.rs +++ b/rust/operator-binary/src/hbase_controller.rs @@ -597,6 +597,30 @@ fn build_rolegroup_config_map( hbase_site_config .extend(hbase_opa_config.map_or(vec![], |config| config.hbase_site_config())); + match hbase_role { + HbaseRole::Master => { + hbase_site_config.insert( + "hbase.listener.master.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + 
"hbase.listener.master.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ) + } + HbaseRole::RegionServer => { + hbase_site_config.insert( + "hbase.listener.regionserver.hostname".to_string(), + "${HBASE_SERVICE_HOST}".to_string(), + ); + hbase_site_config.insert( + "hbase.listener.regionserver.port".to_string(), + "${HBASE_SERVICE_PORT}".to_string(), + ) + } + HbaseRole::RestServer => None, + }; + // configOverride come last hbase_site_config.extend(config.clone()); hbase_site_xml = to_hadoop_xml( @@ -881,11 +905,12 @@ fn build_rolegroup_statefulset( .image_from_product_image(resolved_product_image) .command(command()) .args(vec![formatdoc! {" - {entrypoint} {role} {domain} {port}", + {entrypoint} {role} {domain} {port} {port_name}", entrypoint = "/stackable/hbase/bin/hbase-entrypoint.sh".to_string(), role = role_name, domain = hbase_service_domain_name(hbase, rolegroup_ref, cluster_info)?, port = hbase.service_port(hbase_role).to_string(), + port_name = hbase.ui_port_name(), }]) .add_env_vars(merged_env) // Needed for the `containerdebug` process to log it's tracing information to.