From 3bf5b88f710906298650b9f11b2537d30a5e5c35 Mon Sep 17 00:00:00 2001
From: Techassi
Date: Mon, 18 Nov 2024 16:16:01 +0100
Subject: [PATCH 1/7] Update CHANGELOG.md from release 24.11.0

---
 CHANGELOG.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b8b06444..b4e93130 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,8 @@ All notable changes to this project will be documented in this file.
 
 ## [Unreleased]
 
+## [24.11.0] - 2024-11-18
+
 ### Added
 
 - The operator can now run on Kubernetes clusters using a non-default cluster domain.

From 31e864048ccdf83c45651a8055076fe26bee01d9 Mon Sep 17 00:00:00 2001
From: Stacky McStackface <95074132+stackable-bot@users.noreply.github.com>
Date: Mon, 18 Nov 2024 22:46:41 +0100
Subject: [PATCH 2/7] chore: Generated commit to update templated files since
 the last template run up to
 stackabletech/operator-templating@fd40109c1bd66c79fa74bf67a5b5bff1e3a96ce8
 (#608)

Reference-to: stackabletech/operator-templating@fd40109 (Add make render-doc command)
---
 Makefile | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/Makefile b/Makefile
index f77712b0..40863452 100644
--- a/Makefile
+++ b/Makefile
@@ -29,6 +29,9 @@ SHELL=/usr/bin/env bash -euo pipefail
 render-readme:
 	scripts/render_readme.sh
 
+render-docs:
+	scripts/docs_templating.sh
+
 ## Docker related targets
 docker-build:
 	docker build --force-rm --build-arg VERSION=${VERSION} -t "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}:${VERSION}-${ARCH}" -f docker/Dockerfile .

From 02af4400a58fcab1abb82a87617ba6e9d70ab214 Mon Sep 17 00:00:00 2001
From: Nick <10092581+NickLarsenNZ@users.noreply.github.com>
Date: Wed, 20 Nov 2024 20:16:02 +1300
Subject: [PATCH 3/7] chore(getting_started): replace templating instructions
 with make (#609)

---
 .../examples/getting_started/getting_started.sh    | 13 +------------
 .../examples/getting_started/getting_started.sh.j2 | 13 +------------
 .../hdfs/examples/getting_started/hdfs.yaml.j2     |  4 ++--
 3 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/docs/modules/hdfs/examples/getting_started/getting_started.sh b/docs/modules/hdfs/examples/getting_started/getting_started.sh
index ea5df8f1..d80c5a6f 100755
--- a/docs/modules/hdfs/examples/getting_started/getting_started.sh
+++ b/docs/modules/hdfs/examples/getting_started/getting_started.sh
@@ -2,18 +2,7 @@
 set -euo pipefail
 
 # DO NOT EDIT THE SCRIPT
-# Instead, update the j2 template, and regenerate it for dev:
-# cat <

Date: Wed, 20 Nov 2024 21:07:55 +1300
Subject: [PATCH 4/7] docs(getting_started): Increase the wait timeout (#611)

---
 .../hdfs/examples/getting_started/getting_started.sh    | 6 +++---
 .../hdfs/examples/getting_started/getting_started.sh.j2 | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/modules/hdfs/examples/getting_started/getting_started.sh b/docs/modules/hdfs/examples/getting_started/getting_started.sh
index d80c5a6f..218ffa23 100755
--- a/docs/modules/hdfs/examples/getting_started/getting_started.sh
+++ b/docs/modules/hdfs/examples/getting_started/getting_started.sh
@@ -97,9 +97,9 @@ done
 
 echo "Awaiting HDFS rollout finish"
 # tag::watch-hdfs-rollout[]
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-datanode-default
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-namenode-default
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-journalnode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-datanode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-namenode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-journalnode-default
 # end::watch-hdfs-rollout[]
 
 echo "Creating Helper"
diff --git a/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2 b/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
index 740c8874..eac2ffa7 100755
--- a/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
+++ b/docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
@@ -97,9 +97,9 @@ done
 
 echo "Awaiting HDFS rollout finish"
 # tag::watch-hdfs-rollout[]
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-datanode-default
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-namenode-default
-kubectl rollout status --watch --timeout=5m statefulset/simple-hdfs-journalnode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-datanode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-namenode-default
+kubectl rollout status --watch --timeout=10m statefulset/simple-hdfs-journalnode-default
 # end::watch-hdfs-rollout[]
 
 echo "Creating Helper"

From 28476813a7c757a8fb0d9e55bdfb129e1bb3b23e Mon Sep 17 00:00:00 2001
From: Andrew Kenworthy <1712947+adwk67@users.noreply.github.com>
Date: Wed, 20 Nov 2024 14:12:45 +0100
Subject: [PATCH 5/7] tests: Add opa as a dimension (#614)

---
 tests/templates/kuttl/kerberos/11-install-opa.yaml.j2 | 2 +-
 tests/test-definition.yaml                            | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/tests/templates/kuttl/kerberos/11-install-opa.yaml.j2 b/tests/templates/kuttl/kerberos/11-install-opa.yaml.j2
index 15fd2c2b..699b05c1 100644
--- a/tests/templates/kuttl/kerberos/11-install-opa.yaml.j2
+++ b/tests/templates/kuttl/kerberos/11-install-opa.yaml.j2
@@ -5,7 +5,7 @@ metadata:
   name: opa
 spec:
   image:
-    productVersion: 0.61.0
+    productVersion: "{{ test_scenario['values']['opa'] }}"
   servers:
     roleGroups:
       default: {}
diff --git a/tests/test-definition.yaml b/tests/test-definition.yaml
index 4e657b30..4613a497 100644
--- a/tests/test-definition.yaml
+++ b/tests/test-definition.yaml
@@ -24,6 +24,9 @@ dimensions:
   - name: krb5
     values:
       - 1.21.1
+  - name: opa
+    values:
+      - 0.67.1
   - name: number-of-datanodes
     values:
       - "1"
@@ -68,6 +71,7 @@ tests:
       - kerberos-realm
       - kerberos-backend
       - openshift
+      - opa
   - name: topology-provider
     dimensions:
       - hadoop-latest

From 47ee88aede7e5c0f870430b316367cbc5da40051 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=B6nke=20Liebau?=
Date: Mon, 25 Nov 2024 18:58:16 +0100
Subject: [PATCH 6/7] Use a patched version of op-rs to hopefully fix SUP-148 (#616)

* Use a patched version of op-rs to hopefully fix SUP-148
* Update revision of op-rs pr
* Make necessary changes to reflect now private fns in op-rs due.
* Added changelog entry. Removed patched op-rs version and referenced release 0.82
* rustfmt
* Removed use of unwrap and expect in favor of proper error logging.
* Added legacy serviceAccount name to clusterrolebinding.
* Added removal of duplicate roleBindings in case a cluster is called hdfs.
* Addressed review comments.
* markdownlint
---
 CHANGELOG.md                                  |  2 +
 Cargo.lock                                    |  8 +--
 Cargo.nix                                     | 14 ++---
 Cargo.toml                                    |  4 +-
 crate-hashes.json                             |  6 +-
 ...dfs_clusterrolebinding_nodes_controller.rs | 60 ++++++++++++++++---
 rust/operator-binary/src/hdfs_controller.rs   | 12 ++--
 7 files changed, 77 insertions(+), 29 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index b4e93130..e144e0e1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,10 +20,12 @@ All notable changes to this project will be documented in this file.
 ### Fixed
 
 - An invalid `HdfsCluster` doesn't cause the operator to stop functioning ([#594]).
+- BREAKING: Use distinct ServiceAccounts for the Stacklets, so that multiple Stacklets can be deployed in one namespace. Existing Stacklets will use the newly created ServiceAccounts after restart ([#616]).
 
 [#574]: https://github.com/stackabletech/hdfs-operator/pull/574
 [#591]: https://github.com/stackabletech/hdfs-operator/pull/591
 [#594]: https://github.com/stackabletech/hdfs-operator/pull/594
+[#616]: https://github.com/stackabletech/hdfs-operator/pull/616
 
 ## [24.7.0] - 2024-07-24
diff --git a/Cargo.lock b/Cargo.lock
index 44c7c69d..789f13e8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2212,8 +2212,8 @@ dependencies = [
 
 [[package]]
 name = "stackable-operator"
-version = "0.80.0"
-source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#6fbe32300b60f95e0baa2ab0ff2daf961b06531c"
+version = "0.82.0"
+source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#415bbd031bd52e9c0c5392060235030e9930b46b"
 dependencies = [
  "chrono",
  "clap",
@@ -2251,7 +2251,7 @@ dependencies = [
 [[package]]
 name = "stackable-operator-derive"
 version = "0.3.1"
-source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#6fbe32300b60f95e0baa2ab0ff2daf961b06531c"
+source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#415bbd031bd52e9c0c5392060235030e9930b46b"
 dependencies = [
  "darling",
  "proc-macro2",
@@ -2262,7 +2262,7 @@ dependencies = [
 [[package]]
 name = "stackable-shared"
 version = "0.0.1"
-source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#6fbe32300b60f95e0baa2ab0ff2daf961b06531c"
+source = "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#415bbd031bd52e9c0c5392060235030e9930b46b"
 dependencies = [
  "kube",
  "semver",
diff --git a/Cargo.nix b/Cargo.nix
index 9e2eb5a9..c01b2ea7 100644
--- a/Cargo.nix
+++ b/Cargo.nix
@@ -6837,13 +6837,13 @@ rec {
   };
   "stackable-operator" = rec {
     crateName = "stackable-operator";
-    version = "0.80.0";
+    version = "0.82.0";
     edition = "2021";
     workspace_member = null;
     src = pkgs.fetchgit {
       url = "https://github.com/stackabletech/operator-rs.git";
-      rev = "6fbe32300b60f95e0baa2ab0ff2daf961b06531c";
-      sha256 = "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3";
+      rev = "415bbd031bd52e9c0c5392060235030e9930b46b";
+      sha256 = "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy";
     };
     libName = "stackable_operator";
     authors = [
@@ -7000,8 +7000,8 @@ rec {
     workspace_member = null;
     src = pkgs.fetchgit {
       url = "https://github.com/stackabletech/operator-rs.git";
-      rev = "6fbe32300b60f95e0baa2ab0ff2daf961b06531c";
-      sha256 = "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3";
+      rev = "415bbd031bd52e9c0c5392060235030e9930b46b";
+      sha256 = "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy";
     };
     procMacro = true;
     libName = "stackable_operator_derive";
     authors = [
@@ -7035,8 +7035,8 @@ rec {
     workspace_member = null;
     src = pkgs.fetchgit {
       url = "https://github.com/stackabletech/operator-rs.git";
-      rev = "6fbe32300b60f95e0baa2ab0ff2daf961b06531c";
-      sha256 = "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3";
+      rev = "415bbd031bd52e9c0c5392060235030e9930b46b";
+      sha256 = "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy";
     };
     libName = "stackable_shared";
     authors = [
diff --git a/Cargo.toml b/Cargo.toml
index e670b12d..f5e32b48 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -21,13 +21,13 @@ serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_yaml = "0.9"
 snafu = "0.8"
-stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.80.0" }
+stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.82.0" }
 product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
 strum = { version = "0.26", features = ["derive"] }
 tokio = { version = "1.40", features = ["full"] }
 tracing = "0.1"
 tracing-futures = { version = "0.2", features = ["futures-03"] }
 
-[patch."https://github.com/stackabletech/operator-rs.git"]
+#[patch."https://github.com/stackabletech/operator-rs.git"]
 #stackable-operator = { path = "../operator-rs/crates/stackable-operator" }
 #stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" }
diff --git a/crate-hashes.json b/crate-hashes.json
index 562fb18b..0ca37e6e 100644
--- a/crate-hashes.json
+++ b/crate-hashes.json
@@ -1,6 +1,6 @@
 {
-  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#stackable-operator-derive@0.3.1": "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3",
-  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#stackable-operator@0.80.0": "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3",
-  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.80.0#stackable-shared@0.0.1": "16jrq3wdwz63210jgmqbx3snrr15wxw6l1smqhzv7b7jpq8qvya3",
+  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#stackable-operator-derive@0.3.1": "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy",
+  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#stackable-operator@0.82.0": "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy",
+  "git+https://github.com/stackabletech/operator-rs.git?tag=stackable-operator-0.82.0#stackable-shared@0.0.1": "0phasjwb64rxgn5hs8vks92icmx9255bd5v9dms280clrfpcg4hy",
   "git+https://github.com/stackabletech/product-config.git?tag=0.7.0#product-config@0.7.0": "0gjsm80g6r75pm3824dcyiz4ysq1ka4c1if6k1mjm9cnd5ym0gny"
 }
\ No newline at end of file
diff --git a/rust/operator-binary/src/hdfs_clusterrolebinding_nodes_controller.rs b/rust/operator-binary/src/hdfs_clusterrolebinding_nodes_controller.rs
index 797d9f39..93e57ee1 100644
--- a/rust/operator-binary/src/hdfs_clusterrolebinding_nodes_controller.rs
+++ b/rust/operator-binary/src/hdfs_clusterrolebinding_nodes_controller.rs
@@ -3,8 +3,9 @@ use stackable_hdfs_crd::{
     constants::{APP_NAME, FIELD_MANAGER_SCOPE},
     HdfsCluster,
 };
+use stackable_operator::kube::ResourceExt;
 use stackable_operator::{
-    commons::rbac::service_account_name,
+    commons::rbac::build_rbac_resources,
     k8s_openapi::api::rbac::v1::{ClusterRoleBinding, Subject},
     kube::{
         api::{Patch, PatchParams},
@@ -15,6 +16,7 @@ use stackable_operator::{
         },
         Api, Client,
     },
+    kvp::Labels,
 };
 use tracing::{error, info};
 
@@ -41,18 +43,62 @@ pub async fn reconcile(
             )
         }
     }
+
     // Build a list of SubjectRef objects for all deployed HdfsClusters.
     // To do this we only need the metadata for that, as we only really
     // need name and namespace of the objects
     let subjects: Vec<Subject> = store
         .state()
         .into_iter()
-        .map(|object| object.metadata.clone())
-        .map(|meta| Subject {
-            kind: "ServiceAccount".to_string(),
-            name: service_account_name(APP_NAME),
-            namespace: meta.namespace.clone(),
-            ..Subject::default()
+        .filter_map(|object| {
+            // The call to 'build_rbac_resources' can fail, so we
+            // use filter_map here, log an error for any failures and keep
+            // going with all the non-broken elements.
+            // Usually we'd rather opt for failing completely here, but in this specific instance
+            // this could mean that one broken cluster somewhere could impact other working clusters
+            // within the namespace, so we opted for doing everything we can here, instead of failing
+            // completely.
+            match build_rbac_resources(&*object, APP_NAME, Labels::default()) {
+                Ok((service_account, _role_binding)) => {
+                    Some((object.metadata.clone(), service_account.name_any()))
+                }
+                Err(e) => {
+                    error!(
+                        ?object,
+                        error = &e as &dyn std::error::Error,
+                        "Failed to build serviceAccount name for hdfs cluster"
+                    );
+                    None
+                }
+            }
+        })
+        .flat_map(|(meta, sa_name)| {
+            let mut result = vec![
+                Subject {
+                    kind: "ServiceAccount".to_string(),
+                    name: sa_name,
+                    namespace: meta.namespace.clone(),
+                    ..Subject::default()
+                },
+                // This extra ServiceAccount is being written for legacy/compatibility purposes,
+                // to ensure that running clusters don't lose access to anything during an upgrade
+                // of the Stackable operators; this code can be removed in later releases.
+                // The value is hardcoded here, as we have removed access to the private fns that
+                // would have built it. Since this is a known target though, and will be removed soon,
+                // this should not be an issue.
+                Subject {
+                    kind: "ServiceAccount".to_string(),
+                    name: "hdfs-serviceaccount".to_string(),
+                    namespace: meta.namespace.clone(),
+                    ..Subject::default()
+                },
+            ];
+            // If a cluster is called hdfs this would result in the same subject
+            // being written twice.
+            // Since we know this vec only contains two elements we can use dedup for
+            // simply removing this duplicate.
+            result.dedup();
+            result
         })
         .collect();
 
diff --git a/rust/operator-binary/src/hdfs_controller.rs b/rust/operator-binary/src/hdfs_controller.rs
index bb72c1c6..fa0ed484 100644
--- a/rust/operator-binary/src/hdfs_controller.rs
+++ b/rust/operator-binary/src/hdfs_controller.rs
@@ -9,6 +9,7 @@ use product_config::{
     ProductConfigManager,
 };
 use snafu::{OptionExt, ResultExt, Snafu};
+use stackable_operator::k8s_openapi::api::core::v1::ServiceAccount;
 use stackable_operator::{
     builder::{
         configmap::ConfigMapBuilder,
@@ -17,10 +18,7 @@ use stackable_operator::{
     },
     client::Client,
     cluster_resources::{ClusterResourceApplyStrategy, ClusterResources},
-    commons::{
-        product_image_selection::ResolvedProductImage,
-        rbac::{build_rbac_resources, service_account_name},
-    },
+    commons::{product_image_selection::ResolvedProductImage, rbac::build_rbac_resources},
     iter::reverse_if,
     k8s_openapi::{
         api::{
@@ -326,7 +324,7 @@ pub async fn reconcile_hdfs(
         .context(BuildRbacResourcesSnafu)?;
 
     cluster_resources
-        .add(client, rbac_sa)
+        .add(client, rbac_sa.clone())
        .await
         .context(ApplyServiceAccountSnafu)?;
     cluster_resources
@@ -434,6 +432,7 @@ pub async fn reconcile_hdfs(
             env_overrides,
             &merged_config,
             &namenode_podrefs,
+            &rbac_sa,
         )?;
 
         let rg_service_name = rg_service.name_any();
@@ -818,6 +817,7 @@ fn rolegroup_statefulset(
     env_overrides: Option<&BTreeMap<String, String>>,
     merged_config: &AnyNodeConfig,
     namenode_podrefs: &[HdfsPodRef],
+    service_account: &ServiceAccount,
 ) -> HdfsOperatorResult<StatefulSet> {
     tracing::info!("Setting up StatefulSet for {:?}", rolegroup_ref);
 
@@ -837,7 +837,7 @@ fn rolegroup_statefulset(
     pb.metadata(pb_metadata)
         .image_pull_secrets_from_product_image(resolved_product_image)
         .affinity(&merged_config.affinity)
-        .service_account_name(service_account_name(APP_NAME))
+        .service_account_name(service_account.name_any())
         .security_context(
             PodSecurityContextBuilder::new()
                 .run_as_user(HDFS_UID)

From 750a156a96b21260360e27c713f5ec3fa22a2bca Mon Sep 17 00:00:00 2001
From: Siegfried Weber
Date: Tue, 26 Nov 2024 17:01:01 +0100
Subject: [PATCH 7/7] Update changelog

---
 CHANGELOG.md | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e144e0e1..1462a510 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,14 @@ All notable changes to this project will be documented in this file.
 
 ## [Unreleased]
 
+### Fixed
+
+- BREAKING: Use distinct ServiceAccounts for the Stacklets, so that multiple Stacklets can be
+  deployed in one namespace. Existing Stacklets will use the newly created ServiceAccounts after
+  restart ([#616]).
+
+[#616]: https://github.com/stackabletech/hdfs-operator/pull/616
+
 ## [24.11.0] - 2024-11-18
 
 ### Added
@@ -20,12 +28,10 @@ All notable changes to this project will be documented in this file.
 ### Fixed
 
 - An invalid `HdfsCluster` doesn't cause the operator to stop functioning ([#594]).
-- BREAKING: Use distinct ServiceAccounts for the Stacklets, so that multiple Stacklets can be deployed in one namespace. Existing Stacklets will use the newly created ServiceAccounts after restart ([#616]).
 
 [#574]: https://github.com/stackabletech/hdfs-operator/pull/574
 [#591]: https://github.com/stackabletech/hdfs-operator/pull/591
 [#594]: https://github.com/stackabletech/hdfs-operator/pull/594
-[#616]: https://github.com/stackabletech/hdfs-operator/pull/616
 
 ## [24.7.0] - 2024-07-24