Update release branch #617

Merged 9 commits on Nov 27, 2024.
8 changes: 8 additions & 0 deletions CHANGELOG.md
@@ -4,6 +4,14 @@ All notable changes to this project will be documented in this file.

## [Unreleased]

+### Fixed
+
+- BREAKING: Use distinct ServiceAccounts for the Stacklets, so that multiple Stacklets can be
+  deployed in one namespace. Existing Stacklets will use the newly created ServiceAccounts after
+  restart ([#616]).
+
+[#616]: https://github.com/stackabletech/hdfs-operator/pull/616
+
## [24.11.0] - 2024-11-18

### Added
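For context on the BREAKING entry above: the operator no longer derives one shared ServiceAccount name from `APP_NAME`, but builds the RBAC resources per cluster and reads the name off the returned object. A minimal sketch of that lookup, assuming the `build_rbac_resources` signature implied by the diffs below (the helper name and the `Option`-based error handling are illustrative):

```rust
use stackable_hdfs_crd::HdfsCluster;
use stackable_operator::{commons::rbac::build_rbac_resources, kube::ResourceExt, kvp::Labels};

// Sketch: derive the dedicated ServiceAccount name for one HdfsCluster.
// "hdfs" stands in for the operator's APP_NAME constant.
fn stacklet_service_account_name(hdfs: &HdfsCluster) -> Option<String> {
    let (service_account, _role_binding) =
        build_rbac_resources(hdfs, "hdfs", Labels::default()).ok()?;
    // The name is unique per Stacklet, so several HdfsClusters can now
    // coexist in one namespace without sharing a ServiceAccount.
    Some(service_account.name_any())
}
```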
8 changes: 4 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

14 changes: 7 additions & 7 deletions Cargo.nix

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -21,13 +21,13 @@ serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_yaml = "0.9"
snafu = "0.8"
-stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.80.0" }
+stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.82.0" }
product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.7.0" }
strum = { version = "0.26", features = ["derive"] }
tokio = { version = "1.40", features = ["full"] }
tracing = "0.1"
tracing-futures = { version = "0.2", features = ["futures-03"] }

-[patch."https://github.com/stackabletech/operator-rs.git"]
+#[patch."https://github.com/stackabletech/operator-rs.git"]
#stackable-operator = { path = "../operator-rs/crates/stackable-operator" }
#stackable-operator = { git = "https://github.com/stackabletech//operator-rs.git", branch = "main" }
3 changes: 3 additions & 0 deletions Makefile
@@ -29,6 +29,9 @@ SHELL=/usr/bin/env bash -euo pipefail
render-readme:
	scripts/render_readme.sh

+render-docs:
+	scripts/docs_templating.sh

## Docker related targets
docker-build:
	docker build --force-rm --build-arg VERSION=${VERSION} -t "${DOCKER_REPO}/${ORGANIZATION}/${OPERATOR_NAME}:${VERSION}-${ARCH}" -f docker/Dockerfile .
6 changes: 3 additions & 3 deletions crate-hashes.json

Some generated files are not rendered by default.

13 changes: 1 addition & 12 deletions docs/modules/hdfs/examples/getting_started/getting_started.sh
@@ -2,18 +2,7 @@
set -euo pipefail

# DO NOT EDIT THE SCRIPT
-# Instead, update the j2 template, and regenerate it for dev:
-# cat <<EOF | jinja2 --format yaml getting_started.sh.j2 -o getting_started.sh
-# helm:
-#   repo_name: stackable-dev
-#   repo_url: https://repo.stackable.tech/repository/helm-dev/
-# versions:
-#   commons: 0.0.0-dev
-#   hdfs: 0.0.0-dev
-#   listener: 0.0.0-dev
-#   secret: 0.0.0-dev
-#   zookeeper: 0.0.0-dev
-# EOF
+# Instead, update the j2 template, and regenerate it for dev with `make render-docs`.

# This script contains all the code snippets from the guide, as well as some assert tests
# to test if the instructions in the guide work. The user *could* use it, but it is intended
13 changes: 1 addition & 12 deletions docs/modules/hdfs/examples/getting_started/getting_started.sh.j2
@@ -2,18 +2,7 @@
set -euo pipefail

# DO NOT EDIT THE SCRIPT
-# Instead, update the j2 template, and regenerate it for dev:
-# cat <<EOF | jinja2 --format yaml getting_started.sh.j2 -o getting_started.sh
-# helm:
-#   repo_name: stackable-dev
-#   repo_url: https://repo.stackable.tech/repository/helm-dev/
-# versions:
-#   commons: 0.0.0-dev
-#   hdfs: 0.0.0-dev
-#   listener: 0.0.0-dev
-#   secret: 0.0.0-dev
-#   zookeeper: 0.0.0-dev
-# EOF
+# Instead, update the j2 template, and regenerate it for dev with `make render-docs`.

# This script contains all the code snippets from the guide, as well as some assert tests
# to test if the instructions in the guide work. The user *could* use it, but it is intended
4 changes: 2 additions & 2 deletions docs/modules/hdfs/examples/getting_started/hdfs.yaml.j2
@@ -11,13 +11,13 @@ spec:
  dfsReplication: 1
  nameNodes:
    config:
-      listenerClass: external-stable
+      listenerClass: external-stable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired.
    roleGroups:
      default:
        replicas: 2
  dataNodes:
    config:
-      listenerClass: external-unstable
+      listenerClass: external-unstable # This exposes your Stacklet outside of Kubernetes. Remove this configuration if this is not desired.
    roleGroups:
      default:
        replicas: 1
@@ -3,8 +3,9 @@ use stackable_hdfs_crd::{
    constants::{APP_NAME, FIELD_MANAGER_SCOPE},
    HdfsCluster,
};
+use stackable_operator::kube::ResourceExt;
use stackable_operator::{
-    commons::rbac::service_account_name,
+    commons::rbac::build_rbac_resources,
    k8s_openapi::api::rbac::v1::{ClusterRoleBinding, Subject},
    kube::{
        api::{Patch, PatchParams},
@@ -15,6 +16,7 @@ use stackable_operator::{
        },
        Api, Client,
    },
+    kvp::Labels,
};
use tracing::{error, info};

@@ -41,18 +43,62 @@ pub async fn reconcile(
            )
        }
    }

    // Build a list of Subject objects for all deployed HdfsClusters.
    // For this we only need the metadata, as name and namespace of the
    // objects are all that is required.
    let subjects: Vec<Subject> = store
        .state()
        .into_iter()
-        .map(|object| object.metadata.clone())
-        .map(|meta| Subject {
-            kind: "ServiceAccount".to_string(),
-            name: service_account_name(APP_NAME),
-            namespace: meta.namespace.clone(),
-            ..Subject::default()
+        .filter_map(|object| {
+            // The call to 'build_rbac_resources' can fail, so we use filter_map
+            // here, log an error for any failures and keep going with all the
+            // non-broken elements.
+            // Usually we would rather fail completely, but in this specific
+            // instance one broken cluster somewhere could otherwise impact other
+            // working clusters within the namespace, so we opted for doing
+            // everything we can instead of failing completely.
+            match build_rbac_resources(&*object, APP_NAME, Labels::default()) {
+                Ok((service_account, _role_binding)) => {
+                    Some((object.metadata.clone(), service_account.name_any()))
+                }
+                Err(e) => {
+                    error!(
+                        ?object,
+                        error = &e as &dyn std::error::Error,
+                        "Failed to build ServiceAccount name for HDFS cluster"
+                    );
+                    None
+                }
+            }
+        })
+        .flat_map(|(meta, sa_name)| {
+            let mut result = vec![
+                Subject {
+                    kind: "ServiceAccount".to_string(),
+                    name: sa_name,
+                    namespace: meta.namespace.clone(),
+                    ..Subject::default()
+                },
+                // This extra ServiceAccount is written for legacy/compatibility
+                // purposes, to ensure that running clusters don't lose access to
+                // anything during an upgrade of the Stackable operators. This
+                // code can be removed in later releases.
+                // The value is hardcoded here, as we have removed access to the
+                // private fns that would have built it. Since this is a known
+                // target, though, and will be removed soon, this should not be
+                // an issue.
+                Subject {
+                    kind: "ServiceAccount".to_string(),
+                    name: "hdfs-serviceaccount".to_string(),
+                    namespace: meta.namespace.clone(),
+                    ..Subject::default()
+                },
+            ];
+            // If a cluster is called hdfs, this would result in the same subject
+            // being written twice. Since we know this vec only contains two
+            // elements, we can simply use dedup to remove the duplicate.
+            result.dedup();
+            result
        })
        .collect();

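The `dedup` at the end of the subject list above only has to cover one edge case: a cluster literally named `hdfs`, whose per-cluster ServiceAccount name collides with the hardcoded legacy name. A self-contained sketch of that behavior, assuming plain `k8s-openapi` and illustrative names (in the operator the type is re-exported through `stackable_operator`):

```rust
use k8s_openapi::api::rbac::v1::Subject;

fn main() {
    let subject = |name: &str| Subject {
        kind: "ServiceAccount".to_string(),
        name: name.to_string(),
        namespace: Some("default".to_string()),
        ..Subject::default()
    };

    // For a cluster named "hdfs", the per-cluster name and the legacy
    // hardcoded name are identical, so the vec holds the same Subject twice.
    let mut subjects = vec![
        subject("hdfs-serviceaccount"), // per-cluster name
        subject("hdfs-serviceaccount"), // legacy compatibility name
    ];

    // With only two, necessarily adjacent, elements, Vec::dedup is enough to
    // drop the duplicate; no sorting or set type is needed.
    subjects.dedup();
    assert_eq!(subjects.len(), 1);
}
```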
12 changes: 6 additions & 6 deletions rust/operator-binary/src/hdfs_controller.rs
@@ -9,6 +9,7 @@ use product_config::{
    ProductConfigManager,
};
use snafu::{OptionExt, ResultExt, Snafu};
+use stackable_operator::k8s_openapi::api::core::v1::ServiceAccount;
use stackable_operator::{
    builder::{
        configmap::ConfigMapBuilder,
@@ -17,10 +18,7 @@ use stackable_operator::{
    },
    client::Client,
    cluster_resources::{ClusterResourceApplyStrategy, ClusterResources},
-    commons::{
-        product_image_selection::ResolvedProductImage,
-        rbac::{build_rbac_resources, service_account_name},
-    },
+    commons::{product_image_selection::ResolvedProductImage, rbac::build_rbac_resources},
    iter::reverse_if,
    k8s_openapi::{
        api::{
@@ -326,7 +324,7 @@ pub async fn reconcile_hdfs(
        .context(BuildRbacResourcesSnafu)?;

    cluster_resources
-        .add(client, rbac_sa)
+        .add(client, rbac_sa.clone())
        .await
        .context(ApplyServiceAccountSnafu)?;
    cluster_resources
@@ -434,6 +432,7 @@ pub async fn reconcile_hdfs(
            env_overrides,
            &merged_config,
            &namenode_podrefs,
+            &rbac_sa,
        )?;

        let rg_service_name = rg_service.name_any();
@@ -818,6 +817,7 @@ fn rolegroup_statefulset(
    env_overrides: Option<&BTreeMap<String, String>>,
    merged_config: &AnyNodeConfig,
    namenode_podrefs: &[HdfsPodRef],
+    service_account: &ServiceAccount,
) -> HdfsOperatorResult<StatefulSet> {
    tracing::info!("Setting up StatefulSet for {:?}", rolegroup_ref);

@@ -837,7 +837,7 @@
    pb.metadata(pb_metadata)
        .image_pull_secrets_from_product_image(resolved_product_image)
        .affinity(&merged_config.affinity)
-        .service_account_name(service_account_name(APP_NAME))
+        .service_account_name(service_account.name_any())
        .security_context(
            PodSecurityContextBuilder::new()
                .run_as_user(HDFS_UID)
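The last hunk swaps the hardcoded `service_account_name(APP_NAME)` for `service_account.name_any()` on the ServiceAccount that was actually applied, threaded into `rolegroup_statefulset` as `&rbac_sa`. A small sketch of the `name_any` fallback the builder now relies on, assuming plain `kube`/`k8s-openapi` and a hypothetical name:

```rust
use k8s_openapi::api::core::v1::ServiceAccount;
use kube::ResourceExt;

fn main() {
    // Hypothetical ServiceAccount, shaped like what build_rbac_resources
    // might return for a cluster named "my-hdfs".
    let mut sa = ServiceAccount::default();
    sa.metadata.name = Some("my-hdfs-serviceaccount".to_string());

    // name_any prefers metadata.name and falls back to generate_name, so the
    // StatefulSet builder can rely on the object it was handed instead of a
    // shared, operator-wide constant.
    assert_eq!(sa.name_any(), "my-hdfs-serviceaccount");
}
```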