Skip to content

Commit a663bfb

Browse files
authored
fix: env overrides work as expected (#499)
* fix: env overrides work as expected
* Change release.yaml for jenkins
* Revert "Change release.yaml for jenkins" — this reverts commit f1e9f97.
* unit test env overrides
* Update cargo.lock
1 parent 764ef91 commit a663bfb

File tree

5 files changed

+171
-27
lines changed

5 files changed

+171
-27
lines changed

CHANGELOG.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,6 +30,7 @@ All notable changes to this project will be documented in this file.
3030

3131
- Include hdfs principals `dfs.journalnode.kerberos.principal`, `dfs.namenode.kerberos.principal`
3232
and `dfs.datanode.kerberos.principal` in the discovery ConfigMap in case Kerberos is enabled ([#451]).
33+
- User provided env overrides now work as expected ([#499]).
3334

3435
[#429]: https://github.com/stackabletech/hdfs-operator/pull/429
3536
[#450]: https://github.com/stackabletech/hdfs-operator/pull/450
@@ -43,6 +44,7 @@ All notable changes to this project will be documented in this file.
4344
[#491]: https://github.com/stackabletech/hdfs-operator/pull/491
4445
[#492]: https://github.com/stackabletech/hdfs-operator/pull/492
4546
[#495]: https://github.com/stackabletech/hdfs-operator/pull/495
47+
[#499]: https://github.com/stackabletech/hdfs-operator/pull/499
4648

4749
## [23.11.0] - 2023-11-24
4850

Cargo.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

rust/operator-binary/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ tracing.workspace = true
2727

2828
[dev-dependencies]
2929
rstest.workspace = true
30+
serde_yaml.workspace = true
3031

3132
[build-dependencies]
3233
built.workspace = true

rust/operator-binary/src/container.rs

Lines changed: 55 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -741,12 +741,18 @@ wait_for_termination $!
741741
env_overrides: Option<&BTreeMap<String, String>>,
742742
resources: Option<&ResourceRequirements>,
743743
) -> Vec<EnvVar> {
744-
let mut env = Vec::new();
744+
// Maps env var name to env var object. This allows env_overrides to work
745+
// as expected (i.e. users can override the env var value).
746+
let mut env: BTreeMap<String, EnvVar> = BTreeMap::new();
745747

746-
env.extend(Self::shared_env_vars(
747-
self.volume_mount_dirs().final_config(),
748-
zookeeper_config_map_name,
749-
));
748+
env.extend(
749+
Self::shared_env_vars(
750+
self.volume_mount_dirs().final_config(),
751+
zookeeper_config_map_name,
752+
)
753+
.into_iter()
754+
.map(|env_var| (env_var.name.clone(), env_var)),
755+
);
750756

751757
// For the main container we use specialized env variables for every role
752758
// (think of like HDFS_NAMENODE_OPTS or HDFS_DATANODE_OPTS)
@@ -758,39 +764,61 @@ wait_for_termination $!
758764
// so we don't want to stuff all the config into HADOOP_OPTS, but rather into the specialized env variables
759765
// See https://github.com/stackabletech/hdfs-operator/issues/138 for details
760766
if let ContainerConfig::Hdfs { role, .. } = self {
761-
env.push(EnvVar {
762-
name: role.hadoop_opts_env_var_for_role().to_string(),
763-
value: self.build_hadoop_opts(hdfs, resources).ok(),
764-
..EnvVar::default()
765-
});
767+
let role_opts_name = role.hadoop_opts_env_var_for_role().to_string();
768+
env.insert(
769+
role_opts_name.clone(),
770+
EnvVar {
771+
name: role_opts_name,
772+
value: self.build_hadoop_opts(hdfs, resources).ok(),
773+
..EnvVar::default()
774+
},
775+
);
766776
}
767777
// Additionally, any other init or sidecar container must have access to the following settings.
768778
// As the Prometheus metric emitter is not part of this config it's safe to use for hdfs cli tools as well.
769779
// This will not only enable the init containers to work, but also the user to run e.g.
770780
// `bin/hdfs dfs -ls /` without getting `Caused by: java.lang.IllegalArgumentException: KrbException: Cannot locate default realm`
771781
// because the `-Djava.security.krb5.conf` setting is missing
772782
if hdfs.has_kerberos_enabled() {
773-
env.push(EnvVar {
774-
name: "HADOOP_OPTS".to_string(),
775-
value: Some("-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string()),
776-
..EnvVar::default()
777-
});
783+
env.insert(
784+
"HADOOP_OPTS".to_string(),
785+
EnvVar {
786+
name: "HADOOP_OPTS".to_string(),
787+
value: Some(
788+
"-Djava.security.krb5.conf=/stackable/kerberos/krb5.conf".to_string(),
789+
),
790+
..EnvVar::default()
791+
},
792+
);
778793

779-
env.push(EnvVar {
780-
name: "KRB5_CONFIG".to_string(),
781-
value: Some("/stackable/kerberos/krb5.conf".to_string()),
782-
..EnvVar::default()
783-
});
784-
env.push(EnvVar {
785-
name: "KRB5_CLIENT_KTNAME".to_string(),
786-
value: Some("/stackable/kerberos/keytab".to_string()),
787-
..EnvVar::default()
788-
});
794+
env.insert(
795+
"KRB5_CONFIG".to_string(),
796+
EnvVar {
797+
name: "KRB5_CONFIG".to_string(),
798+
value: Some("/stackable/kerberos/krb5.conf".to_string()),
799+
..EnvVar::default()
800+
},
801+
);
802+
env.insert(
803+
"KRB5_CLIENT_KTNAME".to_string(),
804+
EnvVar {
805+
name: "KRB5_CLIENT_KTNAME".to_string(),
806+
value: Some("/stackable/kerberos/keytab".to_string()),
807+
..EnvVar::default()
808+
},
809+
);
789810
}
790811

791812
// Overrides need to come last
792-
env.extend(Self::transform_env_overrides_to_env_vars(env_overrides));
793-
env
813+
let mut env_override_vars: BTreeMap<String, EnvVar> =
814+
Self::transform_env_overrides_to_env_vars(env_overrides)
815+
.into_iter()
816+
.map(|env_var| (env_var.name.clone(), env_var))
817+
.collect();
818+
819+
env.append(&mut env_override_vars);
820+
821+
env.into_values().collect()
794822
}
795823

796824
/// Returns the container resources.

rust/operator-binary/src/hdfs_controller.rs

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -846,3 +846,115 @@ fn rolegroup_statefulset(
846846
pub fn error_policy(_obj: Arc<HdfsCluster>, _error: &Error, _ctx: Arc<Ctx>) -> Action {
847847
Action::requeue(*Duration::from_secs(5))
848848
}
849+
850+
#[cfg(test)]
851+
mod test {
852+
use super::*;
853+
854+
#[test]
855+
pub fn test_env_overrides() {
856+
let cr = "
857+
---
858+
apiVersion: hdfs.stackable.tech/v1alpha1
859+
kind: HdfsCluster
860+
metadata:
861+
name: hdfs
862+
spec:
863+
image:
864+
productVersion: 3.3.6
865+
clusterConfig:
866+
zookeeperConfigMapName: hdfs-zk
867+
nameNodes:
868+
roleGroups:
869+
default:
870+
replicas: 1
871+
journalNodes:
872+
roleGroups:
873+
default:
874+
replicas: 1
875+
dataNodes:
876+
roleGroups:
877+
default:
878+
envOverrides:
879+
MY_ENV: my-value
880+
HADOOP_HOME: /not/the/default/path
881+
replicas: 1
882+
";
883+
let product_config = "
884+
---
885+
version: 0.1.0
886+
spec:
887+
units: []
888+
properties: []
889+
";
890+
891+
let hdfs: HdfsCluster = serde_yaml::from_str(cr).unwrap();
892+
893+
let config =
894+
transform_all_roles_to_config(&hdfs, hdfs.build_role_properties().unwrap()).unwrap();
895+
896+
let validated_config = validate_all_roles_and_groups_config(
897+
"3.3.6",
898+
&config,
899+
&ProductConfigManager::from_str(product_config).unwrap(),
900+
false,
901+
false,
902+
)
903+
.unwrap();
904+
905+
let role = HdfsRole::DataNode;
906+
let rolegroup_config = validated_config
907+
.get(&role.to_string())
908+
.unwrap()
909+
.get("default")
910+
.unwrap();
911+
let env_overrides = rolegroup_config.get(&PropertyNameKind::Env);
912+
913+
let merged_config = role.merged_config(&hdfs, "default").unwrap();
914+
let resolved_product_image = hdfs.spec.image.resolve(DOCKER_IMAGE_BASE_NAME, "0.0.0-dev");
915+
916+
let mut pb = PodBuilder::new();
917+
pb.metadata(ObjectMeta::default());
918+
ContainerConfig::add_containers_and_volumes(
919+
&mut pb,
920+
&hdfs,
921+
&role,
922+
&resolved_product_image,
923+
&merged_config,
924+
env_overrides,
925+
&hdfs.spec.cluster_config.zookeeper_config_map_name,
926+
"todo",
927+
&[],
928+
)
929+
.unwrap();
930+
let containers = pb.build().unwrap().spec.unwrap().containers;
931+
let main_container = containers
932+
.iter()
933+
.find(|c| c.name == role.to_string())
934+
.unwrap();
935+
936+
assert_eq!(
937+
main_container
938+
.env
939+
.clone()
940+
.unwrap()
941+
.into_iter()
942+
.find(|e| e.name == "MY_ENV")
943+
.unwrap()
944+
.value,
945+
Some("my-value".to_string())
946+
);
947+
948+
assert_eq!(
949+
main_container
950+
.env
951+
.clone()
952+
.unwrap()
953+
.into_iter()
954+
.find(|e| e.name == "HADOOP_HOME")
955+
.unwrap()
956+
.value,
957+
Some("/not/the/default/path".to_string())
958+
);
959+
}
960+
}

0 commit comments

Comments
 (0)