From 0a697ab19cb8aec104714d2c64269a687393f1a8 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 1 Jul 2025 18:52:31 +0000 Subject: [PATCH 1/2] [spr] changes to main this commit is based on Created using spr 1.3.6-beta.1 [skip ci] --- Cargo.lock | 3 + dev-tools/reconfigurator-cli/src/lib.rs | 410 ++++++++++++--- .../tests/input/cmds-example.txt | 16 + .../tests/input/cmds-noop-image-source.txt | 50 ++ .../cmds-set-remove-mupdate-override.txt | 39 +- .../reconfigurator-cli/tests/input/cmds.txt | 2 +- .../output/cmds-add-sled-no-disks-stdout | 3 +- .../tests/output/cmds-example-stdout | 451 +++++++++++++++- ...ds-expunge-newly-added-external-dns-stdout | 1 + ...ds-expunge-newly-added-internal-dns-stdout | 1 + .../output/cmds-noop-image-source-stderr | 0 .../output/cmds-noop-image-source-stdout | 480 ++++++++++++++++++ .../cmds-set-remove-mupdate-override-stdout | 66 ++- .../tests/output/cmds-stdout | 22 +- .../tests/output/cmds-target-release-stdout | 18 + .../planning/src/blueprint_builder/builder.rs | 25 + .../src/blueprint_editor/sled_editor.rs | 14 +- .../blueprint_editor/sled_editor/scalar.rs | 1 - nexus/reconfigurator/planning/src/example.rs | 131 +++-- nexus/reconfigurator/planning/src/planner.rs | 177 +++++++ nexus/reconfigurator/planning/src/system.rs | 76 ++- nexus/reconfigurator/simulation/Cargo.toml | 3 + nexus/reconfigurator/simulation/src/errors.rs | 35 ++ nexus/reconfigurator/simulation/src/lib.rs | 2 + .../simulation/src/zone_images.rs | 168 ++++++ update-common/manifests/fake-non-semver.toml | 13 +- wicketd/tests/integration_tests/updates.rs | 48 +- 27 files changed, 2058 insertions(+), 197 deletions(-) create mode 100644 dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stderr create mode 100644 dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout create mode 100644 nexus/reconfigurator/simulation/src/zone_images.rs diff --git a/Cargo.lock b/Cargo.lock index f742c5c3da6..386ddd1e3d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6727,10 +6727,12 @@ name = "nexus-reconfigurator-simulation" version = "0.1.0" dependencies = [ "anyhow", + "camino", "chrono", "indexmap 2.9.0", "nexus-inventory", "nexus-reconfigurator-planning", + "nexus-sled-agent-shared", "nexus-types", "omicron-common", "omicron-uuid-kinds", @@ -6740,6 +6742,7 @@ dependencies = [ "swrite", "sync-ptr", "thiserror 2.0.12", + "tufaceous-artifact", "typed-rng", "uuid", ] diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index dfa78a80a7b..c9b82a9b89e 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -5,7 +5,7 @@ //! 
developer REPL for driving blueprint planning use anyhow::{Context, anyhow, bail}; -use camino::Utf8PathBuf; +use camino::{Utf8Path, Utf8PathBuf}; use clap::ValueEnum; use clap::{Args, Parser, Subcommand}; use iddqd::IdOrdMap; @@ -20,9 +20,9 @@ use nexus_reconfigurator_planning::blueprint_builder::BlueprintBuilder; use nexus_reconfigurator_planning::example::ExampleSystemBuilder; use nexus_reconfigurator_planning::planner::Planner; use nexus_reconfigurator_planning::system::{SledBuilder, SystemDescription}; -use nexus_reconfigurator_simulation::SimStateBuilder; -use nexus_reconfigurator_simulation::Simulator; use nexus_reconfigurator_simulation::{BlueprintId, SimState}; +use nexus_reconfigurator_simulation::{SimStateBuilder, SimTufRepoSource}; +use nexus_reconfigurator_simulation::{SimTufRepoDescription, Simulator}; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; use nexus_types::deployment::execution; @@ -38,19 +38,21 @@ use nexus_types::deployment::{OmicronZoneNic, TargetReleaseDescription}; use nexus_types::external_api::views::SledPolicy; use nexus_types::external_api::views::SledProvisionPolicy; use omicron_common::address::REPO_DEPOT_PORT; -use omicron_common::api::external::Generation; use omicron_common::api::external::Name; +use omicron_common::api::external::{Generation, TufRepoDescription}; use omicron_common::policy::NEXUS_REDUNDANCY; +use omicron_common::update::OmicronZoneManifestSource; use omicron_repl_utils::run_repl_from_file; use omicron_repl_utils::run_repl_on_stdin; -use omicron_uuid_kinds::CollectionUuid; use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::OmicronZoneUuid; use omicron_uuid_kinds::ReconfiguratorSimUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::VnicUuid; use omicron_uuid_kinds::{BlueprintUuid, MupdateOverrideUuid}; +use omicron_uuid_kinds::{CollectionUuid, MupdateUuid}; use std::borrow::Cow; +use std::convert::Infallible; use std::fmt::{self, Write}; use std::io::IsTerminal; use std::num::ParseIntError; @@ -218,6 +220,9 @@ fn process_command( Commands::SledRemove(args) => cmd_sled_remove(sim, args), Commands::SledShow(args) => cmd_sled_show(sim, args), Commands::SledSetPolicy(args) => cmd_sled_set_policy(sim, args), + Commands::SledUpdateInstallDataset(args) => { + cmd_sled_update_install_dataset(sim, args) + } Commands::SledUpdateSp(args) => cmd_sled_update_sp(sim, args), Commands::SiloList => cmd_silo_list(sim), Commands::SiloAdd(args) => cmd_silo_add(sim, args), @@ -273,6 +278,8 @@ enum Commands { SledShow(SledArgs), /// set a sled's policy SledSetPolicy(SledSetPolicyArgs), + /// update the install dataset on a sled, simulating a mupdate + SledUpdateInstallDataset(SledUpdateInstallDatasetArgs), /// simulate updating the sled's SP versions SledUpdateSp(SledUpdateSpArgs), @@ -336,12 +343,16 @@ struct SledAddArgs { /// number of disks or pools #[clap(short = 'd', long, visible_alias = "npools", default_value_t = SledBuilder::DEFAULT_NPOOLS)] ndisks: u8, + + /// The policy for the sled. 
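+    ///
+    /// Defaults to in-service. The other `SledPolicyOpt` variants (for
+    /// example `non-provisionable`, as exercised by the cmds.txt test input
+    /// below) can be used to add a sled that starts out under a different
+    /// policy.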
+ #[clap(long, value_enum, default_value_t = SledPolicyOpt::InService)] + policy: SledPolicyOpt, } #[derive(Debug, Args)] struct SledArgs { /// id of the sled - sled_id: SledUuid, + sled_id: SledOpt, /// Filter to match sled ID against #[clap(short = 'F', long, value_enum, default_value_t = SledFilter::Commissioned)] @@ -351,7 +362,7 @@ struct SledArgs { #[derive(Debug, Args)] struct SledSetPolicyArgs { /// id of the sled - sled_id: SledUuid, + sled_id: SledOpt, /// The policy to set for the sled #[clap(value_enum)] @@ -389,10 +400,50 @@ impl From for SledPolicy { } } +#[derive(Debug, Args)] +struct SledUpdateInstallDatasetArgs { + /// id of the sled + sled_id: SledOpt, + + #[clap(flatten)] + source: SledMupdateSource, +} + +#[derive(Debug, Args)] +// This makes it so that only one source can be specified. +struct SledMupdateSource { + #[clap(flatten)] + valid: SledMupdateValidSource, + + /// set the mupdate source to Installinator with the given ID + #[clap(long, requires = "sled-mupdate-valid-source")] + mupdate_id: Option, + + /// simulate an error reading the zone manifest + #[clap(long, conflicts_with = "sled-mupdate-valid-source")] + with_manifest_error: bool, + + /// simulate an error validating zones by this artifact name + #[clap(long, requires = "sled-mupdate-valid-source")] + with_zone_error: Vec, +} + +#[derive(Debug, Args)] +#[group(id = "sled-mupdate-valid-source", multiple = false)] +struct SledMupdateValidSource { + /// the TUF repo.zip to simulate the mupdate from + #[clap(long)] + from_repo: Option, + + /// simulate a mupdate to the target release + #[clap(long)] + to_target_release: bool, +} + #[derive(Debug, Args)] struct SledUpdateSpArgs { /// id of the sled - sled_id: SledUuid, + sled_id: SledOpt, /// sets the version reported for the SP active slot #[clap(long, required_unless_present_any = &["inactive"])] @@ -406,7 +457,7 @@ struct SledUpdateSpArgs { #[derive(Debug, Args)] struct SledRemoveArgs { /// id of the sled - sled_id: SledUuid, + sled_id: SledOpt, } #[derive(Debug, Args)] @@ -452,10 +503,10 @@ enum BlueprintEditCommands { /// add a Nexus instance to a particular sled AddNexus { /// sled on which to deploy the new instance - sled_id: SledUuid, + sled_id: SledOpt, }, /// add a CockroachDB instance to a particular sled - AddCockroach { sled_id: SledUuid }, + AddCockroach { sled_id: SledOpt }, /// set the image source for a zone SetZoneImage { /// id of zone whose image to set @@ -466,7 +517,7 @@ enum BlueprintEditCommands { /// set the remove_mupdate_override field for a sled SetRemoveMupdateOverride { /// sled to set the field on - sled_id: SledUuid, + sled_id: SledOpt, /// the UUID to set the field to, or "unset" value: MupdateOverrideUuidOpt, @@ -521,17 +572,66 @@ enum BlueprintEditDebugCommands { /// the sled from the blueprint. RemoveSled { /// the sled to remove - sled: SledUuid, + sled: SledOpt, }, /// Bump a sled's generation number, even if nothing else about the sled has /// changed. ForceSledGenerationBump { /// the sled to bump the sled-agent generation number of - sled: SledUuid, + sled: SledOpt, }, } +/// Identifies a sled in a system. +#[derive(Clone, Debug)] +enum SledOpt { + /// Identifies a sled by its UUID. + Uuid(SledUuid), + /// Identifies a sled by its serial number. + Serial(String), +} + +impl SledOpt { + /// Resolves this sled option into a sled UUID. 
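+    ///
+    /// `Uuid` values are returned as-is; `Serial` values are resolved via
+    /// the system description, so a serial number that doesn't belong to
+    /// any known sled is an error.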
+ fn to_sled_id( + &self, + description: &SystemDescription, + ) -> anyhow::Result { + match self { + SledOpt::Uuid(uuid) => Ok(*uuid), + SledOpt::Serial(serial) => description.serial_to_sled_id(&serial), + } + } +} + +impl FromStr for SledOpt { + type Err = Infallible; + + fn from_str(s: &str) -> Result { + // If the sled looks like a UUID, parse it as that. + if let Ok(uuid) = s.parse::() { + return Ok(SledOpt::Uuid(uuid)); + } + + // We treat anything that doesn't parse as a UUID as a serial number. + // + // Can we do something more intelligent here, like looking for a + // particular prefix? In principle, yes, but in reality there are + // several different sources of serial numbers: + // + // * simulated sleds ("serial0", "serial1", ...) + // * real sleds ("BRM42220014") + // * a4x2 ("g0", "g1", ...) + // * single-sled dev deployments + // + // and possibly more. We could exhaustively enumerate all of them, but + // it's easier to assume that if it doesn't look like a UUID, it's a + // serial number. + Ok(Self::Serial(s.to_owned())) + } +} + #[derive(Clone, Debug)] enum BlueprintIdOpt { /// use the target blueprint @@ -753,6 +853,10 @@ struct TufAssembleArgs { /// The tufaceous manifest path (relative to this crate's root) manifest_path: Utf8PathBuf, + /// Allow non-semver artifact versions. + #[clap(long)] + allow_non_semver: bool, + #[clap( long, // Use help here rather than a doc comment because rustdoc doesn't like @@ -797,6 +901,44 @@ struct LoadExampleArgs { /// Do not create entries for disks in the blueprint. #[clap(long)] no_disks_in_blueprint: bool, + + /// Set a 0-indexed sled's policy + #[clap(long, value_name = "INDEX:POLICY")] + sled_policy: Vec, +} + +#[derive(Clone, Debug)] +struct LoadExampleSledPolicy { + /// The index of the sled to set the policy for. + index: usize, + + /// The policy to set. + policy: SledPolicy, +} + +impl FromStr for LoadExampleSledPolicy { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let Some((index, policy)) = s.split_once(':') else { + return Err(anyhow!("invalid format, expected :")); + }; + let index = index.parse().with_context(|| { + format!("error parsing sled index `{index}` as a usize") + })?; + let policy = SledPolicyOpt::from_str( + policy, /* ignore_case */ false, + ) + .map_err(|_message| { + // _message is just something like "invalid variant: ". + // We choose to use our own message instead. + anyhow!( + "invalid sled policy `{policy}` (possible values: {})", + SledPolicyOpt::value_variants().iter().join(", "), + ) + })?; + Ok(LoadExampleSledPolicy { index, policy: policy.into() }) + } } #[derive(Debug, Args)] @@ -904,14 +1046,27 @@ fn cmd_sled_add( ) -> anyhow::Result> { let mut state = sim.current_state().to_mut(); let sled_id = add.sled_id.unwrap_or_else(|| state.rng_mut().next_sled_id()); - let new_sled = SledBuilder::new().id(sled_id).npools(add.ndisks); - state.system_mut().description_mut().sled(new_sled)?; + let new_sled = SledBuilder::new() + .id(sled_id) + .npools(add.ndisks) + .policy(add.policy.into()); + let system = state.system_mut(); + system.description_mut().sled(new_sled)?; + // Figure out what serial number this sled was assigned. 
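+    // (SP state is fabricated by the simulator, so a newly added sled
+    // normally gets a serial of the form "serial0", "serial1", ...; a sled
+    // added without SP state reports "(none)" instead.)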
+ let added_sled = system + .description() + .get_sled(sled_id) + .expect("we just added this sled"); + let serial = match added_sled.sp_state() { + Some((_, sp_state)) => sp_state.serial_number.clone(), + None => "(none)".to_owned(), + }; sim.commit_and_bump( - format!("reconfigurator-cli sled-add: {sled_id}"), + format!("reconfigurator-cli sled-add: {sled_id} (serial: {serial})"), state, ); - Ok(Some(format!("added sled {}", sled_id))) + Ok(Some(format!("added sled {} (serial: {})", sled_id, serial))) } fn cmd_sled_remove( @@ -919,9 +1074,9 @@ fn cmd_sled_remove( args: SledRemoveArgs, ) -> anyhow::Result> { let mut state = sim.current_state().to_mut(); - let sled_id = args.sled_id; - state - .system_mut() + let system = state.system_mut(); + let sled_id = args.sled_id.to_sled_id(system.description())?; + system .description_mut() .sled_remove(sled_id) .context("failed to remove sled")?; @@ -938,7 +1093,7 @@ fn cmd_sled_show( ) -> anyhow::Result> { let state = sim.current_state(); let description = state.system().description(); - let sled_id = args.sled_id; + let sled_id = args.sled_id.to_sled_id(description)?; let sp_active_version = description.sled_sp_active_version(sled_id)?; let sp_inactive_version = description.sled_sp_inactive_version(sled_id)?; let planning_input = description @@ -948,7 +1103,7 @@ fn cmd_sled_show( let sled = planning_input.sled_lookup(args.filter, sled_id)?; let sled_resources = &sled.resources; let mut s = String::new(); - swriteln!(s, "sled {}", sled_id); + swriteln!(s, "sled {} ({}, {})", sled_id, sled.policy, sled.state); swriteln!(s, "serial {}", sled.baseboard_id.serial_number); swriteln!(s, "subnet {}", sled_resources.subnet.net()); swriteln!(s, "SP active version: {:?}", sp_active_version); @@ -966,18 +1121,43 @@ fn cmd_sled_set_policy( args: SledSetPolicyArgs, ) -> anyhow::Result> { let mut state = sim.current_state().to_mut(); - state - .system_mut() - .description_mut() - .sled_set_policy(args.sled_id, args.policy.into())?; + let system = state.system_mut(); + let sled_id = args.sled_id.to_sled_id(system.description())?; + system.description_mut().sled_set_policy(sled_id, args.policy.into())?; sim.commit_and_bump( format!( "reconfigurator-cli sled-set-policy: {} to {}", - args.sled_id, args.policy, + sled_id, args.policy, + ), + state, + ); + Ok(Some(format!("set sled {} policy to {}", sled_id, args.policy))) +} + +fn cmd_sled_update_install_dataset( + sim: &mut ReconfiguratorSim, + args: SledUpdateInstallDatasetArgs, +) -> anyhow::Result> { + let description = mupdate_source_to_description(sim, &args.source)?; + + let mut state = sim.current_state().to_mut(); + let system = state.system_mut(); + let sled_id = args.sled_id.to_sled_id(system.description())?; + system + .description_mut() + .sled_set_zone_manifest(sled_id, description.to_boot_inventory())?; + + sim.commit_and_bump( + format!( + "reconfigurator-cli sled-update-install-dataset: {}", + description.message, ), state, ); - Ok(Some(format!("set sled {} policy to {}", args.sled_id, args.policy))) + Ok(Some(format!( + "sled {}: install dataset updated: {}", + sled_id, description.message + ))) } fn cmd_sled_update_sp( @@ -998,8 +1178,10 @@ fn cmd_sled_update_sp( ); let mut state = sim.current_state().to_mut(); - state.system_mut().description_mut().sled_update_sp_versions( - args.sled_id, + let system = state.system_mut(); + let sled_id = args.sled_id.to_sled_id(system.description())?; + system.description_mut().sled_update_sp_versions( + sled_id, args.active, args.inactive, )?; @@ -1007,17 
+1189,13 @@ fn cmd_sled_update_sp( sim.commit_and_bump( format!( "reconfigurator-cli sled-update-sp: {}: {}", - args.sled_id, + sled_id, labels.join(", "), ), state, ); - Ok(Some(format!( - "set sled {} SP versions: {}", - args.sled_id, - labels.join(", ") - ))) + Ok(Some(format!("set sled {} SP versions: {}", sled_id, labels.join(", ")))) } fn cmd_inventory_list( @@ -1226,18 +1404,21 @@ fn cmd_blueprint_edit( let label = match args.edit_command { BlueprintEditCommands::AddNexus { sled_id } => { + let sled_id = sled_id.to_sled_id(system.description())?; builder .sled_add_zone_nexus(sled_id) .context("failed to add Nexus zone")?; format!("added Nexus zone to sled {}", sled_id) } BlueprintEditCommands::AddCockroach { sled_id } => { + let sled_id = sled_id.to_sled_id(system.description())?; builder .sled_add_zone_cockroachdb(sled_id) .context("failed to add CockroachDB zone")?; format!("added CockroachDB zone to sled {}", sled_id) } BlueprintEditCommands::SetRemoveMupdateOverride { sled_id, value } => { + let sled_id = sled_id.to_sled_id(system.description())?; builder .sled_set_remove_mupdate_override(sled_id, value.into()) .context("failed to set remove_mupdate_override")?; @@ -1344,15 +1525,17 @@ fn cmd_blueprint_edit( BlueprintEditCommands::Debug { command: BlueprintEditDebugCommands::RemoveSled { sled }, } => { - builder.debug_sled_remove(sled)?; - format!("debug: removed sled {sled} from blueprint") + let sled_id = sled.to_sled_id(system.description())?; + builder.debug_sled_remove(sled_id)?; + format!("debug: removed sled {sled_id} from blueprint") } BlueprintEditCommands::Debug { command: BlueprintEditDebugCommands::ForceSledGenerationBump { sled }, } => { - builder.debug_sled_force_generation_bump(sled)?; - format!("debug: forced sled {sled} generation bump") + let sled_id = sled.to_sled_id(system.description())?; + builder.debug_sled_force_generation_bump(sled_id)?; + format!("debug: forced sled {sled_id} generation bump") } }; @@ -1766,26 +1949,8 @@ fn cmd_set( rv } SetArgs::TargetRelease { filename } => { - let file = std::fs::File::open(&filename) - .with_context(|| format!("open {:?}", filename))?; - let buf = std::io::BufReader::new(file); - let rt = tokio::runtime::Runtime::new() - .context("creating tokio runtime")?; - // We're not using the repo hash here. Make one up. - let repo_hash = ArtifactHash([0; 32]); - let artifacts_with_plan = rt.block_on(async { - ArtifactsWithPlan::from_zip( - buf, - None, - repo_hash, - ControlPlaneZonesMode::Split, - &sim.log, - ) - .await - .with_context(|| format!("unpacking {:?}", filename)) - })?; - let description = artifacts_with_plan.description().clone(); - drop(artifacts_with_plan); + let description = + extract_tuf_repo_description(&sim.log, &filename)?; state.system_mut().description_mut().set_target_release( TargetReleaseDescription::TufRepo(description), ); @@ -1797,6 +1962,84 @@ fn cmd_set( Ok(Some(rv)) } +/// Converts a mupdate source to a TUF repo description. 
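+///
+/// Exactly one valid source may be specified: `--from-repo <path>` extracts
+/// the description from a TUF repo zip on disk, `--to-target-release` reuses
+/// the simulator's current target release (an error if no target release has
+/// been set), and `--with-manifest-error` simulates a failure to read the
+/// zone manifest. `--mupdate-id` attributes the manifest to Installinator
+/// rather than sled-agent, and `--with-zone-error` injects per-artifact
+/// validation errors on top of an otherwise valid source.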
+fn mupdate_source_to_description( + sim: &ReconfiguratorSim, + source: &SledMupdateSource, +) -> anyhow::Result { + let manifest_source = match source.mupdate_id { + Some(mupdate_id) => { + OmicronZoneManifestSource::Installinator { mupdate_id } + } + None => OmicronZoneManifestSource::SledAgent, + }; + if let Some(repo_path) = &source.valid.from_repo { + let description = extract_tuf_repo_description(&sim.log, repo_path)?; + let mut sim_source = SimTufRepoSource::new( + description, + manifest_source, + format!("from repo at {repo_path}"), + ); + sim_source.simulate_zone_errors(&source.with_zone_error)?; + Ok(SimTufRepoDescription::new(sim_source)) + } else if source.valid.to_target_release { + let description = sim + .current_state() + .system() + .description() + .target_release() + .description(); + match description { + TargetReleaseDescription::Initial => { + bail!( + "cannot mupdate zones without a target release \ + (use `set target-release` or --from-repo)" + ) + } + TargetReleaseDescription::TufRepo(desc) => { + let mut sim_source = SimTufRepoSource::new( + desc.clone(), + manifest_source, + "to target release".to_owned(), + ); + sim_source.simulate_zone_errors(&source.with_zone_error)?; + Ok(SimTufRepoDescription::new(sim_source)) + } + } + } else if source.with_manifest_error { + Ok(SimTufRepoDescription::new_error( + "simulated error obtaining zone manifest".to_owned(), + )) + } else { + bail!("an update source must be specified") + } +} + +fn extract_tuf_repo_description( + log: &slog::Logger, + filename: &Utf8Path, +) -> anyhow::Result { + let file = std::fs::File::open(filename) + .with_context(|| format!("open {:?}", filename))?; + let buf = std::io::BufReader::new(file); + let rt = + tokio::runtime::Runtime::new().context("creating tokio runtime")?; + let repo_hash = ArtifactHash([0; 32]); + let artifacts_with_plan = rt.block_on(async { + ArtifactsWithPlan::from_zip( + buf, + None, + repo_hash, + ControlPlaneZonesMode::Split, + log, + ) + .await + .with_context(|| format!("unpacking {:?}", filename)) + })?; + let description = artifacts_with_plan.description().clone(); + Ok(description) +} + fn cmd_tuf_assemble( sim: &ReconfiguratorSim, args: TufAssembleArgs, @@ -1830,15 +2073,19 @@ fn cmd_tuf_assemble( // Just use a fixed key for now. // // In the future we may want to test changing the TUF key. - let args = tufaceous::Args::try_parse_from([ + let mut tufaceous_args = vec![ "tufaceous", "--key", DEFAULT_TUFACEOUS_KEY, "assemble", manifest_path.as_str(), output_path.as_str(), - ]) - .expect("args are valid so this shouldn't fail"); + ]; + if args.allow_non_semver { + tufaceous_args.push("--allow-non-semver"); + } + let args = tufaceous::Args::try_parse_from(tufaceous_args) + .expect("args are valid so this shouldn't fail"); let rt = tokio::runtime::Runtime::new().context("creating tokio runtime")?; rt.block_on(async move { args.exec(&sim.log).await }) @@ -1944,21 +2191,26 @@ fn cmd_load_example( }; let rng = state.rng_mut().next_example_rng(); - let (example, blueprint) = - ExampleSystemBuilder::new_with_rng(&sim.log, rng) - .nsleds(args.nsleds) - .ndisks_per_sled(args.ndisks_per_sled) - .nexus_count( - state - .config_mut() - .num_nexus() - .map_or(NEXUS_REDUNDANCY, |n| n.into()), - ) - .external_dns_count(3) - .context("invalid external DNS zone count")? 
- .create_zones(!args.no_zones) - .create_disks_in_blueprint(!args.no_disks_in_blueprint) - .build(); + let mut builder = ExampleSystemBuilder::new_with_rng(&sim.log, rng) + .nsleds(args.nsleds) + .ndisks_per_sled(args.ndisks_per_sled) + .nexus_count( + state + .config_mut() + .num_nexus() + .map_or(NEXUS_REDUNDANCY, |n| n.into()), + ) + .external_dns_count(3) + .context("invalid external DNS zone count")? + .create_zones(!args.no_zones) + .create_disks_in_blueprint(!args.no_disks_in_blueprint); + for sled_policy in args.sled_policy { + builder = builder + .with_sled_policy(sled_policy.index, sled_policy.policy) + .context("setting sled policy")?; + } + + let (example, blueprint) = builder.build(); // Generate the internal and external DNS configs based on the blueprint. let sleds_by_id = make_sleds_by_id(&example.system)?; diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-example.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-example.txt index d4c6d688d81..4fa5ee01076 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-example.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-example.txt @@ -45,3 +45,19 @@ blueprint-diff 86db3308-f817-4626-8838-4085949a6a41 blueprint-diff 02697f74-b14a-4418-90f0-c28b2a3a6aa9 86db3308-f817-4626-8838-4085949a6a41 # You can specify them in the reverse order and see the opposite changes. blueprint-diff 86db3308-f817-4626-8838-4085949a6a41 02697f74-b14a-4418-90f0-c28b2a3a6aa9 + +# Load an example with a non-provisionable and an expunged sled. +wipe all +load-example --seed test-basic --nsleds 3 --sled-policy 1:non-provisionable --sled-policy 2:expunged --ndisks-per-sled 3 + +blueprint-list +blueprint-show latest + +# Plan a blueprint run -- this will cause zones and disks on the expunged +# sled to be expunged. +blueprint-plan latest +blueprint-diff ade5749d-bdf3-4fab-a8ae-00bea01b3a5a latest + +# Sled index out of bounds, will error out. +wipe all +load-example --seed test-basic --nsleds 3 --sled-policy 3:non-provisionable diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt new file mode 100644 index 00000000000..f945938d882 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt @@ -0,0 +1,50 @@ +# Load an example system. The sled with serial5 is marked non-provisionable +# so that discretionary zones don't make their way onto it. (We're going to +# expunge it below to test that we don't try and update zone image sources +# on expunged sleds.) +load-example --nsleds 6 --ndisks-per-sled 1 --sled-policy 5:non-provisionable + +sled-list + +# Create a TUF repository from a fake manifest. (The output TUF repo is +# written to a temporary directory that this invocation of `reconfigurator-cli` +# is running out of as its working directory.) +tuf-assemble ../../update-common/manifests/fake.toml +# Create a second TUF repository from a different fake manifest. +tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver + +# Load the target release from the first TUF repository. +set target-release repo-1.0.0.zip + +# On one sled, update the install dataset. +sled-update-install-dataset serial0 --to-target-release + +# On another sled, simulate an error reading the zone manifest. +sled-update-install-dataset serial1 --with-manifest-error + +# On a third sled, update the install dataset and simulate a mupdate override. 
+# (Currently we do this in the blueprint, but with +# https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and +# set a mupdate-override on the sled directly.) +sled-update-install-dataset serial2 --to-target-release +blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff + +# On a fourth sled, simulate an error validating the install dataset image on one zone. +# We pick ntp because internal-ntp is non-discretionary. +sled-update-install-dataset serial3 --to-target-release --with-zone-error ntp + +# On a fifth sled, set the install dataset to the repo-2.0.0.zip generated by the +# second TUF repository. +sled-update-install-dataset serial4 --from-repo repo-2.0.0.zip + +# On the sixth sled, update to the target release (so it shows up in inventory). +# Then, mark the sled expunged (in the planning input). +sled-update-install-dataset serial5 --to-target-release +sled-set-policy serial5 expunged + +# Generate an inventory and run a blueprint planning step. +inventory-generate +blueprint-plan latest eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 + +# This diff should show expected changes to the blueprint. +blueprint-diff 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-set-remove-mupdate-override.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-set-remove-mupdate-override.txt index ba23a335401..9696b9e45bb 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-set-remove-mupdate-override.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-set-remove-mupdate-override.txt @@ -1,12 +1,12 @@ # Load example system with 7 sleds: # -# sled 0: unset -> unset (unchanged) -# sled 1: unset -> set -# sled 2: set -> unset -# sled 3: set -> set (unchanged) -# sled 4: set -> set (changed) -# sled 5: set -> set (unchanged) but change something else -# sled 6: set -> sled removed +# serial0: unset -> unset (unchanged) +# serial1: unset -> set +# serial2: set -> unset +# serial3: set -> set (unchanged) +# serial4: set -> set (changed) +# serial5: set -> set (unchanged) but change something else +# serial6: set -> sled removed # # We'll also add another sled below (new_sled_id) with # remove_mupdate_override set. @@ -15,28 +15,29 @@ # outputs minimal. load-example --nsleds 7 --ndisks-per-sled 0 --no-zones +sled-list # Set the field on sleds 2-6 (0-indexed). 
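+# (Sleds are addressed by serial number below; the sled-list output above
+# shows how serials map to sled UUIDs in the loaded example system.)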
-blueprint-edit latest set-remove-mupdate-override 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 00000000-0000-0000-0000-000000000000 -blueprint-edit latest set-remove-mupdate-override aff6c093-197d-42c5-ad80-9f10ba051a34 00000000-0000-0000-0000-000000000000 -blueprint-edit latest set-remove-mupdate-override b82ede02-399c-48c6-a1de-411df4fa49a7 00000000-0000-0000-0000-000000000000 -blueprint-edit latest set-remove-mupdate-override d81c6a84-79b8-4958-ae41-ea46c9b19763 00000000-0000-0000-0000-000000000000 -blueprint-edit latest set-remove-mupdate-override e96e226f-4ed9-4c01-91b9-69a9cd076c9e 00000000-0000-0000-0000-000000000000 +blueprint-edit latest set-remove-mupdate-override serial2 00000000-0000-0000-0000-000000000000 +blueprint-edit latest set-remove-mupdate-override serial3 00000000-0000-0000-0000-000000000000 +blueprint-edit latest set-remove-mupdate-override serial4 00000000-0000-0000-0000-000000000000 +blueprint-edit latest set-remove-mupdate-override serial5 00000000-0000-0000-0000-000000000000 +blueprint-edit latest set-remove-mupdate-override serial6 00000000-0000-0000-0000-000000000000 blueprint-show latest # Now make another blueprint, starting by adding a new sled and removing sled 6. sled-add --ndisks 0 -blueprint-edit latest debug remove-sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e -sled-remove e96e226f-4ed9-4c01-91b9-69a9cd076c9e +blueprint-edit latest debug remove-sled serial6 +sled-remove serial6 inventory-generate # Edit sleds 1, 2, 4, 5, and the new one. -blueprint-edit latest set-remove-mupdate-override 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 ffffffff-ffff-ffff-ffff-ffffffffffff -blueprint-edit latest set-remove-mupdate-override 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 unset -blueprint-edit latest set-remove-mupdate-override b82ede02-399c-48c6-a1de-411df4fa49a7 ffffffff-ffff-ffff-ffff-ffffffffffff -blueprint-edit latest debug force-sled-generation-bump d81c6a84-79b8-4958-ae41-ea46c9b19763 -blueprint-edit latest set-remove-mupdate-override 00320471-945d-413c-85e7-03e091a70b3c ffffffff-ffff-ffff-ffff-ffffffffffff +blueprint-edit latest set-remove-mupdate-override serial1 ffffffff-ffff-ffff-ffff-ffffffffffff +blueprint-edit latest set-remove-mupdate-override serial2 unset +blueprint-edit latest set-remove-mupdate-override serial4 ffffffff-ffff-ffff-ffff-ffffffffffff +blueprint-edit latest debug force-sled-generation-bump serial5 +blueprint-edit latest set-remove-mupdate-override serial7 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint-diff 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds.txt b/dev-tools/reconfigurator-cli/tests/input/cmds.txt index d8d15011399..671a89303b1 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds.txt @@ -10,7 +10,7 @@ sled-add dde1c0e2-b10d-4621-b420-f179f7a7a00a sled-list sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a sled-add 90c1102a-b9f5-4d88-92a2-60d54a2d98cc -sled-add 04ef3330-c682-4a08-8def-fcc4bef31bcd +sled-add 04ef3330-c682-4a08-8def-fcc4bef31bcd --policy non-provisionable sled-list sled-update-sp dde1c0e2-b10d-4621-b420-f179f7a7a00a diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index 92718db5696..065cb018b31 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -25,7 +25,7 @@ f45ba181-4b56-42cc-a762-874d90184a43 0 > # 
Add a new sled with no disks. > sled-add --ndisks 0 -added sled 00320471-945d-413c-85e7-03e091a70b3c +added sled 00320471-945d-413c-85e7-03e091a70b3c (serial: serial3) > # Generate a new inventory collection that includes that sled. @@ -36,6 +36,7 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > # Try to plan a new blueprint; this should be okay even though the sled > # we added has no disks. > blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 +INFO skipping noop image source check for all sleds (no current TUF repo) INFO skipping sled (no zpools in service), sled_id: 00320471-945d-413c-85e7-03e091a70b3c INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index 50c14f73d0a..e6bd81ca525 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -37,7 +37,7 @@ T ENA ID PARENT > sled-show 2eb69596-f081-4e2d-9425-9994926e0832 -sled 2eb69596-f081-4e2d-9425-9994926e0832 +sled 2eb69596-f081-4e2d-9425-9994926e0832 (in service, active) serial serial1 subnet fd00:1122:3344:102::/64 SP active version: Some("0.0.1") @@ -398,7 +398,7 @@ T ENA ID PARENT > sled-show 89d02b1b-478c-401a-8e28-7a26f74fa41b -sled 89d02b1b-478c-401a-8e28-7a26f74fa41b +sled 89d02b1b-478c-401a-8e28-7a26f74fa41b (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("0.0.1") @@ -493,6 +493,7 @@ T ENA ID PARENT * yes ade5749d-bdf3-4fab-a8ae-00bea01b3a5a 02697f74-b14a-4418-90f0-c28b2a3a6aa9 > blueprint-plan ade5749d-bdf3-4fab-a8ae-00bea01b3a5a +INFO skipping noop image source check for all sleds (no current TUF repo) INFO found sled missing NTP zone (will add one), sled_id: 89d02b1b-478c-401a-8e28-7a26f74fa41b INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 WARN failed to place all new desired Clickhouse zones, placed: 0, wanted_to_place: 1 @@ -736,3 +737,449 @@ external DNS: + +> # Load an example with a non-provisionable and an expunged sled. 
+> wipe all +- wiped system, reconfigurator-sim config, and RNG state + + - reset seed to test-basic + +> load-example --seed test-basic --nsleds 3 --sled-policy 1:non-provisionable --sled-policy 2:expunged --ndisks-per-sled 3 +loaded example system with: +- collection: 9e187896-7809-46d0-9210-d75be1b3c4d4 +- blueprint: ade5749d-bdf3-4fab-a8ae-00bea01b3a5a + + +> blueprint-list +T ENA ID PARENT TIME_CREATED + 02697f74-b14a-4418-90f0-c28b2a3a6aa9 +* yes ade5749d-bdf3-4fab-a8ae-00bea01b3a5a 02697f74-b14a-4418-90f0-c28b2a3a6aa9 + +> blueprint-show latest +blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a +parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 + + sled: 2eb69596-f081-4e2d-9425-9994926e0832 (active, config generation 2) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-64ea76fb-6673-4810-8e53-c2458b75eb01 in service + fake-vendor fake-model serial-6d012675-6f54-4b6a-8658-ab0076237569 in service + fake-vendor fake-model serial-9a065406-12a0-4b29-926e-d66bb785d17b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + oxp_64ea76fb-6673-4810-8e53-c2458b75eb01/crucible 1338efa2-c05b-4231-9180-db10555139ff in service none none off + oxp_6d012675-6f54-4b6a-8658-ab0076237569/crucible 49722424-f7be-42fb-a718-78eff8234c3a in service none none off + oxp_9a065406-12a0-4b29-926e-d66bb785d17b/crucible 236fd4f2-1e28-49a2-ab27-66c2f54db2a8 in service none none off + oxp_64ea76fb-6673-4810-8e53-c2458b75eb01/crypt/zone d7643dc7-276f-4bb6-af5a-4c404b8edc7e in service none none off + oxp_6d012675-6f54-4b6a-8658-ab0076237569/crypt/zone 9bde99e1-6ef7-434e-8b75-166ab9b1291d in service none none off + oxp_9a065406-12a0-4b29-926e-d66bb785d17b/crypt/zone 37213994-4335-4190-a13e-a493429f7fc2 in service none none off + oxp_6d012675-6f54-4b6a-8658-ab0076237569/crypt/zone/oxz_crucible_b61b7c3c-d665-44b3-9312-794aa81c59de b180e417-b7ca-4be6-b373-8b252d1bdca4 in service none none off + oxp_9a065406-12a0-4b29-926e-d66bb785d17b/crypt/zone/oxz_crucible_b957d6cf-f7b2-4bee-9928-c5fde8c59e04 6c75c045-676e-4e66-a98c-ed535010dd42 in service none none off + oxp_64ea76fb-6673-4810-8e53-c2458b75eb01/crypt/zone/oxz_crucible_e246f5e3-0650-4afc-860f-ee7114d309c5 f87cf44e-8d81-4ed9-a617-cd6f689f88c1 in service none none off + oxp_64ea76fb-6673-4810-8e53-c2458b75eb01/crypt/zone/oxz_ntp_18b3781d-571b-4d7c-b65d-18a452e5a64a fb66f2cb-e036-4181-8db2-466429796639 in service none none off + oxp_64ea76fb-6673-4810-8e53-c2458b75eb01/crypt/debug 54126684-7061-47f1-8599-56bd5fb09591 in service 100 GiB none gzip-9 + oxp_6d012675-6f54-4b6a-8658-ab0076237569/crypt/debug 827c14b4-9625-47de-b036-c645bb41a002 in service 100 GiB none gzip-9 + oxp_9a065406-12a0-4b29-926e-d66bb785d17b/crypt/debug 73f20150-cd0c-488f-9c50-e308d5f417f8 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------ + zone type zone id image source 
disposition underlay IP + ------------------------------------------------------------------------------------------------------------ + crucible b61b7c3c-d665-44b3-9312-794aa81c59de install dataset in service fd00:1122:3344:102::23 + crucible b957d6cf-f7b2-4bee-9928-c5fde8c59e04 install dataset in service fd00:1122:3344:102::24 + crucible e246f5e3-0650-4afc-860f-ee7114d309c5 install dataset in service fd00:1122:3344:102::22 + internal_ntp 18b3781d-571b-4d7c-b65d-18a452e5a64a install dataset in service fd00:1122:3344:102::21 + + + + sled: 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active, config generation 2) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-2db3c119-38fa-41c0-8dbe-89689b84d655 in service + fake-vendor fake-model serial-687e054b-a9b5-4404-8b3f-0ea119c44a6a in service + fake-vendor fake-model serial-a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crucible 0c8f7b7f-a3d5-48a4-83b9-1185bafc58a5 in service none none off + oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crucible 35d5e16f-f07a-465d-b206-6abf433220b8 in service none none off + oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crucible 76d8c526-6f3b-45a9-9127-168a1d828b0b in service none none off + oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone d2f8776d-904d-4f0d-845b-3c1b8c52ef39 in service none none off + oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/zone 3dd06653-b053-408c-ad50-c1f61a112434 in service none none off + oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/zone 4ed42a5a-53b7-4408-a13d-8455d932990e in service none none off + oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone/oxz_crucible_6c2a57b0-2de0-4409-a6b9-c9aa5614eefa c9e92a11-b24b-4580-97bb-6aee8d927793 in service none none off + oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/zone/oxz_crucible_99a750b2-724d-4828-ae5f-0df1aad90166 9b525ee7-7517-41d4-a759-0bf18f465c09 in service none none off + oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/zone/oxz_crucible_e668d83e-a28c-42dc-b574-467e57403cc1 0ecfbafe-4999-42bb-8b4d-870169e701f8 in service none none off + oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone/oxz_ntp_0ab3dbe9-8387-4600-b097-cb71ee91ee83 5e59c7a5-09e9-43c5-8708-0e8a83624265 in service none none off + oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/debug 5ca9e07e-a50d-41e5-b154-9bfcdc74d0a0 in service 100 GiB none gzip-9 + oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/debug 22d6d9e3-3e59-4474-aafe-7adf916360f3 in service 100 GiB none gzip-9 + oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/debug 48beaa78-a41c-4ce3-ae7f-f5e4194def58 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------ + zone type zone id image source disposition underlay IP + 
------------------------------------------------------------------------------------------------------------ + crucible 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa install dataset in service fd00:1122:3344:103::22 + crucible 99a750b2-724d-4828-ae5f-0df1aad90166 install dataset in service fd00:1122:3344:103::23 + crucible e668d83e-a28c-42dc-b574-467e57403cc1 install dataset in service fd00:1122:3344:103::24 + internal_ntp 0ab3dbe9-8387-4600-b097-cb71ee91ee83 install dataset in service fd00:1122:3344:103::21 + + + + sled: 89d02b1b-478c-401a-8e28-7a26f74fa41b (active, config generation 2) + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-0477165a-a72e-4814-b8d6-74aa02cb2040 in service + fake-vendor fake-model serial-ab94a812-86ce-428c-bbbb-6ce1ab0b071b in service + fake-vendor fake-model serial-f96f5901-2907-4f21-bfeb-772f8a3c4e44 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crucible d495dcb0-eee5-41a7-ae37-1b82ea5db3e5 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crucible e46c0314-be52-496a-94d5-68af12502b8c in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crucible 776b3d91-ba2e-4fbf-9acc-8250d833347c in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/clickhouse d388f8e3-4f92-4c54-a118-34ddad36df74 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/external_dns bbf4866b-410d-4cd1-9aa8-49f9949b90f4 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/external_dns 4b816d21-0aa3-4904-a828-6541c3aceac3 in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/external_dns 76bf521b-7146-4c84-8aec-1cdaaa4d2dd5 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/internal_dns e559f88f-47e2-4565-b3dd-7027d23e4076 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/internal_dns b4e595b6-b1fe-4660-99f7-65c631f7753a in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/internal_dns bdfed92e-135f-457d-accc-55352482e4f4 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone 40b91d92-c810-46a2-9058-55b3675d29cd in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone b95b2a37-a956-4e8e-9685-7f3300201b87 in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone c58320b8-b301-43d0-bb19-d95508d47e6a in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_clickhouse_a27817ff-db8d-49ba-9682-9e3c13bc4e4b ff1c7f38-0cf7-4d8a-bc78-b0dd66d22d72 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_crucible_397841de-588e-4fe7-94ee-a944b7340074 88873690-de60-4b21-a0f4-cf1151ada67b in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone/oxz_crucible_4a81477d-5a28-4246-93ef-01db835a5c5e 
d44e6d10-25f4-4161-b908-6f4c8fe22c8d in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone/oxz_crucible_f4dc5b5d-6eb6-40a9-a079-971eca862285 5f1cbe29-50c7-4b66-9593-93215cf15e15 in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone/oxz_crucible_pantry_17ed8f62-8cd0-416c-86d1-5eaf6f7358ef ded8a905-1f03-4cac-b0e6-7aedf4d4bba6 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_crucible_pantry_534dad87-2b4c-4250-8ad9-2ec1d93b2994 359afcd0-c3b4-4f9a-89a7-e6c710fe52c1 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone/oxz_crucible_pantry_dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1 b630c4b8-5ced-412d-90c6-8652c9ad0d41 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_external_dns_03ba2f16-8b5e-41c4-906d-5a7a0af460ef da056dfb-4eaf-4fdc-bb7c-1df3002cde45 in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone/oxz_external_dns_5954ecdc-c5ad-49a6-9c5f-4689f783ba83 3944a161-4e9e-4382-9c38-bd73f8b74871 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone/oxz_external_dns_83ac30bd-ae85-4d6b-84c3-add02d3214cf a7ff14f8-b85a-4ebc-8c43-d57f32f85cff in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_internal_dns_4692cc31-6eb6-437c-9634-9688663d06ae 95c0dfaf-a768-40c6-9b0c-29df24a9c608 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone/oxz_internal_dns_587da9e8-8fc0-4854-b585-070741a7b00d a0e90077-1fa7-41a0-90c2-f3df934b382a in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone/oxz_internal_dns_ffbf02f0-261d-4723-b613-eb861245acbd 0f73a8f1-cd2f-4111-b2fe-ea7c7759a367 in service none none off + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/zone/oxz_nexus_056fd45c-2498-4ce4-8c97-79dfc9321080 9bf9bbf2-3ff6-4ec6-9f91-cf2c1719b2bf in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_nexus_a67ac9b3-427b-4ea6-a891-1c76a22720f5 1fb12c03-835f-4922-ab42-055811ddfa35 in service none none off + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/zone/oxz_nexus_d856156c-2bc2-41ad-beef-7ca1da5802d3 ec93753f-fcd0-4624-8dcc-328dabb3f322 in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/zone/oxz_ntp_ac5bb28e-91d5-42f3-a57a-d84e1c414c17 0f224584-6071-4b5b-b972-874e2ee23d1a in service none none off + oxp_0477165a-a72e-4814-b8d6-74aa02cb2040/crypt/debug 21d5fcc0-b87a-4818-b301-6c9fc27676c6 in service 100 GiB none gzip-9 + oxp_ab94a812-86ce-428c-bbbb-6ce1ab0b071b/crypt/debug b7dddd52-7b0f-4f7a-91f2-0535505d1a61 in service 100 GiB none gzip-9 + oxp_f96f5901-2907-4f21-bfeb-772f8a3c4e44/crypt/debug d940619b-769c-47be-9482-99a4db059e9e in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse a27817ff-db8d-49ba-9682-9e3c13bc4e4b install dataset in service fd00:1122:3344:101::25 + crucible 397841de-588e-4fe7-94ee-a944b7340074 install dataset in service fd00:1122:3344:101::2c + crucible 4a81477d-5a28-4246-93ef-01db835a5c5e install dataset in service fd00:1122:3344:101::2d + crucible f4dc5b5d-6eb6-40a9-a079-971eca862285 install dataset in service fd00:1122:3344:101::2e + crucible_pantry 17ed8f62-8cd0-416c-86d1-5eaf6f7358ef install 
dataset in service fd00:1122:3344:101::2b + crucible_pantry 534dad87-2b4c-4250-8ad9-2ec1d93b2994 install dataset in service fd00:1122:3344:101::29 + crucible_pantry dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1 install dataset in service fd00:1122:3344:101::2a + external_dns 03ba2f16-8b5e-41c4-906d-5a7a0af460ef install dataset in service fd00:1122:3344:101::26 + external_dns 5954ecdc-c5ad-49a6-9c5f-4689f783ba83 install dataset in service fd00:1122:3344:101::28 + external_dns 83ac30bd-ae85-4d6b-84c3-add02d3214cf install dataset in service fd00:1122:3344:101::27 + internal_dns 4692cc31-6eb6-437c-9634-9688663d06ae install dataset in service fd00:1122:3344:1::1 + internal_dns 587da9e8-8fc0-4854-b585-070741a7b00d install dataset in service fd00:1122:3344:2::1 + internal_dns ffbf02f0-261d-4723-b613-eb861245acbd install dataset in service fd00:1122:3344:3::1 + internal_ntp ac5bb28e-91d5-42f3-a57a-d84e1c414c17 install dataset in service fd00:1122:3344:101::21 + nexus 056fd45c-2498-4ce4-8c97-79dfc9321080 install dataset in service fd00:1122:3344:101::24 + nexus a67ac9b3-427b-4ea6-a891-1c76a22720f5 install dataset in service fd00:1122:3344:101::22 + nexus d856156c-2bc2-41ad-beef-7ca1da5802d3 install dataset in service fd00:1122:3344:101::23 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: test suite + created at::::::::::::: + comment:::::::::::::::: (none) + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 1 + + PENDING MGS-MANAGED UPDATES: 0 + + + +> # Plan a blueprint run -- this will cause zones and disks on the expunged +> # sled to be expunged. +> blueprint-plan latest +INFO skipping noop image source check for all sleds (no current TUF repo) +INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 +INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 +WARN cannot issue more SP updates (no current artifacts) +INFO all zones up-to-date +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 86db3308-f817-4626-8838-4085949a6a41 based on parent blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a + +> blueprint-diff ade5749d-bdf3-4fab-a8ae-00bea01b3a5a latest +from: blueprint ade5749d-bdf3-4fab-a8ae-00bea01b3a5a +to: blueprint 86db3308-f817-4626-8838-4085949a6a41 + + MODIFIED SLEDS: + + sled 32d8d836-4d8a-4e54-8fa9-f31d79c42646 (active -> decommissioned, config generation 2 -> 3): + + physical disks: + --------------------------------------------------------------------------------------- + vendor model serial disposition + --------------------------------------------------------------------------------------- 
+* fake-vendor fake-model serial-2db3c119-38fa-41c0-8dbe-89689b84d655 - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-687e054b-a9b5-4404-8b3f-0ea119c44a6a - in service + └─ + expunged ✓ +* fake-vendor fake-model serial-a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28 - in service + └─ + expunged ✓ + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crucible 0c8f7b7f-a3d5-48a4-83b9-1185bafc58a5 - in service none none off + └─ + expunged +* oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crucible 35d5e16f-f07a-465d-b206-6abf433220b8 - in service none none off + └─ + expunged +* oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crucible 76d8c526-6f3b-45a9-9127-168a1d828b0b - in service none none off + └─ + expunged +* oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone d2f8776d-904d-4f0d-845b-3c1b8c52ef39 - in service none none off + └─ + expunged +* oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/zone 3dd06653-b053-408c-ad50-c1f61a112434 - in service none none off + └─ + expunged +* oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/zone 4ed42a5a-53b7-4408-a13d-8455d932990e - in service none none off + └─ + expunged +* oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone/oxz_crucible_6c2a57b0-2de0-4409-a6b9-c9aa5614eefa c9e92a11-b24b-4580-97bb-6aee8d927793 - in service none none off + └─ + expunged +* oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/zone/oxz_crucible_99a750b2-724d-4828-ae5f-0df1aad90166 9b525ee7-7517-41d4-a759-0bf18f465c09 - in service none none off + └─ + expunged +* oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/zone/oxz_crucible_e668d83e-a28c-42dc-b574-467e57403cc1 0ecfbafe-4999-42bb-8b4d-870169e701f8 - in service none none off + └─ + expunged +* oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/zone/oxz_ntp_0ab3dbe9-8387-4600-b097-cb71ee91ee83 5e59c7a5-09e9-43c5-8708-0e8a83624265 - in service none none off + └─ + expunged +* oxp_2db3c119-38fa-41c0-8dbe-89689b84d655/crypt/debug 5ca9e07e-a50d-41e5-b154-9bfcdc74d0a0 - in service 100 GiB none gzip-9 + └─ + expunged +* oxp_687e054b-a9b5-4404-8b3f-0ea119c44a6a/crypt/debug 22d6d9e3-3e59-4474-aafe-7adf916360f3 - in service 100 GiB none gzip-9 + └─ + expunged +* oxp_a4e8a5cc-cd1c-4a08-bcf1-31785f7cda28/crypt/debug 48beaa78-a41c-4ce3-ae7f-f5e4194def58 - in service 100 GiB none gzip-9 + └─ + expunged + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- +* crucible 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa install dataset - in service fd00:1122:3344:103::22 + └─ + expunged ✓ +* crucible 99a750b2-724d-4828-ae5f-0df1aad90166 install dataset - in service fd00:1122:3344:103::23 + └─ + expunged ✓ +* crucible e668d83e-a28c-42dc-b574-467e57403cc1 install dataset - in service fd00:1122:3344:103::24 + └─ + expunged ✓ +* internal_ntp 0ab3dbe9-8387-4600-b097-cb71ee91ee83 install dataset - in service fd00:1122:3344:103::21 + └─ + expunged ✓ + + + COCKROACHDB 
SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": + name: 03ba2f16-8b5e-41c4-906d-5a7a0af460ef.host (records: 1) + AAAA fd00:1122:3344:101::26 + name: 056fd45c-2498-4ce4-8c97-79dfc9321080.host (records: 1) + AAAA fd00:1122:3344:101::24 +- name: 0ab3dbe9-8387-4600-b097-cb71ee91ee83.host (records: 1) +- AAAA fd00:1122:3344:103::21 + name: 17ed8f62-8cd0-416c-86d1-5eaf6f7358ef.host (records: 1) + AAAA fd00:1122:3344:101::2b + name: 18b3781d-571b-4d7c-b65d-18a452e5a64a.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 2eb69596-f081-4e2d-9425-9994926e0832.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 32d8d836-4d8a-4e54-8fa9-f31d79c42646.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: 397841de-588e-4fe7-94ee-a944b7340074.host (records: 1) + AAAA fd00:1122:3344:101::2c + name: 4692cc31-6eb6-437c-9634-9688663d06ae.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: 4a81477d-5a28-4246-93ef-01db835a5c5e.host (records: 1) + AAAA fd00:1122:3344:101::2d + name: 534dad87-2b4c-4250-8ad9-2ec1d93b2994.host (records: 1) + AAAA fd00:1122:3344:101::29 + name: 587da9e8-8fc0-4854-b585-070741a7b00d.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 5954ecdc-c5ad-49a6-9c5f-4689f783ba83.host (records: 1) + AAAA fd00:1122:3344:101::28 +- name: 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa.host (records: 1) +- AAAA fd00:1122:3344:103::22 + name: 83ac30bd-ae85-4d6b-84c3-add02d3214cf.host (records: 1) + AAAA fd00:1122:3344:101::27 + name: 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled (records: 1) + AAAA fd00:1122:3344:101::1 +- name: 99a750b2-724d-4828-ae5f-0df1aad90166.host (records: 1) +- AAAA fd00:1122:3344:103::23 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 17ed8f62-8cd0-416c-86d1-5eaf6f7358ef.host.control-plane.oxide.internal + SRV port 17000 534dad87-2b4c-4250-8ad9-2ec1d93b2994.host.control-plane.oxide.internal + SRV port 17000 dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1.host.control-plane.oxide.internal + name: _crucible._tcp.397841de-588e-4fe7-94ee-a944b7340074 (records: 1) + SRV port 32345 397841de-588e-4fe7-94ee-a944b7340074.host.control-plane.oxide.internal + name: _crucible._tcp.4a81477d-5a28-4246-93ef-01db835a5c5e (records: 1) + SRV port 32345 4a81477d-5a28-4246-93ef-01db835a5c5e.host.control-plane.oxide.internal +- name: _crucible._tcp.6c2a57b0-2de0-4409-a6b9-c9aa5614eefa (records: 1) +- SRV port 32345 6c2a57b0-2de0-4409-a6b9-c9aa5614eefa.host.control-plane.oxide.internal +- name: _crucible._tcp.99a750b2-724d-4828-ae5f-0df1aad90166 (records: 1) +- SRV port 32345 99a750b2-724d-4828-ae5f-0df1aad90166.host.control-plane.oxide.internal + name: _crucible._tcp.b61b7c3c-d665-44b3-9312-794aa81c59de 
(records: 1) + SRV port 32345 b61b7c3c-d665-44b3-9312-794aa81c59de.host.control-plane.oxide.internal + name: _crucible._tcp.b957d6cf-f7b2-4bee-9928-c5fde8c59e04 (records: 1) + SRV port 32345 b957d6cf-f7b2-4bee-9928-c5fde8c59e04.host.control-plane.oxide.internal + name: _crucible._tcp.e246f5e3-0650-4afc-860f-ee7114d309c5 (records: 1) + SRV port 32345 e246f5e3-0650-4afc-860f-ee7114d309c5.host.control-plane.oxide.internal +- name: _crucible._tcp.e668d83e-a28c-42dc-b574-467e57403cc1 (records: 1) +- SRV port 32345 e668d83e-a28c-42dc-b574-467e57403cc1.host.control-plane.oxide.internal + name: _crucible._tcp.f4dc5b5d-6eb6-40a9-a079-971eca862285 (records: 1) + SRV port 32345 f4dc5b5d-6eb6-40a9-a079-971eca862285.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 03ba2f16-8b5e-41c4-906d-5a7a0af460ef.host.control-plane.oxide.internal + SRV port 5353 5954ecdc-c5ad-49a6-9c5f-4689f783ba83.host.control-plane.oxide.internal + SRV port 5353 83ac30bd-ae85-4d6b-84c3-add02d3214cf.host.control-plane.oxide.internal +* name: _internal-ntp._tcp (records: 3 -> 2) +- SRV port 123 0ab3dbe9-8387-4600-b097-cb71ee91ee83.host.control-plane.oxide.internal +- SRV port 123 18b3781d-571b-4d7c-b65d-18a452e5a64a.host.control-plane.oxide.internal +- SRV port 123 ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host.control-plane.oxide.internal ++ SRV port 123 18b3781d-571b-4d7c-b65d-18a452e5a64a.host.control-plane.oxide.internal ++ SRV port 123 ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 4692cc31-6eb6-437c-9634-9688663d06ae.host.control-plane.oxide.internal + SRV port 5353 587da9e8-8fc0-4854-b585-070741a7b00d.host.control-plane.oxide.internal + SRV port 5353 ffbf02f0-261d-4723-b613-eb861245acbd.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 056fd45c-2498-4ce4-8c97-79dfc9321080.host.control-plane.oxide.internal + SRV port 12221 a67ac9b3-427b-4ea6-a891-1c76a22720f5.host.control-plane.oxide.internal + SRV port 12221 d856156c-2bc2-41ad-beef-7ca1da5802d3.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2eb69596-f081-4e2d-9425-9994926e0832.sled.control-plane.oxide.internal + SRV port 12348 32d8d836-4d8a-4e54-8fa9-f31d79c42646.sled.control-plane.oxide.internal + SRV port 12348 89d02b1b-478c-401a-8e28-7a26f74fa41b.sled.control-plane.oxide.internal + name: a27817ff-db8d-49ba-9682-9e3c13bc4e4b.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: a67ac9b3-427b-4ea6-a891-1c76a22720f5.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: ac5bb28e-91d5-42f3-a57a-d84e1c414c17.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: b61b7c3c-d665-44b3-9312-794aa81c59de.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: b957d6cf-f7b2-4bee-9928-c5fde8c59e04.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: d856156c-2bc2-41ad-beef-7ca1da5802d3.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: dc2666e6-4c3e-4b8e-99bc-bcdb5f8986e1.host (records: 1) + AAAA fd00:1122:3344:101::2a + name: e246f5e3-0650-4afc-860f-ee7114d309c5.host (records: 1) + AAAA fd00:1122:3344:102::22 +- name: e668d83e-a28c-42dc-b574-467e57403cc1.host (records: 1) +- AAAA fd00:1122:3344:103::24 + name: f4dc5b5d-6eb6-40a9-a079-971eca862285.host (records: 1) + AAAA fd00:1122:3344:101::2e + name: ffbf02f0-261d-4723-b613-eb861245acbd.host (records: 1) + 
AAAA fd00:1122:3344:3::1 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.4 + A 192.0.2.2 + A 192.0.2.3 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Sled index out of bounds, will error out. +> wipe all +- wiped system, reconfigurator-sim config, and RNG state + + - reset seed to test-basic + +> load-example --seed test-basic --nsleds 3 --sled-policy 3:non-provisionable +error: setting sled policy: sled index 3 out of range (0..3) + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index 96d99da31ee..8732ed80b6e 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -969,6 +969,7 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 > # blueprint-plan will place a new external DNS zone, diff DNS to see the new zone has `ns` and NS records. > blueprint-plan 366b0b68-d80e-4bc1-abd3-dc69837847e0 +INFO skipping noop image source check for all sleds (no current TUF repo) INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index a2125c4f88e..132fdeb474f 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -1002,6 +1002,7 @@ external DNS: > # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. > blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 +INFO skipping noop image source check for all sleds (no current TUF repo) INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stderr b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stderr new file mode 100644 index 00000000000..e69de29bb2d diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout new file mode 100644 index 00000000000..11fd49b79ad --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -0,0 +1,480 @@ +using provided RNG seed: reconfigurator-cli-test +> # Load an example system. The sled with serial5 is marked non-provisionable +> # so that discretionary zones don't make their way onto it. 
(We're going to +> # expunge it below to test that we don't try and update zone image sources +> # on expunged sleds.) +> load-example --nsleds 6 --ndisks-per-sled 1 --sled-policy 5:non-provisionable +loaded example system with: +- collection: f45ba181-4b56-42cc-a762-874d90184a43 +- blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + + +> sled-list +ID SERIAL NZPOOLS SUBNET +2b8f0cb3-0295-4b3c-bc58-4fe88b57112c serial1 1 fd00:1122:3344:102::/64 +98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 serial0 1 fd00:1122:3344:101::/64 +9a867dc9-d505-427f-9eff-cdb1d4d9bd73 serial5 1 fd00:1122:3344:106::/64 +aff6c093-197d-42c5-ad80-9f10ba051a34 serial3 1 fd00:1122:3344:104::/64 +b82ede02-399c-48c6-a1de-411df4fa49a7 serial4 1 fd00:1122:3344:105::/64 +d81c6a84-79b8-4958-ae41-ea46c9b19763 serial2 1 fd00:1122:3344:103::/64 + + +> # Create a TUF repository from a fake manifest. (The output TUF repo is +> # written to a temporary directory that this invocation of `reconfigurator-cli` +> # is running out of as its working directory.) +> tuf-assemble ../../update-common/manifests/fake.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler +created repo-1.0.0.zip for system version 1.0.0 + +> # Create a second TUF repository from a different fake manifest. +> tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver +INFO assembling repository in +INFO artifacts assembled and archived to `repo-2.0.0.zip`, component: OmicronRepoAssembler +created repo-2.0.0.zip for system version 2.0.0 + + +> # Load the target release from the first TUF repository. +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: SimGimletSp, kind: gimlet_sp, version: 1.0.0, hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, length: 747 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 1.0.0, hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: 1.0.0, hash: 9b7575cad720f017e936fe5994fc4e21fe040acaaf83c2edd86132aa3d667c7b, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: f355fb8429a7e0f0716dad035f9a06c799168d6c0ffcde85b1a96fef21d4b53e, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 
5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: f896cf5b19ca85864d470ad8587f980218bff3954e7f52bbd999699cd0f9635b, length: 744 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: ab32ec86e942e1a16c8d43ea143cd80dd05a9639529d3569b1c24dfa2587ee74, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +set target release based on repo-1.0.0.zip + + +> # On one sled, update the install dataset. +> sled-update-install-dataset serial0 --to-target-release +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: to target release (system version 1.0.0) + + +> # On another sled, simulate an error reading the zone manifest. +> sled-update-install-dataset serial1 --with-manifest-error +sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: install dataset updated: simulated error obtaining zone manifest + + +> # On a third sled, update the install dataset and simulate a mupdate override. +> # (Currently we do this in the blueprint, but with +> # https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and +> # set a mupdate-override on the sled directly.) 
+> sled-update-install-dataset serial2 --to-target-release +sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: to target release (system version 1.0.0) + +> blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff +blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff + + +> # On a fourth sled, simulate an error validating the install dataset image on one zone. +> # We pick ntp because internal-ntp is non-discretionary. +> sled-update-install-dataset serial3 --to-target-release --with-zone-error ntp +sled aff6c093-197d-42c5-ad80-9f10ba051a34: install dataset updated: to target release (system version 1.0.0, 1 zone errors) + + +> # On a fifth sled, set the install dataset to the repo-2.0.0.zip generated by the +> # second TUF repository. +> sled-update-install-dataset serial4 --from-repo repo-2.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 2.0.0, hash: ce1e98a8a9ae541654508f101d59a3ddeba3d28177f1d42d5614248eef0b820b, length: 751 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 2.0.0, hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 2.0.0, hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: non-semver, hash: 24f8ca0d52da5238644b11964c6feda854c7530820713efefa7ac91683b3fc76, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: non-semver, hash: 5fceee33d358aacb8a34ca93a30e28354bd8f341f6e3e895a2cafe83904f3d80, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 2.0.0, hash: bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08, length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 2.0.0, hash: 1eb9f24be68f13c274aa0ac9b863cec520dbfe762620c328431728d75bfd2198, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 2.0.0, hash: 50fe271948672a9af1ba5f96c9d87ff2736fa72d78dfef598a79fa0cc8a00474, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 2.0.0, hash: ebc82bf181db864b78cb7e3ddedf7ab1dd8fe7b377b02846f3c27cf0387bb387, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 2.0.0, hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e, length: 1691 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 2.0.0, hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 2.0.0, hash: 
f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 2.0.0, hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 2.0.0, hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 2.0.0, hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 2.0.0, hash: 9f4bc56a15d5fd943fdac94309994b8fd73aa2be1ec61faf44bfcf2356c9dc23, length: 1683 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 2.0.0, hash: 7adf04de523865003dbf120cebddd5fcf5bad650640281b294197e6ca7016e47, length: 748 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 2.0.0, hash: 5a9019c484c051edfab4903a7a5e1817c89bd555eea3e48f6b92c6e67442e13e, length: 746 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: non-semver-2, hash: a0d6df68e6112edcf62c035947563d2a58d06e11443b95b90bf087da710550a5, length: 758 +sled b82ede02-399c-48c6-a1de-411df4fa49a7: install dataset updated: from repo at repo-2.0.0.zip (system version 2.0.0) + + +> # On the sixth sled, update to the target release (so it shows up in inventory). +> # Then, mark the sled expunged (in the planning input). +> sled-update-install-dataset serial5 --to-target-release +sled 9a867dc9-d505-427f-9eff-cdb1d4d9bd73: install dataset updated: to target release (system version 1.0.0) + +> sled-set-policy serial5 expunged +set sled 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 policy to expunged + + +> # Generate an inventory and run a blueprint planning step. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 +WARN skipping noop image source check since sled-agent encountered error retrieving zone manifest (this is abnormal), sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, error: reconfigurator-sim simulated error: simulated error obtaining zone manifest +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: nexus v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: internal-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-zone v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: ntp v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: external-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-pantry-zone v1.0.0 (zone) +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: nexus v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: external-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: crucible-zone v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: internal-dns v1.0.0 (zone) +INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: crucible-pantry-zone v1.0.0 (zone) +WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, error: reconfigurator-sim: simulated error validating zone image +INFO noop converting 5/6 install-dataset zones to artifact store, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34 +INFO noop converting 0/2 install-dataset zones to artifact store, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7 +INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: ffffffff-ffff-ffff-ffff-ffffffffffff +INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient Clickhouse zones 
exist in plan, desired_count: 1, current_count: 1 +INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 +INFO configuring SP update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO reached maximum number of pending SP updates, max: 1 +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 + + +> # This diff should show expected changes to the blueprint. +> blueprint-diff 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 latest +from: blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 +to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2 -> 3): + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 
686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + └─ + artifact: version 1.0.0 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 - install dataset in service fd00:1122:3344:101::24 + └─ + artifact: version 1.0.0 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d - install dataset in service fd00:1122:3344:101::23 + └─ + artifact: version 1.0.0 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c - install dataset in service fd00:1122:3344:2::1 + └─ + artifact: version 1.0.0 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c - install dataset in service fd00:1122:3344:101::21 + └─ + artifact: version 1.0.0 +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 - install dataset in service fd00:1122:3344:101::22 + └─ + artifact: version 1.0.0 + + + sled 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 (active -> decommissioned, config generation 2 -> 3): + + physical disks: + --------------------------------------------------------------------------------------- + vendor model serial disposition + --------------------------------------------------------------------------------------- +* fake-vendor fake-model serial-5ad21c90-792e-445b-b25e-285723c44243 - in service + └─ + expunged ✓ + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +* oxp_5ad21c90-792e-445b-b25e-285723c44243/crucible 7fd50c89-afc0-4a37-ac93-ba5273120d32 - in service none none off + └─ + expunged +* oxp_5ad21c90-792e-445b-b25e-285723c44243/crypt/zone f9c25bcc-cd19-4003-a23a-807ae76e35aa - in service none none off + └─ + expunged +* oxp_5ad21c90-792e-445b-b25e-285723c44243/crypt/zone/oxz_crucible_824150c0-3fa4-4bac-9d14-c47ad04c9f3a e36ae01d-7975-429c-b802-f1bbc3b032b3 - in service none none off + └─ + expunged +* oxp_5ad21c90-792e-445b-b25e-285723c44243/crypt/zone/oxz_ntp_db288a1e-c33c-44ca-8c79-9a8978afa34d 51086d20-09ec-454c-9127-435beba2b8f0 - in service none none off + └─ + expunged +* oxp_5ad21c90-792e-445b-b25e-285723c44243/crypt/debug 639da9cf-2ce6-4e90-b95d-a0d0fc57d0f2 - in service 100 GiB none gzip-9 + └─ + expunged + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + 
--------------------------------------------------------------------------------------------------------------- +* crucible 824150c0-3fa4-4bac-9d14-c47ad04c9f3a install dataset - in service fd00:1122:3344:106::22 + └─ + expunged ✓ +* internal_ntp db288a1e-c33c-44ca-8c79-9a8978afa34d install dataset - in service fd00:1122:3344:106::21 + └─ + expunged ✓ + + + sled aff6c093-197d-42c5-ad80-9f10ba051a34 (active, config generation 2 -> 3): + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-f3e1cbbc-5682-46ce-93a2-00a4a603bb63 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crucible aa71e3e0-8b92-402e-89a9-4f3252af2863 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/external_dns 0e924845-9e93-4313-bcce-23d534fcc633 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/internal_dns 2f73afe1-14e7-4625-85a6-645d9efaa370 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone cbb667a8-71f0-4ef3-a8ec-3149eaab45e0 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_crucible_8e3dd7a4-75a3-4917-a6f4-0991bbdef7ea 24bc6a37-a936-4453-96fe-61f15e9535d7 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_crucible_pantry_d07a1fed-4235-4821-a1e5-f7eb2646ff33 a2807343-8c57-45f2-877a-ca064e2e28e2 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_external_dns_43a0588f-5b57-469b-a173-db6cb6105e4c 27521fba-e5e3-418f-975b-a499010bf840 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_internal_dns_97753dbd-5a0f-4273-b1be-db6bb2b69381 f41fb7c9-c08d-463b-9095-dd7b7a39bd70 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_nexus_33862f97-2897-4d53-a9a6-78a80f7eb13f b8457107-50ca-4454-a17d-f9c0d4f94cde in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/zone/oxz_ntp_e8fe709c-725f-4bb2-b714-ffcda13a9e54 febe87b8-c8ac-4401-b9ee-bbc3be700946 in service none none off + oxp_f3e1cbbc-5682-46ce-93a2-00a4a603bb63/crypt/debug d6f0564e-e4a8-4f6f-b122-0314ff473b20 in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- + internal_ntp e8fe709c-725f-4bb2-b714-ffcda13a9e54 install dataset in service fd00:1122:3344:104::21 +* crucible 8e3dd7a4-75a3-4917-a6f4-0991bbdef7ea - install dataset in service fd00:1122:3344:104::25 + └─ + artifact: version 1.0.0 +* crucible_pantry d07a1fed-4235-4821-a1e5-f7eb2646ff33 - install dataset in service fd00:1122:3344:104::24 + └─ + artifact: version 1.0.0 +* external_dns 
43a0588f-5b57-469b-a173-db6cb6105e4c - install dataset in service fd00:1122:3344:104::23 + └─ + artifact: version 1.0.0 +* internal_dns 97753dbd-5a0f-4273-b1be-db6bb2b69381 - install dataset in service fd00:1122:3344:3::1 + └─ + artifact: version 1.0.0 +* nexus 33862f97-2897-4d53-a9a6-78a80f7eb13f - install dataset in service fd00:1122:3344:104::22 + └─ + artifact: version 1.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 1 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ++ sled 0 model0 serial0 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion } + + +internal DNS: +* DNS zone: "control-plane.oxide.internal": + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 33862f97-2897-4d53-a9a6-78a80f7eb13f.host (records: 1) + AAAA fd00:1122:3344:104::22 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3a7c2683-58bc-479c-9c16-2f9dfc102e29.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 43a0588f-5b57-469b-a173-db6cb6105e4c.host (records: 1) + AAAA fd00:1122:3344:104::23 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 +- name: 824150c0-3fa4-4bac-9d14-c47ad04c9f3a.host (records: 1) +- AAAA fd00:1122:3344:106::22 + name: 8e3dd7a4-75a3-4917-a6f4-0991bbdef7ea.host (records: 1) + AAAA fd00:1122:3344:104::25 + name: 97753dbd-5a0f-4273-b1be-db6bb2b69381.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled (records: 1) + AAAA fd00:1122:3344:106::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: 
_clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + SRV port 17000 d07a1fed-4235-4821-a1e5-f7eb2646ff33.host.control-plane.oxide.internal + name: _crucible._tcp.3a7c2683-58bc-479c-9c16-2f9dfc102e29 (records: 1) + SRV port 32345 3a7c2683-58bc-479c-9c16-2f9dfc102e29.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal +- name: _crucible._tcp.824150c0-3fa4-4bac-9d14-c47ad04c9f3a (records: 1) +- SRV port 32345 824150c0-3fa4-4bac-9d14-c47ad04c9f3a.host.control-plane.oxide.internal + name: _crucible._tcp.8e3dd7a4-75a3-4917-a6f4-0991bbdef7ea (records: 1) + SRV port 32345 8e3dd7a4-75a3-4917-a6f4-0991bbdef7ea.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.ecbe0b3d-1acc-44b2-b6d4-f4d2770516e4 (records: 1) + SRV port 32345 ecbe0b3d-1acc-44b2-b6d4-f4d2770516e4.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 43a0588f-5b57-469b-a173-db6cb6105e4c.host.control-plane.oxide.internal + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal +* name: _internal-ntp._tcp (records: 6 -> 5) +- SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal +- SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal +- SRV port 123 b910534b-2a53-4335-a3d9-5311d2f3186a.host.control-plane.oxide.internal +- SRV port 123 db288a1e-c33c-44ca-8c79-9a8978afa34d.host.control-plane.oxide.internal +- SRV port 123 dd66f033-4fe8-438e-afb4-29d3561d4c3e.host.control-plane.oxide.internal +- SRV port 123 e8fe709c-725f-4bb2-b714-ffcda13a9e54.host.control-plane.oxide.internal ++ SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal ++ SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal ++ SRV port 123 b910534b-2a53-4335-a3d9-5311d2f3186a.host.control-plane.oxide.internal ++ SRV port 123 dd66f033-4fe8-438e-afb4-29d3561d4c3e.host.control-plane.oxide.internal ++ SRV port 123 e8fe709c-725f-4bb2-b714-ffcda13a9e54.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 97753dbd-5a0f-4273-b1be-db6bb2b69381.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 33862f97-2897-4d53-a9a6-78a80f7eb13f.host.control-plane.oxide.internal + SRV port 12221 
466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 6) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 9a867dc9-d505-427f-9eff-cdb1d4d9bd73.sled.control-plane.oxide.internal + SRV port 12348 aff6c093-197d-42c5-ad80-9f10ba051a34.sled.control-plane.oxide.internal + SRV port 12348 b82ede02-399c-48c6-a1de-411df4fa49a7.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: aff6c093-197d-42c5-ad80-9f10ba051a34.sled (records: 1) + AAAA fd00:1122:3344:104::1 + name: b82ede02-399c-48c6-a1de-411df4fa49a7.sled (records: 1) + AAAA fd00:1122:3344:105::1 + name: b910534b-2a53-4335-a3d9-5311d2f3186a.host (records: 1) + AAAA fd00:1122:3344:105::21 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d07a1fed-4235-4821-a1e5-f7eb2646ff33.host (records: 1) + AAAA fd00:1122:3344:104::24 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 +- name: db288a1e-c33c-44ca-8c79-9a8978afa34d.host (records: 1) +- AAAA fd00:1122:3344:106::21 + name: dd66f033-4fe8-438e-afb4-29d3561d4c3e.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: e8fe709c-725f-4bb2-b714-ffcda13a9e54.host (records: 1) + AAAA fd00:1122:3344:104::21 + name: ecbe0b3d-1acc-44b2-b6d4-f4d2770516e4.host (records: 1) + AAAA fd00:1122:3344:105::22 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index b21f4cb5c76..8892b3fbb48 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -1,13 +1,13 @@ using provided RNG seed: reconfigurator-cli-test > # Load example system with 7 sleds: > # -> # sled 0: unset -> unset (unchanged) -> # sled 1: unset -> set -> # sled 2: set -> unset -> # sled 3: set -> set (unchanged) -> # sled 4: set -> set (changed) -> # sled 5: set -> set (unchanged) but change something else -> # sled 6: set -> sled removed +> # serial0: unset -> unset (unchanged) +> # serial1: unset -> set +> # serial2: set -> unset +> # serial3: set -> set (unchanged) +> # serial4: set -> set (changed) +> # serial5: set -> set (unchanged) but change something else +> # serial6: set -> sled removed > # > # We'll also add another sled below (new_sled_id) with > # remove_mupdate_override set. 
@@ -20,21 +20,31 @@ loaded example system with: - collection: f45ba181-4b56-42cc-a762-874d90184a43 - blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 +> sled-list +ID SERIAL NZPOOLS SUBNET +2b8f0cb3-0295-4b3c-bc58-4fe88b57112c serial1 0 fd00:1122:3344:102::/64 +98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 serial0 0 fd00:1122:3344:101::/64 +9a867dc9-d505-427f-9eff-cdb1d4d9bd73 serial5 0 fd00:1122:3344:106::/64 +aff6c093-197d-42c5-ad80-9f10ba051a34 serial3 0 fd00:1122:3344:104::/64 +b82ede02-399c-48c6-a1de-411df4fa49a7 serial4 0 fd00:1122:3344:105::/64 +d81c6a84-79b8-4958-ae41-ea46c9b19763 serial2 0 fd00:1122:3344:103::/64 +e96e226f-4ed9-4c01-91b9-69a9cd076c9e serial6 0 fd00:1122:3344:107::/64 + > # Set the field on sleds 2-6 (0-indexed). -> blueprint-edit latest set-remove-mupdate-override 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 00000000-0000-0000-0000-000000000000 +> blueprint-edit latest set-remove-mupdate-override serial2 00000000-0000-0000-0000-000000000000 blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set remove_mupdate_override to 00000000-0000-0000-0000-000000000000 -> blueprint-edit latest set-remove-mupdate-override aff6c093-197d-42c5-ad80-9f10ba051a34 00000000-0000-0000-0000-000000000000 +> blueprint-edit latest set-remove-mupdate-override serial3 00000000-0000-0000-0000-000000000000 blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 created from latest blueprint (8da82a8e-bf97-4fbd-8ddd-9f6462732cf1): set remove_mupdate_override to 00000000-0000-0000-0000-000000000000 -> blueprint-edit latest set-remove-mupdate-override b82ede02-399c-48c6-a1de-411df4fa49a7 00000000-0000-0000-0000-000000000000 +> blueprint-edit latest set-remove-mupdate-override serial4 00000000-0000-0000-0000-000000000000 blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 created from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4): set remove_mupdate_override to 00000000-0000-0000-0000-000000000000 -> blueprint-edit latest set-remove-mupdate-override d81c6a84-79b8-4958-ae41-ea46c9b19763 00000000-0000-0000-0000-000000000000 +> blueprint-edit latest set-remove-mupdate-override serial5 00000000-0000-0000-0000-000000000000 blueprint df06bb57-ad42-4431-9206-abff322896c7 created from latest blueprint (af934083-59b5-4bf6-8966-6fb5292c29e1): set remove_mupdate_override to 00000000-0000-0000-0000-000000000000 -> blueprint-edit latest set-remove-mupdate-override e96e226f-4ed9-4c01-91b9-69a9cd076c9e 00000000-0000-0000-0000-000000000000 +> blueprint-edit latest set-remove-mupdate-override serial6 00000000-0000-0000-0000-000000000000 blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba created from latest blueprint (df06bb57-ad42-4431-9206-abff322896c7): set remove_mupdate_override to 00000000-0000-0000-0000-000000000000 @@ -215,12 +225,12 @@ parent: df06bb57-ad42-4431-9206-abff322896c7 > # Now make another blueprint, starting by adding a new sled and removing sled 6. 
> sled-add --ndisks 0 -added sled 00320471-945d-413c-85e7-03e091a70b3c +added sled 00320471-945d-413c-85e7-03e091a70b3c (serial: serial7) -> blueprint-edit latest debug remove-sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e +> blueprint-edit latest debug remove-sled serial6 blueprint 9034c710-3e57-45f3-99e5-4316145e87ac created from latest blueprint (7f976e0d-d2a5-4eeb-9e82-c82bc2824aba): debug: removed sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e from blueprint -> sled-remove e96e226f-4ed9-4c01-91b9-69a9cd076c9e +> sled-remove serial6 removed sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e from system > inventory-generate @@ -228,19 +238,19 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > # Edit sleds 1, 2, 4, 5, and the new one. -> blueprint-edit latest set-remove-mupdate-override 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 ffffffff-ffff-ffff-ffff-ffffffffffff +> blueprint-edit latest set-remove-mupdate-override serial1 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 created from latest blueprint (9034c710-3e57-45f3-99e5-4316145e87ac): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff -> blueprint-edit latest set-remove-mupdate-override 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 unset +> blueprint-edit latest set-remove-mupdate-override serial2 unset blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 created from latest blueprint (d60afc57-f15d-476c-bd0f-b1071e2bb976): unset remove_mupdate_override -> blueprint-edit latest set-remove-mupdate-override b82ede02-399c-48c6-a1de-411df4fa49a7 ffffffff-ffff-ffff-ffff-ffffffffffff +> blueprint-edit latest set-remove-mupdate-override serial4 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint 626487fa-7139-45ec-8416-902271fc730b created from latest blueprint (a5a8f242-ffa5-473c-8efd-2acf2dc0b736): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff -> blueprint-edit latest debug force-sled-generation-bump d81c6a84-79b8-4958-ae41-ea46c9b19763 -blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b created from latest blueprint (626487fa-7139-45ec-8416-902271fc730b): debug: forced sled d81c6a84-79b8-4958-ae41-ea46c9b19763 generation bump +> blueprint-edit latest debug force-sled-generation-bump serial5 +blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b created from latest blueprint (626487fa-7139-45ec-8416-902271fc730b): debug: forced sled 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 generation bump -> blueprint-edit latest set-remove-mupdate-override 00320471-945d-413c-85e7-03e091a70b3c ffffffff-ffff-ffff-ffff-ffffffffffff +> blueprint-edit latest set-remove-mupdate-override serial7 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 created from latest blueprint (c1a0d242-9160-40f4-96ae-61f8f40a0b1b): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff @@ -255,17 +265,17 @@ to: blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 MODIFIED SLEDS: - sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 1 -> 2): + sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 1 -> 2): + will remove mupdate override: (none) -> ffffffff-ffff-ffff-ffff-ffffffffffff sled 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 (active, config generation 2 -> 3): -- will remove mupdate override: 00000000-0000-0000-0000-000000000000 -> (none) + will remove mupdate override: 00000000-0000-0000-0000-000000000000 (unchanged) sled b82ede02-399c-48c6-a1de-411df4fa49a7 (active, config generation 2 -> 3): * will remove mupdate override: 
00000000-0000-0000-0000-000000000000 -> ffffffff-ffff-ffff-ffff-ffffffffffff sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): - will remove mupdate override: 00000000-0000-0000-0000-000000000000 (unchanged) +- will remove mupdate override: 00000000-0000-0000-0000-000000000000 -> (none) ADDED SLEDS: @@ -348,7 +358,8 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 - sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 1) + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2) + will remove mupdate override: ffffffff-ffff-ffff-ffff-ffffffffffff physical disks: ------------------------------------- @@ -369,8 +380,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 - sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 2) - will remove mupdate override: ffffffff-ffff-ffff-ffff-ffffffffffff + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 1) physical disks: ------------------------------------- @@ -392,6 +402,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 sled: 9a867dc9-d505-427f-9eff-cdb1d4d9bd73 (active, config generation 3) + will remove mupdate override: 00000000-0000-0000-0000-000000000000 physical disks: ------------------------------------- @@ -457,7 +468,6 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3) - will remove mupdate override: 00000000-0000-0000-0000-000000000000 physical disks: ------------------------------------- diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout index a9b9d834056..26619691d05 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-stdout @@ -18,14 +18,14 @@ T ENA ID PARENT TIME_CREATED error: attempted to access sled dde1c0e2-b10d-4621-b420-f179f7a7a00a not found in system > sled-add dde1c0e2-b10d-4621-b420-f179f7a7a00a -added sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +added sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (serial: serial0) > sled-list ID SERIAL NZPOOLS SUBNET dde1c0e2-b10d-4621-b420-f179f7a7a00a serial0 10 fd00:1122:3344:101::/64 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("0.0.1") @@ -54,10 +54,10 @@ zpools (10): > sled-add 90c1102a-b9f5-4d88-92a2-60d54a2d98cc -added sled 90c1102a-b9f5-4d88-92a2-60d54a2d98cc +added sled 90c1102a-b9f5-4d88-92a2-60d54a2d98cc (serial: serial1) -> sled-add 04ef3330-c682-4a08-8def-fcc4bef31bcd -added sled 04ef3330-c682-4a08-8def-fcc4bef31bcd +> sled-add 04ef3330-c682-4a08-8def-fcc4bef31bcd --policy non-provisionable +added sled 04ef3330-c682-4a08-8def-fcc4bef31bcd (serial: serial2) > sled-list ID SERIAL NZPOOLS SUBNET @@ -72,7 +72,7 @@ dde1c0e2-b10d-4621-b420-f179f7a7a00a serial0 10 fd00:1122:3344:101::/64 set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a SP versions: active -> 1.0.0 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("1.0.0") @@ -104,7 +104,7 @@ zpools (10): set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a SP versions: inactive -> 2.0.0 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled 
dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("1.0.0") @@ -136,7 +136,7 @@ zpools (10): set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a SP versions: active -> 3.0.0 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("3.0.0") @@ -168,7 +168,7 @@ zpools (10): set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a SP versions: active -> 4.0.0, inactive -> invalid > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("4.0.0") @@ -200,7 +200,7 @@ zpools (10): set sled dde1c0e2-b10d-4621-b420-f179f7a7a00a SP versions: active -> 4.0.0, inactive -> 5.0.0 > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("4.0.0") @@ -264,7 +264,7 @@ result: > sled-show dde1c0e2-b10d-4621-b420-f179f7a7a00a -sled dde1c0e2-b10d-4621-b420-f179f7a7a00a +sled dde1c0e2-b10d-4621-b420-f179f7a7a00a (in service, active) serial serial0 subnet fd00:1122:3344:101::/64 SP active version: Some("4.0.0") diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 8218fd338b9..0f253e5c83a 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -192,6 +192,9 @@ f45ba181-4b56-42cc-a762-874d90184a43 0 > # First step: upgrade one SP. > blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 f45ba181-4b56-42cc-a762-874d90184a43 +INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -374,6 +377,9 @@ external DNS: > # If we generate another plan, there should be no change. 
> blueprint-plan 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 f45ba181-4b56-42cc-a762-874d90184a43 +INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -557,6 +563,9 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 SP versions: active -> 1.0.0 generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds > blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 +INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -750,6 +759,9 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan af934083-59b5-4bf6-8966-6fb5292c29e1 61f451b3-2121-4ed6-91c7-a550054f6c21 +INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -941,6 +953,9 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: active -> 1.0.0 generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds > blueprint-plan df06bb57-ad42-4431-9206-abff322896c7 b1bda47d-2c19-4fba-96e3-d9df28db7436 +INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -1134,6 +1149,9 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds > blueprint-plan 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba 
a71f7a73-35a6-45e8-acbe-f1c5925eed69
+INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c
+INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6
+INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763
 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0
 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1
 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0
diff --git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
index 7a12308dc01..115dad91bdd 100644
--- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
@@ -306,6 +306,10 @@ pub(crate) enum Operation {
         num_datasets_expunged: usize,
         num_zones_expunged: usize,
     },
+    SledNoopZoneImageSourcesUpdated {
+        sled_id: SledUuid,
+        count: usize,
+    },
 }
 
 impl fmt::Display for Operation {
@@ -370,6 +374,13 @@ impl fmt::Display for Operation {
                     {num_zones_expunged} zones)"
                 )
             }
+            Self::SledNoopZoneImageSourcesUpdated { sled_id, count } => {
+                write!(
+                    f,
+                    "sled {sled_id}: performed {count} noop \
+                     zone image source updates"
+                )
+            }
         }
     }
 }
@@ -1136,6 +1147,20 @@ impl<'a> BlueprintBuilder<'a> {
         Ok(datasets.into())
     }
 
+    /// Returns the remove_mupdate_override field for a sled.
+    pub(crate) fn sled_get_remove_mupdate_override(
+        &self,
+        sled_id: SledUuid,
+    ) -> Result<Option<MupdateOverrideUuid>, Error> {
+        let editor = self.sled_editors.get(&sled_id).ok_or_else(|| {
+            Error::Planner(anyhow!(
+                "tried to get remove_mupdate_override for \
+                 unknown sled {sled_id}"
+            ))
+        })?;
+        Ok(editor.get_remove_mupdate_override())
+    }
+
     fn next_internal_dns_gz_address_index(&self, sled_id: SledUuid) -> u32 {
         let used_internal_dns_gz_address_indices = self
             .current_sled_zones(
diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
index edb84d97d42..202584c93e0 100644
--- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
@@ -286,6 +286,18 @@ impl SledEditor {
         }
     }
 
+    /// Returns the remove_mupdate_override field for this sled.
+    pub fn get_remove_mupdate_override(&self) -> Option<MupdateOverrideUuid> {
+        match &self.0 {
+            InnerSledEditor::Active(editor) => {
+                *editor.remove_mupdate_override.value()
+            }
+            InnerSledEditor::Decommissioned(sled) => {
+                sled.config.remove_mupdate_override
+            }
+        }
+    }
+
     fn as_active_mut(
         &mut self,
     ) -> Result<&mut ActiveSledEditor, SledEditError> {
@@ -343,8 +355,6 @@ impl SledEditor {
     }
 
     /// Sets the image source for a zone.
-    ///
-    /// Currently only used by test code.
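+    ///
+    /// With this change the method is presumably no longer test-only: the
+    /// planner's noop image source pass also updates zone image sources
+    /// through the blueprint builder.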
pub fn set_zone_image_source(
        &mut self,
        zone_id: &OmicronZoneUuid,
diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/scalar.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/scalar.rs
index 74eea138f86..ceafd15606d 100644
--- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/scalar.rs
+++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/scalar.rs
@@ -16,7 +16,6 @@ impl<T> ScalarEditor<T> {
         ScalarEditor { original, value: EditValue::Original }
     }
 
-    #[expect(dead_code)]
     pub(crate) fn value(&self) -> &T {
         match &self.value {
             EditValue::Original => &self.original,
diff --git a/nexus/reconfigurator/planning/src/example.rs b/nexus/reconfigurator/planning/src/example.rs
index 8a351b660bf..b1a82ed7f23 100644
--- a/nexus/reconfigurator/planning/src/example.rs
+++ b/nexus/reconfigurator/planning/src/example.rs
@@ -13,11 +13,13 @@ use crate::blueprint_builder::BlueprintBuilder;
 use crate::planner::rng::PlannerRng;
 use crate::system::SledBuilder;
 use crate::system::SystemDescription;
+use anyhow::bail;
 use nexus_inventory::CollectionBuilderRng;
 use nexus_types::deployment::Blueprint;
 use nexus_types::deployment::OmicronZoneNic;
 use nexus_types::deployment::PlanningInput;
 use nexus_types::deployment::SledFilter;
+use nexus_types::external_api::views::SledPolicy;
 use nexus_types::inventory::Collection;
 use omicron_common::policy::CRUCIBLE_PANTRY_REDUNDANCY;
 use omicron_common::policy::INTERNAL_DNS_REDUNDANCY;
@@ -177,9 +179,9 @@ pub fn example(
 pub struct ExampleSystemBuilder {
     log: slog::Logger,
     rng: ExampleSystemRng,
+    sled_settings: Vec<BuilderSledSettings>,
     // TODO: Store a Policy struct instead of these fields:
     // https://github.com/oxidecomputer/omicron/issues/6803
-    nsleds: usize,
     ndisks_per_sled: u8,
     // None means nsleds
     nexus_count: Option<ZoneCount>,
@@ -212,7 +214,10 @@ impl ExampleSystemBuilder {
                 "rng_seed" => rng.seed.clone(),
             )),
             rng,
-            nsleds: Self::DEFAULT_N_SLEDS,
+            sled_settings: vec![
+                BuilderSledSettings::default();
+                Self::DEFAULT_N_SLEDS
+            ],
             ndisks_per_sled: SledBuilder::DEFAULT_NPOOLS,
             nexus_count: None,
             internal_dns_count: ZoneCount(INTERNAL_DNS_REDUNDANCY),
@@ -228,7 +233,15 @@ impl ExampleSystemBuilder {
     /// Currently, this value can be anywhere between 0 and 5. (More can be
     /// added in the future if necessary.)
     pub fn nsleds(mut self, nsleds: usize) -> Self {
-        self.nsleds = nsleds;
+        // Grow or shrink the sled settings to match the requested count.
+        if nsleds > self.sled_settings.len() {
+            self.sled_settings.extend(vec![
+                BuilderSledSettings::default();
+                nsleds - self.sled_settings.len()
+            ]);
+        } else if nsleds < self.sled_settings.len() {
+            self.sled_settings.truncate(nsleds);
+        }
         self
     }
 
@@ -326,8 +339,27 @@ impl ExampleSystemBuilder {
         self
     }
 
+    /// Set the policy for a sled in the example system by index.
+    ///
+    /// Returns an error if `index >= nsleds`.
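+    ///
+    /// A sketch of intended usage (hypothetical; marks the second sled of a
+    /// builder as non-provisionable):
+    ///
+    /// ```ignore
+    /// let builder = builder.with_sled_policy(
+    ///     1,
+    ///     SledPolicy::InService {
+    ///         provision_policy: SledProvisionPolicy::NonProvisionable,
+    ///     },
+    /// )?;
+    /// ```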
+    pub fn with_sled_policy(
+        mut self,
+        index: usize,
+        policy: SledPolicy,
+    ) -> anyhow::Result<Self> {
+        if index >= self.sled_settings.len() {
+            bail!(
+                "sled index {} out of range (0..{})",
+                index,
+                self.sled_settings.len(),
+            );
+        }
+        self.sled_settings[index].policy = policy;
+        Ok(self)
+    }
+
     fn get_nexus_zones(&self) -> ZoneCount {
-        self.nexus_count.unwrap_or(ZoneCount(self.nsleds))
+        self.nexus_count.unwrap_or(ZoneCount(self.sled_settings.len()))
     }
 
     pub fn get_internal_dns_zones(&self) -> usize {
@@ -347,7 +379,7 @@ impl ExampleSystemBuilder {
         slog::debug!(
             &self.log,
             "Creating example system";
-            "nsleds" => self.nsleds,
+            "nsleds" => self.sled_settings.len(),
             "ndisks_per_sled" => self.ndisks_per_sled,
             "nexus_count" => nexus_count.0,
             "internal_dns_count" => self.internal_dns_count.0,
@@ -366,15 +398,19 @@ impl ExampleSystemBuilder {
             .target_nexus_zone_count(nexus_count.0)
             .target_internal_dns_zone_count(self.internal_dns_count.0)
             .target_crucible_pantry_zone_count(self.crucible_pantry_count.0);
-        let sled_ids: Vec<_> =
-            (0..self.nsleds).map(|_| rng.sled_rng.next()).collect();
+        let sled_ids_with_settings: Vec<_> = self
+            .sled_settings
+            .iter()
+            .map(|settings| (rng.sled_rng.next(), settings))
+            .collect();
 
-        for sled_id in &sled_ids {
+        for (sled_id, settings) in &sled_ids_with_settings {
             let _ = system
                 .sled(
                     SledBuilder::new()
                         .id(*sled_id)
-                        .npools(self.ndisks_per_sled),
+                        .npools(self.ndisks_per_sled)
+                        .policy(settings.policy),
                 )
                 .unwrap();
         }
@@ -426,8 +462,14 @@ impl ExampleSystemBuilder {
             );
         }
 
-        for (i, (sled_id, sled_details)) in
-            base_input.all_sleds(SledFilter::Commissioned).enumerate()
+        let discretionary_sled_count =
+            base_input.all_sled_ids(SledFilter::Discretionary).count();
+
+        // * Create disks and non-discretionary zones on all sleds.
+        // * Only create discretionary zones on discretionary sleds.
+        let mut discretionary_ix = 0;
+        for (sled_id, sled_details) in
+            base_input.all_sleds(SledFilter::Commissioned)
         {
             if self.create_disks_in_blueprint {
                 let _ = builder
@@ -436,25 +478,44 @@ impl ExampleSystemBuilder {
             }
             if self.create_zones {
                 let _ = builder.sled_ensure_zone_ntp(sled_id).unwrap();
-                for _ in 0..nexus_count.on(i, self.nsleds) {
-                    builder
-                        .sled_add_zone_nexus_with_config(sled_id, false, vec![])
-                        .unwrap();
-                }
-                if i == 0 {
-                    builder.sled_add_zone_clickhouse(sled_id).unwrap();
-                }
-                for _ in 0..self.internal_dns_count.on(i, self.nsleds) {
-                    builder.sled_add_zone_internal_dns(sled_id).unwrap();
-                }
-                for _ in 0..self.external_dns_count.on(i, self.nsleds) {
-                    builder.sled_add_zone_external_dns(sled_id).unwrap();
-                }
-                for _ in 0..self.crucible_pantry_count.on(i, self.nsleds) {
-                    builder.sled_add_zone_crucible_pantry(sled_id).unwrap();
+
+                // Create discretionary zones if allowed.
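+                // (Discretionary zones here are Nexus, Clickhouse, internal
+                // DNS, external DNS, and Crucible pantry; the NTP zone above
+                // and the per-zpool Crucible zones below are created on
+                // every sled regardless of policy.)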
+ if sled_details.policy.matches(SledFilter::Discretionary) { + for _ in 0..nexus_count + .on(discretionary_ix, discretionary_sled_count) + { + builder + .sled_add_zone_nexus_with_config( + sled_id, + false, + vec![], + ) + .unwrap(); + } + if discretionary_ix == 0 { + builder.sled_add_zone_clickhouse(sled_id).unwrap(); + } + for _ in 0..self + .internal_dns_count + .on(discretionary_ix, discretionary_sled_count) + { + builder.sled_add_zone_internal_dns(sled_id).unwrap(); + } + for _ in 0..self + .external_dns_count + .on(discretionary_ix, discretionary_sled_count) + { + builder.sled_add_zone_external_dns(sled_id).unwrap(); + } + for _ in 0..self + .crucible_pantry_count + .on(discretionary_ix, discretionary_sled_count) + { + builder.sled_add_zone_crucible_pantry(sled_id).unwrap(); + } + discretionary_ix += 1; } - } - if self.create_zones { + for pool_name in sled_details.resources.zpools.keys() { let _ = builder .sled_ensure_zone_crucible(sled_id, *pool_name) @@ -526,6 +587,18 @@ impl ExampleSystemBuilder { } } +/// Per-sled state. +#[derive(Clone, Debug)] +struct BuilderSledSettings { + policy: SledPolicy, +} + +impl Default for BuilderSledSettings { + fn default() -> Self { + Self { policy: SledPolicy::provisionable() } + } +} + // A little wrapper to try and avoid having an `on` function which takes 3 // usize parameters. #[derive(Clone, Copy, Debug)] diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 1fc84a9ea6c..d43e24a8bcf 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -22,6 +22,7 @@ use nexus_types::deployment::Blueprint; use nexus_types::deployment::BlueprintPhysicalDiskDisposition; use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; +use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::CockroachDbClusterVersion; use nexus_types::deployment::CockroachDbPreserveDowngrade; use nexus_types::deployment::CockroachDbSettings; @@ -29,6 +30,7 @@ use nexus_types::deployment::DiskFilter; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledDetails; use nexus_types::deployment::SledFilter; +use nexus_types::deployment::TargetReleaseDescription; use nexus_types::deployment::ZpoolFilter; use nexus_types::external_api::views::PhysicalDiskPolicy; use nexus_types::external_api::views::SledPolicy; @@ -37,11 +39,13 @@ use nexus_types::inventory::Collection; use omicron_common::policy::INTERNAL_DNS_REDUNDANCY; use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; +use slog::debug; use slog::error; use slog::{Logger, info, warn}; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; +use std::collections::HashMap; use std::str::FromStr; pub(crate) use self::omicron_zone_placement::DiscretionaryOmicronZone; @@ -148,6 +152,7 @@ impl<'a> Planner<'a> { fn do_plan(&mut self) -> Result<(), Error> { self.do_plan_expunge()?; self.do_plan_decommission()?; + self.do_plan_noop_image_source()?; self.do_plan_add()?; if let UpdateStepResult::ContinueToNextStep = self.do_plan_mgs_updates() { @@ -494,6 +499,178 @@ impl<'a> Planner<'a> { Ok(()) } + fn do_plan_noop_image_source(&mut self) -> Result<(), Error> { + let TargetReleaseDescription::TufRepo(current_artifacts) = + self.input.tuf_repo().description() + else { + info!( + self.log, + "skipping noop image source check for all sleds \ + (no current TUF repo)", + ); 
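+            // Without a target release there is nothing to compare the
+            // install dataset contents against, so leave every zone's image
+            // source unchanged.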
+            return Ok(());
+        };
+        let artifacts_by_hash: HashMap<_, _> = current_artifacts
+            .artifacts
+            .iter()
+            .map(|artifact| (artifact.hash, artifact))
+            .collect();
+
+        for sled_id in self.input.all_sled_ids(SledFilter::InService) {
+            let Some(inv_sled) = self.inventory.sled_agents.get(&sled_id)
+            else {
+                info!(
+                    self.log,
+                    "skipping noop image source check \
+                     (sled not present in latest inventory collection)";
+                    "sled_id" => %sled_id,
+                );
+                continue;
+            };
+
+            let zone_manifest = match &inv_sled
+                .zone_image_resolver
+                .zone_manifest
+                .boot_inventory
+            {
+                Ok(zm) => zm,
+                Err(message) => {
+                    // This is a string so we don't use InlineErrorChain::new.
+                    let message: &str = message;
+                    warn!(
+                        self.log,
+                        "skipping noop image source check since \
+                         sled-agent encountered error retrieving zone manifest \
+                         (this is abnormal)";
+                        "sled_id" => %sled_id,
+                        "error" => %message,
+                    );
+                    continue;
+                }
+            };
+
+            // Does the blueprint have the remove_mupdate_override field set for
+            // this sled? If it does, we don't want to touch the zones on this
+            // sled (they should all be InstallDataset until the
+            // remove_mupdate_override field is cleared).
+            if let Some(id) =
+                self.blueprint.sled_get_remove_mupdate_override(sled_id)?
+            {
+                info!(
+                    self.log,
+                    "skipping noop image source check on sled \
+                     (blueprint has remove_mupdate_override set for sled)";
+                    "sled_id" => %sled_id,
+                    "bp_remove_mupdate_override_id" => %id,
+                );
+                continue;
+            }
+
+            // Which zones have image sources set to InstallDataset?
+            let install_dataset_zones = self
+                .blueprint
+                .current_sled_zones(
+                    sled_id,
+                    BlueprintZoneDisposition::is_in_service,
+                )
+                .filter(|z| {
+                    z.image_source == BlueprintZoneImageSource::InstallDataset
+                });
+
+            // Out of these, which zones' hashes (as reported in the zone
+            // manifest) match the corresponding ones in the TUF repo?
+            let mut install_dataset_zone_count = 0;
+            let matching_zones: Vec<_> = install_dataset_zones
+                .inspect(|_| {
+                    install_dataset_zone_count += 1;
+                })
+                .filter_map(|z| {
+                    let file_name =
+                        format!("{}.tar.gz", z.kind().artifact_name());
+                    let Some(artifact) =
+                        zone_manifest.artifacts.get(file_name.as_str())
+                    else {
+                        // The blueprint indicates that a zone should be
+                        // present, but it isn't in the install dataset. This
+                        // might be an old install dataset that doesn't contain
+                        // a zone kind known to this version of Nexus. Not
+                        // normally a cause for concern.
+                        debug!(
+                            self.log,
+                            "blueprint zone not found in zone manifest, \
+                             ignoring for noop checks";
+                            "sled_id" => %sled_id,
+                            "zone_id" => %z.id,
+                            "kind" => z.kind().report_str(),
+                            "file_name" => file_name,
+                        );
+                        return None;
+                    };
+                    if let Err(message) = &artifact.status {
+                        // The artifact is somehow invalid and corrupt -- definitely
+                        // something to warn about and not proceed.
+                        warn!(
+                            self.log,
+                            "zone manifest inventory indicated install dataset \
+                             artifact is invalid, not using artifact (this is \
+                             abnormal)";
+                            "sled_id" => %sled_id,
+                            "zone_id" => %z.id,
+                            "kind" => z.kind().report_str(),
+                            "file_name" => file_name,
+                            "error" => %message,
+                        );
+                        return None;
+                    }
+
+                    // Does the hash match what's in the TUF repo?
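+                    // If not, skip the zone: a zone is only switched to the
+                    // Artifact image source when its install dataset bytes
+                    // are identical to an artifact in the TUF repo.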
+                    let Some(tuf_artifact) =
+                        artifacts_by_hash.get(&artifact.expected_hash)
+                    else {
+                        return None;
+                    };
+
+                    info!(
+                        self.log,
+                        "install dataset artifact hash matches TUF repo, \
+                         switching out the zone image source to Artifact";
+                        "sled_id" => %sled_id,
+                        "tuf_artifact_id" => %tuf_artifact.id,
+                    );
+                    Some((z.id, tuf_artifact))
+                })
+                .collect();
+
+            info!(
+                self.log,
+                "noop converting {}/{} install-dataset zones to artifact store",
+                matching_zones.len(),
+                install_dataset_zone_count;
+                "sled_id" => %sled_id,
+            );
+
+            // Set all these zones' image sources to the corresponding
+            // artifacts.
+            for (zone_id, tuf_artifact) in &matching_zones {
+                self.blueprint.sled_set_zone_source(
+                    sled_id,
+                    *zone_id,
+                    BlueprintZoneImageSource::from_available_artifact(
+                        tuf_artifact,
+                    ),
+                )?;
+            }
+
+            // Record the operation once per sled, not once per zone.
+            if !matching_zones.is_empty() {
+                self.blueprint.record_operation(
+                    Operation::SledNoopZoneImageSourcesUpdated {
+                        sled_id,
+                        count: matching_zones.len(),
+                    },
+                );
+            }
+        }
+
+        Ok(())
+    }
+
     fn do_plan_add(&mut self) -> Result<(), Error> {
         // Internal DNS is a prerequisite for bringing up all other zones. At
         // this point, we assume that internal DNS (as a service) is already
diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs
index f47975820e9..b03ffb1b2fb 100644
--- a/nexus/reconfigurator/planning/src/system.rs
+++ b/nexus/reconfigurator/planning/src/system.rs
@@ -24,6 +24,7 @@ use nexus_sled_agent_shared::inventory::InventoryZpool;
 use nexus_sled_agent_shared::inventory::OmicronSledConfig;
 use nexus_sled_agent_shared::inventory::SledRole;
 use nexus_sled_agent_shared::inventory::ZoneImageResolverInventory;
+use nexus_sled_agent_shared::inventory::ZoneManifestBootInventory;
 use nexus_types::deployment::ClickhousePolicy;
 use nexus_types::deployment::CockroachDbClusterVersion;
 use nexus_types::deployment::CockroachDbSettings;
@@ -270,6 +271,39 @@ impl SystemDescription {
         self
     }
 
+    /// Resolve a serial number into a sled ID.
+    pub fn serial_to_sled_id(&self, serial: &str) -> anyhow::Result<SledUuid> {
+        let sled_id = self.sleds.values().find_map(|sled| {
+            if let Some((_, sp_state)) = sled.sp_state() {
+                if sp_state.serial_number == serial {
+                    return Some(sled.sled_id);
+                }
+            }
+            None
+        });
+        sled_id.with_context(|| {
+            let known_serials = self
+                .sleds
+                .values()
+                .filter_map(|sled| {
+                    sled.sp_state()
+                        .map(|(_, sp_state)| sp_state.serial_number.as_str())
+                })
+                .collect::<Vec<_>>();
+            format!(
+                "sled not found with serial {serial} (known serials: {})",
+                known_serials.join(", "),
+            )
+        })
+    }
+
+    pub fn get_sled(&self, sled_id: SledUuid) -> anyhow::Result<&Sled> {
+        let Some(sled) = self.sleds.get(&sled_id) else {
+            bail!("sled not found with id {sled_id}");
+        };
+        Ok(sled)
+    }
+
     pub fn get_sled_mut(
         &mut self,
         sled_id: SledUuid,
@@ -323,6 +357,7 @@ impl SystemDescription {
             sled.unique,
             sled.hardware,
             hardware_slot,
+            sled.policy,
             sled.sled_config,
             sled.npools,
         );
@@ -449,6 +484,20 @@ impl SystemDescription {
         Ok(self)
     }
 
+    /// Set the zone manifest for a sled, using a boot inventory typically
+    /// generated from a `TufRepoDescription`.
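+    ///
+    /// A sketch of how the simulator might drive this (hypothetical;
+    /// `description` here is a `SimTufRepoDescription` from
+    /// nexus-reconfigurator-simulation):
+    ///
+    /// ```ignore
+    /// system.sled_set_zone_manifest(
+    ///     sled_id,
+    ///     description.to_boot_inventory(),
+    /// )?;
+    /// ```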
+    pub fn sled_set_zone_manifest(
+        &mut self,
+        sled_id: SledUuid,
+        boot_inventory: Result<ZoneManifestBootInventory, String>,
+    ) -> anyhow::Result<&mut Self> {
+        let sled = self.sleds.get_mut(&sled_id).with_context(|| {
+            format!("attempted to access sled {} not found in system", sled_id)
+        })?;
+        let sled = Arc::make_mut(sled);
+        sled.set_zone_manifest(boot_inventory);
+        Ok(self)
+    }
+
     pub fn sled_sp_active_version(
         &self,
         sled_id: SledUuid,
@@ -644,6 +693,7 @@ pub struct SledBuilder {
     hardware: SledHardware,
     hardware_slot: Option<u16>,
     sled_role: SledRole,
+    policy: SledPolicy,
     sled_config: OmicronSledConfig,
     npools: u8,
 }
@@ -663,6 +713,9 @@ impl SledBuilder {
             hardware_slot: None,
             sled_role: SledRole::Gimlet,
             sled_config: OmicronSledConfig::default(),
+            policy: SledPolicy::InService {
+                provision_policy: SledProvisionPolicy::Provisionable,
+            },
             npools: Self::DEFAULT_NPOOLS,
         }
     }
@@ -717,6 +770,12 @@ impl SledBuilder {
         self.sled_role = sled_role;
         self
     }
+
+    /// Sets this sled's policy.
+    pub fn policy(mut self, policy: SledPolicy) -> Self {
+        self.policy = policy;
+        self
+    }
 }
 
 /// Convenience structure summarizing `Sled` inputs that come from inventory
@@ -755,6 +814,7 @@ impl Sled {
         unique: Option<String>,
         hardware: SledHardware,
         hardware_slot: u16,
+        policy: SledPolicy,
         sled_config: OmicronSledConfig,
         nzpools: u8,
     ) -> Sled {
@@ -886,9 +946,7 @@ impl Sled {
             sled_id,
             inventory_sp,
             inventory_sled_agent,
-            policy: SledPolicy::InService {
-                provision_policy: SledProvisionPolicy::Provisionable,
-            },
+            policy,
             state: SledState::Active,
             resources: SledResources { subnet: sled_subnet, zpools },
             sp_active_caboose: Some(Arc::new(Self::default_sp_caboose(
@@ -1061,7 +1119,7 @@ impl Sled {
         });
     }
 
-    fn sp_state(&self) -> Option<&(u16, SpState)> {
+    pub fn sp_state(&self) -> Option<&(u16, SpState)> {
         self.inventory_sp.as_ref()
     }
 
@@ -1077,6 +1135,16 @@ impl Sled {
         self.sp_inactive_caboose.as_deref()
     }
 
+    fn set_zone_manifest(
+        &mut self,
+        boot_inventory: Result<ZoneManifestBootInventory, String>,
+    ) {
+        self.inventory_sled_agent
+            .zone_image_resolver
+            .zone_manifest
+            .boot_inventory = boot_inventory;
+    }
+
     /// Update the reported SP versions
     ///
     /// If either field is `None`, that field is _unchanged_.
diff --git a/nexus/reconfigurator/simulation/Cargo.toml b/nexus/reconfigurator/simulation/Cargo.toml
index 0f65c31c58d..98ff4b605f9 100644
--- a/nexus/reconfigurator/simulation/Cargo.toml
+++ b/nexus/reconfigurator/simulation/Cargo.toml
@@ -8,10 +8,12 @@ workspace = true
 
 [dependencies]
 anyhow.workspace = true
+camino.workspace = true
 chrono.workspace = true
 indexmap.workspace = true
 nexus-inventory.workspace = true
 nexus-reconfigurator-planning.workspace = true
+nexus-sled-agent-shared.workspace = true
 nexus-types.workspace = true
 omicron-common.workspace = true
 omicron-uuid-kinds.workspace = true
@@ -21,5 +23,6 @@ slog.workspace = true
 swrite.workspace = true
 sync-ptr.workspace = true
 thiserror.workspace = true
+tufaceous-artifact.workspace = true
 typed-rng.workspace = true
 uuid.workspace = true
diff --git a/nexus/reconfigurator/simulation/src/errors.rs b/nexus/reconfigurator/simulation/src/errors.rs
index 26b91cf70f8..75d76845882 100644
--- a/nexus/reconfigurator/simulation/src/errors.rs
+++ b/nexus/reconfigurator/simulation/src/errors.rs
@@ -2,6 +2,8 @@
 // License, v. 2.0. If a copy of the MPL was not distributed with this
 // file, You can obtain one at https://mozilla.org/MPL/2.0/.
+use std::collections::BTreeSet;
+
 use omicron_common::api::external::{Generation, Name};
 use omicron_uuid_kinds::CollectionUuid;
 use thiserror::Error;
@@ -128,3 +130,36 @@ impl NonEmptySystemError {
         Self {}
     }
 }
+
+/// Unknown zone names were provided to `SimTufRepoSource::simulate_zone_errors`.
+#[derive(Clone, Debug, Error)]
+#[error("unknown zone names `{}` (valid zone names: {})", self.unknown.join(", "), join(&self.known, ", "))]
+pub struct UnknownZoneNamesError {
+    /// The names of the unknown zones.
+    pub unknown: Vec<String>,
+
+    /// The set of known zone names.
+    pub known: BTreeSet<String>,
+}
+
+impl UnknownZoneNamesError {
+    pub(crate) fn new(unknown: Vec<String>, known: BTreeSet<String>) -> Self {
+        Self { unknown, known }
+    }
+}
+
+fn join<S: AsRef<str>>(
+    strings: impl IntoIterator<Item = S>,
+    separator: &str,
+) -> String {
+    let mut out = String::new();
+    let mut iter = strings.into_iter();
+    if let Some(first) = iter.next() {
+        out.push_str(first.as_ref());
+    }
+    for s in iter {
+        out.push_str(separator);
+        out.push_str(s.as_ref());
+    }
+    out
+}
diff --git a/nexus/reconfigurator/simulation/src/lib.rs b/nexus/reconfigurator/simulation/src/lib.rs
index 8f8c77e09f1..30ba3fdc80c 100644
--- a/nexus/reconfigurator/simulation/src/lib.rs
+++ b/nexus/reconfigurator/simulation/src/lib.rs
@@ -54,9 +54,11 @@ mod sim;
 mod state;
 mod system;
 mod utils;
+mod zone_images;
 
 pub use config::*;
 pub use rng::*;
 pub use sim::*;
 pub use state::*;
 pub use system::*;
+pub use zone_images::*;
diff --git a/nexus/reconfigurator/simulation/src/zone_images.rs b/nexus/reconfigurator/simulation/src/zone_images.rs
new file mode 100644
index 00000000000..251755054f2
--- /dev/null
+++ b/nexus/reconfigurator/simulation/src/zone_images.rs
@@ -0,0 +1,168 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+//! Simulation of TUF repos, zone images, and thereabouts.
+
+use std::collections::BTreeSet;
+
+use camino::Utf8Path;
+use nexus_sled_agent_shared::inventory::{
+    ZoneArtifactInventory, ZoneManifestBootInventory,
+};
+use omicron_common::{
+    api::external::TufRepoDescription, update::OmicronZoneManifestSource,
+};
+use swrite::{SWrite, swrite};
+use tufaceous_artifact::KnownArtifactKind;
+
+use crate::errors::UnknownZoneNamesError;
+
+/// The reconfigurator simulator's notion of a TUF repository.
+#[derive(Clone, Debug)]
+pub struct SimTufRepoDescription {
+    /// The description and manifest source, or a simulated error.
+    pub source: Result<SimTufRepoSource, String>,
+
+    /// A message describing the operation.
+    pub message: String,
+}
+
+impl SimTufRepoDescription {
+    /// Creates a new `SimTufRepoDescription`.
+    pub fn new(source: SimTufRepoSource) -> Self {
+        let message = source.full_message();
+        Self { source: Ok(source), message }
+    }
+
+    /// Creates a new description with a simulated error reading the zone
+    /// manifest.
+    pub fn new_error(message: String) -> Self {
+        Self { source: Err(message.clone()), message }
+    }
+
+    /// Generates a simulated [`ZoneManifestBootInventory`] or an error.
+    pub fn to_boot_inventory(
+        &self,
+    ) -> Result<ZoneManifestBootInventory, String> {
+        match &self.source {
+            Ok(source) => Ok(source.to_boot_inventory()),
+            Err(error) => {
+                Err(format!("reconfigurator-sim simulated error: {error}"))
+            }
+        }
+    }
+}
+
+/// The reconfigurator simulator's notion of a TUF repository where there wasn't
+/// an error reading the zone manifest.
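+///
+/// A hypothetical construction sketch (the repo description and mupdate ID
+/// are assumed to come from elsewhere in the simulation):
+///
+/// ```ignore
+/// let mut source = SimTufRepoSource::new(
+///     description,
+///     OmicronZoneManifestSource::Installinator { mupdate_id },
+///     "from repo at repo.zip".to_owned(),
+/// );
+/// // Simulate a validation error for the nexus zone image.
+/// source.simulate_zone_errors(["nexus"])?;
+/// let repo = SimTufRepoDescription::new(source);
+/// ```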
+#[derive(Clone, Debug)]
+pub struct SimTufRepoSource {
+    description: TufRepoDescription,
+    manifest_source: OmicronZoneManifestSource,
+    message: String,
+    known_zone_names: BTreeSet<String>,
+    error_zone_names: BTreeSet<String>,
+}
+
+impl SimTufRepoSource {
+    /// Creates a new `SimTufRepoSource`.
+    ///
+    /// The message should be of the form "from repo at ..." or "to target release".
+    pub fn new(
+        description: TufRepoDescription,
+        manifest_source: OmicronZoneManifestSource,
+        message: String,
+    ) -> Self {
+        let known_zone_names = description
+            .artifacts
+            .iter()
+            .filter_map(|x| {
+                (x.id.kind.to_known() == Some(KnownArtifactKind::Zone))
+                    .then(|| x.id.name.clone())
+            })
+            .collect();
+        Self {
+            description,
+            manifest_source,
+            message,
+            known_zone_names,
+            error_zone_names: BTreeSet::new(),
+        }
+    }
+
+    /// Simulates errors validating zones by the given name.
+    ///
+    /// Returns an error if any of the provided zone names weren't found in the
+    /// description.
+    pub fn simulate_zone_errors<I, S>(
+        &mut self,
+        zone_names: I,
+    ) -> Result<(), UnknownZoneNamesError>
+    where
+        I: IntoIterator<Item = S>,
+        S: AsRef<str>,
+    {
+        let (known, unknown): (Vec<_>, Vec<_>) = zone_names
+            .into_iter()
+            .map(|zone_name| zone_name.as_ref().to_owned())
+            .partition(|zone_name| self.known_zone_names.contains(zone_name));
+        if !unknown.is_empty() {
+            return Err(UnknownZoneNamesError::new(
+                unknown,
+                self.known_zone_names.clone(),
+            ));
+        }
+        self.error_zone_names.extend(known);
+        Ok(())
+    }
+
+    /// Generates a simulated [`ZoneManifestBootInventory`].
+    pub fn to_boot_inventory(&self) -> ZoneManifestBootInventory {
+        let artifacts = self
+            .description
+            .artifacts
+            .iter()
+            .filter_map(|artifact| {
+                if artifact.id.kind.to_known() != Some(KnownArtifactKind::Zone)
+                {
+                    return None;
+                }
+
+                let file_name = format!("{}.tar.gz", artifact.id.name);
+                let path = Utf8Path::new("/fake/path/install").join(&file_name);
+                let status =
+                    if self.error_zone_names.contains(&artifact.id.name) {
+                        Err("reconfigurator-sim: simulated error validating zone image".to_owned())
+                    } else {
+                        Ok(())
+                    };
+                Some(ZoneArtifactInventory {
+                    file_name,
+                    path,
+                    expected_size: artifact.size,
+                    expected_hash: artifact.hash,
+                    status,
+                })
+            })
+            .collect();
+        ZoneManifestBootInventory { source: self.manifest_source, artifacts }
+    }
+
+    /// Returns a message including the system version and the number of zone
+    /// errors.
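+    ///
+    /// For example (hypothetical values): `from repo at repo.zip (system
+    /// version 2.0.0, 1 zone errors)`.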
+    pub fn full_message(&self) -> String {
+        let mut message = self.message.clone();
+        swrite!(
+            message,
+            " (system version {}",
+            self.description.repo.system_version
+        );
+        if !self.error_zone_names.is_empty() {
+            swrite!(message, ", {} zone errors", self.error_zone_names.len());
+        }
+        message.push(')');
+
+        message
+    }
+}
diff --git a/update-common/manifests/fake-non-semver.toml b/update-common/manifests/fake-non-semver.toml
index ba805e1ba21..055d5261d2a 100644
--- a/update-common/manifests/fake-non-semver.toml
+++ b/update-common/manifests/fake-non-semver.toml
@@ -41,8 +41,17 @@ version = "2.0.0"
 [artifact.control_plane.source]
 kind = "composite-control-plane"
 zones = [
-    { kind = "fake", name = "zone1", size = "1MiB" },
-    { kind = "fake", name = "zone2", size = "1MiB" },
+    { kind = "fake", name = "clickhouse", size = "128KiB" },
+    { kind = "fake", name = "clickhouse_keeper", size = "128KiB" },
+    { kind = "fake", name = "clickhouse_server", size = "128KiB" },
+    { kind = "fake", name = "cockroachdb", size = "128KiB" },
+    { kind = "fake", name = "crucible-zone", size = "128KiB" },
+    { kind = "fake", name = "crucible-pantry-zone", size = "128KiB" },
+    { kind = "fake", name = "external-dns", size = "128KiB" },
+    { kind = "fake", name = "internal-dns", size = "128KiB" },
+    { kind = "fake", name = "ntp", size = "128KiB" },
+    { kind = "fake", name = "nexus", size = "128KiB" },
+    { kind = "fake", name = "oximeter", size = "128KiB" },
 ]
 
 [[artifact.psc_sp]]
diff --git a/wicketd/tests/integration_tests/updates.rs b/wicketd/tests/integration_tests/updates.rs
index 00ed5825e74..93bcb5ff734 100644
--- a/wicketd/tests/integration_tests/updates.rs
+++ b/wicketd/tests/integration_tests/updates.rs
@@ -41,6 +41,22 @@ use wicketd_client::types::{
     GetInventoryParams, GetInventoryResponse, StartUpdateParams,
 };
 
+/// The list of zone file names defined in fake-non-semver.toml, one entry
+/// per zone in [artifact.control_plane.source].
+static FAKE_NON_SEMVER_ZONE_FILE_NAMES: &[&str] = &[
+    "clickhouse.tar.gz",
+    "clickhouse_keeper.tar.gz",
+    "clickhouse_server.tar.gz",
+    "cockroachdb.tar.gz",
+    "crucible-zone.tar.gz",
+    "crucible-pantry-zone.tar.gz",
+    "external-dns.tar.gz",
+    "internal-dns.tar.gz",
+    "ntp.tar.gz",
+    "nexus.tar.gz",
+    "oximeter.tar.gz",
+];
+
 // See documentation for extract_nested_artifact_pair in update_plan.rs for why
 // multi_thread is required.
 #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@@ -413,15 +429,15 @@ async fn test_installinator_fetch() {
     // Check that the host and control plane artifacts were downloaded
     // correctly.
     //
-    // The control plane zone names here are defined in `fake.toml` which we
-    // load above.
-    for file_name in
-        [HOST_PHASE_2_FILE_NAME, "install/zone1.tar.gz", "install/zone2.tar.gz"]
-    {
-        let a_path = a_path.join(file_name);
+    // The control plane zone names here are defined in `fake-non-semver.toml`
+    // which we load above.
+    for file_name in [HOST_PHASE_2_FILE_NAME.to_owned()].into_iter().chain(
+        FAKE_NON_SEMVER_ZONE_FILE_NAMES.iter().map(|z| format!("install/{z}")),
+    ) {
+        let a_path = a_path.join(&file_name);
         assert!(a_path.is_file(), "{a_path} was written out");
 
-        let b_path = b_path.join(file_name);
+        let b_path = b_path.join(&file_name);
         assert!(b_path.is_file(), "{b_path} was written out");
     }
 
@@ -481,17 +497,13 @@ async fn test_installinator_fetch() {
         "mupdate ID matches",
     );
 
-    // Check that the zone1 and zone2 images are present in the zone set. (The
-    // names come from fake-non-semver.toml, under
-    // [artifact.control-plane.source]).
- assert!( - a_manifest.zones.contains_key("zone1.tar.gz"), - "zone1 is present in the zone set" - ); - assert!( - a_manifest.zones.contains_key("zone2.tar.gz"), - "zone2 is present in the zone set" - ); + // Check that the images are present in the zone set. + for file_name in FAKE_NON_SEMVER_ZONE_FILE_NAMES { + assert!( + a_manifest.zones.contains_key(file_name), + "{file_name} is present in the zone set" + ); + } // Ensure that the B path also had the same file written out. let b_manifest_path = From f88f166265bd65ca7f29cca56e9fd6f7c5d0ce72 Mon Sep 17 00:00:00 2001 From: Rain Date: Tue, 1 Jul 2025 18:56:37 +0000 Subject: [PATCH 2/2] make doc comments more regular Created using spr 1.3.6-beta.1 --- dev-tools/reconfigurator-cli/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 0cd17ed612e..90725e3d0cc 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -371,13 +371,13 @@ struct SledSetArgs { #[derive(Debug, Subcommand)] enum SledSetCommand { - /// Set the policy for the sled + /// set the policy for this sled Policy(SledSetPolicyArgs), } #[derive(Debug, Args)] struct SledSetPolicyArgs { - /// The policy to set for the sled + /// the policy to set #[clap(value_enum)] policy: SledPolicyOpt, }