diff --git a/Cargo.lock b/Cargo.lock index 9860cea13b..1e1325caff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6683,6 +6683,7 @@ dependencies = [ "sp-sim", "static_assertions", "strum 0.27.1", + "swrite", "test-strategy", "thiserror 2.0.12", "tufaceous-artifact", diff --git a/dev-tools/reconfigurator-cli/src/lib.rs b/dev-tools/reconfigurator-cli/src/lib.rs index 928a64793e..5c9d76dde9 100644 --- a/dev-tools/reconfigurator-cli/src/lib.rs +++ b/dev-tools/reconfigurator-cli/src/lib.rs @@ -377,6 +377,8 @@ enum SledSetCommand { Policy(SledSetPolicyArgs), #[clap(flatten)] Visibility(SledSetVisibilityCommand), + /// set the mupdate override for this sled + MupdateOverride(SledSetMupdateOverrideArgs), } #[derive(Debug, Args)] @@ -498,6 +500,23 @@ struct SledUpdateSpArgs { inactive: Option, } +#[derive(Debug, Args)] +struct SledSetMupdateOverrideArgs { + #[clap(flatten)] + source: SledMupdateOverrideSource, +} + +#[derive(Debug, Args)] +#[group(id = "sled-mupdate-override-source", required = true, multiple = false)] +struct SledMupdateOverrideSource { + /// the new value of the mupdate override, or "unset" + mupdate_override_id: Option, + + /// simulate an error reading the mupdate override + #[clap(long, conflicts_with = "mupdate_override_id")] + with_error: bool, +} + #[derive(Debug, Args)] struct SledRemoveArgs { /// id of the sled @@ -1344,6 +1363,51 @@ fn cmd_sled_set( ))) } } + SledSetCommand::MupdateOverride(SledSetMupdateOverrideArgs { + source: + SledMupdateOverrideSource { mupdate_override_id, with_error }, + }) => { + let (desc, prev) = if with_error { + let prev = + system.description_mut().sled_set_mupdate_override_error( + sled_id, + "reconfigurator-cli simulated mupdate-override error" + .to_owned(), + )?; + ("error".to_owned(), prev) + } else { + let mupdate_override_id = + mupdate_override_id.expect("clap ensures that this is set"); + let prev = system.description_mut().sled_set_mupdate_override( + sled_id, + mupdate_override_id.into(), + )?; + let desc = 
match mupdate_override_id { + MupdateOverrideUuidOpt::Set(id) => id.to_string(), + MupdateOverrideUuidOpt::Unset => "unset".to_owned(), + }; + (desc, prev) + }; + + let prev_desc = match prev { + Ok(Some(id)) => id.to_string(), + Ok(None) => "unset".to_owned(), + Err(_) => "error".to_owned(), + }; + + sim.commit_and_bump( + format!( + "reconfigurator-cli sled-set-mupdate-override: {}: {} -> {}", + sled_id, prev_desc, desc, + ), + state, + ); + + Ok(Some(format!( + "set sled {} mupdate override: {} -> {}", + sled_id, prev_desc, desc, + ))) + } } } diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt new file mode 100644 index 0000000000..e479705fb4 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-mupdate-update-flow.txt @@ -0,0 +1,124 @@ +# Load an example system. + +load-example --nsleds 3 --ndisks-per-sled 1 + +# Create a TUF repository from a fake manifest. We're going to use this +# repository to test out the minimum release generation flow. +tuf-assemble ../../update-common/manifests/fake.toml +set target-release repo-1.0.0.zip + +# Update the install dataset on this sled to the target release. +# (This populates the zone manifest, used for no-op conversions from +# install dataset to artifact down the road.) +sled-update-install-dataset serial0 --to-target-release + +# Set one of sled 0's zone's image sources to a specific artifact, and +# also set MGS and host phase 2 updates on the sled. Both should be +# reset as part of this process. 
+blueprint-edit latest set-zone-image 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact 1.2.3 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +blueprint-edit latest set-sp-update serial0 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 sp 1.0.0 1.0.1 +blueprint-edit latest set-host-phase2 serial0 A artifact 1.0.0 3a9607047b03ccaab6d222249d890e93ca51b94ad631c7ca38be74cba60802ff +blueprint-edit latest set-host-phase2 serial0 B artifact 1.0.0 044d45ad681b44e89c10e056cabdedf19fd8b1e54bc95e6622bcdd23f16bc8f2 + +# Simulate a mupdate on sled 0 by setting the mupdate override field to a +# new UUID (generated using uuidgen). +sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 + +# On sled 1, simulate an error obtaining the mupdate override. +sled-set serial1 mupdate-override --with-error + +# Also set its SP update, which will not be cleared. +blueprint-edit latest set-sp-update serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest sp newer older + +# Simulate a mupdate on sled 2 as well. +sled-set serial2 mupdate-override 203fa72c-85c1-466a-8ed3-338ee029530d + +# Generate a new inventory and plan against that. +inventory-generate +blueprint-plan latest latest + +# Diff the blueprints. This diff should show: +# +# * for sled 0: +# * "+ will remove mupdate override" +# * for zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038, a change from artifact to install-dataset +# * the pending MGS update cleared +# * host phase 2 contents set to current contents +# * for sled 1, no change, because the mupdate override field had an error +# * for sled 2, "+ will remove mupdate override" +# * the target release's minimum generation bumped from 1 to 3 +# (the 3 is because generation 2 is repo-1.0.0.zip) +blueprint-diff latest + +# Hide sled 0 from inventory temporarily -- this does two things: +# 1. Tests that mupdate/update state transitions don't happen when +# the sled isn't present in inventory. +# 2. 
We don't want sled 0 to participate in the next few operations +# below. +sled-set serial0 inventory-hidden + +# Set the target release to a new repo, causing a generation number bump +# to 3. +set target-release repo-1.0.0.zip + +# Invoke the planner -- should not proceed with adding or updating zones +# because sled 0 has a remove-mupdate-override set in the blueprint. +inventory-generate +blueprint-plan latest latest +blueprint-diff latest + +# Now simulate the new config being applied to sled 0, which would +# cause the mupdate override to be removed. +sled-set serial0 mupdate-override unset +sled-set serial0 inventory-visible + +# But simulate a second mupdate on sled 2. This should invalidate the existing +# mupdate override on sled 2 and cause another target release minimum +# generation bump. +tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver +sled-update-install-dataset serial2 --from-repo repo-2.0.0.zip +sled-set serial2 mupdate-override 1c0ce176-6dc8-4a90-adea-d4a8000751da + +# Generate a new inventory and plan against that. +inventory-generate +blueprint-plan latest latest + +# Diff the blueprints. This diff should show: +# * on sled 0: +# * the "remove mupdate override" line going away +# * no-op image source switches from install dataset to artifact +# * on sled 1, no changes +# * on sled 2, a _change_ in the will-remove-mupdate-override field +# * another bump to the target release minimum generation, this time to 4. +blueprint-diff latest + +# Clear the mupdate override on sled 2, signifying that the config has been +# applied. +sled-set serial2 mupdate-override unset + +# Run the planner again. This will cause sled 2's blueprint +# remove_mupdate_override to be unset. But no further planning steps will +# happen because the target release generation is not new enough. 
+# +# TODO: we want to block remove_mupdate_override unsets until the +# target release is uploaded and all install-dataset zones have been +# converted to artifact ones. +inventory-generate +blueprint-plan latest latest +blueprint-show latest +blueprint-diff latest + +# Now set the target release -- with this, the rest of the planner starts +# working again. +set target-release repo-2.0.0.zip +blueprint-plan latest latest +blueprint-show latest +blueprint-diff latest + +# Set the target release minimum generation to a large value -- we're going to +# test that the planner bails if it attempts a rollback of the target release +# minimum generation. +blueprint-edit latest set-target-release-min-gen 1000 +sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 +inventory-generate +blueprint-plan latest latest diff --git a/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt index 9d7d0db2ee..a7b20c01a7 100644 --- a/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt +++ b/dev-tools/reconfigurator-cli/tests/input/cmds-noop-image-source.txt @@ -23,10 +23,10 @@ sled-update-install-dataset serial0 --to-target-release sled-update-install-dataset serial1 --with-manifest-error # On a third sled, update the install dataset and simulate a mupdate override. -# (Currently we do this in the blueprint, but with -# https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and -# set a mupdate-override on the sled directly.) +# Also set it in the blueprint -- this simulates the situation where the mupdate +# override is in progress and will be cleared in the future. 
sled-update-install-dataset serial2 --to-target-release +sled-set serial2 mupdate-override ffffffff-ffff-ffff-ffff-ffffffffffff blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff # On a fourth sled, simulate an error validating the install dataset image on one zone. diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stderr b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stderr new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout new file mode 100644 index 0000000000..b5c1e124a0 --- /dev/null +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-mupdate-update-flow-stdout @@ -0,0 +1,1740 @@ +using provided RNG seed: reconfigurator-cli-test +> # Load an example system. + +> load-example --nsleds 3 --ndisks-per-sled 1 +loaded example system with: +- collection: f45ba181-4b56-42cc-a762-874d90184a43 +- blueprint: dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 + + +> # Create a TUF repository from a fake manifest. We're going to use this +> # repository to test out the minimum release generation flow. 
+> tuf-assemble ../../update-common/manifests/fake.toml +INFO assembling repository in +INFO artifacts assembled and archived to `repo-1.0.0.zip`, component: OmicronRepoAssembler +created repo-1.0.0.zip for system version 1.0.0 + +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: SimGimletSp, kind: gimlet_sp, version: 1.0.0, hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, length: 747 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 1.0.0, hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: 1.0.0, hash: 9b7575cad720f017e936fe5994fc4e21fe040acaaf83c2edd86132aa3d667c7b, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: f355fb8429a7e0f0716dad035f9a06c799168d6c0ffcde85b1a96fef21d4b53e, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 1.0.0, hash: 
cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: f896cf5b19ca85864d470ad8587f980218bff3954e7f52bbd999699cd0f9635b, length: 744 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: 
fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: ab32ec86e942e1a16c8d43ea143cd80dd05a9639529d3569b1c24dfa2587ee74, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +set target release based on repo-1.0.0.zip + + +> # Update the install dataset on this sled to the target release. +> # (This populates the zone manifest, used for no-op conversions from +> # install dataset to artifact down the road.) +> sled-update-install-dataset serial0 --to-target-release +sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6: install dataset updated: to target release (system version 1.0.0) + + +> # Set one of sled 0's zone's image sources to a specific artifact, and +> # also set MGS and host phase 2 updates on the sled. Both should be +> # reset as part of this process. 
+> blueprint-edit latest set-zone-image 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact 1.2.3 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 image source to artifact: version 1.2.3 +warn: no validation is done on the requested image source + +> blueprint-edit latest set-sp-update serial0 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 sp 1.0.0 1.0.1 +blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 created from latest blueprint (8da82a8e-bf97-4fbd-8ddd-9f6462732cf1): configured update for serial serial0 +warn: no validation is done on the requested artifact hash or version + +> blueprint-edit latest set-host-phase2 serial0 A artifact 1.0.0 3a9607047b03ccaab6d222249d890e93ca51b94ad631c7ca38be74cba60802ff +blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 created from latest blueprint (58d5e830-0884-47d8-a7cd-b2b3751adeb4): set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 slot A source to artifact: version 1.0.0 +warn: no validation is done on the requested source + +> blueprint-edit latest set-host-phase2 serial0 B artifact 1.0.0 044d45ad681b44e89c10e056cabdedf19fd8b1e54bc95e6622bcdd23f16bc8f2 +blueprint df06bb57-ad42-4431-9206-abff322896c7 created from latest blueprint (af934083-59b5-4bf6-8966-6fb5292c29e1): set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 host phase 2 slot B source to artifact: version 1.0.0 +warn: no validation is done on the requested source + + +> # Simulate a mupdate on sled 0 by setting the mupdate override field to a +> # new UUID (generated using uuidgen). 
+> sled-set serial0 mupdate-override 6123eac1-ec5b-42ba-b73f-9845105a9971 +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: unset -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + +> # On sled 1, simulate an error obtaining the mupdate override. +> sled-set serial1 mupdate-override --with-error +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: unset -> error + + +> # Also set its SP update, which will not be cleared. +> blueprint-edit latest set-sp-update serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest sp newer older +blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba created from latest blueprint (df06bb57-ad42-4431-9206-abff322896c7): configured update for serial serial1 +warn: no validation is done on the requested artifact hash or version + + +> # Simulate a mupdate on sled 2 as well. +> sled-set serial2 mupdate-override 203fa72c-85c1-466a-8ed3-338ee029530d +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: unset -> 203fa72c-85c1-466a-8ed3-338ee029530d + + +> # Generate a new inventory and plan against that. 
+> inventory-generate +generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, new_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971, prev_bp_override: None, zones: + - zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (Nexus) updated from artifact: version 1.2.3 to install dataset + - zone 427ec88f-f467-42fa-9bbb-66a91a36103c (InternalDns) left unchanged, image source: install dataset + - zone 5199c033-4cf9-4ab6-8ae7-566bd7606363 (Crucible) left unchanged, image source: install dataset + - zone 6444f8a5-6465-4f0b-a549-1993c113569c (InternalNtp) left unchanged, image source: install dataset + - zone 803bfb63-c246-41db-b0da-d3b87ddfc63d (ExternalDns) left unchanged, image source: install dataset + - zone ba4994a8-23f9-4b1a-a84f-a08d74591389 (CruciblePantry) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: updated from artifact (version version 1.0.0, hash 3a9607047b03ccaab6d222249d890e93ca51b94ad631c7ca38be74cba60802ff) to preserving current contents + - host phase 2 slot B: updated from artifact (version version 1.0.0, hash 044d45ad681b44e89c10e056cabdedf19fd8b1e54bc95e6622bcdd23f16bc8f2) to preserving current contents + +INFO previous MGS update cleared as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, artifact_version: 1.1.0, artifact_hash: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, expected_inactive_version: Version(ArtifactVersion("1.0.1")), expected_active_version: 1.0.0, component: 
sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, new_bp_override: 203fa72c-85c1-466a-8ed3-338ee029530d, prev_bp_override: None, zones: + - zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (Nexus) left unchanged, image source: install dataset + - zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (CruciblePantry) left unchanged, image source: install dataset + - zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (InternalDns) left unchanged, image source: install dataset + - zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (InternalNtp) left unchanged, image source: install dataset + - zone f55647d4-5500-4ad3-893a-df45bd50d622 (Crucible) left unchanged, image source: install dataset + - zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (ExternalDns) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: current contents (unchanged) + - host phase 2 slot B: current contents (unchanged) + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 1, new_generation: 3 +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (2) is lower than minimum required by blueprint (3); sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: remove_mupdate_override 
is set in the blueprint (6123eac1-ec5b-42ba-b73f-9845105a9971) +INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (203fa72c-85c1-466a-8ed3-338ee029530d) +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 9034c710-3e57-45f3-99e5-4316145e87ac based on parent blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba + + +> # Diff the blueprints. This diff should show: +> # +> # * for sled 0: +> # * "+ will remove mupdate override" +> # * for zone 0c71b3b2-6ceb-4e8f-b020-b08675e83038, a change from artifact to install-dataset +> # * the pending MGS update cleared +> # * host phase 2 contents set to current contents +> # * for sled 1, no change, because the mupdate override field had an error +> # * for sled 2, "+ will remove mupdate override" +> # * the target release's minimum generation bumped from 1 to 3 +> # (the 3 is because generation 2 is repo-1.0.0.zip) +> blueprint-diff latest +from: blueprint 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba +to: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 5 -> 6): ++ will remove mupdate override: (none) -> 6123eac1-ec5b-42ba-b73f-9845105a9971 + + host phase 2 contents: + -------------------------------- + slot boot image source + -------------------------------- +* A - artifact: version 1.0.0 + └─ + current contents +* B - artifact: version 1.0.0 + └─ + current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 install dataset in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 install dataset in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d install dataset in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c install dataset in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c install dataset in service fd00:1122:3344:101::21 +* nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 - artifact: version 1.2.3 in service fd00:1122:3344:101::22 + └─ + install dataset + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 2 -> 3): ++ will remove mupdate override: (none) -> 203fa72c-85c1-466a-8ed3-338ee029530d + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + 
--------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 1 -> 3 + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } +- sled 0 model0 serial0 
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 1.1.0 Sp { expected_active_version: ArtifactVersion("1.0.0"), expected_inactive_version: Version(ArtifactVersion("1.0.1")) } + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp 
(records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + 
SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Hide sled 0 from inventory temporarily -- this does two things: +> # 1. Tests that mupdate/update state transitions don't happen when +> # the sled isn't present in inventory. +> # 2. 
We don't want sled 0 to participate in the next few operations +> # below. +> sled-set serial0 inventory-hidden +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 inventory visibility: visible -> hidden + + +> # Set the target release to a new repo, causing a generation number bump +> # to 3. +> set target-release repo-1.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: SimGimletSp, kind: gimlet_sp, version: 1.0.0, hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, length: 747 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 1.0.0, hash: 2053f8594971bbf0a7326c833e2ffc12b065b9d823b9c0b967d275fa595e4e89, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 1.0.0, hash: f3dd0c7a1bd4500ea0d8bcf67581f576d47752b2f1998a4cb0f0c3155c483008, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: 1.0.0, hash: 9b7575cad720f017e936fe5994fc4e21fe040acaaf83c2edd86132aa3d667c7b, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: 1.0.0, hash: f355fb8429a7e0f0716dad035f9a06c799168d6c0ffcde85b1a96fef21d4b53e, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 1.0.0, hash: 52b1eb4daff6f9140491d547b11248392920230db3db0eef5f5fa5333fe9e659, length: 1686 +INFO added artifact, name: clickhouse_keeper, kind: zone, 
version: 1.0.0, hash: cda702919449d86663be97295043aeca0ead69ae5db3bbdb20053972254a27a3, length: 1690 +INFO added artifact, name: clickhouse_server, kind: zone, version: 1.0.0, hash: 5f9ae6a9821bbe8ff0bf60feddf8b167902fe5f3e2c98bd21edd1ec9d969a001, length: 1690 +INFO added artifact, name: cockroachdb, kind: zone, version: 1.0.0, hash: f3a1a3c0b3469367b005ee78665d982059d5e14e93a479412426bf941c4ed291, length: 1689 +INFO added artifact, name: crucible-zone, kind: zone, version: 1.0.0, hash: 6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047, length: 1690 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 1.0.0, hash: 21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02, length: 1695 +INFO added artifact, name: external-dns, kind: zone, version: 1.0.0, hash: ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d, length: 1689 +INFO added artifact, name: internal-dns, kind: zone, version: 1.0.0, hash: ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a, length: 1689 +INFO added artifact, name: ntp, kind: zone, version: 1.0.0, hash: 67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439, length: 1681 +INFO added artifact, name: nexus, kind: zone, version: 1.0.0, hash: 0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388, length: 1682 +INFO added artifact, name: oximeter, kind: zone, version: 1.0.0, hash: 048d8fe8cdef5b175aad714d0f148aa80ce36c9114ac15ce9d02ed3d37877a77, length: 1682 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 1.0.0, hash: f896cf5b19ca85864d470ad8587f980218bff3954e7f52bbd999699cd0f9635b, length: 744 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 1.0.0, hash: 179eb660ebc92e28b6748b6af03d9f998d6131319edd4654a1e948454c62551b, length: 750 +INFO added artifact, 
name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 1.0.0, hash: ab32ec86e942e1a16c8d43ea143cd80dd05a9639529d3569b1c24dfa2587ee74, length: 740 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 1.0.0, hash: 04e4a7fdb84acca92c8fd3235e26d64ea61bef8a5f98202589fd346989c5720a, length: 735 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: 1.0.0, hash: 005ea358f1cd316df42465b1e3a0334ea22cc0c0442cf9ddf9b42fbf49780236, length: 750 +set target release based on repo-1.0.0.zip + + +> # Invoke the planner -- should not proceed with adding or updating zones +> # because sled 0 has a remove-mupdate-override set in the blueprint. 
+> inventory-generate +generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds + +> blueprint-plan latest latest +WARN skipping zones eligible for cleanup check (sled not present in latest inventory collection), sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +WARN no inventory found for in-service sled, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO skipped noop image source check on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, reason: sled not found in inventory +INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (203fa72c-85c1-466a-8ed3-338ee029530d) +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 based on parent blueprint 9034c710-3e57-45f3-99e5-4316145e87ac + +> blueprint-diff latest +from: blueprint 9034c710-3e57-45f3-99e5-4316145e87ac +to: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 3 (unchanged) + + 
OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 
75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 2) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Now simulate the new config being applied to sled 0, which would +> # cause the mupdate override to be removed. +> sled-set serial0 mupdate-override unset +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> unset + +> sled-set serial0 inventory-visible +set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 inventory visibility: hidden -> visible + + +> # But simulate a second mupdate on sled 2. 
This should invalidate the existing +> # mupdate override on sled 2 and cause another target release minimum +> # generation bump. +> tuf-assemble ../../update-common/manifests/fake-non-semver.toml --allow-non-semver +INFO assembling repository in +INFO artifacts assembled and archived to `repo-2.0.0.zip`, component: OmicronRepoAssembler +created repo-2.0.0.zip for system version 2.0.0 + +> sled-update-install-dataset serial2 --from-repo repo-2.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 2.0.0, hash: ce1e98a8a9ae541654508f101d59a3ddeba3d28177f1d42d5614248eef0b820b, length: 751 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 2.0.0, hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 2.0.0, hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: non-semver, hash: 24f8ca0d52da5238644b11964c6feda854c7530820713efefa7ac91683b3fc76, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: non-semver, hash: 5fceee33d358aacb8a34ca93a30e28354bd8f341f6e3e895a2cafe83904f3d80, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 2.0.0, hash: 
bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08, length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 2.0.0, hash: 1eb9f24be68f13c274aa0ac9b863cec520dbfe762620c328431728d75bfd2198, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 2.0.0, hash: 50fe271948672a9af1ba5f96c9d87ff2736fa72d78dfef598a79fa0cc8a00474, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 2.0.0, hash: ebc82bf181db864b78cb7e3ddedf7ab1dd8fe7b377b02846f3c27cf0387bb387, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 2.0.0, hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e, length: 1691 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 2.0.0, hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 2.0.0, hash: f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 2.0.0, hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 2.0.0, hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 2.0.0, hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 2.0.0, hash: 9f4bc56a15d5fd943fdac94309994b8fd73aa2be1ec61faf44bfcf2356c9dc23, length: 1683 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 2.0.0, hash: 7adf04de523865003dbf120cebddd5fcf5bad650640281b294197e6ca7016e47, length: 748 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot, kind: 
psc_rot_image_b, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 2.0.0, hash: 5a9019c484c051edfab4903a7a5e1817c89bd555eea3e48f6b92c6e67442e13e, length: 746 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: non-semver-2, hash: a0d6df68e6112edcf62c035947563d2a58d06e11443b95b90bf087da710550a5, length: 758 +sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: from repo at repo-2.0.0.zip (system version 2.0.0) + +> sled-set serial2 mupdate-override 1c0ce176-6dc8-4a90-adea-d4a8000751da +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: 203fa72c-85c1-466a-8ed3-338ee029530d -> 1c0ce176-6dc8-4a90-adea-d4a8000751da + + +> # Generate a new inventory and plan against that. 
+> inventory-generate +generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO inventory override no longer exists, blueprint override cleared, phase: do_plan_mupdate_override, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, prev_bp_override: 6123eac1-ec5b-42ba-b73f-9845105a9971 +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, new_bp_override: 1c0ce176-6dc8-4a90-adea-d4a8000751da, prev_bp_override: Some(203fa72c-85c1-466a-8ed3-338ee029530d (mupdate_override)), zones: + - zone 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 (Nexus) left unchanged, image source: install dataset + - zone 75b220ba-a0f4-4872-8202-dc7c87f062d0 (CruciblePantry) left unchanged, image source: install dataset + - zone ea5b4030-b52f-44b2-8d70-45f15f987d01 (InternalDns) left unchanged, image source: install dataset + - zone f10a4fb9-759f-4a65-b25e-5794ad2d07d8 (InternalNtp) left unchanged, image source: install dataset + - zone f55647d4-5500-4ad3-893a-df45bd50d622 (Crucible) left unchanged, image source: install dataset + - zone f6ec9c67-946a-4da3-98d5-581f72ce8bf0 (ExternalDns) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: current contents (unchanged) + - host phase 2 slot B: current contents (unchanged) + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO updating target release minimum generation based on new set-override actions, phase: do_plan_mupdate_override, current_generation: 3, new_generation: 4 +INFO not ready to add or update new 
zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (3) is lower than minimum required by blueprint (4); sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0, num_eligible: 6, num_ineligible: 0 +INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (1c0ce176-6dc8-4a90-adea-d4a8000751da) +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0 +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 based on parent blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 + + +> # Diff the blueprints. This diff should show: +> # * on sled 0: +> # * the "remove mupdate override" line going away +> # * no-op image source switches from install dataset to artifact +> # * on sled 1, no changes +> # * on sled 2, a _change_ in the will-remove-mupdate-override field +> # * another bump to the target release minimum generation, this time to 4. 
+> blueprint-diff latest +from: blueprint d60afc57-f15d-476c-bd0f-b1071e2bb976 +to: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 + + MODIFIED SLEDS: + + sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 6 -> 7): +- will remove mupdate override: 6123eac1-ec5b-42ba-b73f-9845105a9971 -> (none) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 - install dataset in service fd00:1122:3344:101::25 + └─ + artifact: version 1.0.0 +* crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 - install dataset in service fd00:1122:3344:101::24 + └─ + artifact: version 1.0.0 +* external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d - install dataset in service fd00:1122:3344:101::23 + └─ + artifact: version 1.0.0 +* internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c - install dataset in service fd00:1122:3344:2::1 + └─ + artifact: version 1.0.0 +* internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c - install dataset in service fd00:1122:3344:101::21 + └─ + artifact: version 1.0.0 +* nexus 
0c71b3b2-6ceb-4e8f-b020-b08675e83038 - install dataset in service fd00:1122:3344:101::22 + └─ + artifact: version 1.0.0 + + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 3 -> 4): +* will remove mupdate override: 203fa72c-85c1-466a-8ed3-338ee029530d -> 1c0ce176-6dc8-4a90-adea-d4a8000751da + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not 
modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) +* target release min gen: 3 -> 4 + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 
353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 
0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Clear the mupdate override on sled 2, signifying that 
the config has been +> # applied. +> sled-set serial2 mupdate-override unset +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: 1c0ce176-6dc8-4a90-adea-d4a8000751da -> unset + + +> # Run the planner again. This will cause sled 2's blueprint +> # remove_mupdate_override to be unset. But no further planning steps will +> # happen because the target release generation is not new enough. +> # +> # TODO: we want to block remove_mupdate_override unsets until the +> # target release is uploaded and all install-dataset zones have been +> # converted to artifact ones. +> inventory-generate +generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO inventory override no longer exists, blueprint override cleared, phase: do_plan_mupdate_override, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, prev_bp_override: 1c0ce176-6dc8-4a90-adea-d4a8000751da +INFO not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: current target release generation (3) is lower than minimum required by blueprint (4) +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 0, num_eligible: 0, num_ineligible: 6 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6, kind: nexus, file_name: nexus.tar.gz, expected_hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: 75b220ba-a0f4-4872-8202-dc7c87f062d0, kind: crucible_pantry, file_name: crucible_pantry.tar.gz, expected_hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: ea5b4030-b52f-44b2-8d70-45f15f987d01, kind: internal_dns, file_name: internal_dns.tar.gz, expected_hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: f10a4fb9-759f-4a65-b25e-5794ad2d07d8, kind: internal_ntp, file_name: ntp.tar.gz, expected_hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: f55647d4-5500-4ad3-893a-df45bd50d622, kind: crucible, file_name: crucible.tar.gz, expected_hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, zone_id: f6ec9c67-946a-4da3-98d5-581f72ce8bf0, kind: external_dns, file_name: external_dns.tar.gz, expected_hash: f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f +INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint 626487fa-7139-45ec-8416-902271fc730b based on parent blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 + +> blueprint-show latest +blueprint 626487fa-7139-45ec-8416-902271fc730b 
+parent: a5a8f242-ffa5-473c-8efd-2acf2dc0b736 + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none 
off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + + sled: 
98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off 
+ oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + 
------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: (none) + internal DNS version::: 1 + external DNS version::: 1 + target release min gen: 4 + + PENDING MGS-MANAGED UPDATES: 1 + Pending MGS-managed updates (all baseboards): + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } + + + +> blueprint-diff latest +from: blueprint a5a8f242-ffa5-473c-8efd-2acf2dc0b736 +to: blueprint 626487fa-7139-45ec-8416-902271fc730b + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 4 -> 5): +- will remove mupdate override: 1c0ce176-6dc8-4a90-adea-d4a8000751da -> (none) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 install dataset in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 install dataset in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 install dataset in service fd00:1122:3344:103::23 + internal_dns 
ea5b4030-b52f-44b2-8d70-45f15f987d01 install dataset in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 install dataset in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 install dataset in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA 
fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 
5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS 
ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Now set the target release -- with this, the rest of the planner starts +> # working again. +> set target-release repo-2.0.0.zip +INFO extracting uploaded archive to +INFO created directory to store extracted artifacts, path: +INFO added artifact, name: fake-gimlet-sp, kind: gimlet_sp, version: 2.0.0, hash: ce1e98a8a9ae541654508f101d59a3ddeba3d28177f1d42d5614248eef0b820b, length: 751 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot, kind: gimlet_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-gimlet-rot-bootloader, kind: gimlet_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-host, kind: host_phase_1, version: 2.0.0, hash: 44714733af7600b30a50bfd2cbaf707ff7ee9724073ff70a6732e55a88864cf6, length: 524288 +INFO added artifact, name: fake-host, kind: host_phase_2, version: 2.0.0, hash: 0c0362b640cece5b9a5e86d8fa683bd2eb84c3e7f90731f597197d604ffa76e3, length: 1048576 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_1, version: non-semver, hash: 24f8ca0d52da5238644b11964c6feda854c7530820713efefa7ac91683b3fc76, length: 524288 +INFO added artifact, name: fake-trampoline, kind: trampoline_phase_2, version: non-semver, hash: 5fceee33d358aacb8a34ca93a30e28354bd8f341f6e3e895a2cafe83904f3d80, length: 1048576 +INFO added artifact, name: clickhouse, kind: zone, version: 2.0.0, hash: bb2d1ff02d11f72bc9049ae57f27536207519a1859d29f8d7a90ab3b44d56b08, 
length: 1687 +INFO added artifact, name: clickhouse_keeper, kind: zone, version: 2.0.0, hash: 1eb9f24be68f13c274aa0ac9b863cec520dbfe762620c328431728d75bfd2198, length: 1691 +INFO added artifact, name: clickhouse_server, kind: zone, version: 2.0.0, hash: 50fe271948672a9af1ba5f96c9d87ff2736fa72d78dfef598a79fa0cc8a00474, length: 1691 +INFO added artifact, name: cockroachdb, kind: zone, version: 2.0.0, hash: ebc82bf181db864b78cb7e3ddedf7ab1dd8fe7b377b02846f3c27cf0387bb387, length: 1690 +INFO added artifact, name: crucible-zone, kind: zone, version: 2.0.0, hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e, length: 1691 +INFO added artifact, name: crucible-pantry-zone, kind: zone, version: 2.0.0, hash: 3ff26dad96faa8f67251f5de40458b4f809d536bfe8572134da0e42c2fa12674, length: 1696 +INFO added artifact, name: external-dns, kind: zone, version: 2.0.0, hash: f282c45771429f7bebf71f0cc668521066db57c6bb07fcfccdfb44825d3d930f, length: 1690 +INFO added artifact, name: internal-dns, kind: zone, version: 2.0.0, hash: de30657a72b066b8ef1f56351a0a5d4d7000da0a62c4be9b2e949a107ca8a389, length: 1690 +INFO added artifact, name: ntp, kind: zone, version: 2.0.0, hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095, length: 1682 +INFO added artifact, name: nexus, kind: zone, version: 2.0.0, hash: e9b7035f41848a987a798c15ac424cc91dd662b1af0920d58d8aa1ebad7467b6, length: 1683 +INFO added artifact, name: oximeter, kind: zone, version: 2.0.0, hash: 9f4bc56a15d5fd943fdac94309994b8fd73aa2be1ec61faf44bfcf2356c9dc23, length: 1683 +INFO added artifact, name: fake-psc-sp, kind: psc_sp, version: 2.0.0, hash: 7adf04de523865003dbf120cebddd5fcf5bad650640281b294197e6ca7016e47, length: 748 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_a, version: 2.0.0, hash: 6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot, kind: psc_rot_image_b, version: 2.0.0, hash: 
6d1c432647e9b9e4cf846ff5d17932d75cba49c0d3f23d24243238bc40bcfef5, length: 746 +INFO added artifact, name: fake-psc-rot-bootloader, kind: psc_rot_bootloader, version: 2.0.0, hash: 238a9bfc87f02141c7555ff5ebb7a22ec37bc24d6f724ce3af05ed7c412cd115, length: 750 +INFO added artifact, name: fake-switch-sp, kind: switch_sp, version: 2.0.0, hash: 5a9019c484c051edfab4903a7a5e1817c89bd555eea3e48f6b92c6e67442e13e, length: 746 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_a, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot, kind: switch_rot_image_b, version: 2.0.0, hash: e7047f500a5391e22cd8e6a8d3ae66c9d9de7a8d021e6e9a10e05bb6d554da77, length: 743 +INFO added artifact, name: fake-switch-rot-bootloader, kind: switch_rot_bootloader, version: non-semver-2, hash: a0d6df68e6112edcf62c035947563d2a58d06e11443b95b90bf087da710550a5, length: 758 +set target release based on repo-2.0.0.zip + +> blueprint-plan latest latest +ERRO error getting mupdate override info for sled, not altering blueprint override, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, message: reconfigurator-cli simulated mupdate-override error +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 7, num_already_artifact: 0, num_eligible: 0, num_ineligible: 7 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 0, num_eligible: 6, num_ineligible: 0 +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 6, num_already_artifact: 0 +INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, 
current_count: 0 +INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 +INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 +INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient ExternalDns zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 +INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 +INFO SP update impossible (will remove it and re-evaluate board), artifact_version: newest, artifact_hash: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, expected_inactive_version: Version(ArtifactVersion("older")), expected_active_version: newer, component: sp, sp_slot: 1, sp_type: Sled, serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial1, part_number: model1 +INFO skipping board for SP update, serial_number: serial1, part_number: model1 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial0, part_number: model0 +INFO skipping board for SP update, serial_number: serial0, part_number: model0 +WARN cannot configure SP update for board (no matching artifact), serial_number: serial2, part_number: model2 +INFO skipping board for SP update, serial_number: serial2, part_number: model2 +INFO ran out of boards for SP update +INFO some zones not yet up-to-date, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, zones_currently_updating: [ZoneCurrentlyUpdating { zone_id: 0c71b3b2-6ceb-4e8f-b020-b08675e83038 (service), zone_kind: Nexus, reason: ImageSourceMismatch { 
bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("0e32b4a3e5d3668bb1d6a16fb06b74dc60b973fa479dcee0aae3adbb52bf1388") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 427ec88f-f467-42fa-9bbb-66a91a36103c (service), zone_kind: InternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ffbf1373f7ee08dddd74c53ed2a94e7c4c572a982d3a9bc94000c6956b700c6a") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 5199c033-4cf9-4ab6-8ae7-566bd7606363 (service), zone_kind: Crucible, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("6f17cf65fb5a5bec5542dd07c03cd0acc01e59130f02c532c8d848ecae810047") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 6444f8a5-6465-4f0b-a549-1993c113569c (service), zone_kind: InternalNtp, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("67593d686ed04a1709f93972b71f4ebc148a9362120f65d239943e814a9a7439") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: 803bfb63-c246-41db-b0da-d3b87ddfc63d (service), zone_kind: ExternalDns, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("ccca13ed19b8731f9adaf0d6203b02ea3b9ede4fa426b9fac0a07ce95440046d") }, inv_image_source: InstallDataset } }, ZoneCurrentlyUpdating { zone_id: ba4994a8-23f9-4b1a-a84f-a08d74591389 (service), zone_kind: CruciblePantry, reason: ImageSourceMismatch { bp_image_source: Artifact { version: Available { version: ArtifactVersion("1.0.0") }, hash: ArtifactHash("21f0ada306859c23917361f2e0b9235806c32607ec689c7e8cf16bb898bc5a02") }, inv_image_source: InstallDataset } }] +INFO will ensure cockroachdb setting, 
setting: cluster.preserve_downgrade_option, value: DoNotModify +generated blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b based on parent blueprint 626487fa-7139-45ec-8416-902271fc730b + +> blueprint-show latest +blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b +parent: 626487fa-7139-45ec-8416-902271fc730b + + sled: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c (active, config generation 2) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-72c59873-31ff-4e36-8d76-ff834009349a in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crucible 8c4fa711-1d5d-4e93-85f0-d17bff47b063 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/clickhouse 3b66453b-7148-4c1b-84a9-499e43290ab4 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/external_dns 841d5648-05f0-47b0-b446-92f6b60fe9a6 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/internal_dns 3560dd69-3b23-4c69-807d-d673104cfc68 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone 4829f422-aa31-41a8-ab73-95684ff1ef48 in service none none off + 
oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_clickhouse_353b3b65-20f7-48c3-88f7-495bd5d31545 318fae85-abcb-4259-b1b6-ac96d193f7b7 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_bd354eef-d8a6-4165-9124-283fb5e46d77 2ad1875a-92ac-472f-8c26-593309f0e4da in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_crucible_pantry_ad6a3a03-8d0f-4504-99a4-cbf73d69b973 c31623de-c19b-4615-9f1d-5e1daa5d3bda in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_external_dns_6c3ae381-04f7-41ea-b0ac-74db387dbc3a b46de15d-33e7-4cd0-aa7c-e7be2a61e71b in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_internal_dns_99e2f30b-3174-40bf-a78a-90da8abba8ca 09b9cc9b-3426-470b-a7bc-538f82dede03 in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_nexus_466a9f29-62bf-4e63-924a-b9efdb86afec 775f9207-c42d-4af2-9186-27ffef67735e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/zone/oxz_ntp_62620961-fc4a-481e-968b-f5acbac0dc63 2db6b7c1-0f46-4ced-a3ad-48872793360e in service none none off + oxp_72c59873-31ff-4e36-8d76-ff834009349a/crypt/debug 93957ca0-9ed1-4e7b-8c34-2ce07a69541c in service 100 GiB none gzip-9 + + + omicron zones: + --------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + --------------------------------------------------------------------------------------------------------------- + clickhouse 353b3b65-20f7-48c3-88f7-495bd5d31545 install dataset in service fd00:1122:3344:102::23 + crucible bd354eef-d8a6-4165-9124-283fb5e46d77 install dataset in service fd00:1122:3344:102::26 + crucible_pantry ad6a3a03-8d0f-4504-99a4-cbf73d69b973 install dataset in service fd00:1122:3344:102::25 + external_dns 6c3ae381-04f7-41ea-b0ac-74db387dbc3a install dataset in service 
fd00:1122:3344:102::24 + internal_dns 99e2f30b-3174-40bf-a78a-90da8abba8ca install dataset in service fd00:1122:3344:1::1 + internal_ntp 62620961-fc4a-481e-968b-f5acbac0dc63 install dataset in service fd00:1122:3344:102::21 + nexus 466a9f29-62bf-4e63-924a-b9efdb86afec install dataset in service fd00:1122:3344:102::22 + + + + sled: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 (active, config generation 7) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-c6d33b64-fb96-4129-bab1-7878a06a5f9b in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crucible 43931274-7fe8-4077-825d-dff2bc8efa58 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/external_dns a4c3032e-21fa-4d4a-b040-a7e3c572cf3c in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/internal_dns 4f60b534-eaa3-40a1-b60f-bfdf147af478 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone 4617d206-4330-4dfa-b9f3-f63a3db834f9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_5199c033-4cf9-4ab6-8ae7-566bd7606363 ad41be71-6c15-4428-b510-20ceacde4fa6 in service none none off + 
oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_crucible_pantry_ba4994a8-23f9-4b1a-a84f-a08d74591389 1bca7f71-5e42-4749-91ec-fa40793a3a9a in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_external_dns_803bfb63-c246-41db-b0da-d3b87ddfc63d 3ac089c9-9dec-465b-863a-188e80d71fb4 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_internal_dns_427ec88f-f467-42fa-9bbb-66a91a36103c 686c19cf-a0d7-45f6-866f-c564612b2664 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_nexus_0c71b3b2-6ceb-4e8f-b020-b08675e83038 793ac181-1b01-403c-850d-7f5c54bda6c9 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/zone/oxz_ntp_6444f8a5-6465-4f0b-a549-1993c113569c cdf3684f-a6cf-4449-b9ec-e696b2c663e2 in service none none off + oxp_c6d33b64-fb96-4129-bab1-7878a06a5f9b/crypt/debug 248c6c10-1ac6-45de-bb55-ede36ca56bbd in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible 5199c033-4cf9-4ab6-8ae7-566bd7606363 artifact: version 1.0.0 in service fd00:1122:3344:101::25 + crucible_pantry ba4994a8-23f9-4b1a-a84f-a08d74591389 artifact: version 1.0.0 in service fd00:1122:3344:101::24 + external_dns 803bfb63-c246-41db-b0da-d3b87ddfc63d artifact: version 1.0.0 in service fd00:1122:3344:101::23 + internal_dns 427ec88f-f467-42fa-9bbb-66a91a36103c artifact: version 1.0.0 in service fd00:1122:3344:2::1 + internal_ntp 6444f8a5-6465-4f0b-a549-1993c113569c artifact: version 1.0.0 in service fd00:1122:3344:101::21 + nexus 0c71b3b2-6ceb-4e8f-b020-b08675e83038 artifact: version 1.0.0 in service fd00:1122:3344:101::22 + + + + sled: d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config 
generation 6) + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + ----------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ----------------------------------------------------------------------------------------------------------------------- + crucible f55647d4-5500-4ad3-893a-df45bd50d622 artifact: version 2.0.0 in service fd00:1122:3344:103::25 + crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 artifact: version 2.0.0 in service fd00:1122:3344:103::24 + external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 artifact: version 2.0.0 in service fd00:1122:3344:103::23 + internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 artifact: version 2.0.0 in service fd00:1122:3344:3::1 + internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 artifact: version 2.0.0 in service fd00:1122:3344:103::21 + nexus 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 artifact: version 2.0.0 in service fd00:1122:3344:103::22 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) + cluster.preserve_downgrade_option: (do not modify) + + OXIMETER SETTINGS: + generation: 1 + read from:: SingleNode + + METADATA: + created by::::::::::::: reconfigurator-sim + created at::::::::::::: + comment:::::::::::::::: sled d81c6a84-79b8-4958-ae41-ea46c9b19763: performed 6 noop zone image source updates + internal DNS version::: 1 
+ external DNS version::: 1 + target release min gen: 4 + + PENDING MGS-MANAGED UPDATES: 0 + + +> blueprint-diff latest +from: blueprint 626487fa-7139-45ec-8416-902271fc730b +to: blueprint c1a0d242-9160-40f4-96ae-61f8f40a0b1b + + MODIFIED SLEDS: + + sled d81c6a84-79b8-4958-ae41-ea46c9b19763 (active, config generation 5 -> 6): + + host phase 2 contents: + ------------------------ + slot boot image source + ------------------------ + A current contents + B current contents + + + physical disks: + ------------------------------------------------------------------------------------ + vendor model serial disposition + ------------------------------------------------------------------------------------ + fake-vendor fake-model serial-4930954e-9ac7-4453-b63f-5ab97c389a99 in service + + + datasets: + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + dataset name dataset id disposition quota reservation compression + ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crucible 090bd88d-0a43-4040-a832-b13ae721f74f in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/external_dns 4da74a5b-6911-4cca-b624-b90c65530117 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/internal_dns 252ac39f-b9e2-4697-8c07-3a833115d704 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone 45cd9687-20be-4247-b62a-dfdacf324929 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_f55647d4-5500-4ad3-893a-df45bd50d622 1cb0a47a-59ac-4892-8e92-cf87b4290f96 in service none none off + 
oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_crucible_pantry_75b220ba-a0f4-4872-8202-dc7c87f062d0 b1deff4b-51df-4a37-9043-afbd7c70a1cb in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_external_dns_f6ec9c67-946a-4da3-98d5-581f72ce8bf0 c65a9c1c-36dc-4ddb-8aac-ec3be8dbb209 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_internal_dns_ea5b4030-b52f-44b2-8d70-45f15f987d01 21fd4f3a-ec31-469b-87b1-087c343a2422 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_nexus_3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 e009d8b8-4695-4322-b53f-f03f2744aef7 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/zone/oxz_ntp_f10a4fb9-759f-4a65-b25e-5794ad2d07d8 41071985-1dfd-4ce5-8bc2-897161a8bce4 in service none none off + oxp_4930954e-9ac7-4453-b63f-5ab97c389a99/crypt/debug 7a6a2058-ea78-49de-9730-cce5e28b4cfb in service 100 GiB none gzip-9 + + + omicron zones: + ------------------------------------------------------------------------------------------------------------------------- + zone type zone id image source disposition underlay IP + ------------------------------------------------------------------------------------------------------------------------- +* crucible f55647d4-5500-4ad3-893a-df45bd50d622 - install dataset in service fd00:1122:3344:103::25 + └─ + artifact: version 2.0.0 +* crucible_pantry 75b220ba-a0f4-4872-8202-dc7c87f062d0 - install dataset in service fd00:1122:3344:103::24 + └─ + artifact: version 2.0.0 +* external_dns f6ec9c67-946a-4da3-98d5-581f72ce8bf0 - install dataset in service fd00:1122:3344:103::23 + └─ + artifact: version 2.0.0 +* internal_dns ea5b4030-b52f-44b2-8d70-45f15f987d01 - install dataset in service fd00:1122:3344:3::1 + └─ + artifact: version 2.0.0 +* internal_ntp f10a4fb9-759f-4a65-b25e-5794ad2d07d8 - install dataset in service fd00:1122:3344:103::21 + └─ + artifact: version 2.0.0 +* nexus 
3eeb8d49-eb1a-43f8-bb64-c2338421c2c6 - install dataset in service fd00:1122:3344:103::22 + └─ + artifact: version 2.0.0 + + + COCKROACHDB SETTINGS: + state fingerprint::::::::::::::::: (none) (unchanged) + cluster.preserve_downgrade_option: (do not modify) (unchanged) + + METADATA: + internal DNS version::: 1 (unchanged) + external DNS version::: 1 (unchanged) + target release min gen: 4 (unchanged) + + OXIMETER SETTINGS: + generation: 1 (unchanged) + read from:: SingleNode (unchanged) + + PENDING MGS UPDATES: + + Pending MGS-managed updates (all baseboards): + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + sp_type slot part_number serial_number artifact_hash artifact_version details + ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +- sled 1 model1 serial1 e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 newest Sp { expected_active_version: ArtifactVersion("newer"), expected_inactive_version: Version(ArtifactVersion("older")) } + + +internal DNS: + DNS zone: "control-plane.oxide.internal" (unchanged) + name: 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host (records: 1) + AAAA fd00:1122:3344:101::22 + name: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled (records: 1) + AAAA fd00:1122:3344:102::1 + name: 353b3b65-20f7-48c3-88f7-495bd5d31545.host (records: 1) + AAAA fd00:1122:3344:102::23 + name: 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host (records: 1) + AAAA fd00:1122:3344:103::22 + name: 427ec88f-f467-42fa-9bbb-66a91a36103c.host (records: 1) + AAAA fd00:1122:3344:2::1 + name: 466a9f29-62bf-4e63-924a-b9efdb86afec.host (records: 1) + AAAA fd00:1122:3344:102::22 + 
name: 5199c033-4cf9-4ab6-8ae7-566bd7606363.host (records: 1) + AAAA fd00:1122:3344:101::25 + name: 62620961-fc4a-481e-968b-f5acbac0dc63.host (records: 1) + AAAA fd00:1122:3344:102::21 + name: 6444f8a5-6465-4f0b-a549-1993c113569c.host (records: 1) + AAAA fd00:1122:3344:101::21 + name: 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host (records: 1) + AAAA fd00:1122:3344:102::24 + name: 75b220ba-a0f4-4872-8202-dc7c87f062d0.host (records: 1) + AAAA fd00:1122:3344:103::24 + name: 803bfb63-c246-41db-b0da-d3b87ddfc63d.host (records: 1) + AAAA fd00:1122:3344:101::23 + name: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled (records: 1) + AAAA fd00:1122:3344:101::1 + name: 99e2f30b-3174-40bf-a78a-90da8abba8ca.host (records: 1) + AAAA fd00:1122:3344:1::1 + name: @ (records: 3) + NS ns1.control-plane.oxide.internal + NS ns2.control-plane.oxide.internal + NS ns3.control-plane.oxide.internal + name: _clickhouse-admin-single-server._tcp (records: 1) + SRV port 8888 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse-native._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _clickhouse._tcp (records: 1) + SRV port 8123 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _crucible-pantry._tcp (records: 3) + SRV port 17000 75b220ba-a0f4-4872-8202-dc7c87f062d0.host.control-plane.oxide.internal + SRV port 17000 ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host.control-plane.oxide.internal + SRV port 17000 ba4994a8-23f9-4b1a-a84f-a08d74591389.host.control-plane.oxide.internal + name: _crucible._tcp.5199c033-4cf9-4ab6-8ae7-566bd7606363 (records: 1) + SRV port 32345 5199c033-4cf9-4ab6-8ae7-566bd7606363.host.control-plane.oxide.internal + name: _crucible._tcp.bd354eef-d8a6-4165-9124-283fb5e46d77 (records: 1) + SRV port 32345 bd354eef-d8a6-4165-9124-283fb5e46d77.host.control-plane.oxide.internal + name: _crucible._tcp.f55647d4-5500-4ad3-893a-df45bd50d622 (records: 1) + SRV port 32345 
f55647d4-5500-4ad3-893a-df45bd50d622.host.control-plane.oxide.internal + name: _external-dns._tcp (records: 3) + SRV port 5353 6c3ae381-04f7-41ea-b0ac-74db387dbc3a.host.control-plane.oxide.internal + SRV port 5353 803bfb63-c246-41db-b0da-d3b87ddfc63d.host.control-plane.oxide.internal + SRV port 5353 f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host.control-plane.oxide.internal + name: _internal-ntp._tcp (records: 3) + SRV port 123 62620961-fc4a-481e-968b-f5acbac0dc63.host.control-plane.oxide.internal + SRV port 123 6444f8a5-6465-4f0b-a549-1993c113569c.host.control-plane.oxide.internal + SRV port 123 f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host.control-plane.oxide.internal + name: _nameservice._tcp (records: 3) + SRV port 5353 427ec88f-f467-42fa-9bbb-66a91a36103c.host.control-plane.oxide.internal + SRV port 5353 99e2f30b-3174-40bf-a78a-90da8abba8ca.host.control-plane.oxide.internal + SRV port 5353 ea5b4030-b52f-44b2-8d70-45f15f987d01.host.control-plane.oxide.internal + name: _nexus._tcp (records: 3) + SRV port 12221 0c71b3b2-6ceb-4e8f-b020-b08675e83038.host.control-plane.oxide.internal + SRV port 12221 3eeb8d49-eb1a-43f8-bb64-c2338421c2c6.host.control-plane.oxide.internal + SRV port 12221 466a9f29-62bf-4e63-924a-b9efdb86afec.host.control-plane.oxide.internal + name: _oximeter-reader._tcp (records: 1) + SRV port 9000 353b3b65-20f7-48c3-88f7-495bd5d31545.host.control-plane.oxide.internal + name: _repo-depot._tcp (records: 3) + SRV port 12348 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c.sled.control-plane.oxide.internal + SRV port 12348 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6.sled.control-plane.oxide.internal + SRV port 12348 d81c6a84-79b8-4958-ae41-ea46c9b19763.sled.control-plane.oxide.internal + name: ad6a3a03-8d0f-4504-99a4-cbf73d69b973.host (records: 1) + AAAA fd00:1122:3344:102::25 + name: ba4994a8-23f9-4b1a-a84f-a08d74591389.host (records: 1) + AAAA fd00:1122:3344:101::24 + name: bd354eef-d8a6-4165-9124-283fb5e46d77.host (records: 1) + AAAA fd00:1122:3344:102::26 + name: 
d81c6a84-79b8-4958-ae41-ea46c9b19763.sled (records: 1) + AAAA fd00:1122:3344:103::1 + name: ea5b4030-b52f-44b2-8d70-45f15f987d01.host (records: 1) + AAAA fd00:1122:3344:3::1 + name: f10a4fb9-759f-4a65-b25e-5794ad2d07d8.host (records: 1) + AAAA fd00:1122:3344:103::21 + name: f55647d4-5500-4ad3-893a-df45bd50d622.host (records: 1) + AAAA fd00:1122:3344:103::25 + name: f6ec9c67-946a-4da3-98d5-581f72ce8bf0.host (records: 1) + AAAA fd00:1122:3344:103::23 + name: ns1 (records: 1) + AAAA fd00:1122:3344:1::1 + name: ns2 (records: 1) + AAAA fd00:1122:3344:2::1 + name: ns3 (records: 1) + AAAA fd00:1122:3344:3::1 + +external DNS: + DNS zone: "oxide.example" (unchanged) + name: @ (records: 3) + NS ns1.oxide.example + NS ns2.oxide.example + NS ns3.oxide.example + name: example-silo.sys (records: 3) + A 192.0.2.2 + A 192.0.2.3 + A 192.0.2.4 + name: ns1 (records: 1) + A 198.51.100.1 + name: ns2 (records: 1) + A 198.51.100.2 + name: ns3 (records: 1) + A 198.51.100.3 + + + + +> # Set the target release minimum generation to a large value -- we're going to +> # test that the planner bails if it attempts a rollback of the target release +> # minimum generation. 
+> blueprint-edit latest set-target-release-min-gen 1000 +blueprint afb09faf-a586-4483-9289-04d4f1d8ba23 created from latest blueprint (c1a0d242-9160-40f4-96ae-61f8f40a0b1b): set target release minimum generation to 1000 + +> sled-set serial1 mupdate-override cc724abe-80c1-47e6-9771-19e6540531a9 +set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c mupdate override: error -> cc724abe-80c1-47e6-9771-19e6540531a9 + +> inventory-generate +generated inventory collection 0b5efbb3-0b1b-4bbf-b7d8-a2d6fca074c6 from configured sleds + +> blueprint-plan latest latest +INFO blueprint mupdate override updated to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, new_bp_override: cc724abe-80c1-47e6-9771-19e6540531a9, prev_bp_override: None, zones: + - zone 353b3b65-20f7-48c3-88f7-495bd5d31545 (Clickhouse) left unchanged, image source: install dataset + - zone 466a9f29-62bf-4e63-924a-b9efdb86afec (Nexus) left unchanged, image source: install dataset + - zone 62620961-fc4a-481e-968b-f5acbac0dc63 (InternalNtp) left unchanged, image source: install dataset + - zone 6c3ae381-04f7-41ea-b0ac-74db387dbc3a (ExternalDns) left unchanged, image source: install dataset + - zone 99e2f30b-3174-40bf-a78a-90da8abba8ca (InternalDns) left unchanged, image source: install dataset + - zone ad6a3a03-8d0f-4504-99a4-cbf73d69b973 (CruciblePantry) left unchanged, image source: install dataset + - zone bd354eef-d8a6-4165-9124-283fb5e46d77 (Crucible) left unchanged, image source: install dataset +, host_phase_2: + - host phase 2 slot A: current contents (unchanged) + - host phase 2 slot B: current contents (unchanged) + +INFO no previous MGS update found as part of updating blueprint mupdate override to match inventory, phase: do_plan_mupdate_override, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c +error: generating blueprint: target release minimum generation was set to 1000, but we tried to set it to the older generation 5, indicating a possible table rollback 
which should not happen + diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 95a4c206dd..ad8bfe4148 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -80,12 +80,14 @@ sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c: install dataset updated: simulated er > # On a third sled, update the install dataset and simulate a mupdate override. -> # (Currently we do this in the blueprint, but with -> # https://github.com/oxidecomputer/omicron/pull/8456 we should update this test and -> # set a mupdate-override on the sled directly.) +> # Also set it in the blueprint -- this simulates the situation where the mupdate +> # override is in progress and will be cleared in the future. > sled-update-install-dataset serial2 --to-target-release sled d81c6a84-79b8-4958-ae41-ea46c9b19763: install dataset updated: to target release (system version 1.0.0) +> sled-set serial2 mupdate-override ffffffff-ffff-ffff-ffff-ffffffffffff +set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 mupdate override: unset -> ffffffff-ffff-ffff-ffff-ffffffffffff + > blueprint-edit latest set-remove-mupdate-override serial2 ffffffff-ffff-ffff-ffff-ffffffffffff blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 created from latest blueprint (dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21): set remove_mupdate_override to ffffffff-ffff-ffff-ffff-ffffffffffff @@ -156,6 +158,8 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > blueprint-plan latest latest WARN skipping zones eligible for cleanup check (sled not present in latest inventory collection), sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +WARN no inventory found for in-service sled, phase: do_plan_mupdate_override, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +INFO not ready to add or update new zones yet, phase: 
do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 WARN skipped noop image source check on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, reason: error retrieving zone manifest: reconfigurator-sim simulated error: simulated error obtaining zone manifest INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0, num_eligible: 6, num_ineligible: 0 INFO performed noop image source checks on sled, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 0, num_eligible: 5, num_ineligible: 1 @@ -167,19 +171,6 @@ INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-e INFO skipped noop image source check on sled, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, reason: sled not found in inventory INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0 INFO noop converting 5/6 install-dataset zones to artifact store, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 0 -INFO parent blueprint contains NTP zone, but it's not in inventory yet, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e -INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 -INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient ExternalDns zones exist in plan, desired_count: 
3, current_count: 3 -INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 -INFO configuring SP update, artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 -INFO reached maximum number of pending SP updates, max: 1 INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify generated blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 based on parent blueprint 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 @@ -351,14 +342,6 @@ to: blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 generation: 1 (unchanged) read from:: SingleNode (unchanged) - PENDING MGS UPDATES: - - Pending MGS-managed updates (all baseboards): - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - sp_type slot part_number serial_number artifact_hash artifact_version details - ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -+ sled 0 model0 serial0 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670 1.0.0 Sp { expected_active_version: ArtifactVersion("0.0.1"), expected_inactive_version: NoValidVersion } - internal DNS: * DNS zone: "control-plane.oxide.internal": @@ -528,6 +511,7 @@ set sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e inventory visibility: hidden -> vi generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan latest latest +INFO 
not ready to add or update new zones yet, phase: do_plan_mupdate_override, reasons: sleds have remove mupdate override set in blueprint: d81c6a84-79b8-4958-ae41-ea46c9b19763 WARN skipped noop image source check on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, reason: error retrieving zone manifest: reconfigurator-sim simulated error: simulated error obtaining zone manifest INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 INFO performed noop image source checks on sled, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 5, num_eligible: 0, num_ineligible: 1 @@ -538,18 +522,6 @@ INFO install dataset artifact hash not found in TUF repo, ignoring for noop chec INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (ffffffff-ffff-ffff-ffff-ffffffffffff) INFO performed noop image source checks on sled, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, num_total: 2, num_already_artifact: 0, num_eligible: 2, num_ineligible: 0 INFO noop converting 2/2 install-dataset zones to artifact store, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, num_total: 2, num_already_artifact: 0 -INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 -INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient ClickhouseServer zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CockroachDb zones exist in plan, desired_count: 0, current_count: 0 -INFO sufficient CruciblePantry zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient InternalDns zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient ExternalDns zones exist in plan, 
desired_count: 3, current_count: 3 -INFO sufficient Nexus zones exist in plan, desired_count: 3, current_count: 3 -INFO sufficient Oximeter zones exist in plan, desired_count: 0, current_count: 0 -INFO SP update not yet completed (will keep it), artifact_version: 1.0.0, artifact_hash: 7e6667e646ad001b54c8365a3d309c03f89c59102723d38d01697ee8079fe670, expected_inactive_version: NoValidVersion, expected_active_version: 0.0.1, component: sp, sp_slot: 0, sp_type: Sled, serial_number: serial0, part_number: model0 -INFO reached maximum number of pending SP updates, max: 1 INFO will ensure cockroachdb setting, setting: cluster.preserve_downgrade_option, value: DoNotModify generated blueprint af934083-59b5-4bf6-8966-6fb5292c29e1 based on parent blueprint 58d5e830-0884-47d8-a7cd-b2b3751adeb4 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout index 149410242a..c341265192 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-set-remove-mupdate-override-stdout @@ -658,7 +658,7 @@ parent: afb09faf-a586-4483-9289-04d4f1d8ba23 METADATA: created by::::::::::::: reconfigurator-cli created at::::::::::::: - comment:::::::::::::::: (none) + comment:::::::::::::::: updated target release minimum generation from 1 to 2 internal DNS version::: 1 external DNS version::: 1 target release min gen: 2 diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 6d4725b670..ce4a2db858 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -37,6 +37,7 @@ slog-error-chain.workspace = true sp-sim.workspace = true static_assertions.workspace = true strum.workspace = true +swrite.workspace = true thiserror.workspace = true tufaceous-artifact.workspace = true typed-rng.workspace = true diff 
--git a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs index 3ec4aef2ef..d944cb954a 100644 --- a/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs +++ b/nexus/reconfigurator/planning/src/blueprint_builder/builder.rs @@ -14,6 +14,9 @@ use crate::blueprint_editor::ExternalSnatNetworkingChoice; use crate::blueprint_editor::NoAvailableDnsSubnets; use crate::blueprint_editor::SledEditError; use crate::blueprint_editor::SledEditor; +use crate::planner::NoopConvertGlobalIneligibleReason; +use crate::planner::NoopConvertInfo; +use crate::planner::NoopConvertSledIneligibleReason; use crate::planner::ZoneExpungeReason; use crate::planner::rng::PlannerRng; use anyhow::Context as _; @@ -21,6 +24,9 @@ use anyhow::anyhow; use anyhow::bail; use clickhouse_admin_types::OXIMETER_CLUSTER; use id_map::IdMap; +use iddqd::IdOrdItem; +use iddqd::IdOrdMap; +use iddqd::id_upcast; use itertools::Either; use nexus_inventory::now_db_precision; use nexus_sled_agent_shared::inventory::OmicronZoneDataset; @@ -43,6 +49,7 @@ use nexus_types::deployment::OmicronZoneExternalFloatingAddr; use nexus_types::deployment::OmicronZoneExternalFloatingIp; use nexus_types::deployment::OmicronZoneExternalSnatIp; use nexus_types::deployment::OximeterReadMode; +use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::PendingMgsUpdates; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledFilter; @@ -86,6 +93,8 @@ use std::net::IpAddr; use std::net::Ipv6Addr; use std::net::SocketAddr; use std::net::SocketAddrV6; +use swrite::SWrite; +use swrite::swriteln; use thiserror::Error; use super::ClickhouseZonesThatShouldBeRunning; @@ -138,6 +147,15 @@ pub enum Error { expected: Generation, actual: Generation, }, + #[error( + "target release minimum generation was set to {current}, \ + but we tried to set it to the older generation {new}, indicating a \ + possible table rollback which 
should not happen" + )] + TargetReleaseMinimumGenerationRollback { + current: Generation, + new: Generation, + }, #[error(transparent)] TufRepoContentsError(#[from] TufRepoContentsError), } @@ -269,6 +287,29 @@ impl From for SledEditCounts { } } +/// A list of scalar (primitive) values which have been edited on a sled. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct EditedSledScalarEdits { + /// Whether the remove_mupdate_override field was modified. + pub remove_mupdate_override: bool, + /// Whether the debug operation to force a Sled Agent generation bump was + /// set. + pub debug_force_generation_bump: bool, +} + +impl EditedSledScalarEdits { + pub fn zeroes() -> Self { + Self { + debug_force_generation_bump: false, + remove_mupdate_override: false, + } + } + + pub fn has_edits(&self) -> bool { + self.debug_force_generation_bump || self.remove_mupdate_override + } +} + /// Describes operations which the BlueprintBuilder has performed to arrive /// at its state. /// @@ -308,6 +349,10 @@ pub(crate) enum Operation { num_datasets_expunged: usize, num_zones_expunged: usize, }, + SetTargetReleaseMinimumGeneration { + current_generation: Generation, + new_generation: Generation, + }, SledNoopZoneImageSourcesUpdated { sled_id: SledUuid, count: usize, @@ -383,6 +428,16 @@ impl fmt::Display for Operation { zone image source updates" ) } + Self::SetTargetReleaseMinimumGeneration { + current_generation, + new_generation, + } => { + write!( + f, + "updated target release minimum generation from \ + {current_generation} to {new_generation}" + ) + } } } } @@ -652,9 +707,14 @@ impl<'a> BlueprintBuilder<'a> { // are no longer in service and need expungement work. 
let mut sleds = BTreeMap::new(); for (sled_id, editor) in self.sled_editors { - let EditedSled { config, edit_counts } = editor.finalize(); + let EditedSled { config, edit_counts, scalar_edits } = + editor.finalize(); sleds.insert(sled_id, config); - if edit_counts.has_nonzero_counts() { + if edit_counts.has_nonzero_counts() || scalar_edits.has_edits() { + let EditedSledScalarEdits { + debug_force_generation_bump, + remove_mupdate_override, + } = scalar_edits; debug!( self.log, "sled modified in new blueprint"; "sled_id" => %sled_id, @@ -662,6 +722,8 @@ impl<'a> BlueprintBuilder<'a> { "disk_edits" => ?edit_counts.disks, "dataset_edits" => ?edit_counts.datasets, "zone_edits" => ?edit_counts.zones, + "debug_force_generation_bump" => debug_force_generation_bump, + "remove_mupdate_override_modified" => remove_mupdate_override, ); } else { debug!( @@ -1163,6 +1225,39 @@ impl<'a> BlueprintBuilder<'a> { Ok(editor.get_remove_mupdate_override()) } + /// Updates a sled's mupdate override field based on the mupdate override + /// provided by inventory. + pub(crate) fn sled_ensure_mupdate_override( + &mut self, + sled_id: SledUuid, + inv_mupdate_override_id: Option, + noop_info: &mut NoopConvertInfo, + ) -> Result { + let editor = self.sled_editors.get_mut(&sled_id).ok_or_else(|| { + Error::Planner(anyhow!( + "tried to ensure mupdate override for unknown sled {sled_id}" + )) + })?; + + // Also map the editor to the corresponding PendingMgsUpdates. 
+ let sled_details = self + .input + .sled_lookup(SledFilter::InService, sled_id) + .map_err(|error| Error::Planner(anyhow!(error)))?; + // TODO: simplify down to &BaseboardId + let baseboard_id = Arc::new(sled_details.baseboard_id.clone()); + let pending_mgs_update = self.pending_mgs_updates.entry(baseboard_id); + let noop_sled_info = noop_info.sled_info_mut(sled_id)?; + + editor + .ensure_mupdate_override( + inv_mupdate_override_id, + pending_mgs_update, + noop_sled_info, + ) + .map_err(|err| Error::SledEditError { sled_id, err }) + } + fn next_internal_dns_gz_address_index(&self, sled_id: SledUuid) -> u32 { let used_internal_dns_gz_address_indices = self .current_sled_zones( @@ -1994,21 +2089,29 @@ impl<'a> BlueprintBuilder<'a> { .len() } + /// Get the value of `target_release_minimum_generation`. + pub fn target_release_minimum_generation(&self) -> Generation { + self.target_release_minimum_generation + } + /// Given the current value of `target_release_minimum_generation`, set the /// new value for this blueprint. pub fn set_target_release_minimum_generation( &mut self, - current: Generation, - target_release_minimum_generation: Generation, + current_generation: Generation, + new_generation: Generation, ) -> Result<(), Error> { - if self.target_release_minimum_generation != current { + if self.target_release_minimum_generation != current_generation { return Err(Error::TargetReleaseMinimumGenerationMismatch { - expected: current, + expected: current_generation, actual: self.target_release_minimum_generation, }); } - self.target_release_minimum_generation = - target_release_minimum_generation; + self.target_release_minimum_generation = new_generation; + self.record_operation(Operation::SetTargetReleaseMinimumGeneration { + current_generation, + new_generation, + }); Ok(()) } @@ -2242,6 +2345,258 @@ pub(super) fn ensure_input_networking_records_appear_in_parent_blueprint( Ok(()) } +/// The result of an `ensure_mupdate_override` call for a particular sled. 
+#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum EnsureMupdateOverrideAction { + /// The inventory and blueprint overrides are consistent, so no action was + /// taken. + NoAction { + /// The mupdate override currently in place. + mupdate_override: Option, + }, + /// Inventory had an override that didn't match what was in the blueprint, + /// so the blueprint was updated to match the inventory. + BpSetOverride { + /// The override ID that was set. + inv_override: MupdateOverrideUuid, + /// The previous blueprint override that was removed. + prev_bp_override: Option, + /// The zones which were updated to the install dataset, along with + /// their old values. + zones: IdOrdMap, + /// The pending MGS update that was cleared, if any. + prev_mgs_update: Option>, + /// The previous host phase 2 contents. + prev_host_phase_2: BlueprintHostPhase2DesiredSlots, + }, + /// The inventory did not have an override but the blueprint did, and other + /// conditions were met, so the blueprint's override was cleared. + BpClearOverride { + /// The previous blueprint override that was removed. + prev_bp_override: MupdateOverrideUuid, + }, + /// The inventory did not have an override but the blueprint did, but some + /// zones' image sources can't be converted over to Artifact, so the + /// blueprint's override was left in place. + BpOverrideNotCleared { + /// The blueprint override that was not removed. + bp_override: MupdateOverrideUuid, + /// The reason the blueprint override was not cleared. + reason: BpMupdateOverrideNotClearedReason, + }, + /// Sled Agent encountered an error retrieving the mupdate + /// override + /// from the inventory. + GetOverrideError { + /// An error message. 
+ message: String, + }, +} + +impl EnsureMupdateOverrideAction { + pub fn log_to(&self, log: &slog::Logger) { + match self { + EnsureMupdateOverrideAction::NoAction { mupdate_override } => { + debug!( + log, + "no mupdate override action taken, current value left unchanged"; + "mupdate_override" => ?mupdate_override, + ); + } + EnsureMupdateOverrideAction::BpSetOverride { + inv_override, + prev_bp_override, + zones, + prev_mgs_update, + prev_host_phase_2, + } => { + let mut zones_desc = String::new(); + if zones.is_empty() { + zones_desc.push_str("(none)"); + } else { + // Add a newline before the first zone -- it makes it easier + // to read in log output. + zones_desc.push('\n'); + for zone in zones { + swriteln!(zones_desc, " - {}", zone); + } + } + + let mut host_phase_2_desc = String::from("\n"); + let BlueprintHostPhase2DesiredSlots { slot_a, slot_b } = + prev_host_phase_2; + match slot_a { + BlueprintHostPhase2DesiredContents::CurrentContents => { + swriteln!( + host_phase_2_desc, + " - host phase 2 slot A: current contents (unchanged)" + ); + } + BlueprintHostPhase2DesiredContents::Artifact { + version, + hash, + } => { + swriteln!( + host_phase_2_desc, + " - host phase 2 slot A: updated from artifact \ + (version {}, hash {}) to preserving current contents", + version, + hash + ); + } + } + match slot_b { + BlueprintHostPhase2DesiredContents::CurrentContents => { + swriteln!( + host_phase_2_desc, + " - host phase 2 slot B: current contents (unchanged)" + ); + } + BlueprintHostPhase2DesiredContents::Artifact { + version, + hash, + } => { + swriteln!( + host_phase_2_desc, + " - host phase 2 slot B: updated from artifact \ + (version {}, hash {}) to preserving current contents", + version, + hash + ); + } + } + + info!( + log, + "blueprint mupdate override updated to match inventory"; + "new_bp_override" => %inv_override, + "prev_bp_override" => ?prev_bp_override, + "zones" => zones_desc, + "host_phase_2" => host_phase_2_desc, + ); + if let 
Some(prev_mgs_update) = prev_mgs_update { + info!( + log, + "previous MGS update cleared as part of updating \ + blueprint mupdate override to match inventory"; + prev_mgs_update, + ); + } else { + info!( + log, + "no previous MGS update found as part of updating \ + blueprint mupdate override to match inventory", + ); + } + } + EnsureMupdateOverrideAction::BpClearOverride { + prev_bp_override, + } => { + info!( + log, + "inventory override no longer exists, blueprint override \ + cleared"; + "prev_bp_override" => %prev_bp_override, + ) + } + EnsureMupdateOverrideAction::BpOverrideNotCleared { + bp_override, + reason, + } => { + info!( + log, + "inventory override no longer exists, but blueprint \ + override could not be cleared"; + "bp_override" => %bp_override, + "reason" => %reason, + ); + } + EnsureMupdateOverrideAction::GetOverrideError { message } => { + error!( + log, + "error getting mupdate override info for sled, \ + not altering blueprint override"; + "message" => %message, + ); + } + } + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct EnsureMupdateOverrideUpdatedZone { + /// The ID of the zone. + pub zone_id: OmicronZoneUuid, + + /// The Omicron zone kind. + pub kind: ZoneKind, + + /// The previous image source. + pub old_image_source: BlueprintZoneImageSource, + + /// The new image source. 
+ pub new_image_source: BlueprintZoneImageSource, +} + +impl fmt::Display for EnsureMupdateOverrideUpdatedZone { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.old_image_source == self.new_image_source { + write!( + f, + "zone {} ({:?}) left unchanged, image source: {}", + self.zone_id, self.kind, self.old_image_source, + ) + } else { + write!( + f, + "zone {} ({:?}) updated from {} to {}", + self.zone_id, + self.kind, + self.old_image_source, + self.new_image_source, + ) + } + } +} + +impl IdOrdItem for EnsureMupdateOverrideUpdatedZone { + type Key<'a> = OmicronZoneUuid; + fn key(&self) -> Self::Key<'_> { + self.zone_id + } + id_upcast!(); +} + +/// The reason a blueprint's mupdate override for a sled was not cleared, even +/// though inventory no longer has the mupdate override. +#[derive(Clone, Debug, Eq, PartialEq)] +pub(crate) enum BpMupdateOverrideNotClearedReason { + /// There is a global reason noop conversions are not possible. + NoopGlobalIneligible(NoopConvertGlobalIneligibleReason), + + /// There is a sled-specific reason noop conversions are not possible. 
+ NoopSledIneligible(NoopConvertSledIneligibleReason), +} + +impl fmt::Display for BpMupdateOverrideNotClearedReason { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BpMupdateOverrideNotClearedReason::NoopGlobalIneligible(reason) => { + write!( + f, + "no sleds can be noop-converted to Artifact: {reason}", + ) + } + BpMupdateOverrideNotClearedReason::NoopSledIneligible(reason) => { + write!( + f, + "this sled cannot be noop-converted to Artifact: {reason}", + ) + } + } + } +} + #[cfg(test)] pub mod test { use super::*; diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs index 613bd79230..497e16247f 100644 --- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs +++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs @@ -4,9 +4,19 @@ //! Support for editing the blueprint details of a single sled. +use crate::blueprint_builder::BpMupdateOverrideNotClearedReason; +use crate::blueprint_builder::EditedSledScalarEdits; +use crate::blueprint_builder::EnsureMupdateOverrideAction; +use crate::blueprint_builder::EnsureMupdateOverrideUpdatedZone; use crate::blueprint_builder::SledEditCounts; +use crate::planner::NoopConvertSledEligible; +use crate::planner::NoopConvertSledIneligibleReason; +use crate::planner::NoopConvertSledInfoMut; +use crate::planner::NoopConvertSledStatus; use crate::planner::SledPlannerRng; use host_phase_2::HostPhase2Editor; +use id_map::Entry; +use iddqd::IdOrdMap; use illumos_utils::zpool::ZpoolName; use itertools::Either; use nexus_sled_agent_shared::inventory::ZoneKind; @@ -21,6 +31,7 @@ use nexus_types::deployment::BlueprintZoneConfig; use nexus_types::deployment::BlueprintZoneDisposition; use nexus_types::deployment::BlueprintZoneImageSource; use nexus_types::deployment::BlueprintZoneType; +use nexus_types::deployment::PendingMgsUpdate; use nexus_types::deployment::blueprint_zone_type; use 
nexus_types::external_api::views::SledState; use omicron_common::address::Ipv6Subnet; @@ -121,6 +132,14 @@ pub enum SledEditError { ZoneOnNonexistentZpool { zone_id: OmicronZoneUuid, zpool: ZpoolName }, #[error("ran out of underlay IP addresses")] OutOfUnderlayIps, + #[error( + "noop conversion info's mupdate_override_id ({noop_id:?}) didn't \ + match cached value in blueprint ({blueprint_id})" + )] + NoopMupdateOverrideMismatch { + noop_id: Option, + blueprint_id: MupdateOverrideUuid, + }, } #[derive(Debug)] @@ -158,8 +177,11 @@ impl SledEditor { SledState::Decommissioned, "for_existing_decommissioned called on non-decommissioned sled" ); - let inner = - EditedSled { config, edit_counts: SledEditCounts::zeroes() }; + let inner = EditedSled { + config, + edit_counts: SledEditCounts::zeroes(), + scalar_edits: EditedSledScalarEdits::zeroes(), + }; Ok(Self(InnerSledEditor::Decommissioned(inner))) } @@ -387,6 +409,21 @@ impl SledEditor { Ok(()) } + /// Updates a sled's mupdate override field based on the mupdate override + /// provided by inventory. + pub fn ensure_mupdate_override( + &mut self, + inv_mupdate_override_id: Option, + pending_mgs_update: Entry<'_, PendingMgsUpdate>, + noop_sled_info: NoopConvertSledInfoMut<'_>, + ) -> Result { + self.as_active_mut()?.ensure_mupdate_override( + inv_mupdate_override_id, + pending_mgs_update, + noop_sled_info, + ) + } + /// Sets remove-mupdate-override configuration for this sled. /// /// Currently only used in test code. 
@@ -436,6 +473,7 @@ struct ActiveSledEditor { pub(crate) struct EditedSled { pub config: BlueprintSledConfig, pub edit_counts: SledEditCounts, + pub scalar_edits: EditedSledScalarEdits, } impl ActiveSledEditor { @@ -500,6 +538,11 @@ impl ActiveSledEditor { let changed_host_phase_2 = self.host_phase_2.is_modified(); let mut sled_agent_generation = self.incoming_sled_agent_generation; + let scalar_edits = EditedSledScalarEdits { + debug_force_generation_bump: self.debug_force_generation_bump, + remove_mupdate_override: remove_mupdate_override_is_modified, + }; + // Bump the generation if we made any changes of concern to sled-agent. if self.debug_force_generation_bump || disks_counts.has_nonzero_counts() @@ -528,6 +571,7 @@ impl ActiveSledEditor { datasets: datasets_counts, zones: zones_counts, }, + scalar_edits, } } @@ -714,12 +758,14 @@ impl ActiveSledEditor { Ok(self.zones.set_zone_image_source(zone_id, image_source)?) } - // Sets the desired host phase 2 contents for this sled. + /// Sets the desired host phase 2 contents for this sled. + /// + /// Returns the old host phase 2 contents. pub fn set_host_phase_2( &mut self, host_phase_2: BlueprintHostPhase2DesiredSlots, - ) { - self.host_phase_2.set_value(host_phase_2); + ) -> BlueprintHostPhase2DesiredSlots { + self.host_phase_2.set_value(host_phase_2) } // Sets the desired host phase 2 contents for a specific slot on this sled. @@ -745,6 +791,189 @@ impl ActiveSledEditor { Ok(()) } + /// Update a sled's mupdate override field based on the mupdate override + /// provided by inventory. 
+    pub fn ensure_mupdate_override(
+        &mut self,
+        inv_mupdate_override_id: Option,
+        pending_mgs_update: Entry<'_, PendingMgsUpdate>,
+        noop_sled_info: NoopConvertSledInfoMut<'_>,
+    ) -> Result {
+        match (inv_mupdate_override_id, *self.remove_mupdate_override.value()) {
+            (Some(inv_override), Some(bp_override))
+                if inv_override == bp_override =>
+            {
+                // If the inventory and blueprint overrides are the same, the
+                // sled agent hasn't yet removed the override. Nothing to do at
+                // the moment.
+                Ok(EnsureMupdateOverrideAction::NoAction {
+                    mupdate_override: Some(inv_override),
+                })
+            }
+            (Some(inv_override), bp_override) => {
+                // Inventory says there's an override in place, but the
+                // blueprint doesn't (or has a different override in place).
+                // This means that a MUPdate happened since we last did
+                // blueprint planning.
+                //
+                // Set the blueprint's remove_mupdate_override.
+                self.set_remove_mupdate_override(Some(inv_override));
+                // Also update the cached value inside `noop_sled_info`.
+                if let NoopConvertSledInfoMut::Ok(mut info) = noop_sled_info {
+                    use NoopConvertSledIneligibleReason::*;
+
+                    match &mut info.status {
+                        NoopConvertSledStatus::Ineligible(
+                            MupdateOverride { mupdate_override_id, .. },
+                        ) => {
+                            *mupdate_override_id = inv_override;
+                        }
+                        NoopConvertSledStatus::Ineligible(_) => {
+                            // Some other reason -- sled remains ineligible.
+                        }
+                        NoopConvertSledStatus::Eligible(eligible) => {
+                            // Transition from Eligible to Ineligible with the
+                            // new override.
+                            let zones = mem::replace(
+                                &mut eligible.zones,
+                                IdOrdMap::new(),
+                            );
+                            info.status = NoopConvertSledStatus::Ineligible(
+                                MupdateOverride {
+                                    mupdate_override_id: inv_override,
+                                    zones,
+                                },
+                            );
+                        }
+                    }
+                }
+
+                // Set all zone image sources to InstallDataset. This is an
+                // acknowledgement of the current state of the world. 
+ let zone_ids: Vec<_> = self + .zones(BlueprintZoneDisposition::is_in_service) + .map(|zone| (zone.id, zone.kind())) + .collect(); + + let mut zones = IdOrdMap::with_capacity(zone_ids.len()); + for (zone_id, kind) in zone_ids { + let old_image_source = self.zones.set_zone_image_source( + &zone_id, + BlueprintZoneImageSource::InstallDataset, + )?; + let item = EnsureMupdateOverrideUpdatedZone { + zone_id, + kind, + old_image_source, + new_image_source: + BlueprintZoneImageSource::InstallDataset, + }; + zones.insert_unique(item).expect( + "self.zones is a BTreeMap so zone IDs are unique", + ); + } + + // Clear out the pending MGS update for this sled. + let prev_mgs_update = match pending_mgs_update { + Entry::Vacant(_) => None, + Entry::Occupied(entry) => Some(Box::new(entry.remove())), + }; + + // Clear out the host phase 2 information for this sled as well. + let prev_host_phase_2 = self.set_host_phase_2( + BlueprintHostPhase2DesiredSlots::current_contents(), + ); + + Ok(EnsureMupdateOverrideAction::BpSetOverride { + inv_override, + prev_bp_override: bp_override, + zones, + prev_mgs_update, + prev_host_phase_2, + }) + } + (None, Some(bp_override)) => { + // The blueprint says there's an override in place, but the + // inventory doesn't. This means that the sled has removed its + // override that was set in the above branch. + // + // However, the blueprint's remove_mupdate_override remains in + // place until all zones' image sources can be noop-converted to + // Artifact. We do this to minimize the number of different + // versions of software that exist. + use BpMupdateOverrideNotClearedReason::*; + + match noop_sled_info { + NoopConvertSledInfoMut::Ok(mut info) => match &mut info + .status + { + NoopConvertSledStatus::Ineligible( + NoopConvertSledIneligibleReason::MupdateOverride { + mupdate_override_id, + zones, + }, + ) => { + // Check that the mupdate override is the same as + // what's in the blueprint. 
+ if *mupdate_override_id == bp_override { + // TODO: Check if any zones are ineligible for + // conversion, and don't clear the mupdate + // override if so. We'll also need similar + // checks for Hubris and host phase 2 images. + + // Clear the mupdate override field. + // + // The actual conversion process will happen + // later, during do_plan_noop_image_source. + self.set_remove_mupdate_override(None); + let zones = + mem::replace(zones, IdOrdMap::new()); + info.status = NoopConvertSledStatus::Eligible( + NoopConvertSledEligible { zones }, + ); + Ok(EnsureMupdateOverrideAction::BpClearOverride { + prev_bp_override: bp_override, + }) + } else { + Err( + SledEditError::NoopMupdateOverrideMismatch { + noop_id: Some(*mupdate_override_id), + blueprint_id: bp_override, + }, + ) + } + } + NoopConvertSledStatus::Ineligible(reason) => Ok( + EnsureMupdateOverrideAction::BpOverrideNotCleared { + bp_override, + reason: NoopSledIneligible(reason.clone()), + }, + ), + NoopConvertSledStatus::Eligible(_) => { + // If the override is set, then we should always be + // in the Ineligible state (handled above). + Err(SledEditError::NoopMupdateOverrideMismatch { + noop_id: None, + blueprint_id: bp_override, + }) + } + }, + NoopConvertSledInfoMut::GlobalIneligible(reason) => { + Ok(EnsureMupdateOverrideAction::BpOverrideNotCleared { + bp_override, + reason: NoopGlobalIneligible(reason.clone()), + }) + } + } + } + (None, None) => { + // No override in place, nothing to do. + Ok(EnsureMupdateOverrideAction::NoAction { + mupdate_override: None, + }) + } + } + } + /// Set remove-mupdate-override configuration for this sled. 
pub fn set_remove_mupdate_override( &mut self, diff --git a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/host_phase_2.rs b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/host_phase_2.rs index 7852a353d1..d1a24db9e7 100644 --- a/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/host_phase_2.rs +++ b/nexus/reconfigurator/planning/src/blueprint_editor/sled_editor/host_phase_2.rs @@ -22,10 +22,19 @@ impl HostPhase2Editor { } } - pub fn set_value(&mut self, host_phase_2: BlueprintHostPhase2DesiredSlots) { + /// Set the host phase 2 information for this sled, returning the previous value. + pub fn set_value( + &mut self, + host_phase_2: BlueprintHostPhase2DesiredSlots, + ) -> BlueprintHostPhase2DesiredSlots { let BlueprintHostPhase2DesiredSlots { slot_a, slot_b } = host_phase_2; + let previous = BlueprintHostPhase2DesiredSlots { + slot_a: self.slot_a.value().clone(), + slot_b: self.slot_b.value().clone(), + }; self.slot_a.set_value(slot_a); self.slot_b.set_value(slot_b); + previous } pub fn set_slot( diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index 9b13b6323b..eb55b313b3 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -9,16 +9,16 @@ use crate::blueprint_builder::BlueprintBuilder; use crate::blueprint_builder::Ensure; use crate::blueprint_builder::EnsureMultiple; +use crate::blueprint_builder::EnsureMupdateOverrideAction; use crate::blueprint_builder::Error; use crate::blueprint_builder::Operation; use crate::blueprint_editor::DisksEditError; use crate::blueprint_editor::SledEditError; use crate::mgs_updates::plan_mgs_updates; -use crate::planner::image_source::NoopConvertInfo; -use crate::planner::image_source::NoopConvertSledStatus; use crate::planner::image_source::NoopConvertZoneStatus; use crate::planner::omicron_zone_placement::PlacementError; use gateway_client::types::SpType; +use 
itertools::Itertools; use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult; use nexus_sled_agent_shared::inventory::OmicronZoneImageSource; use nexus_sled_agent_shared::inventory::OmicronZoneType; @@ -48,12 +48,19 @@ use omicron_uuid_kinds::PhysicalDiskUuid; use omicron_uuid_kinds::SledUuid; use slog::debug; use slog::error; +use slog::o; use slog::{Logger, info, warn}; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::str::FromStr; +pub(crate) use self::image_source::NoopConvertGlobalIneligibleReason; +pub(crate) use self::image_source::NoopConvertInfo; +pub(crate) use self::image_source::NoopConvertSledEligible; +pub(crate) use self::image_source::NoopConvertSledIneligibleReason; +pub(crate) use self::image_source::NoopConvertSledInfoMut; +pub(crate) use self::image_source::NoopConvertSledStatus; pub(crate) use self::omicron_zone_placement::DiscretionaryOmicronZone; use self::omicron_zone_placement::OmicronZonePlacement; use self::omicron_zone_placement::OmicronZonePlacementSledState; @@ -160,16 +167,36 @@ impl<'a> Planner<'a> { self.do_plan_expunge()?; self.do_plan_decommission()?; - let noop_info = + let mut noop_info = NoopConvertInfo::new(self.input, self.inventory, &self.blueprint)?; + + let plan_mupdate_override_res = + self.do_plan_mupdate_override(&mut noop_info)?; + + // Log noop-convert results after do_plan_mupdate_override, because this + // step might alter noop_info. noop_info.log_to(&self.log); + // Within `do_plan_noop_image_source`, we plan noop image sources on + // sleds other than those currently affected by mupdate overrides. This + // means that we don't have to wait for the `plan_mupdate_override_res` + // result for that step. 
self.do_plan_noop_image_source(noop_info)?; - self.do_plan_add()?; - if let UpdateStepResult::ContinueToNextStep = self.do_plan_mgs_updates() + + if let UpdateStepResult::ContinueToNextStep = plan_mupdate_override_res { - self.do_plan_zone_updates()?; + // If do_plan_mupdate_override returns Waiting, we don't plan *any* + // additional steps until the system has recovered. + self.do_plan_add()?; + if let UpdateStepResult::ContinueToNextStep = + self.do_plan_mgs_updates() + { + self.do_plan_zone_updates()?; + } } + + // CockroachDB settings aren't dependent on zones, so they can be + // planned independently of the rest of the system. self.do_plan_cockroachdb_settings(); Ok(()) } @@ -1388,6 +1415,210 @@ impl<'a> Planner<'a> { Ok(()) } + fn do_plan_mupdate_override( + &mut self, + noop_info: &mut NoopConvertInfo, + ) -> Result { + // For each sled, compare what's in the inventory to what's in the + // blueprint. + let mut actions_by_sled = BTreeMap::new(); + let log = self.log.new(o!("phase" => "do_plan_mupdate_override")); + + // We use the list of in-service sleds here -- we don't want to alter + // expunged or decommissioned sleds. + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + let log = log.new(o!("sled_id" => sled_id.to_string())); + let Some(inv_sled) = self.inventory.sled_agents.get(&sled_id) + else { + warn!(log, "no inventory found for in-service sled"); + continue; + }; + let action = match &inv_sled + .zone_image_resolver + .mupdate_override + .boot_override + { + Ok(inv_mupdate_override) => { + self.blueprint.sled_ensure_mupdate_override( + sled_id, + inv_mupdate_override + .as_ref() + .map(|inv| inv.mupdate_override_id), + noop_info, + )? + } + Err(message) => EnsureMupdateOverrideAction::GetOverrideError { + message: message.clone(), + }, + }; + action.log_to(&log); + actions_by_sled.insert(sled_id, action); + } + + // As a result of the action above, did any sleds get a new mupdate + // override in the blueprint? 
In that case, halt consideration of + // updates by setting the target_release_minimum_generation. + // + // Note that this is edge-triggered, not level-triggered. This is a + // domain requirement. Consider what happens if: + // + // 1. Let's say the target release generation is 5. + // 2. A sled is mupdated. + // 3. As a result of the mupdate, we update the target release minimum + // generation to 6. + // 4. Then, an operator sets the target release generation to 6. + // + // At this point, we *do not* want to set the blueprint's minimum + // generation to 7. We only want to do it if we acknowledged a new sled + // getting mupdated. + // + // Some notes: + // + // * We only process sleds that are currently in the inventory. This + // means that if some sleds take longer to come back up than others + // and the target release is updated in the middle, we'll potentially + // bump the minimum generation multiple times, asking the operator to + // intervene each time. + // + // It's worth considering ways to mitigate this in the future: for + // example, we could ensure that for a particular TUF repo a shared + // mupdate override ID is assigned by wicketd, and track the override + // IDs that are currently in flight. + // + // * We aren't handling errors while fetching the mupdate override here. + // We don't have a history of state transitions for the mupdate + // override, so we can't do edge-triggered logic. We probably need + // another channel to report errors. (But in general, errors should be + // rare.) + if actions_by_sled.values().any(|action| { + matches!(action, EnsureMupdateOverrideAction::BpSetOverride { .. }) + }) { + let current = self.blueprint.target_release_minimum_generation(); + let new = self.input.tuf_repo().target_release_generation.next(); + if current == new { + // No change needed. 
+ info!( + log, + "would have updated target release minimum generation, but \ + it was already set to the desired value, so no change was \ + needed"; + "generation" => %current, + ); + } else { + if current < new { + info!( + log, + "updating target release minimum generation based on \ + new set-override actions"; + "current_generation" => %current, + "new_generation" => %new, + ); + } else { + // It would be very strange for the current value to be + // greater than the new value. That would indicate something + // like a row being removed from the target release + // generation table -- one of the invariants of the target + // release generation is that it only moves forward. + // + // In this case we bail out of planning entirely. + return Err( + Error::TargetReleaseMinimumGenerationRollback { + current, + new, + }, + ); + } + self.blueprint + .set_target_release_minimum_generation(current, new) + .expect("current value passed in => can't fail"); + } + } + + // Now we need to determine whether to also perform other actions like + // updating or adding zones. We have to be careful here: + // + // * We may have moved existing zones with an Artifact source to using + // the install dataset via the BpSetOverride action, but we don't want + // to use the install dataset on sleds that weren't MUPdated (because + // the install dataset might be ancient). + // + // * While any overrides are in place according to inventory, we wait + // for the system to recover and don't start new zones on *any* sleds, + // or perform any further updates. + // + // This condition is level-triggered on the following conditions: + // + // 1. If the planning input's target release generation is less than the + // minimum generation set in the blueprint, the operator hasn't set a + // new generation in the blueprint -- we should wait to decide what + // to do until the operator provides an indication. + // + // 2. 
If any sleds have a mupdate override set in the blueprint, then + // we're still recovering from a MUPdate. If that is the case, we + // don't want to add zones on *any* sled. + // + // This might seem overly conservative (why block zone additions on + // *all* sleds if *any* are currently recovering from a MUPdate?), + // but is probably correct for the medium term: we want to minimize + // the number of different versions of services running at any time. + // + // There's some potential to relax this in the future (e.g. by + // matching up the zone manifest with the target release to compute + // the number of versions running at a given time), but that's a + // non-trivial optimization that we should probably defer until we + // see its necessity. + // + // What does "any sleds" mean in this context? We don't need to care + // about decommissioned or expunged sleds, so we consider in-service + // sleds. + let mut reasons = Vec::new(); + + // Condition 1 above. + if self.blueprint.target_release_minimum_generation() + > self.input.tuf_repo().target_release_generation + { + reasons.push(format!( + "current target release generation ({}) is lower than \ + minimum required by blueprint ({})", + self.input.tuf_repo().target_release_generation, + self.blueprint.target_release_minimum_generation(), + )); + } + + // Condition 2 above. + { + let mut sleds_with_override = BTreeSet::new(); + for sled_id in self.input.all_sled_ids(SledFilter::InService) { + if self + .blueprint + .sled_get_remove_mupdate_override(sled_id)? 
+ .is_some() + { + sleds_with_override.insert(sled_id); + } + } + + if !sleds_with_override.is_empty() { + reasons.push(format!( + "sleds have remove mupdate override set in blueprint: {}", + sleds_with_override.iter().join(", ") + )); + } + } + + if !reasons.is_empty() { + let reasons = reasons.join("; "); + info!( + log, + "not ready to add or update new zones yet"; + "reasons" => reasons, + ); + Ok(UpdateStepResult::Waiting) + } else { + Ok(UpdateStepResult::ContinueToNextStep) + } + } + fn do_plan_cockroachdb_settings(&mut self) { // Figure out what we should set the CockroachDB "preserve downgrade // option" setting to based on the planning input. diff --git a/nexus/reconfigurator/planning/src/planner/image_source.rs b/nexus/reconfigurator/planning/src/planner/image_source.rs index e4096c68a7..d0fb697ce6 100644 --- a/nexus/reconfigurator/planning/src/planner/image_source.rs +++ b/nexus/reconfigurator/planning/src/planner/image_source.rs @@ -4,7 +4,8 @@ use std::{collections::HashMap, fmt}; -use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; +use anyhow::anyhow; +use iddqd::{IdOrdItem, IdOrdMap, id_ord_map::RefMut, id_upcast}; use nexus_sled_agent_shared::inventory::{ZoneKind, ZoneManifestBootInventory}; use nexus_types::{ deployment::{ @@ -145,9 +146,41 @@ impl NoopConvertInfo { } } } + + /// Return a mutable reference to [`NoopConvertSledInfo`] for the given + /// sled. + /// + /// Returns `Err(Error::Planner)` if the sled ID wasn't found. 
+ pub(crate) fn sled_info_mut( + &mut self, + sled_id: SledUuid, + ) -> Result, Error> { + match self { + Self::GlobalIneligible(_) => { + Ok(NoopConvertSledInfoMut::GlobalIneligible( + NoopConvertGlobalIneligibleReason::NoTargetRelease, + )) + } + Self::GlobalEligible { sleds } => { + let Some(sled_info) = sleds.get_mut(&sled_id) else { + return Err(Error::Planner(anyhow!( + "tried to get noop convert zone info \ + for unknown sled {sled_id}" + ))); + }; + Ok(NoopConvertSledInfoMut::Ok(sled_info)) + } + } + } } -#[derive(Clone, Debug)] +#[derive(Debug)] +pub(crate) enum NoopConvertSledInfoMut<'a> { + Ok(RefMut<'a, NoopConvertSledInfo>), + GlobalIneligible(NoopConvertGlobalIneligibleReason), +} + +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum NoopConvertGlobalIneligibleReason { /// No target release was set. NoTargetRelease, @@ -254,7 +287,7 @@ impl NoopConvertSledEligible { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct NoopConvertZoneCounts { pub(crate) num_total: usize, pub(crate) num_already_artifact: usize, @@ -263,7 +296,7 @@ pub(crate) struct NoopConvertZoneCounts { } impl NoopConvertZoneCounts { - fn new(zones: &IdOrdMap) -> Self { + pub(crate) fn new(zones: &IdOrdMap) -> Self { let mut num_already_artifact = 0; let mut num_eligible = 0; let mut num_ineligible = 0; @@ -295,7 +328,7 @@ impl NoopConvertZoneCounts { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum NoopConvertSledIneligibleReason { /// This sled is missing from inventory. NotInInventory, @@ -314,7 +347,6 @@ pub(crate) enum NoopConvertSledIneligibleReason { /// If the mupdate override is changed, a sled can transition from /// ineligible to eligible, or vice versa. We build and retain the zone /// map for easy state transitions. 
- #[expect(unused)] zones: IdOrdMap, }, } @@ -337,7 +369,7 @@ impl fmt::Display for NoopConvertSledIneligibleReason { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) struct NoopConvertZoneInfo { pub(crate) zone_id: OmicronZoneUuid, pub(crate) kind: ZoneKind, @@ -492,14 +524,14 @@ impl IdOrdItem for NoopConvertZoneInfo { id_upcast!(); } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum NoopConvertZoneStatus { AlreadyArtifact { version: BlueprintArtifactVersion, hash: ArtifactHash }, Ineligible(NoopConvertZoneIneligibleReason), Eligible(BlueprintZoneImageSource), } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Eq, PartialEq)] pub(crate) enum NoopConvertZoneIneligibleReason { NotInManifest, ArtifactError { message: String }, diff --git a/nexus/reconfigurator/planning/src/system.rs b/nexus/reconfigurator/planning/src/system.rs index fef4bde6a2..dbae19b312 100644 --- a/nexus/reconfigurator/planning/src/system.rs +++ b/nexus/reconfigurator/planning/src/system.rs @@ -21,6 +21,7 @@ use nexus_sled_agent_shared::inventory::Inventory; use nexus_sled_agent_shared::inventory::InventoryDataset; use nexus_sled_agent_shared::inventory::InventoryDisk; use nexus_sled_agent_shared::inventory::InventoryZpool; +use nexus_sled_agent_shared::inventory::MupdateOverrideBootInventory; use nexus_sled_agent_shared::inventory::OmicronSledConfig; use nexus_sled_agent_shared::inventory::SledRole; use nexus_sled_agent_shared::inventory::ZoneImageResolverInventory; @@ -59,12 +60,14 @@ use omicron_common::disk::DiskIdentity; use omicron_common::disk::DiskVariant; use omicron_common::policy::INTERNAL_DNS_REDUNDANCY; use omicron_common::policy::NEXUS_REDUNDANCY; +use omicron_uuid_kinds::MupdateOverrideUuid; use omicron_uuid_kinds::SledUuid; use omicron_uuid_kinds::ZpoolUuid; use std::collections::BTreeMap; use std::collections::BTreeSet; use std::fmt; use std::fmt::Debug; +use std::mem; use std::net::Ipv4Addr; use std::net::Ipv6Addr; 
use std::sync::Arc; @@ -535,6 +538,36 @@ impl SystemDescription { Ok(sled.sp_inactive_caboose().map(|c| c.version.as_ref())) } + /// Set a sled's mupdate override field. + /// + /// Returns the previous value, or previous error if set. + pub fn sled_set_mupdate_override( + &mut self, + sled_id: SledUuid, + mupdate_override: Option, + ) -> anyhow::Result, String>> { + let sled = self.sleds.get_mut(&sled_id).with_context(|| { + format!("attempted to access sled {} not found in system", sled_id) + })?; + let sled = Arc::make_mut(sled); + Ok(sled.set_mupdate_override(Ok(mupdate_override))) + } + + /// Set a sled's mupdate override field to an error. + /// + /// Returns the previous value, or previous error if set. + pub fn sled_set_mupdate_override_error( + &mut self, + sled_id: SledUuid, + message: String, + ) -> anyhow::Result, String>> { + let sled = self.sleds.get_mut(&sled_id).with_context(|| { + format!("attempted to access sled {} not found in system", sled_id) + })?; + let sled = Arc::make_mut(sled); + Ok(sled.set_mupdate_override(Err(message))) + } + pub fn set_tuf_repo(&mut self, tuf_repo: TufRepoPolicy) { self.tuf_repo = tuf_repo; } @@ -1222,6 +1255,30 @@ impl Sled { sign: None, } } + + /// Set the mupdate override field for a sled, returning the previous value. + fn set_mupdate_override( + &mut self, + mupdate_override_id: Result, String>, + ) -> Result, String> { + // We don't alter the non-boot override because it's not used in this process. + let inv = match mupdate_override_id { + Ok(Some(id)) => Ok(Some(MupdateOverrideBootInventory { + mupdate_override_id: id, + })), + Ok(None) => Ok(None), + Err(message) => Err(message), + }; + let prev = mem::replace( + &mut self + .inventory_sled_agent + .zone_image_resolver + .mupdate_override + .boot_override, + inv, + ); + prev.map(|prev| prev.map(|prev| prev.mupdate_override_id)) + } } /// The visibility of a sled in the inventory. 
diff --git a/nexus/types/src/deployment.rs b/nexus/types/src/deployment.rs index b6e46b26a1..e6768c5b93 100644 --- a/nexus/types/src/deployment.rs +++ b/nexus/types/src/deployment.rs @@ -22,6 +22,8 @@ use blueprint_display::BpDatasetsTableSchema; use blueprint_display::BpHostPhase2TableSchema; use blueprint_display::BpTableColumn; use daft::Diffable; +use id_map::Entry; +use id_map::RefMut; use nexus_sled_agent_shared::inventory::HostPhase2DesiredContents; use nexus_sled_agent_shared::inventory::HostPhase2DesiredSlots; use nexus_sled_agent_shared::inventory::OmicronSledConfig; @@ -1282,20 +1284,32 @@ impl PendingMgsUpdates { self.by_baseboard.is_empty() } - pub fn contains_key(&self, key: &Arc) -> bool { + pub fn contains_key(&self, key: &BaseboardId) -> bool { self.by_baseboard.contains_key(key) } - pub fn get( - &self, - baseboard_id: &Arc, - ) -> Option<&PendingMgsUpdate> { + pub fn get(&self, baseboard_id: &BaseboardId) -> Option<&PendingMgsUpdate> { self.by_baseboard.get(baseboard_id) } + pub fn get_mut( + &mut self, + baseboard_id: &BaseboardId, + ) -> Option> { + self.by_baseboard.get_mut(baseboard_id) + } + + pub fn entry( + &mut self, + // TODO: simplify down to &BaseboardId + baseboard_id: Arc, + ) -> Entry<'_, PendingMgsUpdate> { + self.by_baseboard.entry(baseboard_id) + } + pub fn remove( &mut self, - baseboard_id: &Arc, + baseboard_id: &BaseboardId, ) -> Option { self.by_baseboard.remove(baseboard_id) }