diff --git a/Cargo.lock b/Cargo.lock index 0a28ac69b4f..9860cea13b2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6655,6 +6655,7 @@ dependencies = [ "expectorate", "gateway-client", "id-map", + "iddqd", "illumos-utils", "indexmap 2.10.0", "internal-dns-resolver", diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout index 81c483cd9b3..4feccb11e4c 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-add-sled-no-disks-stdout @@ -36,7 +36,7 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > # Try to plan a new blueprint; this should be okay even though the sled > # we added has no disks. > blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 -INFO skipping noop image source check for all sleds (no current TUF repo) +INFO skipping noop image source check for all sleds, reason: no target release is currently set INFO skipping sled (no zpools in service), sled_id: 00320471-945d-413c-85e7-03e091a70b3c INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout index 8739cd3d14e..4b2c4f5e167 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-example-stdout @@ -525,7 +525,7 @@ T ENA ID PARENT * yes ade5749d-bdf3-4fab-a8ae-00bea01b3a5a 02697f74-b14a-4418-90f0-c28b2a3a6aa9 > blueprint-plan ade5749d-bdf3-4fab-a8ae-00bea01b3a5a -INFO skipping noop image source check for all sleds (no current TUF repo) +INFO skipping noop image source check for all sleds, reason: no target release is currently set INFO found sled missing NTP zone (will add one), sled_id: 89d02b1b-478c-401a-8e28-7a26f74fa41b INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 WARN failed to place all new desired Clickhouse zones, placed: 0, wanted_to_place: 1 @@ -1017,7 +1017,7 @@ parent: 02697f74-b14a-4418-90f0-c28b2a3a6aa9 > # Plan a blueprint run -- this will cause zones and disks on the expunged > # sled to be expunged. > blueprint-plan latest -INFO skipping noop image source check for all sleds (no current TUF repo) +INFO skipping noop image source check for all sleds, reason: no target release is currently set INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout index 2ec5f47662f..a1876121bed 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-external-dns-stdout @@ -1025,7 +1025,7 @@ parent: 3f00b694-1b16-4aaa-8f78-e6b3a527b434 > # blueprint-plan will place a new external DNS zone, diff DNS to see the new zone has `ns` and NS records. 
> blueprint-plan 366b0b68-d80e-4bc1-abd3-dc69837847e0 -INFO skipping noop image source check for all sleds (no current TUF repo) +INFO skipping noop image source check for all sleds, reason: no target release is currently set INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout index e6a3aebc720..ade6db21966 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-expunge-newly-added-internal-dns-stdout @@ -1042,7 +1042,7 @@ external DNS: > # Planning a new blueprint will now replace the expunged zone, with new records for its replacement. > blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 -INFO skipping noop image source check for all sleds (no current TUF repo) +INFO skipping noop image source check for all sleds, reason: no target release is currently set INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout index 5ccfb6f25f1..95a4c206dd7 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-noop-image-source-stdout @@ -156,24 +156,17 @@ generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configu > blueprint-plan latest latest WARN skipping zones eligible for cleanup check (sled not present in latest inventory collection), sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e -WARN skipping noop image source check since sled-agent encountered error retrieving zone manifest (this is abnormal), sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, error: reconfigurator-sim simulated error: simulated error obtaining zone manifest -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: nexus v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: internal-dns v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-zone v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: ntp v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: external-dns v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, tuf_artifact_id: crucible-pantry-zone v1.0.0 (zone) 
-INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: nexus v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: external-dns v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: crucible-zone v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: internal-dns v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, tuf_artifact_id: crucible-pantry-zone v1.0.0 (zone) -WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, error: reconfigurator-sim: simulated error validating zone image -INFO noop converting 5/6 install-dataset zones to artifact store, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34 -INFO noop converting 0/2 install-dataset zones to artifact store, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7 -INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: ffffffff-ffff-ffff-ffff-ffffffffffff -INFO skipping noop image source check (sled not present in latest inventory collection), sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +WARN skipped noop image source check on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, reason: error retrieving zone manifest: reconfigurator-sim simulated error: simulated error obtaining zone manifest +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0, num_eligible: 6, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 0, num_eligible: 5, num_ineligible: 1 +WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, message: reconfigurator-sim: simulated error validating zone image +INFO performed noop image source checks on sled, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, num_total: 2, num_already_artifact: 0, num_eligible: 0, num_ineligible: 2 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, zone_id: b910534b-2a53-4335-a3d9-5311d2f3186a, kind: internal_ntp, file_name: ntp.tar.gz, expected_hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, zone_id: ecbe0b3d-1acc-44b2-b6d4-f4d2770516e4, kind: crucible, file_name: crucible.tar.gz, 
expected_hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e +INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (ffffffff-ffff-ffff-ffff-ffffffffffff) +INFO skipped noop image source check on sled, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, reason: sled not found in inventory +INFO noop converting 6/6 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 0 +INFO noop converting 5/6 install-dataset zones to artifact store, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 0 INFO parent blueprint contains NTP zone, but it's not in inventory yet, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 @@ -535,15 +528,16 @@ set sled e96e226f-4ed9-4c01-91b9-69a9cd076c9e inventory visibility: hidden -> vi generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan latest latest -WARN skipping noop image source check since sled-agent encountered error retrieving zone manifest (this is abnormal), sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, error: reconfigurator-sim simulated error: simulated error obtaining zone manifest -INFO noop converting 0/0 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, error: reconfigurator-sim: simulated error validating zone image -INFO noop converting 0/1 install-dataset zones to artifact store, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34 -INFO noop converting 0/2 install-dataset zones to artifact store, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7 -INFO skipping noop image source check on sled (blueprint has get_remove_mupdate_override set for sled), sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, bp_remove_mupdate_override_id: ffffffff-ffff-ffff-ffff-ffffffffffff -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, tuf_artifact_id: crucible-zone v1.0.0 (zone) -INFO install dataset artifact hash matches TUF repo, switching out the zone image source to Artifact, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, tuf_artifact_id: ntp v1.0.0 (zone) -INFO noop converting 2/2 install-dataset zones to artifact store, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e +WARN skipped noop image source check on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, reason: error retrieving zone manifest: reconfigurator-sim simulated error: simulated error obtaining zone manifest +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 6, num_already_artifact: 6, num_eligible: 0, num_ineligible: 0 +INFO performed noop image source checks on sled, sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, num_total: 6, num_already_artifact: 5, num_eligible: 0, num_ineligible: 1 +WARN zone manifest inventory indicated install dataset artifact is invalid, not using artifact (this is abnormal), sled_id: aff6c093-197d-42c5-ad80-9f10ba051a34, zone_id: 
e8fe709c-725f-4bb2-b714-ffcda13a9e54, kind: internal_ntp, file_name: ntp.tar.gz, message: reconfigurator-sim: simulated error validating zone image +INFO performed noop image source checks on sled, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, num_total: 2, num_already_artifact: 0, num_eligible: 0, num_ineligible: 2 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, zone_id: b910534b-2a53-4335-a3d9-5311d2f3186a, kind: internal_ntp, file_name: ntp.tar.gz, expected_hash: d76e26198daed69cdae04490d7477f8c842e0dbe37d463eac0d0a8d3fb803095 +INFO install dataset artifact hash not found in TUF repo, ignoring for noop checks, sled_id: b82ede02-399c-48c6-a1de-411df4fa49a7, zone_id: ecbe0b3d-1acc-44b2-b6d4-f4d2770516e4, kind: crucible, file_name: crucible.tar.gz, expected_hash: 866f6a7c2e51c056fb722b5113e80181cc9cd8b712a0d3dbf1edc4ce29e5229e +INFO skipped noop image source check on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, reason: remove_mupdate_override is set in the blueprint (ffffffff-ffff-ffff-ffff-ffffffffffff) +INFO performed noop image source checks on sled, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, num_total: 2, num_already_artifact: 0, num_eligible: 2, num_ineligible: 0 +INFO noop converting 2/2 install-dataset zones to artifact store, sled_id: e96e226f-4ed9-4c01-91b9-69a9cd076c9e, num_total: 2, num_already_artifact: 0 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout index 7b9d022724d..a994234a81a 100644 --- a/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout +++ b/dev-tools/reconfigurator-cli/tests/output/cmds-target-release-stdout @@ -192,9 +192,9 @@ f45ba181-4b56-42cc-a762-874d90184a43 0 > # First step: upgrade one SP. > blueprint-plan dbcbd3d6-41ff-48ae-ac0b-1becc9b2fd21 f45ba181-4b56-42cc-a762-874d90184a43 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -377,9 +377,9 @@ external DNS: > # If we generate another plan, there should be no change. 
> blueprint-plan 8da82a8e-bf97-4fbd-8ddd-9f6462732cf1 f45ba181-4b56-42cc-a762-874d90184a43 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -563,9 +563,9 @@ set sled 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 SP versions: active -> 1.0.0 generated inventory collection eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 from configured sleds > blueprint-plan 58d5e830-0884-47d8-a7cd-b2b3751adeb4 eb0796d5-ab8a-4f7b-a884-b4aeacb8ab51 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -759,9 +759,9 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: inactive -> 0.5.0 generated inventory collection 61f451b3-2121-4ed6-91c7-a550054f6c21 from configured sleds > blueprint-plan af934083-59b5-4bf6-8966-6fb5292c29e1 61f451b3-2121-4ed6-91c7-a550054f6c21 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO 
performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -953,9 +953,9 @@ set sled 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c SP versions: active -> 1.0.0 generated inventory collection b1bda47d-2c19-4fba-96e3-d9df28db7436 from configured sleds > blueprint-plan df06bb57-ad42-4431-9206-abff322896c7 b1bda47d-2c19-4fba-96e3-d9df28db7436 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 @@ -1149,9 +1149,9 @@ set sled d81c6a84-79b8-4958-ae41-ea46c9b19763 SP versions: active -> 1.0.0 generated inventory collection a71f7a73-35a6-45e8-acbe-f1c5925eed69 from configured sleds > blueprint-plan 7f976e0d-d2a5-4eeb-9e82-c82bc2824aba a71f7a73-35a6-45e8-acbe-f1c5925eed69 -INFO noop converting 0/9 install-dataset zones to artifact store, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6 -INFO noop converting 0/8 install-dataset zones to artifact store, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763 +INFO performed noop image source checks on sled, sled_id: 2b8f0cb3-0295-4b3c-bc58-4fe88b57112c, num_total: 9, num_already_artifact: 0, num_eligible: 0, num_ineligible: 9 +INFO performed noop image source checks on sled, sled_id: 98e6b7c2-2efa-41ca-b20a-0a4d61102fe6, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 +INFO performed noop image source checks on sled, sled_id: d81c6a84-79b8-4958-ae41-ea46c9b19763, num_total: 8, num_already_artifact: 0, num_eligible: 0, num_ineligible: 8 INFO sufficient BoundaryNtp zones exist in plan, desired_count: 0, current_count: 0 INFO sufficient Clickhouse zones exist in plan, desired_count: 1, current_count: 1 INFO sufficient ClickhouseKeeper zones exist in plan, desired_count: 0, current_count: 0 diff --git a/nexus/reconfigurator/planning/Cargo.toml b/nexus/reconfigurator/planning/Cargo.toml index 8a05a9ca8c8..6d4725b6706 100644 --- a/nexus/reconfigurator/planning/Cargo.toml +++ b/nexus/reconfigurator/planning/Cargo.toml @@ -13,6 +13,7 @@ chrono.workspace = true debug-ignore.workspace = true daft.workspace = true gateway-client.workspace = true 
+iddqd.workspace = true id-map.workspace = true illumos-utils.workspace = true indexmap.workspace = true diff --git a/nexus/reconfigurator/planning/src/planner.rs b/nexus/reconfigurator/planning/src/planner.rs index b7805b1fc68..9b13b6323b5 100644 --- a/nexus/reconfigurator/planning/src/planner.rs +++ b/nexus/reconfigurator/planning/src/planner.rs @@ -14,6 +14,9 @@ use crate::blueprint_builder::Operation; use crate::blueprint_editor::DisksEditError; use crate::blueprint_editor::SledEditError; use crate::mgs_updates::plan_mgs_updates; +use crate::planner::image_source::NoopConvertInfo; +use crate::planner::image_source::NoopConvertSledStatus; +use crate::planner::image_source::NoopConvertZoneStatus; use crate::planner::omicron_zone_placement::PlacementError; use gateway_client::types::SpType; use nexus_sled_agent_shared::inventory::ConfigReconcilerInventoryResult; @@ -32,7 +35,6 @@ use nexus_types::deployment::DiskFilter; use nexus_types::deployment::PlanningInput; use nexus_types::deployment::SledDetails; use nexus_types::deployment::SledFilter; -use nexus_types::deployment::TargetReleaseDescription; use nexus_types::deployment::TufRepoContentsError; use nexus_types::deployment::ZpoolFilter; use nexus_types::external_api::views::PhysicalDiskPolicy; @@ -50,7 +52,6 @@ use slog::{Logger, info, warn}; use slog_error_chain::InlineErrorChain; use std::collections::BTreeMap; use std::collections::BTreeSet; -use std::collections::HashMap; use std::str::FromStr; pub(crate) use self::omicron_zone_placement::DiscretionaryOmicronZone; @@ -59,6 +60,7 @@ use self::omicron_zone_placement::OmicronZonePlacementSledState; pub use self::rng::PlannerRng; pub use self::rng::SledPlannerRng; +mod image_source; mod omicron_zone_placement; pub(crate) mod rng; @@ -157,7 +159,12 @@ impl<'a> Planner<'a> { fn do_plan(&mut self) -> Result<(), Error> { self.do_plan_expunge()?; self.do_plan_decommission()?; - self.do_plan_noop_image_source()?; + + let noop_info = + NoopConvertInfo::new(self.input, self.inventory, &self.blueprint)?; + noop_info.log_to(&self.log); + + self.do_plan_noop_image_source(noop_info)?; self.do_plan_add()?; if let UpdateStepResult::ContinueToNextStep = self.do_plan_mgs_updates() { @@ -504,177 +511,61 @@ impl<'a> Planner<'a> { Ok(()) } - fn do_plan_noop_image_source(&mut self) -> Result<(), Error> { - let TargetReleaseDescription::TufRepo(current_artifacts) = - self.input.tuf_repo().description() - else { - info!( - self.log, - "skipping noop image source check for all sleds \ - (no current TUF repo)", - ); - return Ok(()); + fn do_plan_noop_image_source( + &mut self, + noop_info: NoopConvertInfo, + ) -> Result<(), Error> { + let sleds = match noop_info { + NoopConvertInfo::GlobalEligible { sleds } => sleds, + NoopConvertInfo::GlobalIneligible { .. 
} => return Ok(()), }; - let artifacts_by_hash: HashMap<_, _> = current_artifacts - .artifacts - .iter() - .map(|artifact| (artifact.hash, artifact)) - .collect(); + for sled in sleds { + let eligible = match &sled.status { + NoopConvertSledStatus::Ineligible(_) => continue, + NoopConvertSledStatus::Eligible(eligible) => eligible, + }; - for sled_id in self.input.all_sled_ids(SledFilter::InService) { - let Some(inv_sled) = self.inventory.sled_agents.get(&sled_id) - else { - info!( + let zone_counts = eligible.zone_counts(); + if zone_counts.num_install_dataset() == 0 { + debug!( self.log, - "skipping noop image source check \ - (sled not present in latest inventory collection)"; - "sled_id" => %sled_id, + "all zones are already Artifact, so \ + no noop image source action required"; + "num_total" => zone_counts.num_total, ); continue; - }; - - let zone_manifest = match &inv_sled - .zone_image_resolver - .zone_manifest - .boot_inventory - { - Ok(zm) => zm, - Err(message) => { - // This is a string so we don't use InlineErrorChain::new. - let message: &str = message; - warn!( - self.log, - "skipping noop image source check since \ - sled-agent encountered error retrieving zone manifest \ - (this is abnormal)"; - "sled_id" => %sled_id, - "error" => %message, - ); - continue; - } - }; - - // Does the blueprint have the remove_mupdate_override field set for - // this sled? If it does, we don't want to touch the zones on this - // sled (they should all be InstallDataset until the - // remove_mupdate_override field is cleared). - if let Some(id) = - self.blueprint.sled_get_remove_mupdate_override(sled_id)? - { + } + if zone_counts.num_eligible > 0 { info!( self.log, - "skipping noop image source check on sled \ - (blueprint has get_remove_mupdate_override set for sled)"; - "sled_id" => %sled_id, - "bp_remove_mupdate_override_id" => %id, + "noop converting {}/{} install-dataset zones to artifact store", + zone_counts.num_eligible, + zone_counts.num_install_dataset(); + "sled_id" => %sled.sled_id, + "num_total" => zone_counts.num_total, + "num_already_artifact" => zone_counts.num_already_artifact, ); - continue; } - // Which zones have image sources set to InstallDataset? - let install_dataset_zones = self - .blueprint - .current_sled_zones( - sled_id, - BlueprintZoneDisposition::is_in_service, - ) - .filter(|z| { - z.image_source == BlueprintZoneImageSource::InstallDataset - }); - - // Out of these, which zones' hashes (as reported in the zone - // manifest) match the corresponding ones in the TUF repo? - let mut install_dataset_zone_count = 0; - let matching_zones: Vec<_> = install_dataset_zones - .inspect(|_| { - install_dataset_zone_count += 1; - }) - .filter_map(|z| { - let file_name = z.kind().artifact_in_install_dataset(); - let Some(artifact) = zone_manifest.artifacts.get(file_name) - else { - // The blueprint indicates that a zone should be present - // that isn't in the install dataset. This might be an old - // install dataset with a zone kind known to this version of - // Nexus that isn't present in it. Not normally a cause for - // concern. - debug!( - self.log, - "blueprint zone not found in zone manifest, \ - ignoring for noop checks"; - "sled_id" => %sled_id, - "zone_id" => %z.id, - "kind" => z.kind().report_str(), - "file_name" => file_name, - ); - return None; - }; - if let Err(message) = &artifact.status { - // The artifact is somehow invalid and corrupt -- definitely - // something to warn about and not proceed. 
- warn!( - self.log, - "zone manifest inventory indicated install dataset \ - artifact is invalid, not using artifact (this is \ - abnormal)"; - "sled_id" => %sled_id, - "zone_id" => %z.id, - "kind" => z.kind().report_str(), - "file_name" => file_name, - "error" => %message, - ); - return None; + for zone in &eligible.zones { + match &zone.status { + NoopConvertZoneStatus::Eligible(new_image_source) => { + self.blueprint.sled_set_zone_source( + sled.sled_id, + zone.zone_id, + new_image_source.clone(), + )?; } + NoopConvertZoneStatus::AlreadyArtifact { .. } + | NoopConvertZoneStatus::Ineligible(_) => {} + } + } - // Does the hash match what's in the TUF repo? - let Some(tuf_artifact) = - artifacts_by_hash.get(&artifact.expected_hash) - else { - debug!( - self.log, - "install dataset artifact hash not found in TUF repo, \ - ignoring for noop checks"; - "sled_id" => %sled_id, - "zone_id" => %z.id, - "kind" => z.kind().report_str(), - "file_name" => file_name, - ); - return None; - }; - - info!( - self.log, - "install dataset artifact hash matches TUF repo, \ - switching out the zone image source to Artifact"; - "sled_id" => %sled_id, - "tuf_artifact_id" => %tuf_artifact.id, - ); - Some((z.id, tuf_artifact)) - }) - .collect(); - - info!( - self.log, - "noop converting {}/{} install-dataset zones to artifact store", - matching_zones.len(), - install_dataset_zone_count; - "sled_id" => %sled_id, - ); - - // Set all these zones' image sources to the corresponding - // blueprint. - for (zone_id, tuf_artifact) in &matching_zones { - self.blueprint.sled_set_zone_source( - sled_id, - *zone_id, - BlueprintZoneImageSource::from_available_artifact( - tuf_artifact, - ), - )?; + if zone_counts.num_eligible > 0 { self.blueprint.record_operation( Operation::SledNoopZoneImageSourcesUpdated { - sled_id, - count: matching_zones.len(), + sled_id: sled.sled_id, + count: zone_counts.num_eligible, }, ); } diff --git a/nexus/reconfigurator/planning/src/planner/image_source.rs b/nexus/reconfigurator/planning/src/planner/image_source.rs new file mode 100644 index 00000000000..e4096c68a79 --- /dev/null +++ b/nexus/reconfigurator/planning/src/planner/image_source.rs @@ -0,0 +1,507 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use std::{collections::HashMap, fmt}; + +use iddqd::{IdOrdItem, IdOrdMap, id_upcast}; +use nexus_sled_agent_shared::inventory::{ZoneKind, ZoneManifestBootInventory}; +use nexus_types::{ + deployment::{ + BlueprintArtifactVersion, BlueprintZoneConfig, + BlueprintZoneDisposition, BlueprintZoneImageSource, PlanningInput, + SledFilter, TargetReleaseDescription, + }, + inventory::Collection, +}; +use omicron_common::api::external::TufArtifactMeta; +use omicron_uuid_kinds::{MupdateOverrideUuid, OmicronZoneUuid, SledUuid}; +use slog::{debug, info, o, warn}; +use tufaceous_artifact::ArtifactHash; + +use crate::blueprint_builder::{BlueprintBuilder, Error}; + +/// Information about zones eligible for noop conversion from `InstallDataset` +/// to `Artifact`. +#[derive(Clone, Debug)] +pub(crate) enum NoopConvertInfo { + /// There's a global reason due to which no-op conversions cannot occur. + GlobalIneligible(NoopConvertGlobalIneligibleReason), + + /// Global checks have passed. 
+    GlobalEligible { sleds: IdOrdMap<NoopConvertSledInfo> },
+}
+
+impl NoopConvertInfo {
+    pub(crate) fn new(
+        input: &PlanningInput,
+        inventory: &Collection,
+        blueprint: &BlueprintBuilder<'_>,
+    ) -> Result<Self, Error> {
+        let TargetReleaseDescription::TufRepo(current_artifacts) =
+            input.tuf_repo().description()
+        else {
+            return Ok(Self::GlobalIneligible(
+                NoopConvertGlobalIneligibleReason::NoTargetRelease,
+            ));
+        };
+
+        let mut sleds = IdOrdMap::new();
+
+        let artifacts_by_hash: HashMap<_, _> = current_artifacts
+            .artifacts
+            .iter()
+            .map(|artifact| (artifact.hash, artifact))
+            .collect();
+
+        for sled_id in input.all_sled_ids(SledFilter::InService) {
+            let Some(inv_sled) = inventory.sled_agents.get(&sled_id) else {
+                sleds
+                    .insert_unique(NoopConvertSledInfo {
+                        sled_id,
+                        status: NoopConvertSledStatus::Ineligible(
+                            NoopConvertSledIneligibleReason::NotInInventory,
+                        ),
+                    })
+                    .expect("sled IDs are unique");
+                continue;
+            };
+
+            let zone_manifest = match &inv_sled
+                .zone_image_resolver
+                .zone_manifest
+                .boot_inventory
+            {
+                Ok(zm) => zm,
+                Err(message) => {
+                    sleds
+                        .insert_unique(NoopConvertSledInfo {
+                            sled_id,
+                            status: NoopConvertSledStatus::Ineligible(
+                                NoopConvertSledIneligibleReason::ManifestError {
+                                    message: message.to_owned(),
+                                },
+                            ),
+                        })
+                        .expect("sled IDs are unique");
+                    continue;
+                }
+            };
+
+            // Which zones' hashes (as reported in the zone manifest) match
+            // the corresponding ones in the TUF repo?
+            let zones = blueprint
+                .current_sled_zones(
+                    sled_id,
+                    BlueprintZoneDisposition::is_in_service,
+                )
+                .map(|zone| {
+                    NoopConvertZoneInfo::new(
+                        zone,
+                        zone_manifest,
+                        &artifacts_by_hash,
+                    )
+                })
+                .collect();
+
+            let status = if let Some(mupdate_override_id) =
+                blueprint.sled_get_remove_mupdate_override(sled_id)?
+            {
+                NoopConvertSledStatus::Ineligible(
+                    NoopConvertSledIneligibleReason::MupdateOverride {
+                        mupdate_override_id,
+                        zones,
+                    },
+                )
+            } else {
+                NoopConvertSledStatus::Eligible(NoopConvertSledEligible {
+                    zones,
+                })
+            };
+
+            sleds
+                .insert_unique(NoopConvertSledInfo { sled_id, status })
+                .expect("sled IDs are unique");
+        }
+
+        Ok(Self::GlobalEligible { sleds })
+    }
+
+    pub(crate) fn log_to(&self, log: &slog::Logger) {
+        match self {
+            Self::GlobalIneligible(reason) => {
+                info!(
+                    log,
+                    "skipping noop image source check for all sleds";
+                    "reason" => %reason,
+                );
+            }
+            Self::GlobalEligible { sleds } => {
+                for sled in sleds {
+                    let log =
+                        log.new(o!("sled_id" => sled.sled_id.to_string()));
+                    sled.status.log_to(&log);
+                }
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum NoopConvertGlobalIneligibleReason {
+    /// No target release was set.
+    NoTargetRelease,
+}
+
+impl fmt::Display for NoopConvertGlobalIneligibleReason {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::NoTargetRelease => {
+                write!(f, "no target release is currently set")
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct NoopConvertSledInfo {
+    pub(crate) sled_id: SledUuid,
+    pub(crate) status: NoopConvertSledStatus,
+}
+
+impl IdOrdItem for NoopConvertSledInfo {
+    type Key<'a> = SledUuid;
+
+    fn key(&self) -> Self::Key<'_> {
+        self.sled_id
+    }
+
+    id_upcast!();
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum NoopConvertSledStatus {
+    /// The sled is ineligible for conversion.
+    Ineligible(NoopConvertSledIneligibleReason),
+
+    /// The sled is eligible for conversion.
+    Eligible(NoopConvertSledEligible),
+}
+
+impl NoopConvertSledStatus {
+    fn log_to(&self, log: &slog::Logger) {
+        match self {
+            Self::Ineligible(reason) => {
+                // The slog macros require that the log level is determined at
+                // compile time, but we want the different enum variants here to
+                // be logged at different levels. Hence this mess.
+                match reason {
+                    NoopConvertSledIneligibleReason::NotInInventory
+                    | NoopConvertSledIneligibleReason::MupdateOverride {
+                        ..
+                    } => {
+                        info!(
+                            log,
+                            "skipped noop image source check on sled";
+                            "reason" => %reason,
+                        )
+                    }
+                    NoopConvertSledIneligibleReason::ManifestError {
+                        ..
+                    } => {
+                        warn!(
+                            log,
+                            "skipped noop image source check on sled";
+                            "reason" => %reason,
+                        )
+                    }
+                }
+            }
+            Self::Eligible(sled) => {
+                sled.log_to(log);
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct NoopConvertSledEligible {
+    pub(crate) zones: IdOrdMap<NoopConvertZoneInfo>,
+}
+
+impl NoopConvertSledEligible {
+    pub(crate) fn zone_counts(&self) -> NoopConvertZoneCounts {
+        NoopConvertZoneCounts::new(&self.zones)
+    }
+
+    fn log_to(&self, log: &slog::Logger) {
+        let zone_counts = self.zone_counts();
+
+        info!(
+            log,
+            "performed noop image source checks on sled";
+            "num_total" => zone_counts.num_total,
+            "num_already_artifact" => zone_counts.num_already_artifact,
+            // Since mupdate_override_id is None, maybe-eligible zones are
+            // truly eligible.
+            "num_eligible" => zone_counts.num_eligible,
+            "num_ineligible" => zone_counts.num_ineligible,
+        );
+
+        for zone in &self.zones {
+            zone.log_to(log);
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct NoopConvertZoneCounts {
+    pub(crate) num_total: usize,
+    pub(crate) num_already_artifact: usize,
+    pub(crate) num_eligible: usize,
+    pub(crate) num_ineligible: usize,
+}
+
+impl NoopConvertZoneCounts {
+    fn new(zones: &IdOrdMap<NoopConvertZoneInfo>) -> Self {
+        let mut num_already_artifact = 0;
+        let mut num_eligible = 0;
+        let mut num_ineligible = 0;
+
+        for zone in zones {
+            match &zone.status {
+                NoopConvertZoneStatus::AlreadyArtifact { .. } => {
+                    num_already_artifact += 1;
+                }
+                NoopConvertZoneStatus::Eligible(_) => {
+                    num_eligible += 1;
+                }
+                NoopConvertZoneStatus::Ineligible(_) => {
+                    num_ineligible += 1;
+                }
+            }
+        }
+
+        Self {
+            num_total: zones.len(),
+            num_already_artifact,
+            num_eligible,
+            num_ineligible,
+        }
+    }
+
+    pub(crate) fn num_install_dataset(&self) -> usize {
+        self.num_eligible + self.num_ineligible
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum NoopConvertSledIneligibleReason {
+    /// This sled is missing from inventory.
+    NotInInventory,
+
+    /// An error occurred retrieving the sled's install dataset zone manifest.
+    ManifestError { message: String },
+
+    /// The `remove_mupdate_override` field is set for this sled in the
+    /// blueprint.
+    MupdateOverride {
+        /// The override ID.
+        mupdate_override_id: MupdateOverrideUuid,
+
+        /// Information about zones.
+        ///
+        /// If the mupdate override is changed, a sled can transition from
+        /// ineligible to eligible, or vice versa. We build and retain the zone
+        /// map for easy state transitions.
+        #[expect(unused)]
+        zones: IdOrdMap<NoopConvertZoneInfo>,
+    },
+}
+
+impl fmt::Display for NoopConvertSledIneligibleReason {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::NotInInventory => write!(f, "sled not found in inventory"),
+            Self::ManifestError { message } => {
+                write!(f, "error retrieving zone manifest: {}", message)
+            }
+            Self::MupdateOverride { mupdate_override_id, .. } => {
+                write!(
+                    f,
+                    "remove_mupdate_override is set in the blueprint \
+                     ({mupdate_override_id})",
+                )
+            }
+        }
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct NoopConvertZoneInfo {
+    pub(crate) zone_id: OmicronZoneUuid,
+    pub(crate) kind: ZoneKind,
+    pub(crate) status: NoopConvertZoneStatus,
+}
+
+impl NoopConvertZoneInfo {
+    fn new(
+        zone: &BlueprintZoneConfig,
+        zone_manifest: &ZoneManifestBootInventory,
+        artifacts_by_hash: &HashMap<ArtifactHash, &TufArtifactMeta>,
+    ) -> Self {
+        let file_name = zone.kind().artifact_in_install_dataset();
+
+        match &zone.image_source {
+            BlueprintZoneImageSource::InstallDataset => {}
+            BlueprintZoneImageSource::Artifact { version, hash } => {
+                return NoopConvertZoneInfo {
+                    zone_id: zone.id,
+                    kind: zone.kind(),
+                    status: NoopConvertZoneStatus::AlreadyArtifact {
+                        version: version.clone(),
+                        hash: *hash,
+                    },
+                };
+            }
+        }
+
+        let Some(artifact) = zone_manifest.artifacts.get(file_name) else {
+            return NoopConvertZoneInfo {
+                zone_id: zone.id,
+                kind: zone.kind(),
+                status: NoopConvertZoneStatus::Ineligible(
+                    NoopConvertZoneIneligibleReason::NotInManifest,
+                ),
+            };
+        };
+        if let Err(message) = &artifact.status {
+            // The artifact is somehow invalid and corrupt.
+            return NoopConvertZoneInfo {
+                zone_id: zone.id,
+                kind: zone.kind(),
+                status: NoopConvertZoneStatus::Ineligible(
+                    NoopConvertZoneIneligibleReason::ArtifactError {
+                        message: message.to_owned(),
+                    },
+                ),
+            };
+        }
+
+        // Does the hash match what's in the TUF repo?
+        let Some(&tuf_artifact) =
+            artifacts_by_hash.get(&artifact.expected_hash)
+        else {
+            return NoopConvertZoneInfo {
+                zone_id: zone.id,
+                kind: zone.kind(),
+                status: NoopConvertZoneStatus::Ineligible(
+                    NoopConvertZoneIneligibleReason::NotInTufRepo {
+                        expected_hash: artifact.expected_hash,
+                    },
+                ),
+            };
+        };
+
+        NoopConvertZoneInfo {
+            zone_id: zone.id,
+            kind: zone.kind(),
+            status: NoopConvertZoneStatus::Eligible(
+                BlueprintZoneImageSource::from_available_artifact(tuf_artifact),
+            ),
+        }
+    }
+
+    fn log_to(&self, log: &slog::Logger) {
+        let log = log.new(o!(
+            "zone_id" => self.zone_id.to_string(),
+            "kind" => self.kind.report_str(),
+            "file_name" => self.kind.artifact_in_install_dataset(),
+        ));
+        match &self.status {
+            NoopConvertZoneStatus::AlreadyArtifact { version, hash } => {
+                // Use debug to avoid spamming reconfigurator-cli output for
+                // this generally expected case.
+                debug!(
+                    log,
+                    "zone has its image source set to Artifact already";
+                    "version" => %version,
+                    "hash" => %hash,
+                );
+            }
+            NoopConvertZoneStatus::Eligible(new_image_source) => {
+                debug!(
+                    log,
+                    "zone may be eligible for noop image source conversion";
+                    "new_image_source" => %new_image_source,
+                );
+            }
+            NoopConvertZoneStatus::Ineligible(
+                NoopConvertZoneIneligibleReason::NotInManifest,
+            ) => {
+                // This case shouldn't generally happen in production, but it
+                // can currently occur in the reconfigurator-cli, since our
+                // simulated systems don't have a zone manifest unless one has
+                // been explicitly initialized. Log this at the DEBUG level to
+                // avoid spamming reconfigurator-cli output.
+                debug!(
+                    log,
+                    "blueprint zone not found in zone manifest, \
+                     ignoring for noop checks (how is the zone set to \
+                     InstallDataset in the blueprint then?)",
+                );
+            }
+            NoopConvertZoneStatus::Ineligible(
+                NoopConvertZoneIneligibleReason::ArtifactError { message },
+            ) => {
+                warn!(
+                    log,
+                    "zone manifest inventory indicated install dataset \
+                     artifact is invalid, not using artifact (this is \
+                     abnormal)";
+                    "message" => %message,
+                );
+            }
+            NoopConvertZoneStatus::Ineligible(
+                NoopConvertZoneIneligibleReason::NotInTufRepo { expected_hash },
+            ) => {
+                // If a MUPdate happens, sleds should all be MUPdated to the
+                // same version, so the TUF repo is expected to contain all the
+                // hashes. The only time that isn't the case is right after a
+                // MUPdate when the TUF repo hasn't been uploaded yet. This
+                // isn't quite a warning or error case, so log this at the INFO
+                // level.
+                info!(
+                    log,
+                    "install dataset artifact hash not found in TUF repo, \
+                     ignoring for noop checks";
+                    "expected_hash" => %expected_hash,
+                );
+            }
+        }
+    }
+}
+
+impl IdOrdItem for NoopConvertZoneInfo {
+    type Key<'a> = OmicronZoneUuid;
+
+    fn key(&self) -> Self::Key<'_> {
+        self.zone_id
+    }
+
+    id_upcast!();
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum NoopConvertZoneStatus {
+    AlreadyArtifact { version: BlueprintArtifactVersion, hash: ArtifactHash },
+    Ineligible(NoopConvertZoneIneligibleReason),
+    Eligible(BlueprintZoneImageSource),
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum NoopConvertZoneIneligibleReason {
+    NotInManifest,
+    ArtifactError { message: String },
+    NotInTufRepo { expected_hash: ArtifactHash },
+}
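
The new `image_source.rs` module keys both its sled-level and zone-level records by ID using `IdOrdMap` from the newly added `iddqd` dependency. For readers unfamiliar with the crate, here is a minimal, self-contained sketch of the `IdOrdItem`/`insert_unique` pattern the module relies on; the `SledStatus` type and the plain `u32` key (standing in for `SledUuid`) are hypothetical:

```rust
use iddqd::{IdOrdItem, IdOrdMap, id_upcast};

// Hypothetical stand-in for the planner's per-sled record
// (`NoopConvertSledInfo` in the diff above).
#[derive(Clone, Debug)]
struct SledStatus {
    sled_id: u32, // stands in for `SledUuid`
    num_eligible: usize,
}

impl IdOrdItem for SledStatus {
    // Entries are ordered by this key type.
    type Key<'a> = u32;

    fn key(&self) -> Self::Key<'_> {
        self.sled_id
    }

    id_upcast!();
}

fn main() {
    let mut sleds = IdOrdMap::new();
    sleds
        .insert_unique(SledStatus { sled_id: 7, num_eligible: 5 })
        .expect("sled IDs are unique");

    // `insert_unique` rejects duplicate keys instead of silently
    // overwriting, which is why the planner can `.expect()` on it.
    assert!(
        sleds
            .insert_unique(SledStatus { sled_id: 7, num_eligible: 0 })
            .is_err()
    );

    // Iteration follows key order, keeping per-sled log output
    // deterministic across planning runs.
    for sled in &sleds {
        println!("sled {}: {} eligible zones", sled.sled_id, sled.num_eligible);
    }
}
```

This is the same shape as the `IdOrdItem` impls for `NoopConvertSledInfo` and `NoopConvertZoneInfo` above, just with the domain types stripped away.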