From b34534e15c57cbacd6d3e4796bf57e5973b7f530 Mon Sep 17 00:00:00 2001
From: Eric Newberry
Date: Fri, 27 Jun 2025 23:21:45 +0000
Subject: [PATCH 1/3] nvme: Print controller GUID in shutdown_nvme_driver span

---
 .../src/dispatch/vtl2_settings_worker.rs      |  1 +
 openhcl/underhill_core/src/nvme_manager.rs    | 52 +++++++++++++++----
 2 files changed, 42 insertions(+), 11 deletions(-)

diff --git a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
index df03e53bdd..68bf2ed7c5 100644
--- a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
+++ b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
@@ -993,6 +993,7 @@ async fn make_disk_type_from_physical_device(
     // We can't validate yet that this namespace actually exists. That will
     // be checked later.
     return Ok(Resource::new(NvmeDiskConfig {
+        controller_instance_id,
         pci_id,
         nsid: sub_device_path,
     }));
diff --git a/openhcl/underhill_core/src/nvme_manager.rs b/openhcl/underhill_core/src/nvme_manager.rs
index d49a48c117..9c0cca74f7 100644
--- a/openhcl/underhill_core/src/nvme_manager.rs
+++ b/openhcl/underhill_core/src/nvme_manager.rs
@@ -14,6 +14,7 @@ use disk_backend::resolve::ResolvedDisk;
 use futures::StreamExt;
 use futures::TryFutureExt;
 use futures::future::join_all;
+use guid::Guid;
 use inspect::Inspect;
 use mesh::MeshPayload;
 use mesh::rpc::Rpc;
@@ -101,6 +102,7 @@ impl NvmeManager {
         let mut worker = NvmeManagerWorker {
             driver_source: driver_source.clone(),
             devices: HashMap::new(),
+            controller_instance_ids: HashMap::new(),
             vp_count,
             save_restore_supported,
             is_isolated,
@@ -175,7 +177,7 @@ impl NvmeManager {
 enum Request {
     Inspect(inspect::Deferred),
     ForceLoadDriver(inspect::DeferredUpdate),
-    GetNamespace(Rpc<(String, u32), Result<nvme_driver::Namespace, NamespaceError>>),
+    GetNamespace(Rpc<(Guid, String, u32), Result<nvme_driver::Namespace, NamespaceError>>),
     Save(Rpc<(), Result<NvmeManagerSavedState, anyhow::Error>>),
     Shutdown {
         span: tracing::Span,
@@ -191,13 +193,22 @@ pub struct NvmeManagerClient {
 impl NvmeManagerClient {
     pub async fn get_namespace(
         &self,
+        controller_instance_id: Guid,
         pci_id: String,
         nsid: u32,
     ) -> anyhow::Result<nvme_driver::Namespace> {
         Ok(self
             .sender
-            .call(Request::GetNamespace, (pci_id.clone(), nsid))
-            .instrument(tracing::info_span!("nvme_get_namespace", pci_id, nsid))
+            .call(
+                Request::GetNamespace,
+                (controller_instance_id, pci_id.clone(), nsid),
+            )
+            .instrument(tracing::info_span!(
+                "nvme_get_namespace",
+                controller_instance_id = controller_instance_id.to_string(),
+                pci_id,
+                nsid
+            ))
             .await
             .context("nvme manager is shut down")??)
     }
@@ -217,6 +228,8 @@ struct NvmeManagerWorker {
     driver_source: VmTaskDriverSource,
     #[inspect(iter_by_key)]
     devices: HashMap<String, nvme_driver::NvmeDriver<VfioDevice>>,
+    #[inspect(iter_by_key)]
+    controller_instance_ids: HashMap<String, Guid>,
     vp_count: u32,
     /// Running environment (memory layout) allows save/restore.
     save_restore_supported: bool,
@@ -235,7 +248,7 @@ impl NvmeManagerWorker {
             match req {
                 Request::Inspect(deferred) => deferred.inspect(&self),
                 Request::ForceLoadDriver(update) => {
-                    match self.get_driver(update.new_value().to_owned()).await {
+                    match self.get_driver(None, update.new_value().to_owned()).await {
                         Ok(_) => {
                             let pci_id = update.new_value().to_string();
                             update.succeed(pci_id);
@@ -246,8 +259,8 @@
                     }
                 }
                 Request::GetNamespace(rpc) => {
-                    rpc.handle(async |(pci_id, nsid)| {
-                        self.get_namespace(pci_id.clone(), nsid)
+                    rpc.handle(async |(controller_instance_id, pci_id, nsid)| {
+                        self.get_namespace(controller_instance_id, pci_id.clone(), nsid)
                             .map_err(|source| NamespaceError { pci_id, source })
                             .await
                     })
@@ -283,9 +296,15 @@
         if !nvme_keepalive || !self.save_restore_supported {
             async {
                 join_all(self.devices.drain().map(|(pci_id, driver)| {
-                    driver
-                        .shutdown()
-                        .instrument(tracing::info_span!("shutdown_nvme_driver", pci_id))
+                    driver.shutdown().instrument(tracing::info_span!(
+                        "shutdown_nvme_driver",
+                        pci_id,
+                        controller_instance_id = self
+                            .controller_instance_ids
+                            .remove(&pci_id)
+                            .unwrap_or_default()
+                            .to_string()
+                    ))
                 }))
                 .await
             }
@@ -296,6 +315,7 @@
     async fn get_driver(
         &mut self,
+        controller_instance_id: Option<Guid>,
         pci_id: String,
     ) -> Result<&mut nvme_driver::NvmeDriver<VfioDevice>, InnerError> {
         let driver = match self.devices.entry(pci_id.to_owned()) {
@@ -315,6 +335,12 @@
                     })
                     .map_err(InnerError::DmaClient)?;

+                // Update controller instance ID map for this PCI ID (if specified)
+                if let Some(controller_instance_id) = controller_instance_id {
+                    self.controller_instance_ids
+                        .insert(entry.key().clone(), controller_instance_id);
+                }
+
                 let device = VfioDevice::new(&self.driver_source, entry.key(), dma_client)
                     .instrument(tracing::info_span!("vfio_device_open", pci_id))
                     .await
@@ -344,10 +370,13 @@
     async fn get_namespace(
         &mut self,
+        controller_instance_id: Guid,
         pci_id: String,
         nsid: u32,
     ) -> Result<nvme_driver::Namespace, InnerError> {
-        let driver = self.get_driver(pci_id.to_owned()).await?;
+        let driver = self
+            .get_driver(Some(controller_instance_id), pci_id.to_owned())
+            .await?;
         driver
             .namespace(nsid)
             .await
@@ -440,7 +469,7 @@ impl AsyncResolveResource<DiskHandleKind, NvmeDiskConfig> for NvmeDiskResolver
     ) -> Result<Self::Output, Self::Error> {
         let namespace = self
             .manager
-            .get_namespace(rsrc.pci_id, rsrc.nsid)
+            .get_namespace(rsrc.controller_instance_id, rsrc.pci_id, rsrc.nsid)
             .await
             .context("could not open nvme namespace")?;

@@ -450,6 +479,7 @@ impl AsyncResolveResource<DiskHandleKind, NvmeDiskConfig> for NvmeDiskResolver
 #[derive(MeshPayload, Default)]
 pub struct NvmeDiskConfig {
+    pub controller_instance_id: Guid,
     pub pci_id: String,
     pub nsid: u32,
 }

From 46b605a52ae23dcb6912781fa4e13a40f07b2577 Mon Sep 17 00:00:00 2001
From: Eric Newberry
Date: Sat, 28 Jun 2025 21:57:15 +0000
Subject: [PATCH 2/3] Push controller_instance_id into NvmeDriver

---
 .../src/dispatch/vtl2_settings_worker.rs      |  2 +-
 openhcl/underhill_core/src/nvme_manager.rs    | 38 ++++++++-----------
 .../disk_nvme/nvme_driver/src/driver.rs       | 17 +++++++++
 3 files changed, 33 insertions(+), 24 deletions(-)

diff --git a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
index 68bf2ed7c5..5747c4b9a5 100644
--- a/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
+++ b/openhcl/underhill_core/src/dispatch/vtl2_settings_worker.rs
@@ -993,7 +993,7 @@ async fn make_disk_type_from_physical_device(
     // We can't validate yet that this namespace actually exists. That will
     // be checked later.
     return Ok(Resource::new(NvmeDiskConfig {
-        controller_instance_id,
+        controller_instance_id: controller_instance_id.to_string(),
         pci_id,
         nsid: sub_device_path,
     }));
diff --git a/openhcl/underhill_core/src/nvme_manager.rs b/openhcl/underhill_core/src/nvme_manager.rs
index 9c0cca74f7..1510f3625c 100644
--- a/openhcl/underhill_core/src/nvme_manager.rs
+++ b/openhcl/underhill_core/src/nvme_manager.rs
@@ -14,7 +14,6 @@ use disk_backend::resolve::ResolvedDisk;
 use futures::StreamExt;
 use futures::TryFutureExt;
 use futures::future::join_all;
-use guid::Guid;
 use inspect::Inspect;
 use mesh::MeshPayload;
 use mesh::rpc::Rpc;
@@ -102,7 +101,6 @@ impl NvmeManager {
         let mut worker = NvmeManagerWorker {
             driver_source: driver_source.clone(),
             devices: HashMap::new(),
-            controller_instance_ids: HashMap::new(),
             vp_count,
             save_restore_supported,
             is_isolated,
@@ -177,7 +175,7 @@ impl NvmeManager {
 enum Request {
     Inspect(inspect::Deferred),
     ForceLoadDriver(inspect::DeferredUpdate),
-    GetNamespace(Rpc<(Guid, String, u32), Result<nvme_driver::Namespace, NamespaceError>>),
+    GetNamespace(Rpc<(String, String, u32), Result<nvme_driver::Namespace, NamespaceError>>),
     Save(Rpc<(), Result<NvmeManagerSavedState, anyhow::Error>>),
     Shutdown {
         span: tracing::Span,
@@ -193,7 +191,7 @@ pub struct NvmeManagerClient {
 impl NvmeManagerClient {
     pub async fn get_namespace(
         &self,
-        controller_instance_id: Guid,
+        controller_instance_id: String,
         pci_id: String,
         nsid: u32,
     ) -> anyhow::Result<nvme_driver::Namespace> {
@@ -201,11 +199,11 @@ impl NvmeManagerClient {
             .sender
             .call(
                 Request::GetNamespace,
-                (controller_instance_id, pci_id.clone(), nsid),
+                (controller_instance_id.clone(), pci_id.clone(), nsid),
             )
             .instrument(tracing::info_span!(
                 "nvme_get_namespace",
-                controller_instance_id = controller_instance_id.to_string(),
+                controller_instance_id,
                 pci_id,
                 nsid
             ))
@@ -228,8 +226,6 @@ struct NvmeManagerWorker {
     driver_source: VmTaskDriverSource,
     #[inspect(iter_by_key)]
     devices: HashMap<String, nvme_driver::NvmeDriver<VfioDevice>>,
-    #[inspect(iter_by_key)]
-    controller_instance_ids: HashMap<String, Guid>,
     vp_count: u32,
     /// Running environment (memory layout) allows save/restore.
     save_restore_supported: bool,
@@ -296,14 +292,11 @@ impl NvmeManagerWorker {
         if !nvme_keepalive || !self.save_restore_supported {
             async {
                 join_all(self.devices.drain().map(|(pci_id, driver)| {
+                    let controller_instance_id = driver.controller_instance_id();
                     driver.shutdown().instrument(tracing::info_span!(
                         "shutdown_nvme_driver",
                         pci_id,
-                        controller_instance_id = self
-                            .controller_instance_ids
-                            .remove(&pci_id)
-                            .unwrap_or_default()
-                            .to_string()
+                        controller_instance_id
                     ))
                 }))
                 .await
@@ -315,7 +308,7 @@ impl NvmeManagerWorker {
     async fn get_driver(
         &mut self,
-        controller_instance_id: Option<Guid>,
+        controller_instance_id: Option<String>,
         pci_id: String,
     ) -> Result<&mut nvme_driver::NvmeDriver<VfioDevice>, InnerError> {
         let driver = match self.devices.entry(pci_id.to_owned()) {
@@ -335,12 +328,6 @@ impl NvmeManagerWorker {
                     })
                     .map_err(InnerError::DmaClient)?;

-                // Update controller instance ID map for this PCI ID (if specified)
-                if let Some(controller_instance_id) = controller_instance_id {
-                    self.controller_instance_ids
-                        .insert(entry.key().clone(), controller_instance_id);
-                }
-
                 let device = VfioDevice::new(&self.driver_source, entry.key(), dma_client)
                     .instrument(tracing::info_span!("vfio_device_open", pci_id))
                     .await
@@ -349,7 +336,7 @@ impl NvmeManagerWorker {
                 // TODO: For now, any isolation means use bounce buffering. This
                 // needs to change when we have nvme devices that support DMA to
                 // confidential memory.
-                let driver = nvme_driver::NvmeDriver::new(
+                let mut driver = nvme_driver::NvmeDriver::new(
                     &self.driver_source,
                     self.vp_count,
                     device,
@@ -362,6 +349,11 @@ impl NvmeManagerWorker {
                 .await
                 .map_err(InnerError::DeviceInitFailed)?;

+                // Pass through controller instance ID if specified.
+                if let Some(controller_instance_id) = controller_instance_id {
+                    driver.set_controller_instance_id(controller_instance_id);
+                }
+
                 entry.insert(driver)
             }
         };
@@ -370,7 +362,7 @@ impl NvmeManagerWorker {
     async fn get_namespace(
         &mut self,
-        controller_instance_id: Guid,
+        controller_instance_id: String,
         pci_id: String,
         nsid: u32,
     ) -> Result<nvme_driver::Namespace, InnerError> {
@@ -479,7 +471,7 @@ impl AsyncResolveResource<DiskHandleKind, NvmeDiskConfig> for NvmeDiskResolver
 #[derive(MeshPayload, Default)]
 pub struct NvmeDiskConfig {
-    pub controller_instance_id: Guid,
+    pub controller_instance_id: String,
     pub pci_id: String,
     pub nsid: u32,
 }
diff --git a/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs b/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
index 300608d124..ece1f4f7b9 100644
--- a/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
+++ b/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
@@ -56,6 +56,7 @@ pub struct NvmeDriver<T: DeviceBacking> {
     #[inspect(flatten)]
     task: Option<TaskControl<DriverWorkerTask<T>, WorkerState>>,
     device_id: String,
+    controller_instance_id: Option<String>,
     identify: Option<Arc<spec::IdentifyController>>,
     #[inspect(skip)]
     driver: VmTaskDriver,
@@ -248,6 +249,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         Ok(Self {
             device_id: device.id().to_owned(),
+            controller_instance_id: None,
             task: Some(TaskControl::new(DriverWorkerTask {
                 device,
                 driver: driver.clone(),
@@ -555,6 +557,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
                     // TODO: See the description above, save the vector once resolved.
                     namespaces: vec![],
                     worker_data: s,
+                    controller_instance_id: self.controller_instance_id.clone(),
                 })
             }
             Err(e) => Err(e),
@@ -594,6 +597,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         let mut this = Self {
             device_id: device.id().to_owned(),
+            controller_instance_id: saved_state.controller_instance_id.clone(),
             task: Some(TaskControl::new(DriverWorkerTask {
                 device,
                 driver: driver.clone(),
@@ -739,6 +743,16 @@ impl<T: DeviceBacking> NvmeDriver<T> {
     pub fn update_servicing_flags(&mut self, nvme_keepalive: bool) {
         self.nvme_keepalive = nvme_keepalive;
     }
+
+    /// Get the controller instance ID, if set.
+    pub fn controller_instance_id(&self) -> Option<String> {
+        self.controller_instance_id.clone()
+    }
+
+    /// Set the controller instance ID.
+    pub fn set_controller_instance_id(&mut self, controller_instance_id: String) {
+        self.controller_instance_id = Some(controller_instance_id);
+    }
 }

 async fn handle_asynchronous_events(
@@ -1051,6 +1065,9 @@ pub mod save_restore {
         /// NVMe driver worker task data.
         #[mesh(4)]
         pub worker_data: NvmeDriverWorkerSavedState,
+        /// Controller instance ID.
+        #[mesh(5)]
+        pub controller_instance_id: Option<String>,
     }

     /// Save/restore state for NVMe driver worker task.
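
A note between the patches, not part of any diff: PATCH 2 replaces the manager-side pci_id -> controller_instance_id map with a field owned by the driver itself. Below is a minimal sketch of the resulting get_driver flow; the names are taken from the hunks above, but error handling and the VFIO/DMA setup are elided, so read it as illustrative rather than as the literal manager code.

    // Sketch only: the PATCH 2 shape of NvmeManagerWorker::get_driver.
    // `controller_instance_id: Option<String>` originates in NvmeDiskConfig.
    let mut driver = nvme_driver::NvmeDriver::new(
        &self.driver_source,
        self.vp_count,
        device,
        self.is_isolated, // any isolation currently implies bounce buffering
    )
    .await
    .map_err(InnerError::DeviceInitFailed)?;

    // Attach the ID after construction; shutdown later reads it back via
    // driver.controller_instance_id() instead of consulting a manager map.
    if let Some(controller_instance_id) = controller_instance_id {
        driver.set_controller_instance_id(controller_instance_id);
    }

PATCH 3 below folds the parameter into NvmeDriver::new itself, which removes the window in which a freshly constructed driver has no ID and deletes the setter entirely.
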
From 129ae7c21337de97eeb4aa13a4caf854d96cf3f3 Mon Sep 17 00:00:00 2001
From: Eric Newberry
Date: Sat, 19 Jul 2025 00:33:44 +0000
Subject: [PATCH 3/3] Move setter to constructor

---
 openhcl/underhill_core/src/nvme_manager.rs    |  8 +--
 .../nvme_driver/fuzz/fuzz_nvme_driver.rs      |  9 +++-
 .../disk_nvme/nvme_driver/src/driver.rs       | 21 ++++----
 .../disk_nvme/nvme_driver/src/tests.rs        | 54 ++++++++++++++-----
 .../storage_tests/tests/scsidvd_nvme.rs       | 15 ++++--
 5 files changed, 75 insertions(+), 32 deletions(-)

diff --git a/openhcl/underhill_core/src/nvme_manager.rs b/openhcl/underhill_core/src/nvme_manager.rs
index 1510f3625c..31fbd520e9 100644
--- a/openhcl/underhill_core/src/nvme_manager.rs
+++ b/openhcl/underhill_core/src/nvme_manager.rs
@@ -336,11 +336,12 @@ impl NvmeManagerWorker {
                 // TODO: For now, any isolation means use bounce buffering. This
                 // needs to change when we have nvme devices that support DMA to
                 // confidential memory.
-                let mut driver = nvme_driver::NvmeDriver::new(
+                let driver = nvme_driver::NvmeDriver::new(
                     &self.driver_source,
                     self.vp_count,
                     device,
                     self.is_isolated,
+                    controller_instance_id,
                 )
                 .instrument(tracing::info_span!(
                     "nvme_driver_init",
                     pci_id = entry.key()
                 ))
                 .await
                 .map_err(InnerError::DeviceInitFailed)?;

-                // Pass through controller instance ID if specified.
-                if let Some(controller_instance_id) = controller_instance_id {
-                    driver.set_controller_instance_id(controller_instance_id);
-                }
-
                 entry.insert(driver)
             }
         };
diff --git a/vm/devices/storage/disk_nvme/nvme_driver/fuzz/fuzz_nvme_driver.rs b/vm/devices/storage/disk_nvme/nvme_driver/fuzz/fuzz_nvme_driver.rs
index 3914f0b4a7..49fff8f112 100644
--- a/vm/devices/storage/disk_nvme/nvme_driver/fuzz/fuzz_nvme_driver.rs
+++ b/vm/devices/storage/disk_nvme/nvme_driver/fuzz/fuzz_nvme_driver.rs
@@ -64,7 +64,14 @@ impl FuzzNvmeDriver {
             .unwrap();

         let device = FuzzEmulatedDevice::new(nvme, msi_set, mem.dma_client());
-        let nvme_driver = NvmeDriver::new(&driver_source, cpu_count, device, false).await?; // TODO: [use-arbitrary-input]
+        let nvme_driver = NvmeDriver::new(
+            &driver_source,
+            cpu_count,
+            device,
+            false,
+            Some(guid.to_string()),
+        )
+        .await?; // TODO: [use-arbitrary-input]
         let namespace = nvme_driver.namespace(1).await?; // TODO: [use-arbitrary-input]

         Ok(Self {
diff --git a/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs b/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
index ece1f4f7b9..8e89b1dc71 100644
--- a/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
+++ b/vm/devices/storage/disk_nvme/nvme_driver/src/driver.rs
@@ -180,11 +180,18 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         cpu_count: u32,
         device: T,
         bounce_buffer: bool,
+        controller_instance_id: Option<String>,
     ) -> anyhow::Result<Self> {
         let pci_id = device.id().to_owned();
-        let mut this = Self::new_disabled(driver_source, cpu_count, device, bounce_buffer)
-            .instrument(tracing::info_span!("nvme_new_disabled", pci_id))
-            .await?;
+        let mut this = Self::new_disabled(
+            driver_source,
+            cpu_count,
+            device,
+            bounce_buffer,
+            controller_instance_id,
+        )
+        .instrument(tracing::info_span!("nvme_new_disabled", pci_id))
+        .await?;
         match this
             .enable(cpu_count as u16)
             .instrument(tracing::info_span!("nvme_enable", pci_id))
@@ -209,6 +216,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         cpu_count: u32,
         mut device: T,
         bounce_buffer: bool,
+        controller_instance_id: Option<String>,
     ) -> anyhow::Result<Self> {
         let driver = driver_source.simple();
         let bar0 = Bar0(
@@ -249,7 +257,7 @@ impl<T: DeviceBacking> NvmeDriver<T> {
         Ok(Self {
             device_id: device.id().to_owned(),
-            controller_instance_id: None,
+            controller_instance_id,
             task: Some(TaskControl::new(DriverWorkerTask {
                 device,
                 driver: driver.clone(),
@@ -748,11 +756,6 @@ impl<T: DeviceBacking> NvmeDriver<T> {
     pub fn controller_instance_id(&self) -> Option<String> {
         self.controller_instance_id.clone()
     }
-
-    /// Set the controller instance ID.
-    pub fn set_controller_instance_id(&mut self, controller_instance_id: String) {
-        self.controller_instance_id = Some(controller_instance_id);
-    }
 }

 async fn handle_asynchronous_events(
diff --git a/vm/devices/storage/disk_nvme/nvme_driver/src/tests.rs b/vm/devices/storage/disk_nvme/nvme_driver/src/tests.rs
index e87f8246fd..776ac2b754 100644
--- a/vm/devices/storage/disk_nvme/nvme_driver/src/tests.rs
+++ b/vm/devices/storage/disk_nvme/nvme_driver/src/tests.rs
@@ -59,6 +59,7 @@ async fn test_nvme_ioqueue_max_mqes(driver: DefaultDriver) {
     // Controller Driver Setup
     let driver_source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
     let mut msi_set = MsiInterruptSet::new();
+    let nvme_controller_guid = Guid::new_random();
     let nvme = nvme::NvmeController::new(
         &driver_source,
         guest_mem,
         &mut msi_set,
         &mut ExternallyManagedMmioIntercepts,
         NvmeControllerCaps {
             msix_count: MSIX_COUNT,
             max_io_queues: IO_QUEUE_COUNT,
-            subsystem_id: Guid::new_random(),
+            subsystem_id: nvme_controller_guid,
         },
     );

     let cap: Cap = Cap::new().with_mqes_z(max_u16);
     device.set_mock_response_u64(Some((0, cap.into())));
-    let driver = NvmeDriver::new(&driver_source, CPU_COUNT, device, false).await;
+    let driver = NvmeDriver::new(
+        &driver_source,
+        CPU_COUNT,
+        device,
+        false,
+        Some(nvme_controller_guid.to_string()),
+    )
+    .await;

     assert!(driver.is_ok());
 }
@@ -96,6 +104,7 @@ async fn test_nvme_ioqueue_invalid_mqes(driver: DefaultDriver) {
     let driver_source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
     let mut msi_set = MsiInterruptSet::new();
+    let nvme_controller_guid = Guid::new_random();
     let nvme = nvme::NvmeController::new(
         &driver_source,
         guest_mem,
         &mut msi_set,
         &mut ExternallyManagedMmioIntercepts,
         NvmeControllerCaps {
             msix_count: MSIX_COUNT,
             max_io_queues: IO_QUEUE_COUNT,
-            subsystem_id: Guid::new_random(),
+            subsystem_id: nvme_controller_guid,
         },
     );

     // Setup mock response at offset 0
     let cap: Cap = Cap::new().with_mqes_z(0);
     device.set_mock_response_u64(Some((0, cap.into())));
-    let driver = NvmeDriver::new(&driver_source, CPU_COUNT, device, false).await;
+    let driver = NvmeDriver::new(
+        &driver_source,
+        CPU_COUNT,
+        device,
+        false,
+        Some(nvme_controller_guid.to_string()),
+    )
+    .await;

     assert!(driver.is_err());
 }
@@ -133,6 +149,7 @@ async fn test_nvme_driver(driver: DefaultDriver, allow_dma: bool) {
     // Arrange: Create the NVMe controller and driver.
     let driver_source = VmTaskDriverSource::new(SingleDriverBackend::new(driver));
     let mut msi_set = MsiInterruptSet::new();
+    let nvme_controller_guid = Guid::new_random();
     let nvme = nvme::NvmeController::new(
         &driver_source,
         guest_mem.clone(),
         &mut msi_set,
         &mut ExternallyManagedMmioIntercepts,
         NvmeControllerCaps {
             msix_count: MSIX_COUNT,
             max_io_queues: IO_QUEUE_COUNT,
-            subsystem_id: Guid::new_random(),
+            subsystem_id: nvme_controller_guid,
         },
     );

         .await
         .unwrap();
     let device = NvmeTestEmulatedDevice::new(nvme, msi_set, dma_client.clone());
-    let driver = NvmeDriver::new(&driver_source, CPU_COUNT, device, false)
-        .await
-        .unwrap();
+    let driver = NvmeDriver::new(
+        &driver_source,
+        CPU_COUNT,
+        device,
+        false,
+        Some(nvme_controller_guid.to_string()),
+    )
+    .await
+    .unwrap();
     let namespace = driver.namespace(1).await.unwrap();

     // Act: Write 1024 bytes of data to disk starting at LBA 1.
@@ -246,6 +269,7 @@ async fn test_nvme_save_restore_inner(driver: DefaultDriver) {
     let driver_source = VmTaskDriverSource::new(SingleDriverBackend::new(driver.clone()));
     let mut msi_x = MsiInterruptSet::new();
+    let nvme_controller_guid = Guid::new_random();
     let nvme_ctrl = nvme::NvmeController::new(
         &driver_source,
         guest_mem.clone(),
         &mut msi_x,
         &mut ExternallyManagedMmioIntercepts,
         NvmeControllerCaps {
             msix_count: MSIX_COUNT,
             max_io_queues: IO_QUEUE_COUNT,
-            subsystem_id: Guid::new_random(),
+            subsystem_id: nvme_controller_guid,
         },
     );

         .unwrap();
     let device = NvmeTestEmulatedDevice::new(nvme_ctrl, msi_x, dma_client.clone());
-    let mut nvme_driver = NvmeDriver::new(&driver_source, CPU_COUNT, device, false)
-        .await
-        .unwrap();
+    let mut nvme_driver = NvmeDriver::new(
+        &driver_source,
+        CPU_COUNT,
+        device,
+        false,
+        Some(nvme_controller_guid.to_string()),
+    )
+    .await
+    .unwrap();
     let _ns1 = nvme_driver.namespace(1).await.unwrap();
     let saved_state = nvme_driver.save().await.unwrap();
     // As of today we do not save namespace data to avoid possible conflict
diff --git a/vm/devices/storage/storage_tests/tests/scsidvd_nvme.rs b/vm/devices/storage/storage_tests/tests/scsidvd_nvme.rs
index 23879e0dfc..8bb515804b 100644
--- a/vm/devices/storage/storage_tests/tests/scsidvd_nvme.rs
+++ b/vm/devices/storage/storage_tests/tests/scsidvd_nvme.rs
@@ -55,6 +55,7 @@ impl ScsiDvdNvmeTest {
         let payload_mem = mem.payload_mem();

         let mut msi_set = MsiInterruptSet::new();
+        let nvme_controller_guid = Guid::new_random();
         let nvme = NvmeController::new(
             &driver_source,
             guest_mem.clone(),
             &mut msi_set,
             &mut ExternallyManagedMmioIntercepts,
             NvmeControllerCaps {
                 msix_count: MSIX_COUNT,
                 max_io_queues: IO_QUEUE_COUNT,
-                subsystem_id: Guid::new_random(),
+                subsystem_id: nvme_controller_guid,
             },
         );

             .unwrap();
         let device = EmulatedDevice::new(nvme, msi_set, dma_client.clone());
-        let nvme_driver = NvmeDriver::new(&driver_source, CPU_COUNT, device, false)
-            .await
-            .unwrap();
+        let nvme_driver = NvmeDriver::new(
+            &driver_source,
+            CPU_COUNT,
+            device,
+            false,
+            Some(nvme_controller_guid.to_string()),
+        )
+        .await
+        .unwrap();
         let namespace = nvme_driver.namespace(1).await.unwrap();
         let buf_range = OwnedRequestBuffers::linear(0, 16384, true);
         for i in 0..(sector_count / 8) {
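
A closing note, not part of any diff: after PATCH 3, every call site constructs the driver with the stringified subsystem GUID it already hands to the emulated controller. A minimal usage sketch under the same assumptions as the tests above (an emulated device backing in `device` and a VmTaskDriverSource in `driver_source`):

    // Sketch only: post-PATCH 3 construction, mirroring the test call sites.
    let nvme_controller_guid = Guid::new_random();
    // ... build the NvmeController with subsystem_id: nvme_controller_guid,
    // then wrap it in an emulated device as the tests above do ...
    let driver = NvmeDriver::new(
        &driver_source,
        CPU_COUNT,
        device,
        false, // bounce_buffer
        Some(nvme_controller_guid.to_string()),
    )
    .await
    .unwrap();
    // The ID is now carried by the driver itself, so the
    // shutdown_nvme_driver tracing span can always report it:
    assert_eq!(
        driver.controller_instance_id(),
        Some(nvme_controller_guid.to_string())
    );
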