@@ -1391,11 +1391,10 @@ impl VcpuFd {
                     // SAFETY: The data_offset is defined by the kernel to be some number of bytes
                     // into the kvm_run structure, which we have fully mmap'd.
                     let data_ptr = unsafe { run_start.offset(io.data_offset as isize) };
-                    // SAFETY: The slice's lifetime is limited to the lifetime of this vCPU, which is equal
-                    // to the mmap of the `kvm_run` struct that this is slicing from.
-                    let data_slice = unsafe {
-                        std::slice::from_raw_parts_mut::<u8>(data_ptr as *mut u8, data_size)
-                    };
+                    let data_slice =
+                    // SAFETY: The slice's lifetime is limited to the lifetime of this vCPU, which is equal
+                    // to the mmap of the `kvm_run` struct that this is slicing from.
+                        unsafe { std::slice::from_raw_parts_mut::<u8>(data_ptr, data_size) };
                     match u32::from(io.direction) {
                         KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn(port, data_slice)),
                         KVM_EXIT_IO_OUT => Ok(VcpuExit::IoOut(port, data_slice)),
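The slice built above is what the `IoIn` and `IoOut` exit variants hand to the caller of `run()`. For orientation only, here is a minimal usage sketch, assuming this is the kvm-ioctls `VcpuFd` API; `handle_port_io` is a hypothetical helper name, not part of this patch, and error handling is collapsed into `expect`.

use kvm_ioctls::{VcpuExit, VcpuFd};

// Hypothetical helper: dispatches one port-I/O exit. The `data` slices borrow from
// the mmap'd kvm_run area, so they are only valid until the next call to `run()`.
fn handle_port_io(vcpu: &mut VcpuFd) {
    match vcpu.run().expect("KVM_RUN failed") {
        VcpuExit::IoIn(port, data) => {
            // The VMM must fill `data` before re-entering the guest.
            data.fill(0);
            println!("IN  port {:#x}, {} bytes requested", port, data.len());
        }
        VcpuExit::IoOut(port, data) => {
            println!("OUT port {:#x}: {:?}", port, data);
        }
        other => println!("unhandled exit: {:?}", other),
    }
}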
@@ -1597,7 +1596,7 @@ impl VcpuFd {
     /// ```
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
     pub fn set_sync_valid_reg(&mut self, reg: SyncReg) {
-        let mut kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
+        let kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
         kvm_run.kvm_valid_regs |= reg as u64;
     }
 
@@ -1619,7 +1618,7 @@ impl VcpuFd {
     /// ```
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
     pub fn set_sync_dirty_reg(&mut self, reg: SyncReg) {
-        let mut kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
+        let kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
         kvm_run.kvm_dirty_regs |= reg as u64;
     }
 
@@ -1641,7 +1640,7 @@ impl VcpuFd {
     /// ```
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
     pub fn clear_sync_valid_reg(&mut self, reg: SyncReg) {
-        let mut kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
+        let kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
         kvm_run.kvm_valid_regs &= !(reg as u64);
     }
 
@@ -1663,7 +1662,7 @@ impl VcpuFd {
     /// ```
     #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
     pub fn clear_sync_dirty_reg(&mut self, reg: SyncReg) {
-        let mut kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
+        let kvm_run: &mut kvm_run = self.kvm_run_ptr.as_mut_ref();
         kvm_run.kvm_dirty_regs &= !(reg as u64);
     }
 
@@ -2714,7 +2713,7 @@ mod tests {
 
         let orig_sregs = vcpu.get_sregs().unwrap();
 
-        let mut sync_regs = vcpu.sync_regs_mut();
+        let sync_regs = vcpu.sync_regs_mut();
 
         // Initialize the sregs in sync_regs to be the original sregs
         sync_regs.sregs = orig_sregs;
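Aside from dropping redundant `mut` bindings, none of the hunks above change the sync-regs API itself. As a usage sketch only, assuming an x86_64 host with /dev/kvm, the kvm-ioctls crate, and a `SyncReg::SystemRegister` variant as named here (neither the setup nor the variant choice comes from this patch, and error handling is elided with `unwrap()`):

use kvm_ioctls::{Cap, Kvm, SyncReg};

fn main() {
    let kvm = Kvm::new().unwrap();
    let vm = kvm.create_vm().unwrap();
    let mut vcpu = vm.create_vcpu(0).unwrap();

    // KVM_CAP_SYNC_REGS is required for the register-sharing area inside kvm_run.
    if kvm.check_extension(Cap::SyncRegs) {
        // Ask KVM to copy the system registers into kvm_run on every exit...
        vcpu.set_sync_valid_reg(SyncReg::SystemRegister);
        // ...and to load whatever the VMM writes there back into the vCPU on the next KVM_RUN.
        vcpu.set_sync_dirty_reg(SyncReg::SystemRegister);

        // Seed the in-kvm_run copy from KVM_GET_SREGS, as the test above does.
        let sregs = vcpu.get_sregs().unwrap();
        vcpu.sync_regs_mut().sregs = sregs;

        // Clear both bits once register syncing is no longer wanted.
        vcpu.clear_sync_valid_reg(SyncReg::SystemRegister);
        vcpu.clear_sync_dirty_reg(SyncReg::SystemRegister);
    }
}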