
Commit fd94213

shamiali2008 authored and awilliam committed
hisi_acc_vfio_pci: Remove the deferred_reset logic
The deferred_reset logic was added to vfio migration drivers to prevent a circular locking dependency between mm_lock and the state mutex. This is mainly because of the copy_to/from_user() functions (which take mm_lock) invoked under the state mutex. But for the HiSilicon driver, the only place where we now hold the state mutex for copy_to_user() is during the PRE_COPY IOCTL. So for pre_copy, release the lock as soon as we have updated the data and perform copy_to_user() without the state mutex held. With this, we can get rid of the deferred_reset logic.

Link: https://lore.kernel.org/kvm/20240220132459.GM13330@nvidia.com/
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Brett Creeley <brett.creeley@amd.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240229091152.56664-1-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
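The precopy change is easiest to see as a locking pattern: snapshot the data under the state mutex, drop the mutex, and only then perform the copy that may itself take other locks (mm_lock in the kernel case), so the two locks are never held at the same time. Below is a minimal userspace sketch of that pattern in plain C with pthreads; it is not the driver code, and info_snapshot, total_length, copy_out() and precopy_pattern() are illustrative stand-ins.

#include <pthread.h>
#include <string.h>

struct info_snapshot {
        long initial_bytes;
        long dirty_bytes;
};

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static long total_length;       /* protected by state_mutex */

/*
 * Stand-in for copy_to_user(): in the kernel this may end up taking
 * mm_lock, so it must not run while state_mutex is held.
 */
static int copy_out(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

int precopy_pattern(void *user_buf, long pos)
{
        struct info_snapshot info = { 0 };

        pthread_mutex_lock(&state_mutex);
        info.initial_bytes = total_length - pos;  /* update under the lock */
        pthread_mutex_unlock(&state_mutex);       /* drop it before copying */

        /* The copy now runs without state_mutex, so there is no
         * state_mutex -> mm_lock ordering to conflict with reset paths. */
        return copy_out(user_buf, &info, sizeof(info));
}

Once the copy runs outside the state mutex, the reset path no longer needs the trylock/deferred_reset dance: it can simply take the state mutex and reset directly, which is what the hisi_acc_vf_pci_aer_reset_done() hunk below does.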
1 parent 5b99241 · commit fd94213

File tree

2 files changed: 14 additions, 40 deletions

drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c

Lines changed: 12 additions & 36 deletions
@@ -630,25 +630,11 @@ static void hisi_acc_vf_disable_fds(struct hisi_acc_vf_core_device *hisi_acc_vde
         }
 }
 
-/*
- * This function is called in all state_mutex unlock cases to
- * handle a 'deferred_reset' if exists.
- */
-static void
-hisi_acc_vf_state_mutex_unlock(struct hisi_acc_vf_core_device *hisi_acc_vdev)
+static void hisi_acc_vf_reset(struct hisi_acc_vf_core_device *hisi_acc_vdev)
 {
-again:
-        spin_lock(&hisi_acc_vdev->reset_lock);
-        if (hisi_acc_vdev->deferred_reset) {
-                hisi_acc_vdev->deferred_reset = false;
-                spin_unlock(&hisi_acc_vdev->reset_lock);
-                hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
-                hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
-                hisi_acc_vf_disable_fds(hisi_acc_vdev);
-                goto again;
-        }
-        mutex_unlock(&hisi_acc_vdev->state_mutex);
-        spin_unlock(&hisi_acc_vdev->reset_lock);
+        hisi_acc_vdev->vf_qm_state = QM_NOT_READY;
+        hisi_acc_vdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+        hisi_acc_vf_disable_fds(hisi_acc_vdev);
 }
 
 static void hisi_acc_vf_start_device(struct hisi_acc_vf_core_device *hisi_acc_vdev)
@@ -804,8 +790,10 @@ static long hisi_acc_vf_precopy_ioctl(struct file *filp,
 
         info.dirty_bytes = 0;
         info.initial_bytes = migf->total_length - *pos;
+        mutex_unlock(&migf->lock);
+        mutex_unlock(&hisi_acc_vdev->state_mutex);
 
-        ret = copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+        return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
 out:
         mutex_unlock(&migf->lock);
         mutex_unlock(&hisi_acc_vdev->state_mutex);
@@ -1071,7 +1059,7 @@ hisi_acc_vfio_pci_set_device_state(struct vfio_device *vdev,
                         break;
                 }
         }
-        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+        mutex_unlock(&hisi_acc_vdev->state_mutex);
         return res;
 }
 
@@ -1092,7 +1080,7 @@ hisi_acc_vfio_pci_get_device_state(struct vfio_device *vdev,
 
         mutex_lock(&hisi_acc_vdev->state_mutex);
         *curr_state = hisi_acc_vdev->mig_state;
-        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+        mutex_unlock(&hisi_acc_vdev->state_mutex);
         return 0;
 }
 
@@ -1104,21 +1092,9 @@ static void hisi_acc_vf_pci_aer_reset_done(struct pci_dev *pdev)
                         VFIO_MIGRATION_STOP_COPY)
                 return;
 
-        /*
-         * As the higher VFIO layers are holding locks across reset and using
-         * those same locks with the mm_lock we need to prevent ABBA deadlock
-         * with the state_mutex and mm_lock.
-         * In case the state_mutex was taken already we defer the cleanup work
-         * to the unlock flow of the other running context.
-         */
-        spin_lock(&hisi_acc_vdev->reset_lock);
-        hisi_acc_vdev->deferred_reset = true;
-        if (!mutex_trylock(&hisi_acc_vdev->state_mutex)) {
-                spin_unlock(&hisi_acc_vdev->reset_lock);
-                return;
-        }
-        spin_unlock(&hisi_acc_vdev->reset_lock);
-        hisi_acc_vf_state_mutex_unlock(hisi_acc_vdev);
+        mutex_lock(&hisi_acc_vdev->state_mutex);
+        hisi_acc_vf_reset(hisi_acc_vdev);
+        mutex_unlock(&hisi_acc_vdev->state_mutex);
 }
 
 static int hisi_acc_vf_qm_init(struct hisi_acc_vf_core_device *hisi_acc_vdev)

drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h

Lines changed: 2 additions & 4 deletions
@@ -98,8 +98,8 @@ struct hisi_acc_vf_migration_file {
 
 struct hisi_acc_vf_core_device {
         struct vfio_pci_core_device core_device;
-        u8 match_done:1;
-        u8 deferred_reset:1;
+        u8 match_done;
+
         /* For migration state */
         struct mutex state_mutex;
         enum vfio_device_mig_state mig_state;
@@ -109,8 +109,6 @@ struct hisi_acc_vf_core_device {
         struct hisi_qm vf_qm;
         u32 vf_qm_state;
         int vf_id;
-        /* For reset handler */
-        spinlock_t reset_lock;
         struct hisi_acc_vf_migration_file *resuming_migf;
         struct hisi_acc_vf_migration_file *saving_migf;
 };
