
Commit 07107e7

jgunthorpe authored and joergroedel committed

iommu/virtio: Move to domain_alloc_paging()
virtio has the complication that it sometimes wants to return a paging domain for IDENTITY, which makes this conversion a little different than other drivers.

Add a viommu_domain_alloc_paging() that combines viommu_domain_alloc() and viommu_domain_finalise() to always return a fully initialized and finalized paging domain.

Use viommu_domain_alloc_identity() to implement the special non-bypass IDENTITY flow by calling viommu_domain_alloc_paging() then viommu_domain_map_identity().

Remove support for deferred finalize and the vdomain->mutex.

Remove core support for domain_alloc() IDENTITY, as virtio was the last driver using it.

Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/3-v4-ff5fb6b03bd1+288-iommu_virtio_domains_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
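Condensed from the virtio-iommu diff below, a minimal sketch of the non-bypass IDENTITY flow the message describes. The sketch_* name is illustrative only; the real function is viommu_domain_alloc_identity() in the hunk further down.

/*
 * Illustrative sketch only: how the non-bypass IDENTITY flow composes the
 * two helpers. Mirrors viommu_domain_alloc_identity() in the diff below.
 */
static struct iommu_domain *sketch_alloc_identity(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct iommu_domain *domain;
	int ret;

	/* Device offers bypass: the shared static identity domain suffices. */
	if (virtio_has_feature(vdev->viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG))
		return &viommu_identity_domain.domain;

	/* No bypass: build a fully finalized paging domain... */
	domain = viommu_domain_alloc_paging(dev);
	if (IS_ERR(domain))
		return domain;

	/* ...and emulate IDENTITY by mapping the identity ranges into it. */
	ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
	if (ret) {
		viommu_domain_free(domain);
		return ERR_PTR(ret);
	}
	return domain;
}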
1 parent 0d609a1 · commit 07107e7

File tree

2 files changed: +50 additions, -71 deletions

drivers/iommu/iommu.c

Lines changed: 0 additions & 6 deletions

@@ -1635,12 +1635,6 @@ static struct iommu_domain *__iommu_alloc_identity_domain(struct device *dev)
 		domain = ops->domain_alloc_identity(dev);
 		if (IS_ERR(domain))
 			return domain;
-	} else if (ops->domain_alloc) {
-		domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
-		if (!domain)
-			return ERR_PTR(-ENOMEM);
-		if (IS_ERR(domain))
-			return domain;
 	} else {
 		return ERR_PTR(-EOPNOTSUPP);
 	}
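For context, a hedged sketch of the identity-domain dispatch that remains once the domain_alloc(IOMMU_DOMAIN_IDENTITY) fallback above is removed. Only the branch structure comes from the hunk; the function name, prologue, and ops lookup are assumptions for illustration, not the verbatim kernel function.

/*
 * Hedged sketch: drivers now provide either a static identity_domain or a
 * domain_alloc_identity() op; there is no domain_alloc(IDENTITY) fallback.
 */
static struct iommu_domain *sketch_alloc_identity_domain(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);	/* assumed lookup */
	struct iommu_domain *domain;

	if (ops->identity_domain)
		return ops->identity_domain;	/* shared static identity domain */

	if (ops->domain_alloc_identity) {
		domain = ops->domain_alloc_identity(dev);
		if (IS_ERR(domain))
			return domain;
	} else {
		/* The domain_alloc(IOMMU_DOMAIN_IDENTITY) path is gone. */
		return ERR_PTR(-EOPNOTSUPP);
	}
	return domain;
}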

drivers/iommu/virtio-iommu.c

Lines changed: 50 additions & 65 deletions

@@ -63,7 +63,6 @@ struct viommu_mapping {
 struct viommu_domain {
 	struct iommu_domain		domain;
 	struct viommu_dev		*viommu;
-	struct mutex			mutex; /* protects viommu pointer */
 	unsigned int			id;
 	u32				map_flags;

@@ -97,6 +96,8 @@ struct viommu_event {
 	};
 };

+static struct viommu_domain viommu_identity_domain;
+
 #define to_viommu_domain(domain)	\
 	container_of(domain, struct viommu_domain, domain)

@@ -653,65 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)

 /* IOMMU API */

-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
 {
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct viommu_dev *viommu = vdev->viommu;
+	unsigned long viommu_page_size;
 	struct viommu_domain *vdomain;
-
-	if (type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_DMA &&
-	    type != IOMMU_DOMAIN_IDENTITY)
-		return NULL;
-
-	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
-	if (!vdomain)
-		return NULL;
-
-	mutex_init(&vdomain->mutex);
-	spin_lock_init(&vdomain->mappings_lock);
-	vdomain->mappings = RB_ROOT_CACHED;
-
-	return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
-				  struct iommu_domain *domain)
-{
 	int ret;
-	unsigned long viommu_page_size;
-	struct viommu_dev *viommu = vdev->viommu;
-	struct viommu_domain *vdomain = to_viommu_domain(domain);

 	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
 	if (viommu_page_size > PAGE_SIZE) {
 		dev_err(vdev->dev,
 			"granule 0x%lx larger than system page size 0x%lx\n",
 			viommu_page_size, PAGE_SIZE);
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 	}

+	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+	if (!vdomain)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&vdomain->mappings_lock);
+	vdomain->mappings = RB_ROOT_CACHED;
+
 	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
 			      viommu->last_domain, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		kfree(vdomain);
+		return ERR_PTR(ret);
+	}

-	vdomain->id		= (unsigned int)ret;
+	vdomain->id = (unsigned int)ret;

-	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
-	domain->geometry	= viommu->geometry;
+	vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+	vdomain->domain.geometry = viommu->geometry;

-	vdomain->map_flags	= viommu->map_flags;
-	vdomain->viommu		= viommu;
+	vdomain->map_flags = viommu->map_flags;
+	vdomain->viommu = viommu;

-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		ret = viommu_domain_map_identity(vdev, vdomain);
-		if (ret) {
-			ida_free(&viommu->domain_ids, vdomain->id);
-			vdomain->viommu = NULL;
-			return ret;
-		}
-	}
-
-	return 0;
+	return &vdomain->domain;
 }

 static void viommu_domain_free(struct iommu_domain *domain)

@@ -727,27 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
 	kfree(vdomain);
 }

+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct iommu_domain *domain;
+	int ret;
+
+	if (virtio_has_feature(vdev->viommu->vdev,
+			       VIRTIO_IOMMU_F_BYPASS_CONFIG))
+		return &viommu_identity_domain.domain;
+
+	domain = viommu_domain_alloc_paging(dev);
+	if (IS_ERR(domain))
+		return domain;
+
+	ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+	if (ret) {
+		viommu_domain_free(domain);
+		return ERR_PTR(ret);
+	}
+	return domain;
+}
+
 static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret = 0;
 	struct virtio_iommu_req_attach req;
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 	struct viommu_domain *vdomain = to_viommu_domain(domain);

-	mutex_lock(&vdomain->mutex);
-	if (!vdomain->viommu) {
-		/*
-		 * Properly initialize the domain now that we know which viommu
-		 * owns it.
-		 */
-		ret = viommu_domain_finalise(vdev, domain);
-	} else if (vdomain->viommu != vdev->viommu) {
-		ret = -EINVAL;
-	}
-	mutex_unlock(&vdomain->mutex);
-
-	if (ret)
-		return ret;
+	if (vdomain->viommu != vdev->viommu)
+		return -EINVAL;

 	/*
 	 * In the virtio-iommu device, when attaching the endpoint to a new

@@ -1096,9 +1087,9 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 }

 static struct iommu_ops viommu_ops = {
-	.identity_domain	= &viommu_identity_domain.domain,
 	.capable		= viommu_capable,
-	.domain_alloc		= viommu_domain_alloc,
+	.domain_alloc_identity	= viommu_domain_alloc_identity,
+	.domain_alloc_paging	= viommu_domain_alloc_paging,
 	.probe_device		= viommu_probe_device,
 	.release_device		= viommu_release_device,
 	.device_group		= viommu_device_group,

@@ -1224,12 +1215,6 @@ static int viommu_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
 		viommu->identity_domain_id = viommu->first_domain;
 		viommu->first_domain++;
-	} else {
-		/*
-		 * Assume the VMM is sensible and it either supports bypass on
-		 * all instances or no instances.
-		 */
-		viommu_ops.identity_domain = NULL;
 	}

 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
