Skip to content

Commit 0d76a6e

Browse files
jgunthorpe authored and joergroedel committed
iommu/virtio: Break out bypass identity support into a global static
To make way for a domain_alloc_paging conversion add the typical global static IDENTITY domain. This supports VMMs that have a VIRTIO_IOMMU_F_BYPASS_CONFIG config. If the VMM does not have support then the domain_alloc path is still used, which creates an IDENTITY domain out of a paging domain. Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com> Reviewed-by: Kevin Tian <kevin.tian@intel.com> Link: https://lore.kernel.org/r/1-v4-ff5fb6b03bd1+288-iommu_virtio_domains_jgg@nvidia.com Signed-off-by: Joerg Roedel <jroedel@suse.de>
1 parent f984fb0 commit 0d76a6e

File tree

1 file changed

+67
-19
lines changed

1 file changed

+67
-19
lines changed

drivers/iommu/virtio-iommu.c

Lines changed: 67 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -48,6 +48,7 @@ struct viommu_dev {
4848
u64 pgsize_bitmap;
4949
u32 first_domain;
5050
u32 last_domain;
51+
u32 identity_domain_id;
5152
/* Supported MAP flags */
5253
u32 map_flags;
5354
u32 probe_size;
@@ -70,7 +71,6 @@ struct viommu_domain {
7071
struct rb_root_cached mappings;
7172

7273
unsigned long nr_endpoints;
73-
bool bypass;
7474
};
7575

7676
struct viommu_endpoint {
@@ -305,6 +305,22 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
305305
return ret;
306306
}
307307

308+
static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
309+
struct virtio_iommu_req_attach *req)
310+
{
311+
int ret;
312+
unsigned int i;
313+
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
314+
315+
for (i = 0; i < fwspec->num_ids; i++) {
316+
req->endpoint = cpu_to_le32(fwspec->ids[i]);
317+
ret = viommu_send_req_sync(viommu, req, sizeof(*req));
318+
if (ret)
319+
return ret;
320+
}
321+
return 0;
322+
}
323+
308324
/*
309325
* viommu_add_mapping - add a mapping to the internal tree
310326
*
@@ -687,12 +703,6 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
687703
vdomain->viommu = viommu;
688704

689705
if (domain->type == IOMMU_DOMAIN_IDENTITY) {
690-
if (virtio_has_feature(viommu->vdev,
691-
VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
692-
vdomain->bypass = true;
693-
return 0;
694-
}
695-
696706
ret = viommu_domain_map_identity(vdev, vdomain);
697707
if (ret) {
698708
ida_free(&viommu->domain_ids, vdomain->id);
@@ -719,10 +729,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
719729

720730
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
721731
{
722-
int i;
723732
int ret = 0;
724733
struct virtio_iommu_req_attach req;
725-
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
726734
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
727735
struct viommu_domain *vdomain = to_viommu_domain(domain);
728736

@@ -761,16 +769,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
761769
.domain = cpu_to_le32(vdomain->id),
762770
};
763771

764-
if (vdomain->bypass)
765-
req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
766-
767-
for (i = 0; i < fwspec->num_ids; i++) {
768-
req.endpoint = cpu_to_le32(fwspec->ids[i]);
769-
770-
ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
771-
if (ret)
772-
return ret;
773-
}
772+
ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
773+
if (ret)
774+
return ret;
774775

775776
if (!vdomain->nr_endpoints) {
776777
/*
@@ -788,6 +789,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
788789
return 0;
789790
}
790791

792+
static int viommu_attach_identity_domain(struct iommu_domain *domain,
793+
struct device *dev)
794+
{
795+
int ret = 0;
796+
struct virtio_iommu_req_attach req;
797+
struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
798+
struct viommu_domain *vdomain = to_viommu_domain(domain);
799+
800+
req = (struct virtio_iommu_req_attach) {
801+
.head.type = VIRTIO_IOMMU_T_ATTACH,
802+
.domain = cpu_to_le32(vdev->viommu->identity_domain_id),
803+
.flags = cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
804+
};
805+
806+
ret = viommu_send_attach_req(vdev->viommu, dev, &req);
807+
if (ret)
808+
return ret;
809+
810+
if (vdev->vdomain)
811+
vdev->vdomain->nr_endpoints--;
812+
vdomain->nr_endpoints++;
813+
vdev->vdomain = vdomain;
814+
return 0;
815+
}
816+
817+
static struct viommu_domain viommu_identity_domain = {
818+
.domain = {
819+
.type = IOMMU_DOMAIN_IDENTITY,
820+
.ops = &(const struct iommu_domain_ops) {
821+
.attach_dev = viommu_attach_identity_domain,
822+
},
823+
},
824+
};
825+
791826
static void viommu_detach_dev(struct viommu_endpoint *vdev)
792827
{
793828
int i;
@@ -1061,6 +1096,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
10611096
}
10621097

10631098
static struct iommu_ops viommu_ops = {
1099+
.identity_domain = &viommu_identity_domain.domain,
10641100
.capable = viommu_capable,
10651101
.domain_alloc = viommu_domain_alloc,
10661102
.probe_device = viommu_probe_device,
@@ -1184,6 +1220,18 @@ static int viommu_probe(struct virtio_device *vdev)
11841220
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
11851221
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
11861222

1223+
/* Reserve an ID to use as the bypass domain */
1224+
if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
1225+
viommu->identity_domain_id = viommu->first_domain;
1226+
viommu->first_domain++;
1227+
} else {
1228+
/*
1229+
* Assume the VMM is sensible and it either supports bypass on
1230+
* all instances or no instances.
1231+
*/
1232+
viommu_ops.identity_domain = NULL;
1233+
}
1234+
11871235
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
11881236

11891237
virtio_device_ready(vdev);

0 commit comments

Comments
 (0)