@@ -48,6 +48,7 @@ struct viommu_dev {
 	u64				pgsize_bitmap;
 	u32				first_domain;
 	u32				last_domain;
+	u32				identity_domain_id;
 	/* Supported MAP flags */
 	u32				map_flags;
 	u32				probe_size;
@@ -70,7 +71,6 @@ struct viommu_domain {
 	struct rb_root_cached		mappings;
 
 	unsigned long			nr_endpoints;
-	bool				bypass;
 };
 
 struct viommu_endpoint {
@@ -305,6 +305,22 @@ static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
 	return ret;
 }
 
+static int viommu_send_attach_req(struct viommu_dev *viommu, struct device *dev,
+				  struct virtio_iommu_req_attach *req)
+{
+	int ret;
+	unsigned int i;
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
+	for (i = 0; i < fwspec->num_ids; i++) {
+		req->endpoint = cpu_to_le32(fwspec->ids[i]);
+		ret = viommu_send_req_sync(viommu, req, sizeof(*req));
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
 /*
  * viommu_add_mapping - add a mapping to the internal tree
  *
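The new viommu_send_attach_req() helper factors out the attach fan-out: a device may expose several endpoint IDs through its firmware spec (fwspec), and each ID needs its own VIRTIO_IOMMU_T_ATTACH request. As a rough usage sketch (the domain ID here is hypothetical and flags are omitted):

	struct virtio_iommu_req_attach req = {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(1),	/* hypothetical domain ID */
	};

	/* stamps each fwspec endpoint ID into req and sends it synchronously */
	ret = viommu_send_attach_req(viommu, dev, &req);

Both viommu_attach_dev() and the new identity attach path below follow exactly this shape.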
@@ -687,12 +703,6 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 	vdomain->viommu		= viommu;
 
 	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		if (virtio_has_feature(viommu->vdev,
-				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
-			vdomain->bypass = true;
-			return 0;
-		}
-
 		ret = viommu_domain_map_identity(vdev, vdomain);
 		if (ret) {
 			ida_free(&viommu->domain_ids, vdomain->id);
@@ -719,10 +729,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
 
 static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	int i;
 	int ret = 0;
 	struct virtio_iommu_req_attach req;
-	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -761,16 +769,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		.domain		= cpu_to_le32(vdomain->id),
 	};
 
-	if (vdomain->bypass)
-		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
-
-	for (i = 0; i < fwspec->num_ids; i++) {
-		req.endpoint = cpu_to_le32(fwspec->ids[i]);
-
-		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
-		if (ret)
-			return ret;
-	}
+	ret = viommu_send_attach_req(vdomain->viommu, dev, &req);
+	if (ret)
+		return ret;
 
 	if (!vdomain->nr_endpoints) {
 		/*
@@ -788,6 +789,40 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	return 0;
 }
 
+static int viommu_attach_identity_domain(struct iommu_domain *domain,
+					 struct device *dev)
+{
+	int ret = 0;
+	struct virtio_iommu_req_attach req;
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+
+	req = (struct virtio_iommu_req_attach) {
+		.head.type	= VIRTIO_IOMMU_T_ATTACH,
+		.domain		= cpu_to_le32(vdev->viommu->identity_domain_id),
+		.flags		= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS),
+	};
+
+	ret = viommu_send_attach_req(vdev->viommu, dev, &req);
+	if (ret)
+		return ret;
+
+	if (vdev->vdomain)
+		vdev->vdomain->nr_endpoints--;
+	vdomain->nr_endpoints++;
+	vdev->vdomain = vdomain;
+	return 0;
+}
+
+static struct viommu_domain viommu_identity_domain = {
+	.domain = {
+		.type = IOMMU_DOMAIN_IDENTITY,
+		.ops = &(const struct iommu_domain_ops) {
+			.attach_dev = viommu_attach_identity_domain,
+		},
+	},
+};
+
 static void viommu_detach_dev(struct viommu_endpoint *vdev)
 {
 	int i;
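viommu_identity_domain is the usual statically allocated singleton for iommu_ops::identity_domain: it is never allocated or freed, so it must not carry per-instance state, which is why the attach handler reads the reserved ID from vdev->viommu->identity_domain_id rather than from the domain. A sketch of how the core is assumed to consume it (simplified, not the exact upstream code):

	/* assumed core behaviour: identity requests short-circuit allocation */
	if (type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
		return ops->identity_domain;

Note also that viommu_attach_identity_domain() keeps the endpoint accounting consistent: it decrements nr_endpoints on the domain being left before counting the endpoint against the identity domain.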
@@ -1061,6 +1096,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 }
 
 static struct iommu_ops viommu_ops = {
+	.identity_domain	= &viommu_identity_domain.domain,
 	.capable		= viommu_capable,
 	.domain_alloc		= viommu_domain_alloc,
 	.probe_device		= viommu_probe_device,
@@ -1184,6 +1220,18 @@ static int viommu_probe(struct virtio_device *vdev)
 	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
 		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
 
+	/* Reserve an ID to use as the bypass domain */
+	if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+		viommu->identity_domain_id = viommu->first_domain;
+		viommu->first_domain++;
+	} else {
+		/*
+		 * Assume the VMM is sensible and it either supports bypass on
+		 * all instances or no instances.
+		 */
+		viommu_ops.identity_domain = NULL;
+	}
+
 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
 
 	virtio_device_ready(vdev);
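Reserving the bypass ID up front shrinks the usable paging-domain range by one: with VIRTIO_IOMMU_F_BYPASS_CONFIG, regular domains are allocated from [first_domain + 1, last_domain] while the original first_domain becomes identity_domain_id. Without the feature, clearing viommu_ops.identity_domain sends the core down the ordinary domain_alloc() path, and viommu_domain_finalise() emulates bypass with full identity mappings via viommu_domain_map_identity(), which is why the bypass short-circuit could be dropped from that function. Since viommu_ops is shared by every virtio-iommu instance, this relies on the assumption spelled out in the comment above: the VMM offers bypass on all instances or on none.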