@@ -63,7 +63,6 @@ struct viommu_mapping {
 struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
-	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;
 
@@ -97,6 +96,8 @@ struct viommu_event {
	};
 };
 
+static struct viommu_domain viommu_identity_domain;
+
 #define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)
 
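Note: the to_viommu_domain() helper above relies on the kernel's container_of() pattern — because struct viommu_domain embeds a struct iommu_domain as its domain member, a pointer to the embedded structure handed back by the IOMMU core can be converted to the containing driver structure. The following is a minimal, self-contained sketch of that pattern; the struct names, the simplified container_of() definition, and the values are illustrative, not taken from the driver.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int type; };

struct outer {
	struct inner	inner;	/* embedded, like iommu_domain inside viommu_domain */
	unsigned int	id;
};

#define to_outer(p) container_of(p, struct outer, inner)

int main(void)
{
	struct outer o = { .inner = { .type = 1 }, .id = 42 };
	struct inner *ip = &o.inner;	/* core code only sees the embedded member */

	/* Recover the containing structure from the embedded member's address. */
	printf("id = %u\n", to_outer(ip)->id);
	return 0;
}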
@@ -653,65 +654,45 @@ static void viommu_event_handler(struct virtqueue *vq)
 
 /* IOMMU API */
 
-static struct iommu_domain *viommu_domain_alloc(unsigned type)
+static struct iommu_domain *viommu_domain_alloc_paging(struct device *dev)
 {
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct viommu_dev *viommu = vdev->viommu;
+	unsigned long viommu_page_size;
	struct viommu_domain *vdomain;
-
-	if (type != IOMMU_DOMAIN_UNMANAGED &&
-	    type != IOMMU_DOMAIN_DMA &&
-	    type != IOMMU_DOMAIN_IDENTITY)
-		return NULL;
-
-	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
-	if (!vdomain)
-		return NULL;
-
-	mutex_init(&vdomain->mutex);
-	spin_lock_init(&vdomain->mappings_lock);
-	vdomain->mappings = RB_ROOT_CACHED;
-
-	return &vdomain->domain;
-}
-
-static int viommu_domain_finalise(struct viommu_endpoint *vdev,
-				  struct iommu_domain *domain)
-{
	int ret;
-	unsigned long viommu_page_size;
-	struct viommu_dev *viommu = vdev->viommu;
-	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
	}
 
+	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
+	if (!vdomain)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&vdomain->mappings_lock);
+	vdomain->mappings = RB_ROOT_CACHED;
+
	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		kfree(vdomain);
+		return ERR_PTR(ret);
+	}
 
-	vdomain->id		= (unsigned int)ret;
+	vdomain->id = (unsigned int)ret;
 
-	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
-	domain->geometry	= viommu->geometry;
+	vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
+	vdomain->domain.geometry = viommu->geometry;
 
-	vdomain->map_flags	= viommu->map_flags;
-	vdomain->viommu		= viommu;
+	vdomain->map_flags = viommu->map_flags;
+	vdomain->viommu = viommu;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		ret = viommu_domain_map_identity(vdev, vdomain);
-		if (ret) {
-			ida_free(&viommu->domain_ids, vdomain->id);
-			vdomain->viommu = NULL;
-			return ret;
-		}
-	}
-
-	return 0;
+	return &vdomain->domain;
 }
 
 static void viommu_domain_free(struct iommu_domain *domain)
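Note: the reworked allocation path reports failures through error pointers (ERR_PTR()/IS_ERR()/PTR_ERR() from <linux/err.h>) instead of returning NULL, which is what lets viommu_domain_alloc_identity() in the next hunk propagate the returned domain directly. A minimal, self-contained sketch of that convention follows; the simplified helpers and the alloc_thing() example are illustrative stand-ins, not the kernel definitions.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the <linux/err.h> helpers. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct thing { int id; };

/* Allocation that encodes an errno in the returned pointer on failure. */
static struct thing *alloc_thing(int fail)
{
	struct thing *t;

	if (fail)
		return ERR_PTR(-ENODEV);	/* precondition not met */

	t = calloc(1, sizeof(*t));
	if (!t)
		return ERR_PTR(-ENOMEM);	/* allocation failure */

	t->id = 1;
	return t;
}

int main(void)
{
	struct thing *t = alloc_thing(0);

	if (IS_ERR(t)) {
		printf("failed: %ld\n", PTR_ERR(t));
		return 1;
	}
	printf("id = %d\n", t->id);
	free(t);
	return 0;
}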
@@ -727,27 +708,37 @@ static void viommu_domain_free(struct iommu_domain *domain)
	kfree(vdomain);
 }
 
+static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
+{
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct iommu_domain *domain;
+	int ret;
+
+	if (virtio_has_feature(vdev->viommu->vdev,
+			       VIRTIO_IOMMU_F_BYPASS_CONFIG))
+		return &viommu_identity_domain.domain;
+
+	domain = viommu_domain_alloc_paging(dev);
+	if (IS_ERR(domain))
+		return domain;
+
+	ret = viommu_domain_map_identity(vdev, to_viommu_domain(domain));
+	if (ret) {
+		viommu_domain_free(domain);
+		return ERR_PTR(ret);
+	}
+	return domain;
+}
+
 static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
-	mutex_lock(&vdomain->mutex);
-	if (!vdomain->viommu) {
-		/*
-		 * Properly initialize the domain now that we know which viommu
-		 * owns it.
-		 */
-		ret = viommu_domain_finalise(vdev, domain);
-	} else if (vdomain->viommu != vdev->viommu) {
-		ret = -EINVAL;
-	}
-	mutex_unlock(&vdomain->mutex);
-
-	if (ret)
-		return ret;
+	if (vdomain->viommu != vdev->viommu)
+		return -EINVAL;
 
	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
@@ -1096,9 +1087,9 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 }
 
 static struct iommu_ops viommu_ops = {
-	.identity_domain	= &viommu_identity_domain.domain,
	.capable		= viommu_capable,
-	.domain_alloc		= viommu_domain_alloc,
+	.domain_alloc_identity	= viommu_domain_alloc_identity,
+	.domain_alloc_paging	= viommu_domain_alloc_paging,
	.probe_device		= viommu_probe_device,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
@@ -1224,12 +1215,6 @@ static int viommu_probe(struct virtio_device *vdev)
	if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
		viommu->identity_domain_id = viommu->first_domain;
		viommu->first_domain++;
-	} else {
-		/*
-		 * Assume the VMM is sensible and it either supports bypass on
-		 * all instances or no instances.
-		 */
-		viommu_ops.identity_domain = NULL;
	}
 
	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;