@@ -389,6 +389,50 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
389
389
return 0 ;
390
390
}
391
391
392
+ int intel_pasid_replace_first_level (struct intel_iommu * iommu ,
393
+ struct device * dev , pgd_t * pgd ,
394
+ u32 pasid , u16 did , u16 old_did ,
395
+ int flags )
396
+ {
397
+ struct pasid_entry * pte , new_pte ;
398
+
399
+ if (!ecap_flts (iommu -> ecap )) {
400
+ pr_err ("No first level translation support on %s\n" ,
401
+ iommu -> name );
402
+ return - EINVAL ;
403
+ }
404
+
405
+ if ((flags & PASID_FLAG_FL5LP ) && !cap_fl5lp_support (iommu -> cap )) {
406
+ pr_err ("No 5-level paging support for first-level on %s\n" ,
407
+ iommu -> name );
408
+ return - EINVAL ;
409
+ }
410
+
411
+ pasid_pte_config_first_level (iommu , & new_pte , pgd , did , flags );
412
+
413
+ spin_lock (& iommu -> lock );
414
+ pte = intel_pasid_get_entry (dev , pasid );
415
+ if (!pte ) {
416
+ spin_unlock (& iommu -> lock );
417
+ return - ENODEV ;
418
+ }
419
+
420
+ if (!pasid_pte_is_present (pte )) {
421
+ spin_unlock (& iommu -> lock );
422
+ return - EINVAL ;
423
+ }
424
+
425
+ WARN_ON (old_did != pasid_get_domain_id (pte ));
426
+
427
+ * pte = new_pte ;
428
+ spin_unlock (& iommu -> lock );
429
+
430
+ intel_pasid_flush_present (iommu , dev , pasid , old_did , pte );
431
+ intel_iommu_drain_pasid_prq (dev , pasid );
432
+
433
+ return 0 ;
434
+ }
435
+
392
436
/*
393
437
* Set up the scalable mode pasid entry for second only translation type.
394
438
*/
@@ -456,6 +500,57 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
456
500
return 0 ;
457
501
}
458
502
503
/*
 * Replace the present second-stage (second-level) PASID table entry for
 * @pasid on @dev with a new one built from @domain's page table, then
 * flush the caches tagged with @old_did.
 *
 * Returns 0 on success, -ENODEV if no PASID entry exists for @dev,
 * -EINVAL if second-stage translation is unsupported or the entry is
 * not currently present (replace requires a present entry).
 */
int intel_pasid_replace_second_level(struct intel_iommu *iommu,
				     struct dmar_domain *domain,
				     struct device *dev, u16 old_did,
				     u32 pasid)
{
	struct pasid_entry *pte, new_pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	/* Capture the domain's page-table root and its DID on this IOMMU. */
	pgd = domain->pgd;
	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	/* Build the replacement entry outside the lock. */
	pasid_pte_config_second_level(iommu, &new_pte, pgd_val,
				      domain->agaw, did,
				      domain->dirty_tracking);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	/* Only a present entry may be replaced. */
	if (!pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EINVAL;
	}

	/* Caller-supplied old DID should match what the entry holds. */
	WARN_ON(old_did != pasid_get_domain_id(pte));

	*pte = new_pte;
	spin_unlock(&iommu->lock);

	/* Flush stale state keyed by the old DID, then drain page requests. */
	intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
	intel_iommu_drain_pasid_prq(dev, pasid);

	return 0;
}
553
+
459
554
/*
460
555
* Set up dirty tracking on a second only or nested translation type.
461
556
*/
@@ -568,6 +663,38 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
568
663
return 0 ;
569
664
}
570
665
666
+ int intel_pasid_replace_pass_through (struct intel_iommu * iommu ,
667
+ struct device * dev , u16 old_did ,
668
+ u32 pasid )
669
+ {
670
+ struct pasid_entry * pte , new_pte ;
671
+ u16 did = FLPT_DEFAULT_DID ;
672
+
673
+ pasid_pte_config_pass_through (iommu , & new_pte , did );
674
+
675
+ spin_lock (& iommu -> lock );
676
+ pte = intel_pasid_get_entry (dev , pasid );
677
+ if (!pte ) {
678
+ spin_unlock (& iommu -> lock );
679
+ return - ENODEV ;
680
+ }
681
+
682
+ if (!pasid_pte_is_present (pte )) {
683
+ spin_unlock (& iommu -> lock );
684
+ return - EINVAL ;
685
+ }
686
+
687
+ WARN_ON (old_did != pasid_get_domain_id (pte ));
688
+
689
+ * pte = new_pte ;
690
+ spin_unlock (& iommu -> lock );
691
+
692
+ intel_pasid_flush_present (iommu , dev , pasid , old_did , pte );
693
+ intel_iommu_drain_pasid_prq (dev , pasid );
694
+
695
+ return 0 ;
696
+ }
697
+
571
698
/*
572
699
* Set the page snoop control for a pasid entry which has been set up.
573
700
*/
@@ -698,6 +825,69 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
698
825
return 0 ;
699
826
}
700
827
828
/*
 * Replace the present nested-translation PASID table entry for @pasid
 * on @dev with one built from @domain's stage-1 config and its parent
 * stage-2 domain, then flush the caches tagged with @old_did.
 *
 * Returns 0 on success, -ENODEV if no PASID entry exists for @dev,
 * -EINVAL if the stage-1 config requires capabilities this IOMMU lacks
 * or the entry is not currently present (replace requires a present
 * entry).
 */
int intel_pasid_replace_nested(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid,
			       u16 old_did, struct dmar_domain *domain)
{
	struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg;
	struct dmar_domain *s2_domain = domain->s2_domain;
	u16 did = domain_id_iommu(domain, iommu);
	struct pasid_entry *pte, new_pte;

	/* Address width should match the address width supported by hardware */
	switch (s1_cfg->addr_width) {
	case ADDR_WIDTH_4LEVEL:
		break;
	case ADDR_WIDTH_5LEVEL:
		if (!cap_fl5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n",
				    s1_cfg->addr_width);
		return -EINVAL;
	}

	/* Supervisor requests need hardware SRS support. */
	if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
		pr_err_ratelimited("No supervisor request support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	/* Extended access flag needs hardware EAFS support. */
	if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
		pr_err_ratelimited("No extended access flag support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	/* Build the replacement nested entry outside the lock. */
	pasid_pte_config_nestd(iommu, &new_pte, s1_cfg, s2_domain, did);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	/* Only a present entry may be replaced. */
	if (!pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EINVAL;
	}

	/* Caller-supplied old DID should match what the entry holds. */
	WARN_ON(old_did != pasid_get_domain_id(pte));

	*pte = new_pte;
	spin_unlock(&iommu->lock);

	/* Flush stale state keyed by the old DID, then drain page requests. */
	intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
	intel_iommu_drain_pasid_prq(dev, pasid);

	return 0;
}
890
+
701
891
/*
702
892
* Interfaces to setup or teardown a pasid table to the scalable-mode
703
893
* context table entry:
0 commit comments