@@ -67,6 +67,20 @@ static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
67
67
* entry |= (pfaa & ZPCI_PTE_ADDR_MASK );
68
68
}
69
69
70
+ static inline void set_rf_rso (unsigned long * entry , phys_addr_t rso )
71
+ {
72
+ * entry &= ZPCI_RTE_FLAG_MASK ;
73
+ * entry |= (rso & ZPCI_RTE_ADDR_MASK );
74
+ * entry |= ZPCI_TABLE_TYPE_RFX ;
75
+ }
76
+
77
+ static inline void set_rs_rto (unsigned long * entry , phys_addr_t rto )
78
+ {
79
+ * entry &= ZPCI_RTE_FLAG_MASK ;
80
+ * entry |= (rto & ZPCI_RTE_ADDR_MASK );
81
+ * entry |= ZPCI_TABLE_TYPE_RSX ;
82
+ }
83
+
70
84
static inline void set_rt_sto (unsigned long * entry , phys_addr_t sto )
71
85
{
72
86
* entry &= ZPCI_RTE_FLAG_MASK ;
@@ -81,6 +95,22 @@ static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
81
95
* entry |= ZPCI_TABLE_TYPE_SX ;
82
96
}
83
97
98
+ static inline void validate_rf_entry (unsigned long * entry )
99
+ {
100
+ * entry &= ~ZPCI_TABLE_VALID_MASK ;
101
+ * entry &= ~ZPCI_TABLE_OFFSET_MASK ;
102
+ * entry |= ZPCI_TABLE_VALID ;
103
+ * entry |= ZPCI_TABLE_LEN_RFX ;
104
+ }
105
+
106
+ static inline void validate_rs_entry (unsigned long * entry )
107
+ {
108
+ * entry &= ~ZPCI_TABLE_VALID_MASK ;
109
+ * entry &= ~ZPCI_TABLE_OFFSET_MASK ;
110
+ * entry |= ZPCI_TABLE_VALID ;
111
+ * entry |= ZPCI_TABLE_LEN_RSX ;
112
+ }
113
+
84
114
static inline void validate_rt_entry (unsigned long * entry )
85
115
{
86
116
* entry &= ~ZPCI_TABLE_VALID_MASK ;
@@ -286,6 +316,70 @@ static unsigned long *dma_alloc_page_table(gfp_t gfp)
286
316
return table ;
287
317
}
288
318
319
/*
 * Walk one level of the region-second table @rso for @dma_addr, returning
 * the region-third-table origin it points to. If the entry is invalid, a
 * new region-third table is allocated and installed locklessly via cmpxchg;
 * on a lost race the local allocation is freed and the winner's table is
 * used. Returns NULL only on allocation failure.
 */
static unsigned long *dma_walk_rs_table(unsigned long *rso,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rsx = calc_rsx(dma_addr);
	unsigned long old_rse, rse;
	unsigned long *rsep, *rto;

	rsep = &rso[rsx];
	/* Snapshot the entry once; concurrent walkers may race to fill it */
	rse = READ_ONCE(*rsep);
	if (reg_entry_isvalid(rse)) {
		rto = get_rs_rto(rse);
	} else {
		rto = dma_alloc_cpu_table(gfp);
		if (!rto)
			return NULL;

		/* Build the new entry in the local copy before publishing */
		set_rs_rto(&rse, virt_to_phys(rto));
		validate_rs_entry(&rse);
		entry_clr_protected(&rse);

		/* Publish only if the slot is still invalid */
		old_rse = cmpxchg(rsep, ZPCI_TABLE_INVALID, rse);
		if (old_rse != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rto);
			rto = get_rs_rto(old_rse);
		}
	}
	return rto;
}
348
+
349
/*
 * Walk the region-first table @rfo for @dma_addr down to a region-third-table
 * origin. The region-first entry is populated on demand with a freshly
 * allocated region-second table, installed locklessly via cmpxchg (losing
 * racers free their allocation and adopt the winner's table), then the walk
 * continues through dma_walk_rs_table(). Returns NULL on allocation failure.
 */
static unsigned long *dma_walk_rf_table(unsigned long *rfo,
					dma_addr_t dma_addr, gfp_t gfp)
{
	unsigned int rfx = calc_rfx(dma_addr);
	unsigned long old_rfe, rfe;
	unsigned long *rfep, *rso;

	rfep = &rfo[rfx];
	/* Snapshot the entry once; concurrent walkers may race to fill it */
	rfe = READ_ONCE(*rfep);
	if (reg_entry_isvalid(rfe)) {
		rso = get_rf_rso(rfe);
	} else {
		rso = dma_alloc_cpu_table(gfp);
		if (!rso)
			return NULL;

		/* Build the new entry in the local copy before publishing */
		set_rf_rso(&rfe, virt_to_phys(rso));
		validate_rf_entry(&rfe);
		entry_clr_protected(&rfe);

		/* Publish only if the slot is still invalid */
		old_rfe = cmpxchg(rfep, ZPCI_TABLE_INVALID, rfe);
		if (old_rfe != ZPCI_TABLE_INVALID) {
			/* Someone else was faster, use theirs */
			dma_free_cpu_table(rso);
			rso = get_rf_rso(old_rfe);
		}
	}

	if (!rso)
		return NULL;

	return dma_walk_rs_table(rso, dma_addr, gfp);
}
382
+
289
383
static unsigned long * dma_get_seg_table_origin (unsigned long * rtep , gfp_t gfp )
290
384
{
291
385
unsigned long old_rte , rte ;
@@ -339,11 +433,31 @@ static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
339
433
return pto ;
340
434
}
341
435
342
- static unsigned long * dma_walk_cpu_trans (unsigned long * rto , dma_addr_t dma_addr , gfp_t gfp )
436
+ static unsigned long * dma_walk_region_tables (struct s390_domain * domain ,
437
+ dma_addr_t dma_addr , gfp_t gfp )
343
438
{
344
- unsigned long * sto , * pto ;
439
+ switch (domain -> origin_type ) {
440
+ case ZPCI_TABLE_TYPE_RFX :
441
+ return dma_walk_rf_table (domain -> dma_table , dma_addr , gfp );
442
+ case ZPCI_TABLE_TYPE_RSX :
443
+ return dma_walk_rs_table (domain -> dma_table , dma_addr , gfp );
444
+ case ZPCI_TABLE_TYPE_RTX :
445
+ return domain -> dma_table ;
446
+ default :
447
+ return NULL ;
448
+ }
449
+ }
450
+
451
+ static unsigned long * dma_walk_cpu_trans (struct s390_domain * domain ,
452
+ dma_addr_t dma_addr , gfp_t gfp )
453
+ {
454
+ unsigned long * rto , * sto , * pto ;
345
455
unsigned int rtx , sx , px ;
346
456
457
+ rto = dma_walk_region_tables (domain , dma_addr , gfp );
458
+ if (!rto )
459
+ return NULL ;
460
+
347
461
rtx = calc_rtx (dma_addr );
348
462
sto = dma_get_seg_table_origin (& rto [rtx ], gfp );
349
463
if (!sto )
@@ -690,8 +804,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
690
804
int rc ;
691
805
692
806
for (i = 0 ; i < nr_pages ; i ++ ) {
693
- entry = dma_walk_cpu_trans (s390_domain -> dma_table , dma_addr ,
694
- gfp );
807
+ entry = dma_walk_cpu_trans (s390_domain , dma_addr , gfp );
695
808
if (unlikely (!entry )) {
696
809
rc = - ENOMEM ;
697
810
goto undo_cpu_trans ;
@@ -706,8 +819,7 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
706
819
undo_cpu_trans :
707
820
while (i -- > 0 ) {
708
821
dma_addr -= PAGE_SIZE ;
709
- entry = dma_walk_cpu_trans (s390_domain -> dma_table ,
710
- dma_addr , gfp );
822
+ entry = dma_walk_cpu_trans (s390_domain , dma_addr , gfp );
711
823
if (!entry )
712
824
break ;
713
825
dma_update_cpu_trans (entry , 0 , ZPCI_PTE_INVALID );
@@ -724,8 +836,7 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
724
836
int rc = 0 ;
725
837
726
838
for (i = 0 ; i < nr_pages ; i ++ ) {
727
- entry = dma_walk_cpu_trans (s390_domain -> dma_table , dma_addr ,
728
- GFP_ATOMIC );
839
+ entry = dma_walk_cpu_trans (s390_domain , dma_addr , GFP_ATOMIC );
729
840
if (unlikely (!entry )) {
730
841
rc = - EINVAL ;
731
842
break ;
0 commit comments