@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
 	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
 }
 
+static bool rdists_support_shareable(void)
+{
+	return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;
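The new rdists_support_shareable() helper gives every call site a single predicate for the RDIST_FLAGS_FORCE_NON_SHAREABLE quirk instead of open-coded flag tests. A minimal sketch of how a platform quirk would trip it; the flag and gic_rdists are from the driver, but the handler below is an illustrative name, not a real one:

/* Sketch only: a quirk handler for a platform whose redistributors
 * mishandle shareable attributes sets the flag once at probe time;
 * every later rdists_support_shareable() call is then a cheap
 * single-flag test. */
static bool __maybe_unused its_enable_quirk_example(void *data)
{
	gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
	return true;
}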
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
 			break;
 	}
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
-	val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
-			  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
-	val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
-			  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+	if (rdists_support_shareable()) {
+		val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+				  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+		val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+				  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+	}
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
 
 	return val;
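The FIELD_GET()/FIELD_PREP() pair from <linux/bitfield.h> does the re-homing here: FIELD_GET() extracts the field selected by a mask, and FIELD_PREP() shifts a value into the field selected by another mask, which is how the ITS BASER shareability and cacheability attributes get copied into the corresponding VPROPBASER fields. A self-contained illustration with made-up masks:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_SRC_MASK	GENMASK_ULL(11, 10)	/* made-up source field */
#define EXAMPLE_DST_MASK	GENMASK_ULL(9, 8)	/* made-up destination field */

/* Move a two-bit field from one register layout to another: exactly
 * the shape of the shareability/cacheability copy in the hunk above. */
static u64 copy_example_field(u64 src)
{
	return FIELD_PREP(EXAMPLE_DST_MASK, FIELD_GET(EXAMPLE_SRC_MASK, src));
}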
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
 	WARN_ON(!IS_ALIGNED(pa, psz));
 
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	val |= GICR_VPROPBASER_4_1_Z;
 	val |= GICR_VPROPBASER_4_1_VALID;
 
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
 
-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
 
 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
 
-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
 
 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
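Both the PROPBASER and PENDBASER paths use the same write/read-back dance: program the register, read back what actually stuck, and treat shareability as unavailable when the quirk is active. A condensed sketch of the check; shareability_stuck() is an illustrative name, not a driver helper:

/* Did the shareability field we wrote survive the read-back? With the
 * quirk active we clear it first, so the caller always sees "did not
 * stick" and downgrades to non-shareable attributes. */
static bool shareability_stuck(u64 want, u64 got, u64 sh_mask)
{
	if (!rdists_support_shareable())
		got &= ~sh_mask;

	return !((want ^ got) & sh_mask);
}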
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
 				bool force)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-	int from, cpu = cpumask_first(mask_val);
+	struct cpumask common, *table_mask;
 	unsigned long flags;
+	int from, cpu;
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
 	from = vpe_to_cpuid_lock(vpe, &flags);
-	if (from == cpu)
-		goto out;
-
-	vpe->col_idx = cpu;
+	table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
 
 	/*
-	 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
-	 * is sharing its VPE table with the current one.
+	 * If we are offered another CPU in the same GICv4.1 ITS
+	 * affinity, pick this one. Otherwise, any CPU will do.
 	 */
-	if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
-	    cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+	if (table_mask && cpumask_and(&common, mask_val, table_mask))
+		cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (from == cpu)
 		goto out;
 
+	vpe->col_idx = cpu;
+
 	its_send_vmovp(vpe);
 	its_vpe_db_proxy_move(vpe, from, cpu);
 
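The new selection rule is easier to read in isolation. A sketch under an assumed helper name (pick_target_cpu() is not a real driver function): stay on the current CPU when it is both requested and within the same VPE-table affinity group, otherwise take any requested CPU from that group, and only then fall back to an arbitrary requested CPU; the first choice skips the expensive VMOVP entirely, and the second keeps the move within redistributors that share a VPE table.

/* Illustrative refactoring of the selection above, not a driver API.
 * 'table_mask' is the set of CPUs whose redistributors share a VPE
 * table with the source CPU 'from'. */
static int pick_target_cpu(const struct cpumask *mask_val,
			   const struct cpumask *table_mask, int from)
{
	struct cpumask common;

	/* Requested CPUs that share the source's VPE table, if any */
	if (table_mask && cpumask_and(&common, mask_val, table_mask))
		return cpumask_test_cpu(from, &common) ? from
						       : cpumask_first(&common);

	/* No overlap: any requested CPU will do */
	return cpumask_first(mask_val);
}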
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
 		GENMASK_ULL(51, 12);
 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 	val  = virt_to_phys(page_address(vpe->vpt_page)) &
 		GENMASK_ULL(51, 16);
-	val |= GICR_VPENDBASER_RaWaWb;
-	val |= GICR_VPENDBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPENDBASER_RaWaWb;
+		val |= GICR_VPENDBASER_InnerShareable;
+	}
 	/*
 	 * There is no good way of finding out if the pending table is
 	 * empty as we can race against the doorbell interrupt very
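its_vpe_schedule() runs every time a vPE is scheduled on a CPU, so this is a hot path; the quirk check stays cheap because it is a single flag test. One could equally gather the suppressed attribute bits in a helper, sketched below with an illustrative name (the driver open-codes the ORs as shown above):

/* Sketch only: the VPROPBASER attribute bits that the quirk forces off. */
static u64 vprop_attrs(void)
{
	if (!rdists_support_shareable())
		return 0;	/* non-shareable, non-cacheable */

	return GICR_VPROPBASER_RaWb | GICR_VPROPBASER_InnerShareable;
}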
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
 	u32 ctlr;
 	int err;
 
+	its_enable_quirks(its);
+
 	if (is_v4(its)) {
 		if (!(its->typer & GITS_TYPER_VMOVP)) {
 			err = its_compute_its_list_map(its);
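Moving its_enable_quirks() into its_probe_one() (and dropping it from the DT path in the next hunk) ensures quirk flags such as RDIST_FLAGS_FORCE_NON_SHAREABLE are set before any table programming consults rdists_support_shareable(), and gives every probe path the same ordering rather than relying on each caller to remember it. The resulting shape, as a condensed sketch with a hypothetical name for the remainder of the function:

/* Condensed sketch of the ordering, not the full function:
 * quirks first, then everything that may depend on them. */
static int __init probe_order_sketch(struct its_node *its)
{
	its_enable_quirks(its);		/* must precede table setup */
	return its_probe_one_tail(its);	/* illustrative name for the rest */
}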
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
 	if (!its)
 		return -ENOMEM;
 
-	its_enable_quirks(its);
 	err = its_probe_one(its);
 	if (err) {
 		its_node_destroy(its);