@@ -656,8 +656,9 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 	return 0;
 }
 
-static u64 gic_mpidr_to_affinity(unsigned long mpidr)
+static u64 gic_cpu_to_affinity(int cpu)
 {
+	u64 mpidr = cpu_logical_map(cpu);
 	u64 aff;
 
 	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
@@ -914,7 +915,7 @@ static void __init gic_dist_init(void)
 	 * Set all global interrupts to the boot CPU only. ARE must be
 	 * enabled.
 	 */
-	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+	affinity = gic_cpu_to_affinity(smp_processor_id());
 	for (i = 32; i < GIC_LINE_NR; i++)
 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
 
@@ -963,14 +964,16 @@ static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
 
 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
 {
-	unsigned long mpidr = cpu_logical_map(smp_processor_id());
+	unsigned long mpidr;
 	u64 typer;
 	u32 aff;
 
 	/*
 	 * Convert affinity to a 32bit value that can be matched to
 	 * GICR_TYPER bits [63:32].
 	 */
+	mpidr = gic_cpu_to_affinity(smp_processor_id());
+
 	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
 	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
@@ -1084,7 +1087,7 @@ static inline bool gic_dist_security_disabled(void)
 static void gic_cpu_sys_reg_init(void)
 {
 	int i, cpu = smp_processor_id();
-	u64 mpidr = cpu_logical_map(cpu);
+	u64 mpidr = gic_cpu_to_affinity(cpu);
 	u64 need_rss = MPIDR_RS(mpidr);
 	bool group0;
 	u32 pribits;
@@ -1183,11 +1186,11 @@ static void gic_cpu_sys_reg_init(void)
 	for_each_online_cpu(i) {
 		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
 
-		need_rss |= MPIDR_RS(cpu_logical_map(i));
+		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
 		if (need_rss && (!have_rss))
 			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
 				cpu, (unsigned long)mpidr,
-				i, (unsigned long)cpu_logical_map(i));
+				i, (unsigned long)gic_cpu_to_affinity(i));
 	}
 
 	/**
@@ -1263,9 +1266,11 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
 {
 	int next_cpu, cpu = *base_cpu;
-	unsigned long mpidr = cpu_logical_map(cpu);
+	unsigned long mpidr;
 	u16 tlist = 0;
 
+	mpidr = gic_cpu_to_affinity(cpu);
+
 	while (cpu < nr_cpu_ids) {
 		tlist |= 1 << (mpidr & 0xf);
 
@@ -1274,7 +1279,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 			goto out;
 		cpu = next_cpu;
 
-		mpidr = cpu_logical_map(cpu);
+		mpidr = gic_cpu_to_affinity(cpu);
 
 		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
 			cpu--;
@@ -1319,7 +1324,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 	dsb(ishst);
 
 	for_each_cpu(cpu, mask) {
-		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
+		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
 		u16 tlist;
 
 		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1377,7 +1382,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 
 	offset = convert_offset_index(d, GICD_IROUTER, &index);
 	reg = gic_dist_base(d) + offset + (index * 8);
-	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+	val = gic_cpu_to_affinity(cpu);
 
 	gic_write_irouter(val, reg);
 
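For reference, this is how the new helper reads once the first hunk is applied. Only the opening lines of its body are visible in the diff context above, so the remaining affinity levels and the return statement are reconstructed from the old gic_mpidr_to_affinity() body and should be read as a sketch rather than a verbatim copy of the patched file:

static u64 gic_cpu_to_affinity(int cpu)
{
	/* Look up the physical MPIDR for this logical CPU number. */
	u64 mpidr = cpu_logical_map(cpu);
	u64 aff;

	/* Pack Aff3..Aff0 into one value, matching the GICD_IROUTER layout used by the callers above. */
	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

The shape of the change is the same at every call site: callers now pass a logical CPU number and the helper performs the cpu_logical_map() lookup internally, so the cpu_logical_map() calls previously scattered through the hunks above disappear.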