@@ -1309,25 +1309,35 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 			 struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
+	const struct io_pgtable_cfg *pgtbl_cfg =
+		&io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg;
+	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr =
+		&pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
 	memset(target, 0, sizeof(*target));
 
 	target->data[0] = cpu_to_le64(
-		cd->tcr |
+		FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
+		FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
 #ifdef __BIG_ENDIAN
 		CTXDESC_CD_0_ENDI |
 #endif
+		CTXDESC_CD_0_TCR_EPD1 |
 		CTXDESC_CD_0_V |
+		FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
 		CTXDESC_CD_0_AA64 |
 		(master->stall_enabled ? CTXDESC_CD_0_S : 0) |
 		CTXDESC_CD_0_R |
 		CTXDESC_CD_0_A |
 		CTXDESC_CD_0_ASET |
 		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
 		);
-
-	target->data[1] = cpu_to_le64(cd->ttbr & CTXDESC_CD_1_TTB0_MASK);
-	target->data[3] = cpu_to_le64(cd->mair);
+	target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+				      CTXDESC_CD_1_TTB0_MASK);
+	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
 }
 
 void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
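For reference, the CD word 0 above is assembled with FIELD_PREP(), which shifts a value into the bit range described by a GENMASK field mask (the real CTXDESC_CD_0_TCR_* masks live in arm-smmu-v3.h). The stand-alone sketch below uses simplified re-implementations of those macros and purely illustrative DEMO_* field positions, not the actual CD layout, just to show what the packing amounts to.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP();
 * the real macros come from <linux/bits.h> and <linux/bitfield.h>. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

/* Illustrative field positions only -- not the real CD layout. */
#define DEMO_TCR_T0SZ	GENMASK_ULL(5, 0)
#define DEMO_TCR_TG0	GENMASK_ULL(7, 6)
#define DEMO_TCR_SH0	GENMASK_ULL(13, 12)

int main(void)
{
	/* Pack a few hypothetical TCR values into one 64-bit word, the
	 * same way arm_smmu_make_s1_cd() builds target->data[0]. */
	uint64_t word = FIELD_PREP(DEMO_TCR_T0SZ, 16) |
			FIELD_PREP(DEMO_TCR_TG0, 2) |
			FIELD_PREP(DEMO_TCR_SH0, 3);

	printf("cd word 0 = 0x%016llx\n", (unsigned long long)word);
	return 0;
}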
@@ -2284,45 +2294,25 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
-				       struct arm_smmu_domain *smmu_domain,
-				       struct io_pgtable_cfg *pgtbl_cfg)
+				       struct arm_smmu_domain *smmu_domain)
 {
 	int ret;
 	u32 asid;
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
-	typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
 	refcount_set(&cd->refs, 1);
 
 	/* Prevent SVA from modifying the ASID until it is written to the CD */
 	mutex_lock(&arm_smmu_asid_lock);
 	ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
-	if (ret)
-		goto out_unlock;
-
 	cd->asid	= (u16)asid;
-	cd->ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
-	cd->tcr		= FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
-			  FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
-			  FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
-			  FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
-			  FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
-			  FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
-			  CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-	cd->mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
-	mutex_unlock(&arm_smmu_asid_lock);
-	return 0;
-
-out_unlock:
 	mutex_unlock(&arm_smmu_asid_lock);
 	return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
-				       struct arm_smmu_domain *smmu_domain,
-				       struct io_pgtable_cfg *pgtbl_cfg)
+				       struct arm_smmu_domain *smmu_domain)
 {
 	int vmid;
 	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
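arm_smmu_domain_finalise_s1() can drop its io_pgtable_cfg argument because arm_smmu_make_s1_cd() (first hunk) now reaches the same data through smmu_domain->pgtbl_ops: io_pgtable_ops_to_pgtable() in include/linux/io-pgtable.h is a container_of() wrapper that maps the ops pointer back to its enclosing struct io_pgtable, whose cfg member carries the TCR/TTBR/MAIR values. A minimal user-space sketch of that pattern follows, with toy stand-in types rather than the real io-pgtable structures.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for struct io_pgtable_ops / io_pgtable_cfg / io_pgtable. */
struct demo_pgtable_ops {
	int (*map)(void);
};

struct demo_pgtable_cfg {
	unsigned long long ttbr;
	unsigned long long mair;
};

struct demo_pgtable {
	struct demo_pgtable_ops ops;
	struct demo_pgtable_cfg cfg;
};

/* Same idea as io_pgtable_ops_to_pgtable(): recover the enclosing
 * structure (and hence its cfg) from the ops pointer alone. */
#define demo_ops_to_pgtable(x) container_of(x, struct demo_pgtable, ops)

int main(void)
{
	struct demo_pgtable pgtbl = {
		.cfg = { .ttbr = 0x40000000ULL, .mair = 0xffULL },
	};
	struct demo_pgtable_ops *ops = &pgtbl.ops;

	/* What arm_smmu_make_s1_cd() does with smmu_domain->pgtbl_ops. */
	const struct demo_pgtable_cfg *cfg = &demo_ops_to_pgtable(ops)->cfg;

	printf("ttbr=0x%llx mair=0x%llx\n", cfg->ttbr, cfg->mair);
	return 0;
}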
@@ -2346,8 +2336,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
-				 struct arm_smmu_domain *smmu_domain,
-				 struct io_pgtable_cfg *pgtbl_cfg);
+				 struct arm_smmu_domain *smmu_domain);
 
 	/* Restrict the stage to what we can actually support */
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2390,7 +2379,7 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
 	smmu_domain->domain.geometry.force_aperture = true;
 
-	ret = finalise_stage_fn(smmu, smmu_domain, &pgtbl_cfg);
+	ret = finalise_stage_fn(smmu, smmu_domain);
 	if (ret < 0) {
 		free_io_pgtable_ops(pgtbl_ops);
 		return ret;
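The last two hunks only propagate the narrowed signature into the dispatch site: arm_smmu_domain_finalise() picks a per-stage finaliser and now calls it with just smmu and smmu_domain. The schematic below shows that dispatch shape with placeholder types and stage constants standing in for the real driver ones.

#include <stdio.h>

/* Placeholder types standing in for the real driver structures. */
struct demo_smmu { int features; };
struct demo_domain { int stage; };

enum { DEMO_STAGE_S1, DEMO_STAGE_S2 };

static int demo_finalise_s1(struct demo_smmu *smmu, struct demo_domain *d)
{
	(void)smmu; (void)d;
	printf("finalise stage 1\n");
	return 0;
}

static int demo_finalise_s2(struct demo_smmu *smmu, struct demo_domain *d)
{
	(void)smmu; (void)d;
	printf("finalise stage 2\n");
	return 0;
}

/* Same shape as arm_smmu_domain_finalise() after the patch: the stage
 * handler is chosen once and called without an io_pgtable_cfg pointer. */
static int demo_domain_finalise(struct demo_smmu *smmu, struct demo_domain *d)
{
	int (*finalise_stage_fn)(struct demo_smmu *smmu,
				 struct demo_domain *d);

	finalise_stage_fn = (d->stage == DEMO_STAGE_S1) ? demo_finalise_s1
							: demo_finalise_s2;
	return finalise_stage_fn(smmu, d);
}

int main(void)
{
	struct demo_smmu smmu = { 0 };
	struct demo_domain domain = { .stage = DEMO_STAGE_S1 };

	return demo_domain_finalise(&smmu, &domain);
}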