@@ -971,14 +971,12 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
 	arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 }
 
-static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+static void arm_smmu_sync_cd(struct arm_smmu_master *master,
 			     int ssid, bool leaf)
 {
 	size_t i;
-	unsigned long flags;
-	struct arm_smmu_master *master;
 	struct arm_smmu_cmdq_batch cmds;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_device *smmu = master->smmu;
 	struct arm_smmu_cmdq_ent cmd = {
 		.opcode	= CMDQ_OP_CFGI_CD,
 		.cfgi	= {
@@ -988,15 +986,10 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
 	};
 
 	cmds.num = 0;
-
-	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-		for (i = 0; i < master->num_streams; i++) {
-			cmd.cfgi.sid = master->streams[i].id;
-			arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
-		}
+	for (i = 0; i < master->num_streams; i++) {
+		cmd.cfgi.sid = master->streams[i].id;
+		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
 	}
-	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
@@ -1026,14 +1019,13 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
 	WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
-				   u32 ssid)
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
 {
 	__le64 *l1ptr;
 	unsigned int idx;
 	struct arm_smmu_l1_ctx_desc *l1_desc;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->cd_table;
+	struct arm_smmu_device *smmu = master->smmu;
+	struct arm_smmu_ctx_desc_cfg *cdcfg = &master->domain->cd_table;
 
 	if (cdcfg->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
 		return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
@@ -1047,13 +1039,13 @@ static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
 		l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
 		arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
 		/* An invalid L1CD can be cached */
-		arm_smmu_sync_cd(smmu_domain, ssid, false);
+		arm_smmu_sync_cd(master, ssid, false);
 	}
 	idx = ssid & (CTXDESC_L2_ENTRIES - 1);
 	return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
 }
 
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
 			    struct arm_smmu_ctx_desc *cd)
 {
 	/*
@@ -1070,11 +1062,12 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 	u64 val;
 	bool cd_live;
 	__le64 *cdptr;
+	struct arm_smmu_ctx_desc_cfg *cd_table = &master->domain->cd_table;
 
-	if (WARN_ON(ssid >= (1 << smmu_domain->cd_table.s1cdmax)))
+	if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
 		return -E2BIG;
 
-	cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+	cdptr = arm_smmu_get_cd_ptr(master, ssid);
 	if (!cdptr)
 		return -ENOMEM;
 
@@ -1102,7 +1095,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 	 * order. Ensure that it observes valid values before reading
 	 * V=1.
 	 */
-	arm_smmu_sync_cd(smmu_domain, ssid, true);
+	arm_smmu_sync_cd(master, ssid, true);
 
 	val = cd->tcr |
 #ifdef __BIG_ENDIAN
@@ -1114,7 +1107,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 			FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
 			CTXDESC_CD_0_V;
 
-		if (smmu_domain->cd_table.stall_enabled)
+		if (cd_table->stall_enabled)
 			val |= CTXDESC_CD_0_S;
 	}
@@ -1128,7 +1121,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
 	 * without first making the structure invalid.
 	 */
 	WRITE_ONCE(cdptr[0], cpu_to_le64(val));
-	arm_smmu_sync_cd(smmu_domain, ssid, true);
+	arm_smmu_sync_cd(master, ssid, true);
 	return 0;
 }
 
@@ -1138,7 +1131,7 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain,
 	int ret;
 	size_t l1size;
 	size_t max_contexts;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_device *smmu = master->smmu;
 	struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->cd_table;
 
 	cdcfg->stall_enabled = master->stall_enabled;
@@ -2122,12 +2115,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
 		CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
 	cd->mair = pgtbl_cfg->arm_lpae_s1_cfg.mair;
 
-	/*
-	 * Note that this will end up calling arm_smmu_sync_cd() before
-	 * the master has been added to the devices list for this domain.
-	 * This isn't an issue because the STE hasn't been installed yet.
-	 */
-	ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+	ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, cd);
 	if (ret)
 		goto out_free_cd_tables;
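
Aside, not part of the commit: the hunks above change arm_smmu_sync_cd() from taking the domain and walking every attached master under devices_lock to taking a single master and batching CFGI_CD commands for that master's stream IDs only. The stand-alone C sketch below models just that calling-convention change for readers outside the kernel tree; the types and names (fake_stream, fake_master, sync_cd) are hypothetical stand-ins, not the real driver structures.

#include <stdio.h>

struct fake_stream {
	unsigned int id;
};

struct fake_master {
	struct fake_stream *streams;
	unsigned int num_streams;
};

/* Post-patch shape: sync context-descriptor caches for one master. */
static void sync_cd(struct fake_master *master, int ssid, int leaf)
{
	unsigned int i;

	/*
	 * One CFGI_CD per stream ID owned by this master. No domain lock
	 * and no walk over other attached masters is needed, because the
	 * caller already knows exactly whose cached CDs went stale.
	 */
	for (i = 0; i < master->num_streams; i++)
		printf("CMDQ_OP_CFGI_CD sid=0x%x ssid=%d leaf=%d\n",
		       master->streams[i].id, ssid, leaf);
}

int main(void)
{
	struct fake_stream s[] = { { 0x10 }, { 0x11 } };
	struct fake_master m = { .streams = s, .num_streams = 2 };

	sync_cd(&m, 0, 1);	/* e.g. after rewriting the CD for SSID 0 */
	return 0;
}

A side effect visible in the last hunk: once the sync no longer depends on the domain's devices list, the deleted comment in arm_smmu_domain_finalise_s1() about calling arm_smmu_sync_cd() before the master joins that list has nothing left to explain, so it goes away along with the lock and list walk.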