@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 }
 
 #ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
-		struct sg_table *sgt, void *buf, size_t len,
-		enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+			     struct sg_table *sgt, void *buf, size_t len,
+			     enum dma_data_direction dir, unsigned long attrs)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,39 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		sg = sg_next(sg);
 	}
 
-	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-	if (!ret)
-		ret = -ENOMEM;
+	ret = dma_map_sgtable(dev, sgt, dir, attrs);
 	if (ret < 0) {
 		sg_free_table(sgt);
 		return ret;
 	}
 
-	sgt->nents = ret;
-
 	return 0;
 }
 
-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
-		   struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+		struct sg_table *sgt, void *buf, size_t len,
+		enum dma_data_direction dir)
+{
+	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+				struct device *dev, struct sg_table *sgt,
+				enum dma_data_direction dir,
+				unsigned long attrs)
 {
 	if (sgt->orig_nents) {
-		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		dma_unmap_sgtable(dev, sgt, dir, attrs);
 		sg_free_table(sgt);
 	}
 }
 
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+		   struct sg_table *sgt, enum dma_data_direction dir)
+{
+	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
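Note on the hunk above: dma_map_sgtable() returns 0 on success or a negative errno and records the mapped entry count in the sg_table itself, which is why the explicit "if (!ret) ret = -ENOMEM;" and "sgt->nents = ret;" bookkeeping around the old dma_map_sg() call can be dropped. A minimal sketch of the difference, not part of the patch (the helper names old_style_map/new_style_map are illustrative only):

#include <linux/dma-mapping.h>

/* Old pattern: dma_map_sg() returns the mapped entry count, 0 on failure. */
static int old_style_map(struct device *dev, struct sg_table *sgt,
			 enum dma_data_direction dir)
{
	int nents = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);

	if (!nents)
		return -ENOMEM;
	sgt->nents = nents;	/* caller has to record the mapped count */
	return 0;
}

/* New pattern: dma_map_sgtable() returns 0/-errno and tracks nents itself. */
static int new_style_map(struct device *dev, struct sg_table *sgt,
			 enum dma_data_direction dir, unsigned long attrs)
{
	return dma_map_sgtable(dev, sgt, dir, attrs);
}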
@@ -1124,24 +1135,30 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		rx_dev = ctlr->dev.parent;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync is done before each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
-					  (void *)xfer->tx_buf, xfer->len,
-					  DMA_TO_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+						(void *)xfer->tx_buf,
+						xfer->len, DMA_TO_DEVICE,
+						attrs);
 			if (ret != 0)
 				return ret;
 		}
 
 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
-					  xfer->rx_buf, xfer->len,
-					  DMA_FROM_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+						xfer->rx_buf, xfer->len,
+						DMA_FROM_DEVICE, attrs);
 			if (ret != 0) {
-				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
-					      DMA_TO_DEVICE);
+				spi_unmap_buf_attrs(ctlr, tx_dev,
+						    &xfer->tx_sg, DMA_TO_DEVICE,
+						    attrs);
+
 				return ret;
 			}
 		}
@@ -1164,17 +1181,52 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		return 0;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync has already been done after each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
-		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
+				    DMA_FROM_DEVICE, attrs);
+		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
+				    DMA_TO_DEVICE, attrs);
 	}
 
 	ctlr->cur_msg_mapped = false;
 
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+				    struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+				 struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
@@ -1187,6 +1239,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 {
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_transfer *xfer)
+{
+}
 #endif /* !CONFIG_HAS_DMA */
 
 static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1445,8 +1507,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 		reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
+		spi_dma_sync_for_device(ctlr, xfer);
 		ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 		if (ret < 0) {
+			spi_dma_sync_for_cpu(ctlr, xfer);
+
 			if (ctlr->cur_msg_mapped &&
 			   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 				__spi_unmap_msg(ctlr, msg);
@@ -1469,6 +1534,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			if (ret < 0)
 				msg->status = ret;
 		}
+
+		spi_dma_sync_for_cpu(ctlr, xfer);
 	} else {
 		if (xfer->len)
 			dev_err(&msg->spi->dev,
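Taken together, the hunks above implement a common DMA pattern: the buffers are mapped once per message with DMA_ATTR_SKIP_CPU_SYNC, and CPU cache maintenance is instead performed explicitly around each individual transfer with dma_sync_sgtable_for_device()/dma_sync_sgtable_for_cpu(). A condensed sketch of that pattern, not taken from the patch (the example_* names and the single DMA_TO_DEVICE direction are illustrative assumptions):

#include <linux/dma-mapping.h>

/* Map once, deferring all CPU cache maintenance to the per-transfer syncs. */
static int example_map(struct device *dev, struct sg_table *sgt)
{
	return dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

/* Bracket every hardware transfer with explicit ownership hand-offs. */
static void example_one_transfer(struct device *dev, struct sg_table *sgt)
{
	/* Give the buffer to the device: clean/flush the CPU caches. */
	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);

	/* ... program the transfer and wait for it to complete ... */

	/* Take the buffer back before the CPU (or a PIO fallback) touches it. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
}

/* Unmap can skip cache maintenance too, since the last sync already ran. */
static void example_unmap(struct device *dev, struct sg_table *sgt)
{
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}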