Commit 0c17ba7

vwax authored and broonie committed
spi: Fix cache corruption due to DMA/PIO overlap
The SPI core DMA mapping support performs cache management once for the entire message and not between transfers, and this leads to cache corruption if a message has two or more RX transfers with both transfers targeting the same cache line, and the controller driver decides to handle one using DMA and the other using PIO (for example, because one is much larger than the other).

Fix it by syncing before/after the actual transfers. This also means that we can skip the sync during the map/unmap of the message.

Fixes: 99adef3 ("spi: Provide core support for DMA mapping transfers")
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Link: https://lore.kernel.org/r/20220927112117.77599-3-vincent.whitchurch@axis.com
Signed-off-by: Mark Brown <broonie@kernel.org>
1 parent f25723d commit 0c17ba7
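To picture the failure mode the commit message describes, here is a hedged, hypothetical example (the buffer layout, sizes and the driver's DMA/PIO choice are invented for illustration, not taken from the patch): a message with two RX transfers whose buffers share a cache line, where the controller driver's can_dma() callback accepts the large transfer but falls back to PIO for the small one.

	/*
	 * Hypothetical illustration only: both RX buffers sit in one 64-byte
	 * cache line, but only the second transfer is large enough for the
	 * controller driver to use DMA.
	 */
	u8 *buf = kmalloc(64, GFP_KERNEL);	/* shared scratch buffer */
	struct spi_transfer xfers[2] = {
		{ .rx_buf = buf,     .len = 4  },	/* small -> handled by PIO */
		{ .rx_buf = buf + 4, .len = 60 },	/* large -> handled by DMA */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	spi_sync(spi, &msg);

	/*
	 * With cache maintenance done only once per message, the PIO write to
	 * buf[0..3] can dirty the cache line that the DMA engine also writes
	 * into, so a stale read or a later writeback corrupts one of the two
	 * results.  Syncing around each individual transfer avoids the overlap.
	 */

The patch below keeps the mapping per message but moves the cache maintenance to just before and after each transfer_one() call.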

File tree

1 file changed (+88, -21 lines changed)


drivers/spi/spi.c

Lines changed: 88 additions & 21 deletions
@@ -1010,9 +1010,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 }
 
 #ifdef CONFIG_HAS_DMA
-int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
-		struct sg_table *sgt, void *buf, size_t len,
-		enum dma_data_direction dir)
+static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
+			     struct sg_table *sgt, void *buf, size_t len,
+			     enum dma_data_direction dir, unsigned long attrs)
 {
 	const bool vmalloced_buf = is_vmalloc_addr(buf);
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
@@ -1078,28 +1078,39 @@ int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
 		sg = sg_next(sg);
 	}
 
-	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
-	if (!ret)
-		ret = -ENOMEM;
+	ret = dma_map_sgtable(dev, sgt, dir, attrs);
 	if (ret < 0) {
 		sg_free_table(sgt);
 		return ret;
 	}
 
-	sgt->nents = ret;
-
 	return 0;
 }
 
-void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
-		   struct sg_table *sgt, enum dma_data_direction dir)
+int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
+		struct sg_table *sgt, void *buf, size_t len,
+		enum dma_data_direction dir)
+{
+	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
+}
+
+static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
+				struct device *dev, struct sg_table *sgt,
+				enum dma_data_direction dir,
+				unsigned long attrs)
 {
 	if (sgt->orig_nents) {
-		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		dma_unmap_sgtable(dev, sgt, dir, attrs);
 		sg_free_table(sgt);
 	}
 }
 
+void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
+		   struct sg_table *sgt, enum dma_data_direction dir)
+{
+	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
+}
+
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
@@ -1124,24 +1135,30 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		rx_dev = ctlr->dev.parent;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync is done before each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
-					  (void *)xfer->tx_buf, xfer->len,
-					  DMA_TO_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
 			if (ret != 0)
 				return ret;
 		}
 
 		if (xfer->rx_buf != NULL) {
-			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
-					  xfer->rx_buf, xfer->len,
-					  DMA_FROM_DEVICE);
+			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
 			if (ret != 0) {
-				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
-					      DMA_TO_DEVICE);
+				spi_unmap_buf_attrs(ctlr, tx_dev,
						    &xfer->tx_sg, DMA_TO_DEVICE,
						    attrs);
+
 				return ret;
 			}
 		}
@@ -1164,17 +1181,52 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 		return 0;
 
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		/* The sync has already been done after each transfer. */
+		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+
 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
 			continue;
 
-		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
+		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
 	}
 
 	ctlr->cur_msg_mapped = false;
 
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+				    struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+				 struct spi_transfer *xfer)
+{
+	struct device *rx_dev = ctlr->cur_rx_dma_dev;
+	struct device *tx_dev = ctlr->cur_tx_dma_dev;
+
+	if (!ctlr->cur_msg_mapped)
+		return;
+
+	if (xfer->rx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+	if (xfer->tx_sg.orig_nents)
+		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+}
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
 				struct spi_message *msg)
@@ -1187,6 +1239,16 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 {
 	return 0;
 }
+
+static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+				    struct spi_transfer *xfer)
+{
+}
+
+static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+				 struct spi_transfer *xfer)
+{
+}
 #endif /* !CONFIG_HAS_DMA */
 
 static inline int spi_unmap_msg(struct spi_controller *ctlr,
@@ -1445,8 +1507,11 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 			reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
+			spi_dma_sync_for_device(ctlr, xfer);
 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
 			if (ret < 0) {
+				spi_dma_sync_for_cpu(ctlr, xfer);
+
 				if (ctlr->cur_msg_mapped &&
 				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
 					__spi_unmap_msg(ctlr, msg);
@@ -1469,6 +1534,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
 				if (ret < 0)
 					msg->status = ret;
 			}
+
+			spi_dma_sync_for_cpu(ctlr, xfer);
 		} else {
 			if (xfer->len)
 				dev_err(&msg->spi->dev,

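Read end to end, the change boils down to the following per-transfer sync pattern. This is a condensed sketch assembled from the hunks above (error handling, the TX leg and the fallback-to-PIO path are omitted), not a verbatim excerpt of the file:

	/* __spi_map_msg(): map each DMA'd buffer once per message, but skip the CPU sync. */
	ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, xfer->rx_buf,
				xfer->len, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);

	/* spi_transfer_one_message(): cache maintenance now brackets each transfer. */
	spi_dma_sync_for_device(ctlr, xfer);		/* hand the buffers to the device */
	ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
	/* ... wait for the transfer to complete ... */
	spi_dma_sync_for_cpu(ctlr, xfer);		/* hand the buffers back to the CPU */

	/* __spi_unmap_msg(): unmap, again without touching the caches. */
	spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE,
			    DMA_ATTR_SKIP_CPU_SYNC);

Because the map/unmap steps pass DMA_ATTR_SKIP_CPU_SYNC, a transfer that the controller driver ends up handling by PIO never has stale cache maintenance applied to its buffer, which is what previously corrupted neighbouring DMA'd data in the same cache line.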