
Commit 017f1b0

Vishwaroop A authored and broonie committed
spi: tegra210-quad: Add support for internal DMA
Add support for internal DMA in Tegra234 devices. Tegra234 has an
internal DMA controller, while Tegra241 continues to use an external
DMA controller (GPCDMA). This patch adds support for both internal and
external DMA controllers.

Signed-off-by: Vishwaroop A <va@nvidia.com>
Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
Link: https://patch.msgid.link/20250513200043.608292-2-va@nvidia.com
Signed-off-by: Mark Brown <broonie@kernel.org>
1 parent 65cb56d commit 017f1b0
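
For orientation, the DMA-path selection this patch encodes can be condensed as below. This is a standalone C sketch, not the driver code: struct qspi_soc_data here mirrors only the has_ext_dma flag added in the diff, and dma_path() with its strings is a hypothetical model of the choice between external GPCDMA, the new internal DMA registers, and the PIO fallback.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the per-SoC capability flag this patch introduces. */
struct qspi_soc_data {
	const char *name;
	bool has_ext_dma;	/* true: external GPCDMA via dmaengine */
};

/*
 * Hypothetical helper, illustrative only: the real driver requests
 * dmaengine channels for the external path, programs the new
 * QSPI_DMA_MEM_ADDRESS/QSPI_DMA_HI_ADDRESS registers for the internal
 * path, and falls back to PIO when the device has no IOMMU mapping.
 */
static const char *dma_path(const struct qspi_soc_data *soc, bool iommu_mapped)
{
	if (soc->has_ext_dma)
		return "external GPCDMA (dmaengine channels)";
	if (iommu_mapped)
		return "internal DMA (QSPI_DMA_MEM_ADDRESS/HI_ADDRESS)";
	return "PIO fallback";
}

int main(void)
{
	const struct qspi_soc_data socs[] = {
		{ "tegra210", true },
		{ "tegra186", true },
		{ "tegra234", false },	/* internal DMA controller */
		{ "tegra241", true },	/* keeps external GPCDMA */
	};
	unsigned int i;

	for (i = 0; i < sizeof(socs) / sizeof(socs[0]); i++)
		printf("%s: %s\n", socs[i].name, dma_path(&socs[i], true));
	return 0;
}

Per the soc_data tables at the end of the diff, Tegra210, Tegra186 and Tegra241 take the external path, while Tegra234 uses the internal controller and drops to PIO when the device is not IOMMU-mapped.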

File tree

1 file changed: +131, -94 lines changed

drivers/spi/spi-tegra210-quad.c

Lines changed: 131 additions & 94 deletions
@@ -111,6 +111,9 @@
 #define QSPI_DMA_BLK				0x024
 #define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
 
+#define QSPI_DMA_MEM_ADDRESS			0x028
+#define QSPI_DMA_HI_ADDRESS			0x02c
+
 #define QSPI_TX_FIFO				0x108
 #define QSPI_RX_FIFO				0x188
 
@@ -167,9 +170,9 @@ enum tegra_qspi_transfer_type {
 };
 
 struct tegra_qspi_soc_data {
-	bool has_dma;
 	bool cmb_xfer_capable;
 	bool supports_tpm;
+	bool has_ext_dma;
 	unsigned int cs_count;
 };
 
@@ -605,13 +608,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
 
 	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 
-	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
-	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+	if (t->tx_buf)
+		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+	if (t->rx_buf)
+		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
 }
 
 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 {
 	struct dma_slave_config dma_sconfig = { 0 };
+	dma_addr_t rx_dma_phys, tx_dma_phys;
 	unsigned int len;
 	u8 dma_burst;
 	int ret = 0;
@@ -634,60 +640,86 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
 	len = tqspi->curr_dma_words * 4;
 
 	/* set attention level based on length of transfer */
-	val = 0;
-	if (len & 0xf) {
-		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
-		dma_burst = 1;
-	} else if (((len) >> 4) & 0x1) {
-		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
-		dma_burst = 4;
-	} else {
-		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
-		dma_burst = 8;
+	if (tqspi->soc_data->has_ext_dma) {
+		val = 0;
+		if (len & 0xf) {
+			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+			dma_burst = 1;
+		} else if (((len) >> 4) & 0x1) {
+			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+			dma_burst = 4;
+		} else {
+			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+			dma_burst = 8;
+		}
+
+		tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 	}
 
-	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 	tqspi->dma_control_reg = val;
 
 	dma_sconfig.device_fc = true;
+
 	if (tqspi->cur_direction & DATA_DIR_TX) {
-		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
-		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dma_sconfig.dst_maxburst = dma_burst;
-		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
-			return ret;
-		}
+		if (tqspi->tx_dma_chan) {
+			dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+			dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			dma_sconfig.dst_maxburst = dma_burst;
+			ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+				return ret;
+			}
 
-		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
-		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
-			return ret;
+			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+			ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
+				return ret;
+			}
+		} else {
+			if (tqspi->is_packed)
+				tx_dma_phys = t->tx_dma;
+			else
+				tx_dma_phys = tqspi->tx_dma_phys;
+			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+			tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
+					  QSPI_DMA_MEM_ADDRESS);
+			tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
+					  QSPI_DMA_HI_ADDRESS);
 		}
 	}
 
 	if (tqspi->cur_direction & DATA_DIR_RX) {
-		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
-		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dma_sconfig.src_maxburst = dma_burst;
-		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
-			return ret;
-		}
-
-		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
-					   tqspi->dma_buf_size,
-					   DMA_FROM_DEVICE);
+		if (tqspi->rx_dma_chan) {
+			dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+			dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			dma_sconfig.src_maxburst = dma_burst;
+			ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+				return ret;
+			}
 
-		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
-			if (tqspi->cur_direction & DATA_DIR_TX)
-				dmaengine_terminate_all(tqspi->tx_dma_chan);
-			return ret;
+			dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+						   tqspi->dma_buf_size, DMA_FROM_DEVICE);
+			ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+				if (tqspi->cur_direction & DATA_DIR_TX)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				return ret;
+			}
+		} else {
+			if (tqspi->is_packed)
+				rx_dma_phys = t->rx_dma;
+			else
+				rx_dma_phys = tqspi->rx_dma_phys;
+
+			tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
+					  QSPI_DMA_MEM_ADDRESS);
+			tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
+					  QSPI_DMA_HI_ADDRESS);
 		}
 	}
 
@@ -726,9 +758,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
 
 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
 {
-	if (!tqspi->soc_data->has_dma)
-		return;
-
 	if (tqspi->tx_dma_buf) {
 		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -759,16 +788,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	u32 *dma_buf;
 	int err;
 
-	if (!tqspi->soc_data->has_dma)
-		return 0;
+	if (tqspi->soc_data->has_ext_dma) {
+		dma_chan = dma_request_chan(tqspi->dev, "rx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
 
-	dma_chan = dma_request_chan(tqspi->dev, "rx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
+		tqspi->rx_dma_chan = dma_chan;
 
-	tqspi->rx_dma_chan = dma_chan;
+		dma_chan = dma_request_chan(tqspi->dev, "tx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
+
+		tqspi->tx_dma_chan = dma_chan;
+	} else {
+		if (!device_iommu_mapped(tqspi->dev)) {
+			dev_warn(tqspi->dev,
+				 "IOMMU not enabled in device-tree, falling back to PIO mode\n");
+			return 0;
+		}
+	}
 
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
@@ -779,14 +821,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	tqspi->rx_dma_buf = dma_buf;
 	tqspi->rx_dma_phys = dma_phys;
 
-	dma_chan = dma_request_chan(tqspi->dev, "tx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
-
-	tqspi->tx_dma_chan = dma_chan;
-
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
 		err = -ENOMEM;
@@ -1128,15 +1162,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 		if (WARN_ON_ONCE(ret == 0)) {
 			dev_err_ratelimited(tqspi->dev,
 					    "QSPI Transfer failed with timeout\n");
-			if (tqspi->is_curr_dma_xfer &&
-			    (tqspi->cur_direction & DATA_DIR_TX))
-				dmaengine_terminate_all
-					(tqspi->tx_dma_chan);
-
-			if (tqspi->is_curr_dma_xfer &&
-			    (tqspi->cur_direction & DATA_DIR_RX))
-				dmaengine_terminate_all
-					(tqspi->rx_dma_chan);
+			if (tqspi->is_curr_dma_xfer) {
+				if ((tqspi->cur_direction & DATA_DIR_TX) &&
+				    tqspi->tx_dma_chan)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				if ((tqspi->cur_direction & DATA_DIR_RX) &&
+				    tqspi->rx_dma_chan)
+					dmaengine_terminate_all(tqspi->rx_dma_chan);
+			}
 
 			/* Abort transfer by resetting pio/dma bit */
 			if (!tqspi->is_curr_dma_xfer) {
@@ -1251,10 +1284,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 						  QSPI_DMA_TIMEOUT);
 		if (WARN_ON(ret == 0)) {
 			dev_err(tqspi->dev, "transfer timeout\n");
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
-				dmaengine_terminate_all(tqspi->tx_dma_chan);
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
-				dmaengine_terminate_all(tqspi->rx_dma_chan);
+			if (tqspi->is_curr_dma_xfer) {
+				if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
+					dmaengine_terminate_all(tqspi->rx_dma_chan);
+			}
 			tegra_qspi_handle_error(tqspi);
 			ret = -EIO;
 			goto complete_xfer;
@@ -1323,7 +1358,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
 			return false;
 		xfer = list_next_entry(xfer, transfer_list);
 	}
-	if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+	if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
 		return false;
 
 	return true;
@@ -1384,41 +1419,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	unsigned int total_fifo_words;
 	unsigned long flags;
 	long wait_status;
-	int err = 0;
+	int num_errors = 0;
 
 	if (tqspi->cur_direction & DATA_DIR_TX) {
 		if (tqspi->tx_status) {
-			dmaengine_terminate_all(tqspi->tx_dma_chan);
-			err += 1;
-		} else {
+			if (tqspi->tx_dma_chan)
+				dmaengine_terminate_all(tqspi->tx_dma_chan);
+			num_errors++;
+		} else if (tqspi->tx_dma_chan) {
 			wait_status = wait_for_completion_interruptible_timeout(
 				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
 			if (wait_status <= 0) {
 				dmaengine_terminate_all(tqspi->tx_dma_chan);
 				dev_err(tqspi->dev, "failed TX DMA transfer\n");
-				err += 1;
+				num_errors++;
 			}
 		}
 	}
 
 	if (tqspi->cur_direction & DATA_DIR_RX) {
 		if (tqspi->rx_status) {
-			dmaengine_terminate_all(tqspi->rx_dma_chan);
-			err += 2;
-		} else {
+			if (tqspi->rx_dma_chan)
+				dmaengine_terminate_all(tqspi->rx_dma_chan);
+			num_errors++;
+		} else if (tqspi->rx_dma_chan) {
 			wait_status = wait_for_completion_interruptible_timeout(
 				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
 			if (wait_status <= 0) {
 				dmaengine_terminate_all(tqspi->rx_dma_chan);
 				dev_err(tqspi->dev, "failed RX DMA transfer\n");
-				err += 2;
+				num_errors++;
 			}
 		}
 	}
 
 	spin_lock_irqsave(&tqspi->lock, flags);
 
-	if (err) {
+	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
 		tegra_qspi_handle_error(tqspi);
 		complete(&tqspi->xfer_completion);
@@ -1444,9 +1481,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	/* continue transfer in current message */
 	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
 	if (total_fifo_words > QSPI_FIFO_DEPTH)
-		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+		num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
 	else
-		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+		num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);
 
 exit:
 	spin_unlock_irqrestore(&tqspi->lock, flags);
@@ -1474,28 +1511,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 }
 
 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
-	.has_dma = true,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = false,
 	.supports_tpm = false,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
-	.has_dma = true,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = true,
 	.supports_tpm = false,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
-	.has_dma = false,
+	.has_ext_dma = false,
 	.cmb_xfer_capable = true,
 	.supports_tpm = true,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
-	.has_dma = false,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = true,
 	.supports_tpm = true,
 	.cs_count = 4,
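
A note on the internal-DMA path above: the buffer address is programmed as a 32-bit low word (QSPI_DMA_MEM_ADDRESS) plus an 8-bit high byte (QSPI_DMA_HI_ADDRESS), i.e. a 40-bit range. A minimal standalone C sketch of that split, assuming the masking shown in the hunks above (split_dma_addr() is a hypothetical helper, not a driver function):

#include <stdint.h>
#include <stdio.h>

/*
 * Models the register writes in the patch: lower_32_bits() of the
 * address goes to QSPI_DMA_MEM_ADDRESS, upper_32_bits() & 0xff
 * (bits 39:32) goes to QSPI_DMA_HI_ADDRESS.
 */
static void split_dma_addr(uint64_t dma_addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma_addr & 0xffffffffu);
	*hi = (uint32_t)((dma_addr >> 32) & 0xff);
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x1234567890ULL, &lo, &hi);	/* 40-bit example address */
	printf("QSPI_DMA_MEM_ADDRESS = 0x%08x, QSPI_DMA_HI_ADDRESS = 0x%02x\n",
	       lo, hi);
	return 0;
}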
