@@ -111,6 +111,9 @@
 #define QSPI_DMA_BLK				0x024
 #define QSPI_DMA_BLK_SET(x)			(((x) & 0xffff) << 0)
 
+#define QSPI_DMA_MEM_ADDRESS			0x028
+#define QSPI_DMA_HI_ADDRESS			0x02c
+
 #define QSPI_TX_FIFO				0x108
 #define QSPI_RX_FIFO				0x188
 
@@ -167,9 +170,9 @@ enum tegra_qspi_transfer_type {
 };
 
 struct tegra_qspi_soc_data {
-	bool has_dma;
 	bool cmb_xfer_capable;
 	bool supports_tpm;
+	bool has_ext_dma;
 	unsigned int cs_count;
 };
 
@@ -605,13 +608,16 @@ static void tegra_qspi_dma_unmap_xfer(struct tegra_qspi *tqspi, struct spi_trans
 
 	len = DIV_ROUND_UP(tqspi->curr_dma_words * tqspi->bytes_per_word, 4) * 4;
 
-	dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
-	dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
+	if (t->tx_buf)
+		dma_unmap_single(tqspi->dev, t->tx_dma, len, DMA_TO_DEVICE);
+	if (t->rx_buf)
+		dma_unmap_single(tqspi->dev, t->rx_dma, len, DMA_FROM_DEVICE);
 }
 
 static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct spi_transfer *t)
 {
 	struct dma_slave_config dma_sconfig = { 0 };
+	dma_addr_t rx_dma_phys, tx_dma_phys;
 	unsigned int len;
 	u8 dma_burst;
 	int ret = 0;
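Note on the unmap guards above: only a transfer that actually carries a tx_buf or rx_buf was mapped in the first place, so unmapping must be equally conditional or a stale handle would be passed to dma_unmap_single(). A minimal sketch of the mapping side this pairs with, assuming the standard DMA-mapping API (map_qspi_tx is a hypothetical helper, not part of the patch):

```c
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>

/* Hypothetical helper: map the TX buffer only when the transfer has one,
 * mirroring the t->tx_buf check that tegra_qspi_dma_unmap_xfer() now does.
 */
static int map_qspi_tx(struct device *dev, struct spi_transfer *t,
		       unsigned int len)
{
	if (!t->tx_buf)
		return 0;	/* nothing mapped, nothing to unmap later */

	t->tx_dma = dma_map_single(dev, (void *)t->tx_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, t->tx_dma))
		return -ENOMEM;

	return 0;
}
```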
@@ -634,60 +640,86 @@ static int tegra_qspi_start_dma_based_transfer(struct tegra_qspi *tqspi, struct
 	len = tqspi->curr_dma_words * 4;
 
 	/* set attention level based on length of transfer */
-	val = 0;
-	if (len & 0xf) {
-		val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
-		dma_burst = 1;
-	} else if (((len) >> 4) & 0x1) {
-		val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
-		dma_burst = 4;
-	} else {
-		val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
-		dma_burst = 8;
+	if (tqspi->soc_data->has_ext_dma) {
+		val = 0;
+		if (len & 0xf) {
+			val |= QSPI_TX_TRIG_1 | QSPI_RX_TRIG_1;
+			dma_burst = 1;
+		} else if (((len) >> 4) & 0x1) {
+			val |= QSPI_TX_TRIG_4 | QSPI_RX_TRIG_4;
+			dma_burst = 4;
+		} else {
+			val |= QSPI_TX_TRIG_8 | QSPI_RX_TRIG_8;
+			dma_burst = 8;
+		}
+
+		tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 	}
 
-	tegra_qspi_writel(tqspi, val, QSPI_DMA_CTL);
 	tqspi->dma_control_reg = val;
 
 	dma_sconfig.device_fc = true;
+
 	if (tqspi->cur_direction & DATA_DIR_TX) {
-		dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
-		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dma_sconfig.dst_maxburst = dma_burst;
-		ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
-			return ret;
-		}
+		if (tqspi->tx_dma_chan) {
+			dma_sconfig.dst_addr = tqspi->phys + QSPI_TX_FIFO;
+			dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			dma_sconfig.dst_maxburst = dma_burst;
+			ret = dmaengine_slave_config(tqspi->tx_dma_chan, &dma_sconfig);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+				return ret;
+			}
 
-		tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
-		ret = tegra_qspi_start_tx_dma(tqspi, t, len);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
-			return ret;
+			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+			ret = tegra_qspi_start_tx_dma(tqspi, t, len);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed to starting TX DMA: %d\n", ret);
+				return ret;
+			}
+		} else {
+			if (tqspi->is_packed)
+				tx_dma_phys = t->tx_dma;
+			else
+				tx_dma_phys = tqspi->tx_dma_phys;
+			tegra_qspi_copy_client_txbuf_to_qspi_txbuf(tqspi, t);
+			tegra_qspi_writel(tqspi, lower_32_bits(tx_dma_phys),
+					  QSPI_DMA_MEM_ADDRESS);
+			tegra_qspi_writel(tqspi, (upper_32_bits(tx_dma_phys) & 0xff),
+					  QSPI_DMA_HI_ADDRESS);
 		}
 	}
 
 	if (tqspi->cur_direction & DATA_DIR_RX) {
-		dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
-		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dma_sconfig.src_maxburst = dma_burst;
-		ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
-			return ret;
-		}
-
-		dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
-					   tqspi->dma_buf_size,
-					   DMA_FROM_DEVICE);
+		if (tqspi->rx_dma_chan) {
+			dma_sconfig.src_addr = tqspi->phys + QSPI_RX_FIFO;
+			dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			dma_sconfig.src_maxburst = dma_burst;
+			ret = dmaengine_slave_config(tqspi->rx_dma_chan, &dma_sconfig);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed DMA slave config: %d\n", ret);
+				return ret;
+			}
 
-		ret = tegra_qspi_start_rx_dma(tqspi, t, len);
-		if (ret < 0) {
-			dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
-			if (tqspi->cur_direction & DATA_DIR_TX)
-				dmaengine_terminate_all(tqspi->tx_dma_chan);
-			return ret;
+			dma_sync_single_for_device(tqspi->dev, tqspi->rx_dma_phys,
+						   tqspi->dma_buf_size, DMA_FROM_DEVICE);
+			ret = tegra_qspi_start_rx_dma(tqspi, t, len);
+			if (ret < 0) {
+				dev_err(tqspi->dev, "failed to start RX DMA: %d\n", ret);
+				if (tqspi->cur_direction & DATA_DIR_TX)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				return ret;
+			}
+		} else {
+			if (tqspi->is_packed)
+				rx_dma_phys = t->rx_dma;
+			else
+				rx_dma_phys = tqspi->rx_dma_phys;
+
+			tegra_qspi_writel(tqspi, lower_32_bits(rx_dma_phys),
+					  QSPI_DMA_MEM_ADDRESS);
+			tegra_qspi_writel(tqspi, (upper_32_bits(rx_dma_phys) & 0xff),
+					  QSPI_DMA_HI_ADDRESS);
 		}
 	}
 
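The else branches above program the controller's internal DMA engine directly: the buffer address is split so that the low 32 bits land in QSPI_DMA_MEM_ADDRESS and the next 8 bits in QSPI_DMA_HI_ADDRESS, which is exactly what lower_32_bits() and upper_32_bits() masked with 0xff compute for a 40-bit address. A standalone sketch of that split (reg_write and qspi_program_dma_addr are made up for illustration):

```c
#include <stdint.h>
#include <stdio.h>

#define QSPI_DMA_MEM_ADDRESS	0x028	/* low 32 bits of buffer address */
#define QSPI_DMA_HI_ADDRESS	0x02c	/* bits 39:32 of buffer address */

/* Stand-in for tegra_qspi_writel(): just show what would be written. */
static void reg_write(uint32_t val, uint32_t offset)
{
	printf("reg 0x%03x <= 0x%08x\n", offset, val);
}

static void qspi_program_dma_addr(uint64_t dma_phys)
{
	reg_write((uint32_t)dma_phys, QSPI_DMA_MEM_ADDRESS);		/* lower_32_bits() */
	reg_write(((uint32_t)(dma_phys >> 32)) & 0xff, QSPI_DMA_HI_ADDRESS); /* upper_32_bits() & 0xff */
}

int main(void)
{
	/* e.g. a 40-bit IOVA: high byte 0x12, low word 0x34567890 */
	qspi_program_dma_addr(0x1234567890ULL);
	return 0;
}
```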
@@ -726,9 +758,6 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s
 
 static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi)
 {
-	if (!tqspi->soc_data->has_dma)
-		return;
-
 	if (tqspi->tx_dma_buf) {
 		dma_free_coherent(tqspi->dev, tqspi->dma_buf_size,
 				  tqspi->tx_dma_buf, tqspi->tx_dma_phys);
@@ -759,16 +788,29 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	u32 *dma_buf;
 	int err;
 
-	if (!tqspi->soc_data->has_dma)
-		return 0;
+	if (tqspi->soc_data->has_ext_dma) {
+		dma_chan = dma_request_chan(tqspi->dev, "rx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
 
-	dma_chan = dma_request_chan(tqspi->dev, "rx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
+		tqspi->rx_dma_chan = dma_chan;
 
-	tqspi->rx_dma_chan = dma_chan;
+		dma_chan = dma_request_chan(tqspi->dev, "tx");
+		if (IS_ERR(dma_chan)) {
+			err = PTR_ERR(dma_chan);
+			goto err_out;
+		}
+
+		tqspi->tx_dma_chan = dma_chan;
+	} else {
+		if (!device_iommu_mapped(tqspi->dev)) {
+			dev_warn(tqspi->dev,
+				 "IOMMU not enabled in device-tree, falling back to PIO mode\n");
+			return 0;
+		}
+	}
 
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
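So init now takes one of two paths: with has_ext_dma the driver must acquire both dmaengine channels, and without it the controller's own DMA engine can only be used when an IOMMU translates the buffer addresses, falling back to PIO otherwise. A hedged sketch of the usual dmaengine channel-request pattern, including deferred probe (request_qspi_dma is a hypothetical helper, not from the patch):

```c
#include <linux/dmaengine.h>
#include <linux/dev_printk.h>

static int request_qspi_dma(struct device *dev,
			    struct dma_chan **rx, struct dma_chan **tx)
{
	/* dma_request_chan() may return -EPROBE_DEFER if the DMA
	 * controller has not probed yet; dev_err_probe() handles
	 * that case quietly and logs real failures.
	 */
	*rx = dma_request_chan(dev, "rx");
	if (IS_ERR(*rx))
		return dev_err_probe(dev, PTR_ERR(*rx), "cannot get RX DMA channel\n");

	*tx = dma_request_chan(dev, "tx");
	if (IS_ERR(*tx)) {
		dma_release_channel(*rx);
		return dev_err_probe(dev, PTR_ERR(*tx), "cannot get TX DMA channel\n");
	}

	return 0;
}
```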
@@ -779,14 +821,6 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi)
 	tqspi->rx_dma_buf = dma_buf;
 	tqspi->rx_dma_phys = dma_phys;
 
-	dma_chan = dma_request_chan(tqspi->dev, "tx");
-	if (IS_ERR(dma_chan)) {
-		err = PTR_ERR(dma_chan);
-		goto err_out;
-	}
-
-	tqspi->tx_dma_chan = dma_chan;
-
 	dma_buf = dma_alloc_coherent(tqspi->dev, tqspi->dma_buf_size, &dma_phys, GFP_KERNEL);
 	if (!dma_buf) {
 		err = -ENOMEM;
@@ -1128,15 +1162,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi,
 		if (WARN_ON_ONCE(ret == 0)) {
 			dev_err_ratelimited(tqspi->dev,
 					    "QSPI Transfer failed with timeout\n");
-			if (tqspi->is_curr_dma_xfer &&
-			    (tqspi->cur_direction & DATA_DIR_TX))
-				dmaengine_terminate_all
-					(tqspi->tx_dma_chan);
-
-			if (tqspi->is_curr_dma_xfer &&
-			    (tqspi->cur_direction & DATA_DIR_RX))
-				dmaengine_terminate_all
-					(tqspi->rx_dma_chan);
+			if (tqspi->is_curr_dma_xfer) {
+				if ((tqspi->cur_direction & DATA_DIR_TX) &&
+				    tqspi->tx_dma_chan)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				if ((tqspi->cur_direction & DATA_DIR_RX) &&
+				    tqspi->rx_dma_chan)
+					dmaengine_terminate_all(tqspi->rx_dma_chan);
+			}
 
 			/* Abort transfer by resetting pio/dma bit */
 			if (!tqspi->is_curr_dma_xfer) {
@@ -1251,10 +1284,12 @@ static int tegra_qspi_non_combined_seq_xfer(struct tegra_qspi *tqspi,
 					  QSPI_DMA_TIMEOUT);
 		if (WARN_ON(ret == 0)) {
 			dev_err(tqspi->dev, "transfer timeout\n");
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX))
-				dmaengine_terminate_all(tqspi->tx_dma_chan);
-			if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_RX))
-				dmaengine_terminate_all(tqspi->rx_dma_chan);
+			if (tqspi->is_curr_dma_xfer) {
+				if ((tqspi->cur_direction & DATA_DIR_TX) && tqspi->tx_dma_chan)
+					dmaengine_terminate_all(tqspi->tx_dma_chan);
+				if ((tqspi->cur_direction & DATA_DIR_RX) && tqspi->rx_dma_chan)
+					dmaengine_terminate_all(tqspi->rx_dma_chan);
+			}
 			tegra_qspi_handle_error(tqspi);
 			ret = -EIO;
 			goto complete_xfer;
@@ -1323,7 +1358,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi,
 			return false;
 		xfer = list_next_entry(xfer, transfer_list);
 	}
-	if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
+	if (!tqspi->soc_data->has_ext_dma && xfer->len > (QSPI_FIFO_DEPTH << 2))
 		return false;
 
 	return true;
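For reference, QSPI_FIFO_DEPTH is counted in 32-bit FIFO words, so `xfer->len > (QSPI_FIFO_DEPTH << 2)` compares the transfer length against the FIFO capacity in bytes. A tiny standalone check of that arithmetic (the depth of 64 words is an assumption for illustration, not taken from this diff):

```c
#include <assert.h>

#define QSPI_FIFO_DEPTH	64	/* assumed depth in 32-bit words */

int main(void)
{
	/* 64 words * 4 bytes/word = 256 bytes of FIFO capacity */
	assert((QSPI_FIFO_DEPTH << 2) == 256);
	return 0;
}
```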
@@ -1384,41 +1419,43 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	unsigned int total_fifo_words;
 	unsigned long flags;
 	long wait_status;
-	int err = 0;
+	int num_errors = 0;
 
 	if (tqspi->cur_direction & DATA_DIR_TX) {
 		if (tqspi->tx_status) {
-			dmaengine_terminate_all(tqspi->tx_dma_chan);
-			err += 1;
-		} else {
+			if (tqspi->tx_dma_chan)
+				dmaengine_terminate_all(tqspi->tx_dma_chan);
+			num_errors++;
+		} else if (tqspi->tx_dma_chan) {
 			wait_status = wait_for_completion_interruptible_timeout(
 				&tqspi->tx_dma_complete, QSPI_DMA_TIMEOUT);
 			if (wait_status <= 0) {
 				dmaengine_terminate_all(tqspi->tx_dma_chan);
 				dev_err(tqspi->dev, "failed TX DMA transfer\n");
-				err += 1;
+				num_errors++;
 			}
 		}
 	}
 
 	if (tqspi->cur_direction & DATA_DIR_RX) {
 		if (tqspi->rx_status) {
-			dmaengine_terminate_all(tqspi->rx_dma_chan);
-			err += 2;
-		} else {
+			if (tqspi->rx_dma_chan)
+				dmaengine_terminate_all(tqspi->rx_dma_chan);
+			num_errors++;
+		} else if (tqspi->rx_dma_chan) {
 			wait_status = wait_for_completion_interruptible_timeout(
 				&tqspi->rx_dma_complete, QSPI_DMA_TIMEOUT);
 			if (wait_status <= 0) {
 				dmaengine_terminate_all(tqspi->rx_dma_chan);
 				dev_err(tqspi->dev, "failed RX DMA transfer\n");
-				err += 2;
+				num_errors++;
 			}
 		}
 	}
 
 	spin_lock_irqsave(&tqspi->lock, flags);
 
-	if (err) {
+	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
 		tegra_qspi_handle_error(tqspi);
 		complete(&tqspi->xfer_completion);
@@ -1444,9 +1481,9 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 	/* continue transfer in current message */
 	total_fifo_words = tegra_qspi_calculate_curr_xfer_param(tqspi, t);
 	if (total_fifo_words > QSPI_FIFO_DEPTH)
-		err = tegra_qspi_start_dma_based_transfer(tqspi, t);
+		num_errors = tegra_qspi_start_dma_based_transfer(tqspi, t);
 	else
-		err = tegra_qspi_start_cpu_based_transfer(tqspi, t);
+		num_errors = tegra_qspi_start_cpu_based_transfer(tqspi, t);
 
 exit:
 	spin_unlock_irqrestore(&tqspi->lock, flags);
@@ -1474,28 +1511,28 @@ static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 }
 
 static struct tegra_qspi_soc_data tegra210_qspi_soc_data = {
-	.has_dma = true,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = false,
 	.supports_tpm = false,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra186_qspi_soc_data = {
-	.has_dma = true,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = true,
 	.supports_tpm = false,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra234_qspi_soc_data = {
-	.has_dma = false,
+	.has_ext_dma = false,
 	.cmb_xfer_capable = true,
 	.supports_tpm = true,
 	.cs_count = 1,
 };
 
 static struct tegra_qspi_soc_data tegra241_qspi_soc_data = {
-	.has_dma = false,
+	.has_ext_dma = true,
 	.cmb_xfer_capable = true,
 	.supports_tpm = true,
 	.cs_count = 4,