@@ -83,6 +83,9 @@ struct xdma_chan {
  * @dblk_num: Number of hardware descriptor blocks
  * @desc_num: Number of hardware descriptors
  * @completed_desc_num: Completed hardware descriptors
+ * @cyclic: Cyclic transfer vs. scatter-gather
+ * @periods: Number of periods in the cyclic transfer
+ * @period_size: Size of a period in bytes in cyclic transfers
  */
 struct xdma_desc {
 	struct virt_dma_desc		vdesc;
@@ -93,6 +96,9 @@ struct xdma_desc {
 	u32				dblk_num;
 	u32				desc_num;
 	u32				completed_desc_num;
+	bool				cyclic;
+	u32				periods;
+	u32				period_size;
 };
 
 #define XDMA_DEV_STATUS_REG_DMA		BIT(0)
@@ -174,6 +180,25 @@ static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
 	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
 }
 
+/**
+ * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
+{
+	struct xdma_desc_block *block;
+	struct xdma_hw_desc *desc;
+	int i;
+
+	block = sw_desc->desc_blocks;
+	for (i = 0; i < sw_desc->desc_num - 1; i++) {
+		desc = block->virt_addr + i * XDMA_DESC_SIZE;
+		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
+	}
+	desc = block->virt_addr + i * XDMA_DESC_SIZE;
+	desc->next_desc = cpu_to_le64(block->dma_addr);
+}
+
 static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
 {
 	return container_of(chan, struct xdma_chan, vchan.chan);
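
Note: the cyclic link helper added above closes the descriptor chain on itself: each descriptor's next_desc points at the following one, and the last points back to the first, so the engine keeps cycling through the same period buffers until the transfer is terminated. The user-space sketch below (not part of the patch; struct toy_desc and every name in it are made up purely for illustration) shows the same ring-closure pattern in isolation.

#include <stdio.h>

#define NUM_DESC 4	/* stands in for sw_desc->desc_num */

struct toy_desc {
	unsigned int idx;
	struct toy_desc *next;	/* stands in for the hardware next_desc field */
};

int main(void)
{
	struct toy_desc ring[NUM_DESC];
	const struct toy_desc *d;
	unsigned int i;

	/* link descriptor i to descriptor i + 1, as the loop in the patch does */
	for (i = 0; i < NUM_DESC - 1; i++) {
		ring[i].idx = i;
		ring[i].next = &ring[i + 1];
	}
	/* the last descriptor wraps back to the first one, closing the ring */
	ring[i].idx = i;
	ring[i].next = &ring[0];

	/* walk twice around the ring to show that traversal never terminates */
	d = &ring[0];
	for (i = 0; i < 2 * NUM_DESC; i++) {
		printf("desc %u -> desc %u\n", d->idx, d->next->idx);
		d = d->next;
	}

	return 0;
}
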
@@ -231,9 +256,10 @@ static void xdma_free_desc(struct virt_dma_desc *vdesc)
  * xdma_alloc_desc - Allocate descriptor
  * @chan: DMA channel pointer
  * @desc_num: Number of hardware descriptors
+ * @cyclic: Whether this is a cyclic transfer
  */
 static struct xdma_desc *
-xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
 {
 	struct xdma_desc *sw_desc;
 	struct xdma_hw_desc *desc;
@@ -249,13 +275,17 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
 
 	sw_desc->chan = chan;
 	sw_desc->desc_num = desc_num;
+	sw_desc->cyclic = cyclic;
 	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
 	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
 				       GFP_NOWAIT);
 	if (!sw_desc->desc_blocks)
 		goto failed;
 
-	control = XDMA_DESC_CONTROL(1, 0);
+	if (cyclic)
+		control = XDMA_DESC_CONTROL_CYCLIC;
+	else
+		control = XDMA_DESC_CONTROL(1, 0);
 
 	sw_desc->dblk_num = dblk_num;
 	for (i = 0; i < sw_desc->dblk_num; i++) {
@@ -269,7 +299,10 @@ xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
 		desc[j].control = cpu_to_le32(control);
 	}
 
-	xdma_link_sg_desc_blocks(sw_desc);
+	if (cyclic)
+		xdma_link_cyclic_desc_blocks(sw_desc);
+	else
+		xdma_link_sg_desc_blocks(sw_desc);
 
 	return sw_desc;
 
@@ -469,7 +502,7 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	for_each_sg(sgl, sg, sg_len, i)
 		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
 
-	sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
 	if (!sw_desc)
 		return NULL;
 	sw_desc->dir = dir;
@@ -524,6 +557,81 @@ xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
+/**
+ * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
+ * @chan: DMA channel pointer
+ * @address: Device DMA address to access
+ * @size: Total length to transfer
+ * @period_size: Period size to use for each transfer
+ * @dir: Transfer direction
+ * @flags: Transfer ack flags
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
+		     size_t size, size_t period_size,
+		     enum dma_transfer_direction dir,
+		     unsigned long flags)
+{
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_device *xdev = xdma_chan->xdev_hdl;
+	unsigned int periods = size / period_size;
+	struct dma_async_tx_descriptor *tx_desc;
+	struct xdma_desc_block *dblk;
+	struct xdma_hw_desc *desc;
+	struct xdma_desc *sw_desc;
+	unsigned int i;
+
+	/*
+	 * Simplify the whole logic by preventing an abnormally high number of
+	 * periods and periods size.
+	 */
+	if (period_size > XDMA_DESC_BLEN_MAX) {
+		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
+		return NULL;
+	}
+
+	if (periods > XDMA_DESC_ADJACENT) {
+		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
+		return NULL;
+	}
+
+	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
+	if (!sw_desc)
+		return NULL;
+
+	sw_desc->periods = periods;
+	sw_desc->period_size = period_size;
+	sw_desc->dir = dir;
+
+	dblk = sw_desc->desc_blocks;
+	desc = dblk->virt_addr;
+
+	/* fill hardware descriptor */
+	for (i = 0; i < periods; i++) {
+		desc->bytes = cpu_to_le32(period_size);
+		if (dir == DMA_MEM_TO_DEV) {
+			desc->src_addr = cpu_to_le64(address + i * period_size);
+			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
+		} else {
+			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
+			desc->dst_addr = cpu_to_le64(address + i * period_size);
+		}
+
+		desc++;
+	}
+
+	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+	if (!tx_desc)
+		goto failed;
+
+	return tx_desc;
+
+failed:
+	xdma_free_desc(&sw_desc->vdesc);
+
+	return NULL;
+}
+
 /**
  * xdma_device_config - Configure the DMA channel
  * @chan: DMA channel
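
Note: xdma_prep_dma_cyclic() is not called by consumers directly; it is reached through the generic dmaengine API once device_prep_dma_cyclic is wired up in probe (see the last hunk). The client-side sketch below is only a hedged illustration of that path: my_start_cyclic() and my_period_done() are hypothetical names, and a real client would normally call dmaengine_slave_config() first so that cfg.src_addr/cfg.dst_addr, which the prep routine reads, are populated.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* invoked once per completed period, via vchan_cyclic_callback() in the ISR */
static void my_period_done(void *arg)
{
	/* consume the period that just completed */
}

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* ends up in xdma_prep_dma_cyclic() for channels backed by this driver */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = my_period_done;
	tx->callback_param = NULL;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
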
@@ -583,7 +691,36 @@ static int xdma_alloc_chan_resources(struct dma_chan *chan)
 static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
 {
-	return dma_cookie_status(chan, cookie, state);
+	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+	struct xdma_desc *desc = NULL;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
+	unsigned int period_idx;
+	u32 residue = 0;
+
+	ret = dma_cookie_status(chan, cookie, state);
+	if (ret == DMA_COMPLETE)
+		return ret;
+
+	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+
+	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
+	if (vd)
+		desc = to_xdma_desc(vd);
+	if (!desc || !desc->cyclic) {
+		spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+		return ret;
+	}
+
+	period_idx = desc->completed_desc_num % desc->periods;
+	residue = (desc->periods - period_idx) * desc->period_size;
+
+	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+
+	dma_set_residue(state, residue);
+
+	return ret;
 }
 
 /**
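
Note: because residue_granularity is set to DMA_RESIDUE_GRANULARITY_SEGMENT later in this patch, the residue computed above only advances in whole-period steps. A minimal, hypothetical client helper for reading it could look like this (my_bytes_left() is a made-up name, not part of the patch):

#include <linux/dmaengine.h>

static size_t my_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		return 0;

	/* for cyclic transfers this is a multiple of the period size */
	return state.residue;
}
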
@@ -599,6 +736,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	struct virt_dma_desc *vd;
 	struct xdma_desc *desc;
 	int ret;
+	u32 st;
 
 	spin_lock(&xchan->vchan.lock);
 
@@ -617,6 +755,19 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 		goto out;
 
 	desc->completed_desc_num += complete_desc_num;
+
+	if (desc->cyclic) {
+		ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
+				  &st);
+		if (ret)
+			goto out;
+
+		regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
+
+		vchan_cyclic_callback(vd);
+		goto out;
+	}
+
 	/*
 	 * if all data blocks are transferred, remove and complete the request
 	 */
@@ -630,7 +781,7 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
 	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
 		goto out;
 
-	/* transfer the rest of data */
+	/* transfer the rest of data (SG only) */
 	xdma_xfer_start(xchan);
 
 out:
@@ -928,8 +1079,10 @@ static int xdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);
 
 	xdev->dma_dev.dev = &pdev->dev;
+	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
 	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
 	xdev->dma_dev.device_tx_status = xdma_tx_status;
@@ -939,6 +1092,7 @@ static int xdma_probe(struct platform_device *pdev)
 	xdev->dma_dev.filter.map = pdata->device_map;
 	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
 	xdev->dma_dev.filter.fn = xdma_filter_fn;
+	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;
 
 	ret = dma_async_device_register(&xdev->dma_dev);
 	if (ret) {