@@ -84,9 +84,13 @@
 #define AXI_DMAC_REG_DBG2		0x444
 #define AXI_DMAC_REG_PARTIAL_XFER_LEN	0x44c
 #define AXI_DMAC_REG_PARTIAL_XFER_ID	0x450
+#define AXI_DMAC_REG_CURRENT_SG_ID	0x454
+#define AXI_DMAC_REG_SG_ADDRESS	0x47c
+#define AXI_DMAC_REG_SG_ADDRESS_HIGH	0x4bc
 
 #define AXI_DMAC_CTRL_ENABLE		BIT(0)
 #define AXI_DMAC_CTRL_PAUSE		BIT(1)
+#define AXI_DMAC_CTRL_ENABLE_SG		BIT(2)
 
 #define AXI_DMAC_IRQ_SOT		BIT(0)
 #define AXI_DMAC_IRQ_EOT		BIT(1)
@@ -100,12 +104,16 @@
 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
 
+/* Flags for axi_dmac_hw_desc.flags */
+#define AXI_DMAC_HW_FLAG_LAST	BIT(0)
+#define AXI_DMAC_HW_FLAG_IRQ	BIT(1)
+
 struct axi_dmac_hw_desc {
 	u32 flags;
 	u32 id;
 	u64 dest_addr;
 	u64 src_addr;
-	u64 __unused;
+	u64 next_sg_addr;
 	u32 y_len;
 	u32 x_len;
 	u32 src_stride;
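
The repurposed `__unused` word turns each hardware descriptor into a link in a chain: the core follows `next_sg_addr` from entry to entry until it reaches one flagged `AXI_DMAC_HW_FLAG_LAST`, and `AXI_DMAC_HW_FLAG_IRQ` marks the entry that raises the end-of-transfer interrupt. A minimal userspace sketch of that walk; the reduced `hw_desc` struct, the `HW_FLAG_*` macros and `walk_chain()` are local stand-ins for illustration, not part of the patch:

#include <stdint.h>
#include <stdio.h>

#define HW_FLAG_LAST (1u << 0)	/* mirrors AXI_DMAC_HW_FLAG_LAST */
#define HW_FLAG_IRQ  (1u << 1)	/* mirrors AXI_DMAC_HW_FLAG_IRQ */

struct hw_desc {		/* reduced stand-in for axi_dmac_hw_desc */
	uint32_t flags;
	uint64_t dest_addr;
	uint64_t src_addr;
	uint64_t next_sg_addr;
};

/* Process descriptors the way the SG engine conceptually does:
 * handle one entry, then move to the next, stopping on FLAG_LAST. */
static void walk_chain(const struct hw_desc *d)
{
	for (;;) {
		printf("xfer 0x%llx -> 0x%llx%s\n",
		       (unsigned long long)d->src_addr,
		       (unsigned long long)d->dest_addr,
		       (d->flags & HW_FLAG_IRQ) ? " (raises EOT IRQ)" : "");
		if (d->flags & HW_FLAG_LAST)
			break;
		d++;	/* hardware would instead fetch *next_sg_addr */
	}
}

int main(void)
{
	struct hw_desc chain[2] = {
		{ .src_addr = 0x1000, .dest_addr = 0x2000 },
		{ .src_addr = 0x3000, .dest_addr = 0x2100,
		  .flags = HW_FLAG_LAST | HW_FLAG_IRQ },
	};

	walk_chain(chain);
	return 0;
}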
@@ -153,6 +161,7 @@ struct axi_dmac_chan {
 	bool hw_partial_xfer;
 	bool hw_cyclic;
 	bool hw_2d;
+	bool hw_sg;
 };
 
 struct axi_dmac {
@@ -227,9 +236,11 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	unsigned int flags = 0;
 	unsigned int val;
 
-	val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
-	if (val) /* Queue is full, wait for the next SOT IRQ */
-		return;
+	if (!chan->hw_sg) {
+		val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
+		if (val) /* Queue is full, wait for the next SOT IRQ */
+			return;
+	}
 
 	desc = chan->next_desc;
@@ -248,9 +259,10 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 		return;
 	}
 
-	desc->num_submitted++;
-	if (desc->num_submitted == desc->num_sgs ||
-	    desc->have_partial_xfer) {
+	if (chan->hw_sg) {
+		chan->next_desc = NULL;
+	} else if (++desc->num_submitted == desc->num_sgs ||
+		   desc->have_partial_xfer) {
 		if (desc->cyclic)
 			desc->num_submitted = 0; /* Start again */
 		else
@@ -262,14 +274,16 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 
 	sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
-	if (axi_dmac_dest_is_mem(chan)) {
-		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
-		axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
-	}
+	if (!chan->hw_sg) {
+		if (axi_dmac_dest_is_mem(chan)) {
+			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+			axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
+		}
 
-	if (axi_dmac_src_is_mem(chan)) {
-		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
-		axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+		if (axi_dmac_src_is_mem(chan)) {
+			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+			axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
+		}
 	}
 
 	/*
@@ -284,8 +298,14 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
 	if (chan->hw_partial_xfer)
 		flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
 
-	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
-	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+	if (chan->hw_sg) {
+		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, (u32)sg->hw_phys);
+		axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS_HIGH,
+			       (u64)sg->hw_phys >> 32);
+	} else {
+		axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+		axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
+	}
 	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
 	axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
 }
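
In SG mode the engine is pointed at the first descriptor instead of being programmed with per-segment lengths: `SG_ADDRESS`/`SG_ADDRESS_HIGH` form a 32-bit register pair carrying the 64-bit bus address of the chain. A self-contained sketch of the same low/high split; the `regs` array and `reg_write()` are fabricated models of the MMIO write, only the offsets come from the defines above:

#include <stdint.h>
#include <stdio.h>

#define REG_SG_ADDRESS		0x47c
#define REG_SG_ADDRESS_HIGH	0x4bc

static uint32_t regs[0x500 / 4];	/* fake 32-bit register file */

static void reg_write(uint32_t off, uint32_t val)
{
	regs[off / 4] = val;
}

int main(void)
{
	uint64_t hw_phys = 0x123456780ULL;	/* example descriptor bus address */

	/* Same split as the patch: low word, then the upper 32 bits. */
	reg_write(REG_SG_ADDRESS, (uint32_t)hw_phys);
	reg_write(REG_SG_ADDRESS_HIGH, hw_phys >> 32);

	/* Reassemble to verify nothing was lost in the split. */
	uint64_t readback = ((uint64_t)regs[REG_SG_ADDRESS_HIGH / 4] << 32) |
			    regs[REG_SG_ADDRESS / 4];
	printf("0x%llx\n", (unsigned long long)readback);	/* 0x123456780 */
	return 0;
}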
@@ -362,6 +382,9 @@ static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
 	rslt->result = DMA_TRANS_NOERROR;
 	rslt->residue = 0;
 
+	if (chan->hw_sg)
+		return;
+
 	/*
 	 * We get here if the last completed segment is partial, which
 	 * means we can compute the residue from that segment onwards
@@ -388,36 +411,46 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 	    (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
 		axi_dmac_dequeue_partial_xfers(chan);
 
-	do {
-		sg = &active->sg[active->num_completed];
-		if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
-			break;
-		if (!(BIT(sg->hw->id) & completed_transfers))
-			break;
-		active->num_completed++;
-		sg->hw->id = AXI_DMAC_SG_UNUSED;
-		if (sg->schedule_when_free) {
-			sg->schedule_when_free = false;
-			start_next = true;
+	if (chan->hw_sg) {
+		if (active->cyclic) {
+			vchan_cyclic_callback(&active->vdesc);
+		} else {
+			list_del(&active->vdesc.node);
+			vchan_cookie_complete(&active->vdesc);
+			active = axi_dmac_active_desc(chan);
 		}
+	} else {
+		do {
+			sg = &active->sg[active->num_completed];
+			if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+				break;
+			if (!(BIT(sg->hw->id) & completed_transfers))
+				break;
+			active->num_completed++;
+			sg->hw->id = AXI_DMAC_SG_UNUSED;
+			if (sg->schedule_when_free) {
+				sg->schedule_when_free = false;
+				start_next = true;
+			}
 
-		if (sg->partial_len)
-			axi_dmac_compute_residue(chan, active);
+			if (sg->partial_len)
+				axi_dmac_compute_residue(chan, active);
 
-		if (active->cyclic)
-			vchan_cyclic_callback(&active->vdesc);
+			if (active->cyclic)
+				vchan_cyclic_callback(&active->vdesc);
 
-		if (active->num_completed == active->num_sgs ||
-		    sg->partial_len) {
-			if (active->cyclic) {
-				active->num_completed = 0; /* wrap around */
-			} else {
-				list_del(&active->vdesc.node);
-				vchan_cookie_complete(&active->vdesc);
-				active = axi_dmac_active_desc(chan);
+			if (active->num_completed == active->num_sgs ||
+			    sg->partial_len) {
+				if (active->cyclic) {
+					active->num_completed = 0; /* wrap around */
+				} else {
+					list_del(&active->vdesc.node);
+					vchan_cookie_complete(&active->vdesc);
+					active = axi_dmac_active_desc(chan);
+				}
 			}
-		}
-	} while (active);
+		} while (active);
+	}
 
 	return start_next;
 }
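
The split here is the heart of the patch: without hardware SG, software retires segments one at a time by matching each segment's 5-bit transfer ID against the `completed_transfers` bitmask; with hardware SG, the single EOT raised by the FLAG_IRQ descriptor (arranged in the alloc path below) completes the whole chain in one step. A toy model of just the per-segment bookkeeping; `reap()`, the `ids` array and the example IDs are local to the sketch:

#include <stdint.h>
#include <stdio.h>

#define SG_UNUSED 32u	/* mirrors AXI_DMAC_SG_UNUSED */

/* Toy model of the non-SG reaping loop: retire segments while their
 * hardware IDs appear in the completed_transfers bitmask. */
static unsigned int reap(uint8_t *ids, unsigned int num_sgs,
			 uint32_t completed_transfers)
{
	unsigned int done = 0;

	while (done < num_sgs) {
		if (ids[done] == SG_UNUSED)	/* not yet submitted */
			break;
		if (!((1u << ids[done]) & completed_transfers))
			break;			/* still in flight */
		ids[done++] = SG_UNUSED;	/* segment retired */
	}
	return done;
}

int main(void)
{
	/* Two segments submitted with IDs 4 and 5, a third still queued. */
	uint8_t ids[3] = { 4, 5, SG_UNUSED };

	printf("%u of 3 segments completed\n",
	       reap(ids, 3, (1u << 4) | (1u << 5)));	/* prints 2 */
	return 0;
}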
@@ -481,8 +514,12 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac *dmac = chan_to_axi_dmac(chan);
 	unsigned long flags;
+	u32 ctrl = AXI_DMAC_CTRL_ENABLE;
+
+	if (chan->hw_sg)
+		ctrl |= AXI_DMAC_CTRL_ENABLE_SG;
 
-	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);
+	axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, ctrl);
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	if (vchan_issue_pending(&chan->vchan))
@@ -519,9 +556,13 @@ axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 
 		hws[i].id = AXI_DMAC_SG_UNUSED;
 		hws[i].flags = 0;
+
+		/* Link hardware descriptors */
+		hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);
 	}
 
-	desc->num_sgs = num_sgs;
+	/* The last hardware descriptor will trigger an interrupt */
+	desc->sg[num_sgs - 1].hw->flags = AXI_DMAC_HW_FLAG_LAST | AXI_DMAC_HW_FLAG_IRQ;
 
 	return desc;
 }
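
The linking works because all `num_sgs` hardware descriptors sit in one coherent allocation whose bus address is `hw_phys`, so entry `i` can point at entry `i + 1` with plain offset arithmetic. A quick standalone check of the values that formula produces; the stand-in struct only approximates the layout from the diff, so its `sizeof` need not match the kernel's:

#include <stdint.h>
#include <stdio.h>

struct hw_desc {	/* stand-in approximating axi_dmac_hw_desc */
	uint32_t flags, id;
	uint64_t dest_addr, src_addr, next_sg_addr;
	uint32_t y_len, x_len, src_stride, dst_stride;
};

int main(void)
{
	uint64_t hw_phys = 0x10000000ULL;	/* example coherent bus address */
	unsigned int i, num_sgs = 3;
	struct hw_desc hws[3] = {0};

	/* Same formula as the patch: each entry links to its successor. */
	for (i = 0; i < num_sgs; i++)
		hws[i].next_sg_addr = hw_phys + (i + 1) * sizeof(*hws);

	/* The last link is computed but never followed: FLAG_LAST on the
	 * final entry stops the hardware before it dereferences it. */
	for (i = 0; i < num_sgs; i++)
		printf("desc[%u].next_sg_addr = 0x%llx\n", i,
		       (unsigned long long)hws[i].next_sg_addr);
	return 0;
}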
@@ -781,6 +822,9 @@ static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
 	case AXI_DMAC_REG_DBG2:
 	case AXI_DMAC_REG_PARTIAL_XFER_LEN:
 	case AXI_DMAC_REG_PARTIAL_XFER_ID:
+	case AXI_DMAC_REG_CURRENT_SG_ID:
+	case AXI_DMAC_REG_SG_ADDRESS:
+	case AXI_DMAC_REG_SG_ADDRESS_HIGH:
 		return true;
 	default:
 		return false;
@@ -933,6 +977,10 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
 	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
 		chan->hw_cyclic = true;
 
+	axi_dmac_write(dmac, AXI_DMAC_REG_SG_ADDRESS, 0xffffffff);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_SG_ADDRESS))
+		chan->hw_sg = true;
+
 	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
 	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
 		chan->hw_2d = true;
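
Detection reuses the driver's existing probe idiom, visible just below for `Y_LENGTH`: write a value, read it back, and treat a zero read-back as "feature not synthesized", which is what the check implies about unimplemented registers in this core. A sketch of the idiom against two fake cores; `fake_core`, `mmio_write()`, `mmio_read()` and `detect_sg()` are invented models for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_SG_ADDRESS 0x47c

/* Fake MMIO: a core without the SG engine ignores writes to the
 * register, so it always reads back as zero. */
struct fake_core {
	bool has_sg;
	uint32_t sg_address;
};

static void mmio_write(struct fake_core *c, uint32_t reg, uint32_t val)
{
	if (reg == REG_SG_ADDRESS && c->has_sg)
		c->sg_address = val;
}

static uint32_t mmio_read(struct fake_core *c, uint32_t reg)
{
	return (reg == REG_SG_ADDRESS) ? c->sg_address : 0;
}

static bool detect_sg(struct fake_core *c)
{
	mmio_write(c, REG_SG_ADDRESS, 0xffffffff);	/* as in the patch */
	return mmio_read(c, REG_SG_ADDRESS) != 0;
}

int main(void)
{
	struct fake_core with = { .has_sg = true }, without = { 0 };

	printf("with SG: %d, without SG: %d\n",
	       detect_sg(&with), detect_sg(&without));	/* prints 1, 0 */
	return 0;
}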
@@ -1034,6 +1082,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
 	dma_dev->directions = BIT(dmac->chan.direction);
 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+	dma_dev->max_sg_burst = 31; /* 31 SGs maximum in one burst */
 	INIT_LIST_HEAD(&dma_dev->channels);
 
 	dmac->chan.vchan.desc_free = axi_dmac_desc_free;