 /* The maximum ID allocated by the hardware is 31 */
 #define AXI_DMAC_SG_UNUSED 32U
 
+struct axi_dmac_hw_desc {
+        u32 flags;
+        u32 id;
+        u64 dest_addr;
+        u64 src_addr;
+        u64 __unused;
+        u32 y_len;
+        u32 x_len;
+        u32 src_stride;
+        u32 dst_stride;
+        u64 __pad[2];
+};
+
 struct axi_dmac_sg {
-        dma_addr_t src_addr;
-        dma_addr_t dest_addr;
-        unsigned int x_len;
-        unsigned int y_len;
-        unsigned int dest_stride;
-        unsigned int src_stride;
-        unsigned int id;
         unsigned int partial_len;
         bool schedule_when_free;
+
+        struct axi_dmac_hw_desc *hw;
+        dma_addr_t hw_phys;
 };
 
 struct axi_dmac_desc {
         struct virt_dma_desc vdesc;
+        struct axi_dmac_chan *chan;
+
         bool cyclic;
         bool have_partial_xfer;
 
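The new struct axi_dmac_hw_desc presumably describes, field for field, the fixed block the engine's scatter-gather logic reads from memory; the __unused and __pad members only keep that layout stable. A quick size check, illustrative only and not part of the patch (it assumes static_assert is available as in recent kernels):

/*
 * Sketch: with natural alignment the structure packs into 64 bytes
 * (2 x u32 + 3 x u64 + 4 x u32 + 2 x u64), one fixed-size block per
 * scatter-gather entry.
 */
static_assert(sizeof(struct axi_dmac_hw_desc) == 64,
              "axi_dmac_hw_desc is expected to be a 64-byte hardware block");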
@@ -232,7 +243,7 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
         sg = &desc->sg[desc->num_submitted];
 
         /* Already queued in cyclic mode. Wait for it to finish */
-        if (sg->id != AXI_DMAC_SG_UNUSED) {
+        if (sg->hw->id != AXI_DMAC_SG_UNUSED) {
                 sg->schedule_when_free = true;
                 return;
         }
@@ -249,16 +260,16 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
                 chan->next_desc = desc;
         }
 
-        sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
+        sg->hw->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);
 
         if (axi_dmac_dest_is_mem(chan)) {
-                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
-                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
+                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->hw->dest_addr);
+                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->hw->dst_stride);
         }
 
         if (axi_dmac_src_is_mem(chan)) {
-                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
-                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
+                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->hw->src_addr);
+                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->hw->src_stride);
         }
 
         /*
@@ -273,8 +284,8 @@ static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
         if (chan->hw_partial_xfer)
                 flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;
 
-        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
-        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
+        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->hw->x_len);
+        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->hw->y_len);
         axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
         axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
 }
@@ -289,9 +300,9 @@ static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
         struct axi_dmac_sg *sg)
 {
         if (chan->hw_2d)
-                return sg->x_len * sg->y_len;
+                return (sg->hw->x_len + 1) * (sg->hw->y_len + 1);
         else
-                return sg->x_len;
+                return (sg->hw->x_len + 1);
 }
 
 static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
@@ -310,9 +321,9 @@ static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
         list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
                 for (i = 0; i < desc->num_sgs; i++) {
                         sg = &desc->sg[i];
-                        if (sg->id == AXI_DMAC_SG_UNUSED)
+                        if (sg->hw->id == AXI_DMAC_SG_UNUSED)
                                 continue;
-                        if (sg->id == id) {
+                        if (sg->hw->id == id) {
                                 desc->have_partial_xfer = true;
                                 sg->partial_len = len;
                                 found_sg = true;
@@ -379,12 +390,12 @@ static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
 
         do {
                 sg = &active->sg[active->num_completed];
-                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
+                if (sg->hw->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
                         break;
-                if (!(BIT(sg->id) & completed_transfers))
+                if (!(BIT(sg->hw->id) & completed_transfers))
                         break;
                 active->num_completed++;
-                sg->id = AXI_DMAC_SG_UNUSED;
+                sg->hw->id = AXI_DMAC_SG_UNUSED;
                 if (sg->schedule_when_free) {
                         sg->schedule_when_free = false;
                         start_next = true;
@@ -479,23 +490,54 @@ static void axi_dmac_issue_pending(struct dma_chan *c)
         spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
 
-static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
+static struct axi_dmac_desc *
+axi_dmac_alloc_desc(struct axi_dmac_chan *chan, unsigned int num_sgs)
 {
+        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
+        struct device *dev = dmac->dma_dev.dev;
+        struct axi_dmac_hw_desc *hws;
         struct axi_dmac_desc *desc;
+        dma_addr_t hw_phys;
         unsigned int i;
 
         desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
         if (!desc)
                 return NULL;
+        desc->num_sgs = num_sgs;
+        desc->chan = chan;
+
+        hws = dma_alloc_coherent(dev, PAGE_ALIGN(num_sgs * sizeof(*hws)),
+                                 &hw_phys, GFP_ATOMIC);
+        if (!hws) {
+                kfree(desc);
+                return NULL;
+        }
 
-        for (i = 0; i < num_sgs; i++)
-                desc->sg[i].id = AXI_DMAC_SG_UNUSED;
+        for (i = 0; i < num_sgs; i++) {
+                desc->sg[i].hw = &hws[i];
+                desc->sg[i].hw_phys = hw_phys + i * sizeof(*hws);
+
+                hws[i].id = AXI_DMAC_SG_UNUSED;
+                hws[i].flags = 0;
+        }
 
         desc->num_sgs = num_sgs;
 
         return desc;
 }
 
+static void axi_dmac_free_desc(struct axi_dmac_desc *desc)
+{
+        struct axi_dmac *dmac = chan_to_axi_dmac(desc->chan);
+        struct device *dev = dmac->dma_dev.dev;
+        struct axi_dmac_hw_desc *hw = desc->sg[0].hw;
+        dma_addr_t hw_phys = desc->sg[0].hw_phys;
+
+        dma_free_coherent(dev, PAGE_ALIGN(desc->num_sgs * sizeof(*hw)),
+                          hw, hw_phys);
+        kfree(desc);
+}
+
 static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
         enum dma_transfer_direction direction, dma_addr_t addr,
         unsigned int num_periods, unsigned int period_len,
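The hardware descriptors for a whole axi_dmac_desc come from one dma_alloc_coherent() block, so axi_dmac_free_desc() only needs the pointer and bus address saved in sg[0], and every error path after allocation has to unwind through it rather than a bare kfree(). A minimal sketch of that pairing, assuming the helpers above (the wrapper name example_prep is hypothetical):

static struct axi_dmac_desc *
example_prep(struct axi_dmac_chan *chan, unsigned int num_sgs, unsigned int len)
{
        struct axi_dmac_desc *desc;

        desc = axi_dmac_alloc_desc(chan, num_sgs);
        if (!desc)
                return NULL;

        if (!axi_dmac_check_len(chan, len)) {
                /* a bare kfree() here would leak the coherent hw block */
                axi_dmac_free_desc(desc);
                return NULL;
        }

        return desc;
}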
@@ -519,21 +561,22 @@ static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
         for (i = 0; i < num_periods; i++) {
                 for (len = period_len; len > segment_size; sg++) {
                         if (direction == DMA_DEV_TO_MEM)
-                                sg->dest_addr = addr;
+                                sg->hw->dest_addr = addr;
                         else
-                                sg->src_addr = addr;
-                        sg->x_len = segment_size;
-                        sg->y_len = 1;
+                                sg->hw->src_addr = addr;
+                        sg->hw->x_len = segment_size - 1;
+                        sg->hw->y_len = 0;
+                        sg->hw->flags = 0;
                         addr += segment_size;
                         len -= segment_size;
                 }
 
                 if (direction == DMA_DEV_TO_MEM)
-                        sg->dest_addr = addr;
+                        sg->hw->dest_addr = addr;
                 else
-                        sg->src_addr = addr;
-                sg->x_len = len;
-                sg->y_len = 1;
+                        sg->hw->src_addr = addr;
+                sg->hw->x_len = len - 1;
+                sg->hw->y_len = 0;
                 sg++;
                 addr += len;
         }
@@ -560,7 +603,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
         for_each_sg(sgl, sg, sg_len, i)
                 num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
 
-        desc = axi_dmac_alloc_desc(num_sgs);
+        desc = axi_dmac_alloc_desc(chan, num_sgs);
         if (!desc)
                 return NULL;
 
@@ -569,7 +612,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
         for_each_sg(sgl, sg, sg_len, i) {
                 if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                     !axi_dmac_check_len(chan, sg_dma_len(sg))) {
-                        kfree(desc);
+                        axi_dmac_free_desc(desc);
                         return NULL;
                 }
 
@@ -619,7 +662,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
         num_periods = buf_len / period_len;
         num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 
-        desc = axi_dmac_alloc_desc(num_periods * num_segments);
+        desc = axi_dmac_alloc_desc(chan, num_periods * num_segments);
         if (!desc)
                 return NULL;
 
@@ -674,26 +717,26 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
                 return NULL;
         }
 
-        desc = axi_dmac_alloc_desc(1);
+        desc = axi_dmac_alloc_desc(chan, 1);
         if (!desc)
                 return NULL;
 
         if (axi_dmac_src_is_mem(chan)) {
-                desc->sg[0].src_addr = xt->src_start;
-                desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
+                desc->sg[0].hw->src_addr = xt->src_start;
+                desc->sg[0].hw->src_stride = xt->sgl[0].size + src_icg;
         }
 
         if (axi_dmac_dest_is_mem(chan)) {
-                desc->sg[0].dest_addr = xt->dst_start;
-                desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
+                desc->sg[0].hw->dest_addr = xt->dst_start;
+                desc->sg[0].hw->dst_stride = xt->sgl[0].size + dst_icg;
         }
 
         if (chan->hw_2d) {
-                desc->sg[0].x_len = xt->sgl[0].size;
-                desc->sg[0].y_len = xt->numf;
+                desc->sg[0].hw->x_len = xt->sgl[0].size - 1;
+                desc->sg[0].hw->y_len = xt->numf - 1;
         } else {
-                desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
-                desc->sg[0].y_len = 1;
+                desc->sg[0].hw->x_len = xt->sgl[0].size * xt->numf - 1;
+                desc->sg[0].hw->y_len = 0;
         }
 
         if (flags & DMA_CYCLIC)
@@ -709,7 +752,7 @@ static void axi_dmac_free_chan_resources(struct dma_chan *c)
 
 static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
 {
-        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
+        axi_dmac_free_desc(to_axi_dmac_desc(vdesc));
 }
 
 static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
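One convention worth noting when reading the hunks above: the controller takes transfer lengths as length minus one. The old code applied the conversion while programming AXI_DMAC_REG_X_LENGTH/Y_LENGTH; the hardware descriptors now store the already-converted values, which is why axi_dmac_fill_linear_sg() writes segment_size - 1 and axi_dmac_total_sg_bytes() adds the one back. A minimal sketch of the convention, assuming a filled descriptor (the helper name example_fill_2d is hypothetical):

static unsigned int example_fill_2d(struct axi_dmac_hw_desc *hw,
                                    unsigned int row_len, unsigned int rows)
{
        hw->x_len = row_len - 1;        /* bytes per row, stored as N - 1 */
        hw->y_len = rows - 1;           /* number of rows, stored as N - 1 */

        /* recover the byte count the same way axi_dmac_total_sg_bytes() does */
        return (hw->x_len + 1) * (hw->y_len + 1);
}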