  * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  */
 
+#include <linux/dmaengine.h>
 #include <linux/mhi_ep.h>
 #include <linux/module.h>
+#include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/pci-epc.h>
 #include <linux/pci-epf.h>
 
 #define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
 
+/* Platform specific flags */
+#define MHI_EPF_USE_DMA BIT(0)
+
 struct pci_epf_mhi_ep_info {
 	const struct mhi_ep_cntrl_config *config;
 	struct pci_epf_header *epf_header;
 	enum pci_barno bar_num;
 	u32 epf_flags;
 	u32 msi_count;
 	u32 mru;
+	u32 flags;
 };
 
 #define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction)	\
@@ -99,6 +105,8 @@ struct pci_epf_mhi {
 	struct mutex lock;
 	void __iomem *mmio;
 	resource_size_t mmio_phys;
+	struct dma_chan *dma_chan_tx;
+	struct dma_chan *dma_chan_rx;
 	u32 mmio_size;
 	int irq;
 };
@@ -184,8 +192,8 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
 			  vector + 1);
 }
 
-static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
-				       void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+				 void *to, size_t size)
 {
 	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
 	size_t offset = get_align_offset(epf_mhi, from);
@@ -212,8 +220,8 @@ static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
 	return 0;
 }
 
-static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
-				     void *from, u64 to, size_t size)
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+				  void *from, u64 to, size_t size)
 {
 	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
 	size_t offset = get_align_offset(epf_mhi, to);
@@ -240,6 +248,200 @@ static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
 	return 0;
 }
 
+static void pci_epf_mhi_dma_callback(void *param)
+{
+	complete(param);
+}
+
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+				 void *to, size_t size)
+{
+	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct dma_chan *chan = epf_mhi->dma_chan_rx;
+	struct device *dev = &epf_mhi->epf->dev;
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config = {};
+	dma_cookie_t cookie;
+	dma_addr_t dst_addr;
+	int ret;
+
+	mutex_lock(&epf_mhi->lock);
+
+	config.direction = DMA_DEV_TO_MEM;
+	config.src_addr = from;
+
+	ret = dmaengine_slave_config(chan, &config);
+	if (ret) {
+		dev_err(dev, "Failed to configure DMA channel\n");
+		goto err_unlock;
+	}
+
+	dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(dma_dev, dst_addr);
+	if (ret) {
+		dev_err(dev, "Failed to map remote memory\n");
+		goto err_unlock;
+	}
+
+	desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "Failed to prepare DMA\n");
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	desc->callback = pci_epf_mhi_dma_callback;
+	desc->callback_param = &complete;
+
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(dev, "Failed to do DMA submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(chan);
+	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+	if (!ret) {
+		dev_err(dev, "DMA transfer timeout\n");
+		dmaengine_terminate_sync(chan);
+		ret = -ETIMEDOUT;
+	}
+
+err_unmap:
+	dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+err_unlock:
+	mutex_unlock(&epf_mhi->lock);
+
+	return ret;
+}
+
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+				  u64 to, size_t size)
+{
+	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct dma_chan *chan = epf_mhi->dma_chan_tx;
+	struct device *dev = &epf_mhi->epf->dev;
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config = {};
+	dma_cookie_t cookie;
+	dma_addr_t src_addr;
+	int ret;
+
+	mutex_lock(&epf_mhi->lock);
+
+	config.direction = DMA_MEM_TO_DEV;
+	config.dst_addr = to;
+
+	ret = dmaengine_slave_config(chan, &config);
+	if (ret) {
+		dev_err(dev, "Failed to configure DMA channel\n");
+		goto err_unlock;
+	}
+
+	src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+	ret = dma_mapping_error(dma_dev, src_addr);
+	if (ret) {
+		dev_err(dev, "Failed to map remote memory\n");
+		goto err_unlock;
+	}
+
+	desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "Failed to prepare DMA\n");
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	desc->callback = pci_epf_mhi_dma_callback;
+	desc->callback_param = &complete;
+
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(dev, "Failed to do DMA submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(chan);
+	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+	if (!ret) {
+		dev_err(dev, "DMA transfer timeout\n");
+		dmaengine_terminate_sync(chan);
+		ret = -ETIMEDOUT;
+	}
+
+err_unmap:
+	dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
+err_unlock:
+	mutex_unlock(&epf_mhi->lock);
+
+	return ret;
+}
+
+struct epf_dma_filter {
+	struct device *dev;
+	u32 dma_mask;
+};
+
+static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
+{
+	struct epf_dma_filter *filter = node;
+	struct dma_slave_caps caps;
+
+	memset(&caps, 0, sizeof(caps));
+	dma_get_slave_caps(chan, &caps);
+
+	return chan->device->dev == filter->dev && filter->dma_mask &
+	       caps.directions;
+}
+
+static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
+{
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct device *dev = &epf_mhi->epf->dev;
+	struct epf_dma_filter filter;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	filter.dev = dma_dev;
+	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
+						   &filter);
+	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
+		dev_err(dev, "Failed to request tx channel\n");
+		return -ENODEV;
+	}
+
+	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
+						   &filter);
+	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
+		dev_err(dev, "Failed to request rx channel\n");
+		dma_release_channel(epf_mhi->dma_chan_tx);
+		epf_mhi->dma_chan_tx = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
+{
+	dma_release_channel(epf_mhi->dma_chan_tx);
+	dma_release_channel(epf_mhi->dma_chan_rx);
+	epf_mhi->dma_chan_tx = NULL;
+	epf_mhi->dma_chan_rx = NULL;
+}
+
 static int pci_epf_mhi_core_init(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -289,6 +491,14 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 	struct device *dev = &epf->dev;
 	int ret;
 
+	if (info->flags & MHI_EPF_USE_DMA) {
+		ret = pci_epf_mhi_dma_init(epf_mhi);
+		if (ret) {
+			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+			return ret;
+		}
+	}
+
 	mhi_cntrl->mmio = epf_mhi->mmio;
 	mhi_cntrl->irq = epf_mhi->irq;
 	mhi_cntrl->mru = info->mru;
@@ -298,13 +508,20 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
 	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
 	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
-	mhi_cntrl->read_from_host = pci_epf_mhi_read_from_host;
-	mhi_cntrl->write_to_host = pci_epf_mhi_write_to_host;
+	if (info->flags & MHI_EPF_USE_DMA) {
+		mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
+		mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
+	} else {
+		mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+		mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+	}
 
 	/* Register the MHI EP controller */
 	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
 	if (ret) {
 		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		return ret;
 	}
 
@@ -314,10 +531,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 static int pci_epf_mhi_link_down(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
 	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
 
 	if (mhi_cntrl->mhi_dev) {
 		mhi_ep_power_down(mhi_cntrl);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		mhi_ep_unregister_controller(mhi_cntrl);
 	}
 
@@ -327,6 +547,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
 static int pci_epf_mhi_bme(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
 	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
 	struct device *dev = &epf->dev;
 	int ret;
@@ -339,6 +560,8 @@ static int pci_epf_mhi_bme(struct pci_epf *epf)
 		ret = mhi_ep_power_up(mhi_cntrl);
 		if (ret) {
 			dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+			if (info->flags & MHI_EPF_USE_DMA)
+				pci_epf_mhi_dma_deinit(epf_mhi);
 			mhi_ep_unregister_controller(mhi_cntrl);
 		}
 	}
@@ -389,6 +612,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
 	 */
 	if (mhi_cntrl->mhi_dev) {
 		mhi_ep_power_down(mhi_cntrl);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		mhi_ep_unregister_controller(mhi_cntrl);
 	}
 
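For context, the new flags field and the MHI_EPF_USE_DMA bit are meant to be set in a platform's pci_epf_mhi_ep_info; that is what makes pci_epf_mhi_link_up() install the eDMA read/write callbacks instead of the iATU ones. The sketch below is illustrative only and not part of this diff: example_mhi_config, example_epf_header and the BAR/MSI/MRU values are placeholders, not values taken from the commit.

/* Illustrative only: hypothetical platform info opting in to eDMA transfers */
static const struct pci_epf_mhi_ep_info example_epf_mhi_info = {
	.config = &example_mhi_config,		/* placeholder struct mhi_ep_cntrl_config */
	.epf_header = &example_epf_header,	/* placeholder struct pci_epf_header */
	.bar_num = BAR_0,
	.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
	.msi_count = 32,
	.mru = 0x8000,
	.flags = MHI_EPF_USE_DMA,		/* select pci_epf_mhi_edma_read()/write() */
};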