
Commit 7b99aaa

Mani-Sadhasivam authored and kwilczynski committed
PCI: epf-mhi: Add eDMA support
Add support for the Embedded DMA (eDMA) engine available in the DesignWare PCIe IP to transfer the MHI buffers between the host and the endpoint. Using eDMA helps achieve greater throughput, as the transfers are offloaded from the CPUs.

To differentiate the iATU and eDMA APIs, the pci_epf_mhi_{read/write} APIs are renamed to pci_epf_mhi_iatu_{read/write}, and separate eDMA-specific APIs, pci_epf_mhi_edma_{read/write}, are introduced.

Platforms that require eDMA support can pass the MHI_EPF_USE_DMA flag through pci_epf_mhi_ep_info.

[kwilczynski: commit log]
Link: https://lore.kernel.org/linux-pci/20230717065459.14138-5-manivannan.sadhasivam@linaro.org
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Krzysztof Wilczyński <kwilczynski@kernel.org>
1 parent ff8d920 commit 7b99aaa
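For illustration only (not part of this commit), a platform-specific info entry might opt into eDMA roughly as sketched below. The names foo_ep_info, foo_mhi_config, and foo_header, as well as the numeric values, are placeholders standing in for whatever the platform already defines in the driver; only the new .flags field comes from this patch.

/* Hypothetical platform entry (sketch): everything except .flags is a placeholder */
static const struct pci_epf_mhi_ep_info foo_ep_info = {
        .config = &foo_mhi_config,              /* assumed MHI EP controller config */
        .epf_header = &foo_header,              /* assumed PCI EPF header */
        .bar_num = BAR_0,
        .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
        .msi_count = 32,                        /* illustrative value */
        .mru = 0x8000,                          /* illustrative value */
        .flags = MHI_EPF_USE_DMA,               /* select pci_epf_mhi_edma_{read,write} over iATU */
};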

File tree

1 file changed: +231 −6 lines


drivers/pci/endpoint/functions/pci-epf-mhi.c

Lines changed: 231 additions & 6 deletions
@@ -6,8 +6,10 @@
  * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
  */
 
+#include <linux/dmaengine.h>
 #include <linux/mhi_ep.h>
 #include <linux/module.h>
+#include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/pci-epc.h>
 #include <linux/pci-epf.h>
@@ -16,13 +18,17 @@
 
 #define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
 
+/* Platform specific flags */
+#define MHI_EPF_USE_DMA BIT(0)
+
 struct pci_epf_mhi_ep_info {
 	const struct mhi_ep_cntrl_config *config;
 	struct pci_epf_header *epf_header;
 	enum pci_barno bar_num;
 	u32 epf_flags;
 	u32 msi_count;
 	u32 mru;
+	u32 flags;
 };
 
 #define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction) \
@@ -99,6 +105,8 @@ struct pci_epf_mhi {
 	struct mutex lock;
 	void __iomem *mmio;
 	resource_size_t mmio_phys;
+	struct dma_chan *dma_chan_tx;
+	struct dma_chan *dma_chan_rx;
 	u32 mmio_size;
 	int irq;
 };
@@ -184,8 +192,8 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
 			  vector + 1);
 }
 
-static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
-				       void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+				 void *to, size_t size)
 {
 	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
 	size_t offset = get_align_offset(epf_mhi, from);
@@ -212,8 +220,8 @@ static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
 	return 0;
 }
 
-static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
-				     void *from, u64 to, size_t size)
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+				  void *from, u64 to, size_t size)
 {
 	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
 	size_t offset = get_align_offset(epf_mhi, to);
@@ -240,6 +248,200 @@ static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
 	return 0;
 }
 
+static void pci_epf_mhi_dma_callback(void *param)
+{
+	complete(param);
+}
+
+static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+				 void *to, size_t size)
+{
+	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct dma_chan *chan = epf_mhi->dma_chan_rx;
+	struct device *dev = &epf_mhi->epf->dev;
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config = {};
+	dma_cookie_t cookie;
+	dma_addr_t dst_addr;
+	int ret;
+
+	mutex_lock(&epf_mhi->lock);
+
+	config.direction = DMA_DEV_TO_MEM;
+	config.src_addr = from;
+
+	ret = dmaengine_slave_config(chan, &config);
+	if (ret) {
+		dev_err(dev, "Failed to configure DMA channel\n");
+		goto err_unlock;
+	}
+
+	dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
+	ret = dma_mapping_error(dma_dev, dst_addr);
+	if (ret) {
+		dev_err(dev, "Failed to map remote memory\n");
+		goto err_unlock;
+	}
+
+	desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "Failed to prepare DMA\n");
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	desc->callback = pci_epf_mhi_dma_callback;
+	desc->callback_param = &complete;
+
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(dev, "Failed to do DMA submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(chan);
+	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+	if (!ret) {
+		dev_err(dev, "DMA transfer timeout\n");
+		dmaengine_terminate_sync(chan);
+		ret = -ETIMEDOUT;
+	}
+
+err_unmap:
+	dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
+err_unlock:
+	mutex_unlock(&epf_mhi->lock);
+
+	return ret;
+}
+
+static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
+				  u64 to, size_t size)
+{
+	struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct dma_chan *chan = epf_mhi->dma_chan_tx;
+	struct device *dev = &epf_mhi->epf->dev;
+	DECLARE_COMPLETION_ONSTACK(complete);
+	struct dma_async_tx_descriptor *desc;
+	struct dma_slave_config config = {};
+	dma_cookie_t cookie;
+	dma_addr_t src_addr;
+	int ret;
+
+	mutex_lock(&epf_mhi->lock);
+
+	config.direction = DMA_MEM_TO_DEV;
+	config.dst_addr = to;
+
+	ret = dmaengine_slave_config(chan, &config);
+	if (ret) {
+		dev_err(dev, "Failed to configure DMA channel\n");
+		goto err_unlock;
+	}
+
+	src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
+	ret = dma_mapping_error(dma_dev, src_addr);
+	if (ret) {
+		dev_err(dev, "Failed to map remote memory\n");
+		goto err_unlock;
+	}
+
+	desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
+					   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+	if (!desc) {
+		dev_err(dev, "Failed to prepare DMA\n");
+		ret = -EIO;
+		goto err_unmap;
+	}
+
+	desc->callback = pci_epf_mhi_dma_callback;
+	desc->callback_param = &complete;
+
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(dev, "Failed to do DMA submit\n");
+		goto err_unmap;
+	}
+
+	dma_async_issue_pending(chan);
+	ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
+	if (!ret) {
+		dev_err(dev, "DMA transfer timeout\n");
+		dmaengine_terminate_sync(chan);
+		ret = -ETIMEDOUT;
+	}
+
+err_unmap:
+	dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE);
+err_unlock:
+	mutex_unlock(&epf_mhi->lock);
+
+	return ret;
+}
+
+struct epf_dma_filter {
+	struct device *dev;
+	u32 dma_mask;
+};
+
+static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
+{
+	struct epf_dma_filter *filter = node;
+	struct dma_slave_caps caps;
+
+	memset(&caps, 0, sizeof(caps));
+	dma_get_slave_caps(chan, &caps);
+
+	return chan->device->dev == filter->dev && filter->dma_mask &
+		caps.directions;
+}
+
+static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
+{
+	struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
+	struct device *dev = &epf_mhi->epf->dev;
+	struct epf_dma_filter filter;
+	dma_cap_mask_t mask;
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	filter.dev = dma_dev;
+	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
+	epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
+						   &filter);
+	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
+		dev_err(dev, "Failed to request tx channel\n");
+		return -ENODEV;
+	}
+
+	filter.dma_mask = BIT(DMA_DEV_TO_MEM);
+	epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
+						   &filter);
+	if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
+		dev_err(dev, "Failed to request rx channel\n");
+		dma_release_channel(epf_mhi->dma_chan_tx);
+		epf_mhi->dma_chan_tx = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
+{
+	dma_release_channel(epf_mhi->dma_chan_tx);
+	dma_release_channel(epf_mhi->dma_chan_rx);
+	epf_mhi->dma_chan_tx = NULL;
+	epf_mhi->dma_chan_rx = NULL;
+}
+
 static int pci_epf_mhi_core_init(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -289,6 +491,14 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 	struct device *dev = &epf->dev;
 	int ret;
 
+	if (info->flags & MHI_EPF_USE_DMA) {
+		ret = pci_epf_mhi_dma_init(epf_mhi);
+		if (ret) {
+			dev_err(dev, "Failed to initialize DMA: %d\n", ret);
+			return ret;
+		}
+	}
+
 	mhi_cntrl->mmio = epf_mhi->mmio;
 	mhi_cntrl->irq = epf_mhi->irq;
 	mhi_cntrl->mru = info->mru;
@@ -298,13 +508,20 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 	mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
 	mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
 	mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
-	mhi_cntrl->read_from_host = pci_epf_mhi_read_from_host;
-	mhi_cntrl->write_to_host = pci_epf_mhi_write_to_host;
+	if (info->flags & MHI_EPF_USE_DMA) {
+		mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
+		mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
+	} else {
+		mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+		mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+	}
 
 	/* Register the MHI EP controller */
 	ret = mhi_ep_register_controller(mhi_cntrl, info->config);
 	if (ret) {
 		dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		return ret;
 	}
 
@@ -314,10 +531,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
 static int pci_epf_mhi_link_down(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
 	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
 
 	if (mhi_cntrl->mhi_dev) {
 		mhi_ep_power_down(mhi_cntrl);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		mhi_ep_unregister_controller(mhi_cntrl);
 	}
 
@@ -327,6 +547,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
 static int pci_epf_mhi_bme(struct pci_epf *epf)
 {
 	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
 	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
 	struct device *dev = &epf->dev;
 	int ret;
@@ -339,6 +560,8 @@ static int pci_epf_mhi_bme(struct pci_epf *epf)
 	ret = mhi_ep_power_up(mhi_cntrl);
 	if (ret) {
 		dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		mhi_ep_unregister_controller(mhi_cntrl);
 	}
 }
@@ -389,6 +612,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
 	 */
 	if (mhi_cntrl->mhi_dev) {
 		mhi_ep_power_down(mhi_cntrl);
+		if (info->flags & MHI_EPF_USE_DMA)
+			pci_epf_mhi_dma_deinit(epf_mhi);
 		mhi_ep_unregister_controller(mhi_cntrl);
 	}
