Skip to content

Commit 2d2db7d

Browse files
Oleksandr Tyshchenko authored and jgross1 committed
xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import
DO NOT access the underlying struct page of an sg table exported by DMA-buf in dmabuf_imp_to_refs(), this is not allowed. Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details. Fortunately, here (for special Xen device) we can avoid using pages and calculate gfns directly from dma addresses provided by the sg table. Suggested-by: Daniel Vetter <daniel@ffwll.ch> Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com> Acked-by: Christian König <christian.koenig@amd.com> Reviewed-by: Stefano Stabellini <sstabellini@kernel.org> Acked-by: Daniel Vetter <daniel@ffwll.ch> Link: https://lore.kernel.org/r/20240107103426.2038075-1-olekstysh@gmail.com Signed-off-by: Juergen Gross <jgross@suse.com>
1 parent f1479f0 commit 2d2db7d

File tree

1 file changed

+25
-25
lines changed

1 file changed

+25
-25
lines changed

drivers/xen/gntdev-dmabuf.c

Lines changed: 25 additions & 25 deletions
Original file line number | Diff line number | Diff line change
@@ -11,6 +11,7 @@
1111
#include <linux/kernel.h>
1212
#include <linux/errno.h>
1313
#include <linux/dma-buf.h>
14+
#include <linux/dma-direct.h>
1415
#include <linux/slab.h>
1516
#include <linux/types.h>
1617
#include <linux/uaccess.h>
@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
5051

5152
/* Number of pages this buffer has. */
5253
int nr_pages;
53-
/* Pages of this buffer. */
54+
/* Pages of this buffer (only for dma-buf export). */
5455
struct page **pages;
5556
};
5657

@@ -484,7 +485,7 @@ static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
484485
/* DMA buffer import support. */
485486

486487
static int
487-
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
488+
dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
488489
int count, int domid)
489490
{
490491
grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
507508
}
508509

509510
gnttab_grant_foreign_access_ref(cur_ref, domid,
510-
xen_page_to_gfn(pages[i]), 0);
511+
gfns[i], 0);
511512
refs[i] = cur_ref;
512513
}
513514

@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
529530

530531
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
531532
{
532-
kfree(gntdev_dmabuf->pages);
533533
kfree(gntdev_dmabuf->u.imp.refs);
534534
kfree(gntdev_dmabuf);
535535
}
@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
549549
if (!gntdev_dmabuf->u.imp.refs)
550550
goto fail;
551551

552-
gntdev_dmabuf->pages = kcalloc(count,
553-
sizeof(gntdev_dmabuf->pages[0]),
554-
GFP_KERNEL);
555-
if (!gntdev_dmabuf->pages)
556-
goto fail;
557-
558552
gntdev_dmabuf->nr_pages = count;
559553

560554
for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
576570
struct dma_buf *dma_buf;
577571
struct dma_buf_attachment *attach;
578572
struct sg_table *sgt;
579-
struct sg_page_iter sg_iter;
573+
struct sg_dma_page_iter sg_iter;
574+
unsigned long *gfns;
580575
int i;
581576

582577
dma_buf = dma_buf_get(fd);
@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
624619

625620
gntdev_dmabuf->u.imp.sgt = sgt;
626621

627-
/* Now convert sgt to array of pages and check for page validity. */
622+
gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
623+
if (!gfns) {
624+
ret = ERR_PTR(-ENOMEM);
625+
goto fail_unmap;
626+
}
627+
628+
/*
629+
* Now convert sgt to array of gfns without accessing underlying pages.
630+
* It is not allowed to access the underlying struct page of an sg table
631+
* exported by DMA-buf, but since we deal with special Xen dma device here
632+
* (not a normal physical one) look at the dma addresses in the sg table
633+
* and then calculate gfns directly from them.
634+
*/
628635
i = 0;
629-
for_each_sgtable_page(sgt, &sg_iter, 0) {
630-
struct page *page = sg_page_iter_page(&sg_iter);
631-
/*
632-
* Check if page is valid: this can happen if we are given
633-
* a page from VRAM or other resources which are not backed
634-
* by a struct page.
635-
*/
636-
if (!pfn_valid(page_to_pfn(page))) {
637-
ret = ERR_PTR(-EINVAL);
638-
goto fail_unmap;
639-
}
636+
for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
637+
dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
638+
unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
640639

641-
gntdev_dmabuf->pages[i++] = page;
640+
gfns[i++] = pfn_to_gfn(pfn);
642641
}
643642

644-
ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
643+
ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
645644
gntdev_dmabuf->u.imp.refs,
646645
count, domid));
646+
kfree(gfns);
647647
if (IS_ERR(ret))
648648
goto fail_end_access;
649649

0 commit comments

Comments (0)