
Commit 0d087de

Thomas Zimmermann authored and gregkh committed
drm/fbdev-dma: Add shadow buffering for deferred I/O
[ Upstream commit 3603996 ]

DMA areas are not necessarily backed by struct page, so we cannot rely
on it for deferred I/O. Allocate a shadow buffer for drivers that
require deferred I/O and use it as framebuffer memory. Fixes driver
errors about being "Unable to handle kernel NULL pointer dereference at
virtual address" or "Unable to handle kernel paging request at virtual
address".

The patch splits drm_fbdev_dma_driver_fbdev_probe() into an initial
allocation, which creates the DMA-backed buffer object, and a tail that
sets up the fbdev data structures. There is a tail function for direct
memory mappings and a tail function for deferred I/O with the shadow
buffer.

It is no longer possible to use deferred I/O without a shadow buffer.
It can be re-added if there exists a reliable test for usable struct
page in the allocated DMA-backed buffer object.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reported-by: Nuno Gonçalves <nunojpg@gmail.com>
Closes: https://lore.kernel.org/dri-devel/CAEXMXLR55DziAMbv_+2hmLeH-jP96pmit6nhs6siB22cpQFr9w@mail.gmail.com/
Tested-by: Nuno Gonçalves <nunojpg@gmail.com>
Fixes: 5ab9144 ("drm/tiny/ili9225: Use fbdev-dma")
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: <stable@vger.kernel.org> # v6.11+
Reviewed-by: Simona Vetter <simona.vetter@ffwll.ch>
Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241211090643.74250-1-tzimmermann@suse.de
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 8e2dcdf commit 0d087de
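
For orientation, here is a minimal sketch of the flow the patch below implements, reduced to the common case of whole-byte pixel formats. The helper names shadow_probe_tail() and flush_clip_to_dma() are placeholders for this sketch only; the functions actually added by the patch are drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(), drm_fbdev_dma_damage_blit() and drm_fbdev_dma_damage_blit_real().

/* Sketch only -- condensed from the patch below; error paths, locking and
 * the 1/2/4-bpp offset handling of the real code are omitted.
 */
#include <linux/fb.h>
#include <linux/iosys-map.h>
#include <linux/vmalloc.h>

#include <drm/drm_client.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>

/* Probe tail for deferred I/O: back the fbdev screen with a vmalloc'ed
 * shadow buffer, which always has struct page backing, instead of the
 * DMA-backed GEM object.
 */
static int shadow_probe_tail(struct drm_fb_helper *fb_helper)
{
        struct fb_info *info = fb_helper->info;
        size_t screen_size = fb_helper->buffer->gem->size;
        void *shadow = vzalloc(screen_size);

        if (!shadow)
                return -ENOMEM;

        info->screen_buffer = shadow;   /* fbdev clients read/write the shadow */
        info->fix.smem_len = screen_size;

        fb_helper->fbdefio.delay = HZ / 20;
        fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
        info->fbdefio = &fb_helper->fbdefio;

        return fb_deferred_io_init(info);       /* page faults now track damage */
}

/* Damage handler: copy the dirty rectangle from the shadow buffer into the
 * DMA-backed buffer; the framebuffer's dirty callback runs afterwards.
 */
static void flush_clip_to_dma(struct drm_fb_helper *fb_helper,
                              struct drm_clip_rect *clip)
{
        struct drm_framebuffer *fb = fb_helper->fb;
        struct iosys_map dst = fb_helper->buffer->map;
        size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * fb->format->cpp[0];
        size_t len = (clip->x2 - clip->x1) * fb->format->cpp[0];
        void *src = fb_helper->info->screen_buffer + offset;
        unsigned int y;

        iosys_map_incr(&dst, offset);   /* first pixel within the clip rect */
        for (y = clip->y1; y < clip->y2; y++) {
                iosys_map_memcpy_to(&dst, 0, src, len);
                iosys_map_incr(&dst, fb->pitches[0]);
                src += fb->pitches[0];
        }
}

The real patch additionally handles 1-, 2- and 4-bpp formats when computing the byte offset and length of the clip rectangle, and serializes the blit against fbdev modeset operations by holding drm_fb_helper.lock.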

File tree

1 file changed: +156 -63 lines changed

drivers/gpu/drm/drm_fbdev_dma.c

Lines changed: 156 additions & 63 deletions
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT
 
 #include <linux/fb.h>
+#include <linux/vmalloc.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
@@ -72,43 +73,108 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
         .fb_destroy = drm_fbdev_dma_fb_destroy,
 };
 
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
                                    drm_fb_helper_damage_range,
                                    drm_fb_helper_damage_area);
 
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
 {
         struct drm_fb_helper *fb_helper = info->par;
-        struct drm_framebuffer *fb = fb_helper->fb;
-        struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+        void *shadow = info->screen_buffer;
+
+        if (!fb_helper->dev)
+                return;
 
-        if (!dma->map_noncoherent)
-                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+        if (info->fbdefio)
+                fb_deferred_io_cleanup(info);
+        drm_fb_helper_fini(fb_helper);
+        vfree(shadow);
 
-        return fb_deferred_io_mmap(info, vma);
+        drm_client_buffer_vunmap(fb_helper->buffer);
+        drm_client_framebuffer_delete(fb_helper->buffer);
+        drm_client_release(&fb_helper->client);
+        drm_fb_helper_unprepare(fb_helper);
+        kfree(fb_helper);
 }
 
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
         .owner = THIS_MODULE,
         .fb_open = drm_fbdev_dma_fb_open,
         .fb_release = drm_fbdev_dma_fb_release,
-        __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+        FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
         DRM_FB_HELPER_DEFAULT_OPS,
-        __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
-        .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
-        .fb_destroy = drm_fbdev_dma_fb_destroy,
+        .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
 };
 
 /*
  * struct drm_fb_helper
  */
 
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+                                           struct drm_clip_rect *clip,
+                                           struct iosys_map *dst)
+{
+        struct drm_framebuffer *fb = fb_helper->fb;
+        size_t offset = clip->y1 * fb->pitches[0];
+        size_t len = clip->x2 - clip->x1;
+        unsigned int y;
+        void *src;
+
+        switch (drm_format_info_bpp(fb->format, 0)) {
+        case 1:
+                offset += clip->x1 / 8;
+                len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+                break;
+        case 2:
+                offset += clip->x1 / 4;
+                len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+                break;
+        case 4:
+                offset += clip->x1 / 2;
+                len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+                break;
+        default:
+                offset += clip->x1 * fb->format->cpp[0];
+                len *= fb->format->cpp[0];
+                break;
+        }
+
+        src = fb_helper->info->screen_buffer + offset;
+        iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+        for (y = clip->y1; y < clip->y2; y++) {
+                iosys_map_memcpy_to(dst, 0, src, len);
+                iosys_map_incr(dst, fb->pitches[0]);
+                src += fb->pitches[0];
+        }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+                                     struct drm_clip_rect *clip)
+{
+        struct drm_client_buffer *buffer = fb_helper->buffer;
+        struct iosys_map dst;
+
+        /*
+         * For fbdev emulation, we only have to protect against fbdev modeset
+         * operations. Nothing else will involve the client buffer's BO. So it
+         * is sufficient to acquire struct drm_fb_helper.lock here.
+         */
+        mutex_lock(&fb_helper->lock);
+
+        dst = buffer->map;
+        drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+        mutex_unlock(&fb_helper->lock);
+
+        return 0;
+}
 static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
                                          struct drm_fb_helper_surface_size *sizes)
 {
         return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
 }
-
 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
                                          struct drm_clip_rect *clip)
 {
@@ -120,6 +186,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
                 return 0;
 
         if (helper->fb->funcs->dirty) {
+                ret = drm_fbdev_dma_damage_blit(helper, clip);
+                if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+                        return ret;
+
                 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
                 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
                         return ret;
@@ -137,14 +207,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
  * struct drm_fb_helper
  */
 
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+                                                 struct drm_fb_helper_surface_size *sizes)
+{
+        struct drm_device *dev = fb_helper->dev;
+        struct drm_client_buffer *buffer = fb_helper->buffer;
+        struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+        struct drm_framebuffer *fb = fb_helper->fb;
+        struct fb_info *info = fb_helper->info;
+        struct iosys_map map = buffer->map;
+
+        info->fbops = &drm_fbdev_dma_fb_ops;
+
+        /* screen */
+        info->flags |= FBINFO_VIRTFB; /* system memory */
+        if (dma_obj->map_noncoherent)
+                info->flags |= FBINFO_READS_FAST; /* signal caching */
+        info->screen_size = sizes->surface_height * fb->pitches[0];
+        info->screen_buffer = map.vaddr;
+        if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+                if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+                        info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+        }
+        info->fix.smem_len = info->screen_size;
+
+        return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+                                                          struct drm_fb_helper_surface_size *sizes)
+{
+        struct drm_client_buffer *buffer = fb_helper->buffer;
+        struct fb_info *info = fb_helper->info;
+        size_t screen_size = buffer->gem->size;
+        void *screen_buffer;
+        int ret;
+
+        /*
+         * Deferred I/O requires struct page for framebuffer memory,
+         * which is not guaranteed for all DMA ranges. We thus create
+         * a shadow buffer in system memory.
+         */
+        screen_buffer = vzalloc(screen_size);
+        if (!screen_buffer)
+                return -ENOMEM;
+
+        info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+        /* screen */
+        info->flags |= FBINFO_VIRTFB; /* system memory */
+        info->flags |= FBINFO_READS_FAST; /* signal caching */
+        info->screen_buffer = screen_buffer;
+        info->fix.smem_len = screen_size;
+
+        fb_helper->fbdefio.delay = HZ / 20;
+        fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+        info->fbdefio = &fb_helper->fbdefio;
+        ret = fb_deferred_io_init(info);
+        if (ret)
+                goto err_vfree;
+
+        return 0;
+
+err_vfree:
+        vfree(screen_buffer);
+        return ret;
+}
+
 int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
                                      struct drm_fb_helper_surface_size *sizes)
 {
         struct drm_client_dev *client = &fb_helper->client;
         struct drm_device *dev = fb_helper->dev;
-        bool use_deferred_io = false;
         struct drm_client_buffer *buffer;
-        struct drm_gem_dma_object *dma_obj;
         struct drm_framebuffer *fb;
         struct fb_info *info;
         u32 format;
@@ -161,19 +297,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
                                                sizes->surface_height, format);
         if (IS_ERR(buffer))
                 return PTR_ERR(buffer);
-        dma_obj = to_drm_gem_dma_obj(buffer->gem);
 
         fb = buffer->fb;
 
-        /*
-         * Deferred I/O requires struct page for framebuffer memory,
-         * which is not guaranteed for all DMA ranges. We thus only
-         * install deferred I/O if we have a framebuffer that requires
-         * it.
-         */
-        if (fb->funcs->dirty)
-                use_deferred_io = true;
-
         ret = drm_client_buffer_vmap(buffer, &map);
         if (ret) {
                 goto err_drm_client_buffer_delete;
@@ -194,45 +320,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
 
         drm_fb_helper_fill_info(info, fb_helper, sizes);
 
-        if (use_deferred_io)
-                info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+        if (fb->funcs->dirty)
+                ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
         else
-                info->fbops = &drm_fbdev_dma_fb_ops;
-
-        /* screen */
-        info->flags |= FBINFO_VIRTFB; /* system memory */
-        if (dma_obj->map_noncoherent)
-                info->flags |= FBINFO_READS_FAST; /* signal caching */
-        info->screen_size = sizes->surface_height * fb->pitches[0];
-        info->screen_buffer = map.vaddr;
-        if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
-                if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
-                        info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
-        }
-        info->fix.smem_len = info->screen_size;
-
-        /*
-         * Only set up deferred I/O if the screen buffer supports
-         * it. If this disagrees with the previous test for ->dirty,
-         * mmap on the /dev/fb file might not work correctly.
-         */
-        if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
-                unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
-                if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
-                        use_deferred_io = false;
-        }
-
-        /* deferred I/O */
-        if (use_deferred_io) {
-                fb_helper->fbdefio.delay = HZ / 20;
-                fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
-                info->fbdefio = &fb_helper->fbdefio;
-                ret = fb_deferred_io_init(info);
-                if (ret)
-                        goto err_drm_fb_helper_release_info;
-        }
+                ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+        if (ret)
+                goto err_drm_fb_helper_release_info;
 
         return 0;
 