@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: MIT
 
 #include <linux/fb.h>
+#include <linux/vmalloc.h>
 
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_drv.h>
@@ -72,43 +73,108 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
 	.fb_destroy = drm_fbdev_dma_fb_destroy,
 };
 
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
 				   drm_fb_helper_damage_range,
 				   drm_fb_helper_damage_area);
 
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
 {
 	struct drm_fb_helper *fb_helper = info->par;
-	struct drm_framebuffer *fb = fb_helper->fb;
-	struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+	void *shadow = info->screen_buffer;
+
+	if (!fb_helper->dev)
+		return;
 
-	if (!dma->map_noncoherent)
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+	if (info->fbdefio)
+		fb_deferred_io_cleanup(info);
+	drm_fb_helper_fini(fb_helper);
+	vfree(shadow);
 
-	return fb_deferred_io_mmap(info, vma);
+	drm_client_buffer_vunmap(fb_helper->buffer);
+	drm_client_framebuffer_delete(fb_helper->buffer);
+	drm_client_release(&fb_helper->client);
+	drm_fb_helper_unprepare(fb_helper);
+	kfree(fb_helper);
 }
 
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
 	.owner = THIS_MODULE,
 	.fb_open = drm_fbdev_dma_fb_open,
 	.fb_release = drm_fbdev_dma_fb_release,
-	__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+	FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
 	DRM_FB_HELPER_DEFAULT_OPS,
-	__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
-	.fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
-	.fb_destroy = drm_fbdev_dma_fb_destroy,
+	.fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
 };
 
 /*
  * struct drm_fb_helper
  */
 
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+					   struct drm_clip_rect *clip,
+					   struct iosys_map *dst)
+{
+	struct drm_framebuffer *fb = fb_helper->fb;
+	size_t offset = clip->y1 * fb->pitches[0];
+	size_t len = clip->x2 - clip->x1;
+	unsigned int y;
+	void *src;
+
+	switch (drm_format_info_bpp(fb->format, 0)) {
+	case 1:
+		offset += clip->x1 / 8;
+		len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+		break;
+	case 2:
+		offset += clip->x1 / 4;
+		len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+		break;
+	case 4:
+		offset += clip->x1 / 2;
+		len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+		break;
+	default:
+		offset += clip->x1 * fb->format->cpp[0];
+		len *= fb->format->cpp[0];
+		break;
+	}
+
+	src = fb_helper->info->screen_buffer + offset;
+	iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+	for (y = clip->y1; y < clip->y2; y++) {
+		iosys_map_memcpy_to(dst, 0, src, len);
+		iosys_map_incr(dst, fb->pitches[0]);
+		src += fb->pitches[0];
+	}
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+				     struct drm_clip_rect *clip)
+{
+	struct drm_client_buffer *buffer = fb_helper->buffer;
+	struct iosys_map dst;
+
+	/*
+	 * For fbdev emulation, we only have to protect against fbdev modeset
+	 * operations. Nothing else will involve the client buffer's BO. So it
+	 * is sufficient to acquire struct drm_fb_helper.lock here.
+	 */
+	mutex_lock(&fb_helper->lock);
+
+	dst = buffer->map;
+	drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+	mutex_unlock(&fb_helper->lock);
+
+	return 0;
+}
+
 static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
 					 struct drm_fb_helper_surface_size *sizes)
 {
 	return drm_fbdev_dma_driver_fbdev_probe(fb_helper, sizes);
 }
-
 static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
 					 struct drm_clip_rect *clip)
 {
@@ -120,6 +186,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
 		return 0;
 
 	if (helper->fb->funcs->dirty) {
+		ret = drm_fbdev_dma_damage_blit(helper, clip);
+		if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+			return ret;
+
 		ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
 		if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
 			return ret;
@@ -137,14 +207,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
  * struct drm_fb_helper
  */
 
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+						 struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = fb_helper->dev;
+	struct drm_client_buffer *buffer = fb_helper->buffer;
+	struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+	struct drm_framebuffer *fb = fb_helper->fb;
+	struct fb_info *info = fb_helper->info;
+	struct iosys_map map = buffer->map;
+
+	info->fbops = &drm_fbdev_dma_fb_ops;
+
+	/* screen */
+	info->flags |= FBINFO_VIRTFB; /* system memory */
+	if (dma_obj->map_noncoherent)
+		info->flags |= FBINFO_READS_FAST; /* signal caching */
+	info->screen_size = sizes->surface_height * fb->pitches[0];
+	info->screen_buffer = map.vaddr;
+	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+	}
+	info->fix.smem_len = info->screen_size;
+
+	return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+							  struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_client_buffer *buffer = fb_helper->buffer;
+	struct fb_info *info = fb_helper->info;
+	size_t screen_size = buffer->gem->size;
+	void *screen_buffer;
+	int ret;
+
+	/*
+	 * Deferred I/O requires struct page for framebuffer memory,
+	 * which is not guaranteed for all DMA ranges. We thus create
+	 * a shadow buffer in system memory.
+	 */
+	screen_buffer = vzalloc(screen_size);
+	if (!screen_buffer)
+		return -ENOMEM;
+
+	info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+	/* screen */
+	info->flags |= FBINFO_VIRTFB; /* system memory */
+	info->flags |= FBINFO_READS_FAST; /* signal caching */
+	info->screen_buffer = screen_buffer;
+	info->fix.smem_len = screen_size;
+
+	fb_helper->fbdefio.delay = HZ / 20;
+	fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+	info->fbdefio = &fb_helper->fbdefio;
+	ret = fb_deferred_io_init(info);
+	if (ret)
+		goto err_vfree;
+
+	return 0;
+
+err_vfree:
+	vfree(screen_buffer);
+	return ret;
+}
+
 int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
 				     struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_client_dev *client = &fb_helper->client;
 	struct drm_device *dev = fb_helper->dev;
-	bool use_deferred_io = false;
 	struct drm_client_buffer *buffer;
-	struct drm_gem_dma_object *dma_obj;
 	struct drm_framebuffer *fb;
 	struct fb_info *info;
 	u32 format;
@@ -161,19 +297,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
 					       sizes->surface_height, format);
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
-	dma_obj = to_drm_gem_dma_obj(buffer->gem);
 
 	fb = buffer->fb;
 
-	/*
-	 * Deferred I/O requires struct page for framebuffer memory,
-	 * which is not guaranteed for all DMA ranges. We thus only
-	 * install deferred I/O if we have a framebuffer that requires
-	 * it.
-	 */
-	if (fb->funcs->dirty)
-		use_deferred_io = true;
-
 	ret = drm_client_buffer_vmap(buffer, &map);
 	if (ret) {
 		goto err_drm_client_buffer_delete;
@@ -194,45 +320,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
 
 	drm_fb_helper_fill_info(info, fb_helper, sizes);
 
-	if (use_deferred_io)
-		info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+	if (fb->funcs->dirty)
+		ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
 	else
-		info->fbops = &drm_fbdev_dma_fb_ops;
-
-	/* screen */
-	info->flags |= FBINFO_VIRTFB; /* system memory */
-	if (dma_obj->map_noncoherent)
-		info->flags |= FBINFO_READS_FAST; /* signal caching */
-	info->screen_size = sizes->surface_height * fb->pitches[0];
-	info->screen_buffer = map.vaddr;
-	if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
-		if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
-			info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
-	}
-	info->fix.smem_len = info->screen_size;
-
-	/*
-	 * Only set up deferred I/O if the screen buffer supports
-	 * it. If this disagrees with the previous test for ->dirty,
-	 * mmap on the /dev/fb file might not work correctly.
-	 */
-	if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
-		unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
-		if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
-			use_deferred_io = false;
-	}
-
-	/* deferred I/O */
-	if (use_deferred_io) {
-		fb_helper->fbdefio.delay = HZ / 20;
-		fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
-		info->fbdefio = &fb_helper->fbdefio;
-		ret = fb_deferred_io_init(info);
-		if (ret)
-			goto err_drm_fb_helper_release_info;
-	}
+		ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+	if (ret)
+		goto err_drm_fb_helper_release_info;
 
 	return 0;
 
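As a standalone illustration of the clip-rectangle math used by drm_fbdev_dma_damage_blit_real() in the patch above, the following minimal sketch computes the byte offset of a damage rectangle's first pixel and the number of bytes to copy per scanline. It is not part of the patch; the names clip_to_span and struct clip_span, and the example values, are hypothetical.

/*
 * Illustrative sketch only. Mirrors the bits-per-pixel handling in
 * drm_fbdev_dma_damage_blit_real(): sub-byte formats (1, 2 and 4 bpp)
 * pack several pixels per byte, so the clip's x1 must be converted to
 * a byte offset and the per-line length rounded up to whole bytes.
 */
#include <stddef.h>
#include <stdio.h>

struct clip_span {
	size_t offset;	/* byte offset of the clip's first pixel */
	size_t len;	/* bytes to copy per scanline */
};

static struct clip_span clip_to_span(unsigned int bpp, unsigned int cpp,
				     unsigned int pitch,
				     unsigned int x1, unsigned int y1,
				     unsigned int x2)
{
	struct clip_span span = {
		.offset = (size_t)y1 * pitch,	/* start of the clip's first line */
		.len = x2 - x1,			/* clip width in pixels */
	};

	switch (bpp) {
	case 1:	/* 8 pixels per byte */
		span.offset += x1 / 8;
		span.len = (span.len + x1 % 8 + 7) / 8;
		break;
	case 2:	/* 4 pixels per byte */
		span.offset += x1 / 4;
		span.len = (span.len + x1 % 4 + 3) / 4;
		break;
	case 4:	/* 2 pixels per byte */
		span.offset += x1 / 2;
		span.len = (span.len + x1 % 2 + 1) / 2;
		break;
	default:	/* one or more whole bytes per pixel */
		span.offset += (size_t)x1 * cpp;
		span.len *= cpp;
		break;
	}

	return span;
}

int main(void)
{
	/* e.g. XRGB8888: 32 bpp, 4 bytes per pixel, pitch of 4096 bytes */
	struct clip_span span = clip_to_span(32, 4, 4096, 10, 5, 200);

	printf("offset=%zu len=%zu\n", span.offset, span.len);	/* offset=20520 len=760 */
	return 0;
}

For the XRGB8888 example, offset = 5 * 4096 + 10 * 4 = 20520 bytes and len = (200 - 10) * 4 = 760 bytes per line; the blit then advances source and destination by the pitch for each scanline of the clip, exactly as the for loop in the patch does.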