Commit 20a4c81

Merge tag 'drm-misc-fixes-2025-05-08' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

drm:
- Fix overflow when generating wedged event

ivpu:
- Increase timeouts
- Fix deadlock in cmdq ioctl
- Unlock mutexes in correct order

panel:
- simple: Fix timings for AUO G101EVN010

ttm:
- Fix documentation
- Remove struct ttm_backup

v3d:
- Avoid memory leak in job handling

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://lore.kernel.org/r/20250508104939.GA76697@2a02-2454-fd5e-fd00-c110-cbf2-6528-c5be.dyn6.pyur.net
2 parents 92a09c4 + 7c6fa17 commit 20a4c81

File tree: 10 files changed (+86, -78 lines)

drivers/accel/ivpu/ivpu_hw.c

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ static void timeouts_init(struct ivpu_device *vdev)
                 else
                         vdev->timeout.autosuspend = 100;
                 vdev->timeout.d0i3_entry_msg = 5;
-                vdev->timeout.state_dump_msg = 10;
+                vdev->timeout.state_dump_msg = 100;
         }
 }
 

drivers/accel/ivpu/ivpu_job.c

Lines changed: 25 additions & 10 deletions
@@ -681,8 +681,8 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
 err_erase_xa:
         xa_erase(&vdev->submitted_jobs_xa, job->job_id);
 err_unlock:
-        mutex_unlock(&vdev->submitted_jobs_lock);
         mutex_unlock(&file_priv->lock);
+        mutex_unlock(&vdev->submitted_jobs_lock);
         ivpu_rpm_put(vdev);
         return ret;
 }
@@ -874,15 +874,21 @@ int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *
 int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
         struct ivpu_file_priv *file_priv = file->driver_priv;
+        struct ivpu_device *vdev = file_priv->vdev;
         struct drm_ivpu_cmdq_create *args = data;
         struct ivpu_cmdq *cmdq;
+        int ret;
 
-        if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+        if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
                 return -ENODEV;
 
         if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
                 return -EINVAL;
 
+        ret = ivpu_rpm_get(vdev);
+        if (ret < 0)
+                return ret;
+
         mutex_lock(&file_priv->lock);
 
         cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
@@ -891,6 +897,8 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
         mutex_unlock(&file_priv->lock);
 
+        ivpu_rpm_put(vdev);
+
         return cmdq ? 0 : -ENOMEM;
 }
 
@@ -900,28 +908,35 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
         struct ivpu_device *vdev = file_priv->vdev;
         struct drm_ivpu_cmdq_destroy *args = data;
         struct ivpu_cmdq *cmdq;
-        u32 cmdq_id;
+        u32 cmdq_id = 0;
         int ret;
 
         if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
                 return -ENODEV;
 
+        ret = ivpu_rpm_get(vdev);
+        if (ret < 0)
+                return ret;
+
         mutex_lock(&file_priv->lock);
 
         cmdq = xa_load(&file_priv->cmdq_xa, args->cmdq_id);
         if (!cmdq || cmdq->is_legacy) {
                 ret = -ENOENT;
-                goto err_unlock;
+        } else {
+                cmdq_id = cmdq->id;
+                ivpu_cmdq_destroy(file_priv, cmdq);
+                ret = 0;
         }
 
-        cmdq_id = cmdq->id;
-        ivpu_cmdq_destroy(file_priv, cmdq);
         mutex_unlock(&file_priv->lock);
-        ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
-        return 0;
 
-err_unlock:
-        mutex_unlock(&file_priv->lock);
+        /* Abort any pending jobs only if cmdq was destroyed */
+        if (!ret)
+                ivpu_cmdq_abort_all_jobs(vdev, file_priv->ctx.id, cmdq_id);
+
+        ivpu_rpm_put(vdev);
+
         return ret;
 }
 
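
Note: the first hunk releases the two mutexes in the reverse of the order in which the submit path takes them, which is the "Unlock mutexes in correct order" item from the summary; the later hunks bracket the cmdq create/destroy ioctls with ivpu_rpm_get()/ivpu_rpm_put() and only abort pending jobs when a queue was actually destroyed. A minimal, self-contained sketch of that lock/unlock discipline (plain pthreads with illustrative names, not ivpu driver code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* e.g. a device-wide lock, taken first */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* e.g. a per-file lock, taken second */

static void submit(void)
{
        /* Always acquire in one fixed order: A, then B ... */
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);

        printf("submitting while holding both locks\n");

        /* ... and release in the reverse order: B, then A. */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
}

int main(void)
{
        submit();
        return 0;
}

A single, consistent acquisition order across all paths is what rules out lock-order deadlocks; releasing in reverse keeps the error path symmetric with the success path above it.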

drivers/gpu/drm/drm_drv.c

Lines changed: 1 addition & 1 deletion
@@ -549,7 +549,7 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
                 if (drm_WARN_ONCE(dev, !recovery, "invalid recovery method %u\n", opt))
                         break;
 
-                len += scnprintf(event_string + len, sizeof(event_string), "%s,", recovery);
+                len += scnprintf(event_string + len, sizeof(event_string) - len, "%s,", recovery);
         }
 
         if (recovery)
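
Note: this is the "overflow when generating wedged event" fix. Each loop iteration appends at event_string + len but was still allowed to write up to the full sizeof(event_string), so enough recovery methods could run past the end of the buffer; sizeof(event_string) - len limits every append to the space actually remaining. A standalone sketch of the append pattern (userspace snprintf and illustrative method names; the kernel's scnprintf returns the bytes actually written, which is what keeps len honest there):

#include <stdio.h>

int main(void)
{
        char event_string[32];
        const char *methods[] = { "rebind", "bus-reset", "reset" };
        size_t len = 0;

        for (size_t i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
                /* The remaining space shrinks as len grows; never pass the full size. */
                len += snprintf(event_string + len, sizeof(event_string) - len,
                                "%s,", methods[i]);
                if (len >= sizeof(event_string)) /* snprintf reports the would-be length on truncation */
                        break;
        }

        printf("%s\n", event_string);
        return 0;
}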

drivers/gpu/drm/panel/panel-simple.c

Lines changed: 13 additions & 12 deletions
@@ -1027,27 +1027,28 @@ static const struct panel_desc auo_g070vvn01 = {
         },
 };
 
-static const struct drm_display_mode auo_g101evn010_mode = {
-        .clock = 68930,
-        .hdisplay = 1280,
-        .hsync_start = 1280 + 82,
-        .hsync_end = 1280 + 82 + 2,
-        .htotal = 1280 + 82 + 2 + 84,
-        .vdisplay = 800,
-        .vsync_start = 800 + 8,
-        .vsync_end = 800 + 8 + 2,
-        .vtotal = 800 + 8 + 2 + 6,
+static const struct display_timing auo_g101evn010_timing = {
+        .pixelclock = { 64000000, 68930000, 85000000 },
+        .hactive = { 1280, 1280, 1280 },
+        .hfront_porch = { 8, 64, 256 },
+        .hback_porch = { 8, 64, 256 },
+        .hsync_len = { 40, 168, 767 },
+        .vactive = { 800, 800, 800 },
+        .vfront_porch = { 4, 8, 100 },
+        .vback_porch = { 4, 8, 100 },
+        .vsync_len = { 8, 16, 223 },
 };
 
 static const struct panel_desc auo_g101evn010 = {
-        .modes = &auo_g101evn010_mode,
-        .num_modes = 1,
+        .timings = &auo_g101evn010_timing,
+        .num_timings = 1,
         .bpc = 6,
         .size = {
                 .width = 216,
                 .height = 135,
         },
         .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+        .bus_flags = DRM_BUS_FLAG_DE_HIGH,
         .connector_type = DRM_MODE_CONNECTOR_LVDS,
 };
 
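
Note: instead of a single fixed drm_display_mode, the panel is now described by a struct display_timing in which every field is a { min, typ, max } range taken from the datasheet, and the data-enable polarity is spelled out via .bus_flags. A small standalone sketch of how a "typical" mode falls out of such ranges (plain C mirroring the { min, typ, max } layout of struct timing_entry; the struct name and helpers here are illustrative, not kernel code):

#include <stdio.h>

struct timing_entry { unsigned int min, typ, max; };

struct panel_timing {
        struct timing_entry pixelclock; /* Hz */
        struct timing_entry hactive, hfront_porch, hback_porch, hsync_len;
        struct timing_entry vactive, vfront_porch, vback_porch, vsync_len;
};

int main(void)
{
        /* The AUO G101EVN010 ranges from the hunk above. */
        struct panel_timing t = {
                .pixelclock   = { 64000000, 68930000, 85000000 },
                .hactive      = { 1280, 1280, 1280 },
                .hfront_porch = { 8, 64, 256 },
                .hback_porch  = { 8, 64, 256 },
                .hsync_len    = { 40, 168, 767 },
                .vactive      = { 800, 800, 800 },
                .vfront_porch = { 4, 8, 100 },
                .vback_porch  = { 4, 8, 100 },
                .vsync_len    = { 8, 16, 223 },
        };

        unsigned int htotal = t.hactive.typ + t.hfront_porch.typ +
                              t.hback_porch.typ + t.hsync_len.typ;
        unsigned int vtotal = t.vactive.typ + t.vfront_porch.typ +
                              t.vback_porch.typ + t.vsync_len.typ;

        printf("typical mode %ux%u, htotal %u, vtotal %u, ~%.2f Hz\n",
               t.hactive.typ, t.vactive.typ, htotal, vtotal,
               (double)t.pixelclock.typ / ((double)htotal * vtotal));
        return 0;
}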

drivers/gpu/drm/ttm/ttm_backup.c

Lines changed: 12 additions & 32 deletions
@@ -7,20 +7,6 @@
 #include <linux/page-flags.h>
 #include <linux/swap.h>
 
-/*
- * Casting from randomized struct file * to struct ttm_backup * is fine since
- * struct ttm_backup is never defined nor dereferenced.
- */
-static struct file *ttm_backup_to_file(struct ttm_backup *backup)
-{
-        return (void *)backup;
-}
-
-static struct ttm_backup *ttm_file_to_backup(struct file *file)
-{
-        return (void *)file;
-}
-
 /*
  * Need to map shmem indices to handle since a handle value
  * of 0 means error, following the swp_entry_t convention.
@@ -40,12 +26,12 @@ static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
  * @backup: The struct backup pointer used to obtain the handle
  * @handle: The handle obtained from the @backup_page function.
  */
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
+void ttm_backup_drop(struct file *backup, pgoff_t handle)
 {
         loff_t start = ttm_backup_handle_to_shmem_idx(handle);
 
         start <<= PAGE_SHIFT;
-        shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
+        shmem_truncate_range(file_inode(backup), start,
                              start + PAGE_SIZE - 1);
 }
 
@@ -55,16 +41,15 @@ void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
  * @backup: The struct backup pointer used to back up the page.
  * @dst: The struct page to copy into.
  * @handle: The handle returned when the page was backed up.
- * @intr: Try to perform waits interruptable or at least killable.
+ * @intr: Try to perform waits interruptible or at least killable.
  *
  * Return: 0 on success, Negative error code on failure, notably
  * -EINTR if @intr was set to true and a signal is pending.
  */
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
                          pgoff_t handle, bool intr)
 {
-        struct file *filp = ttm_backup_to_file(backup);
-        struct address_space *mapping = filp->f_mapping;
+        struct address_space *mapping = backup->f_mapping;
         struct folio *from_folio;
         pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
 
@@ -106,12 +91,11 @@ int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
  * the folio size- and usage.
  */
 s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
                        bool writeback, pgoff_t idx, gfp_t page_gfp,
                        gfp_t alloc_gfp)
 {
-        struct file *filp = ttm_backup_to_file(backup);
-        struct address_space *mapping = filp->f_mapping;
+        struct address_space *mapping = backup->f_mapping;
         unsigned long handle = 0;
         struct folio *to_folio;
         int ret;
@@ -161,9 +145,9 @@ ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
  *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
-void ttm_backup_fini(struct ttm_backup *backup)
+void ttm_backup_fini(struct file *backup)
 {
-        fput(ttm_backup_to_file(backup));
+        fput(backup);
 }
 
 /**
@@ -194,14 +178,10 @@ EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
 *
 * Create a backup utilizing shmem objects.
 *
- * Return: A pointer to a struct ttm_backup on success,
+ * Return: A pointer to a struct file on success,
 * an error pointer on error.
 */
-struct ttm_backup *ttm_backup_shmem_create(loff_t size)
+struct file *ttm_backup_shmem_create(loff_t size)
 {
-        struct file *filp;
-
-        filp = shmem_file_setup("ttm shmem backup", size, 0);
-
-        return ttm_file_to_backup(filp);
+        return shmem_file_setup("ttm shmem backup", size, 0);
 }
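
Note: dropping the identity-cast helpers means every ttm_backup_* function now takes the shmem-backed struct file directly; the only indirection left is the handle scheme described by the comment kept in the first hunk, where handle 0 must stay free to signal errors. A guess at what such a mapping can look like (offset-by-one; illustrative only, the actual ttm_backup_handle_to_shmem_idx() body is not part of this diff):

#include <assert.h>
#include <stdio.h>

typedef unsigned long pgoff_t;

/* Keep 0 available as an error value by shifting every shmem index up by one.
 * This is a guess at the scheme, not code copied from ttm_backup.c. */
static pgoff_t shmem_idx_to_handle(pgoff_t idx)    { return idx + 1; }
static pgoff_t handle_to_shmem_idx(pgoff_t handle) { return handle - 1; }

int main(void)
{
        for (pgoff_t idx = 0; idx < 4; idx++) {
                pgoff_t handle = shmem_idx_to_handle(idx);
                assert(handle != 0);                        /* 0 stays reserved for errors */
                assert(handle_to_shmem_idx(handle) == idx);
                printf("shmem idx %lu <-> handle %lu\n", idx, handle);
        }
        return 0;
}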

drivers/gpu/drm/ttm/ttm_pool.c

Lines changed: 3 additions & 3 deletions
@@ -506,7 +506,7 @@ static void ttm_pool_allocated_page_commit(struct page *allocated,
  * if successful, populate the page-table and dma-address arrays.
  */
 static int ttm_pool_restore_commit(struct ttm_pool_tt_restore *restore,
-                                   struct ttm_backup *backup,
+                                   struct file *backup,
                                    const struct ttm_operation_ctx *ctx,
                                    struct ttm_pool_alloc_state *alloc)
 
@@ -655,7 +655,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
                                 pgoff_t start_page, pgoff_t end_page)
 {
         struct page **pages = &tt->pages[start_page];
-        struct ttm_backup *backup = tt->backup;
+        struct file *backup = tt->backup;
         pgoff_t i, nr;
 
         for (i = start_page; i < end_page; i += nr, pages += nr) {
@@ -963,7 +963,7 @@ void ttm_pool_drop_backed_up(struct ttm_tt *tt)
 long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
                      const struct ttm_backup_flags *flags)
 {
-        struct ttm_backup *backup = tt->backup;
+        struct file *backup = tt->backup;
         struct page *page;
         unsigned long handle;
         gfp_t alloc_gfp;

drivers/gpu/drm/ttm/ttm_tt.c

Lines changed: 1 addition & 1 deletion
@@ -544,7 +544,7 @@ EXPORT_SYMBOL(ttm_tt_pages_limit);
  */
 int ttm_tt_setup_backup(struct ttm_tt *tt)
 {
-        struct ttm_backup *backup =
+        struct file *backup =
                 ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);
 
         if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))

drivers/gpu/drm/v3d/v3d_sched.c

Lines changed: 21 additions & 7 deletions
@@ -744,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
         return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-/* If the current address or return address have changed, then the GPU
- * has probably made progress and we should delay the reset. This
- * could fail if the GPU got in an infinite loop in the CL, but that
- * is pretty unlikely outside of an i-g-t testcase.
- */
+static void
+v3d_sched_skip_reset(struct drm_sched_job *sched_job)
+{
+        struct drm_gpu_scheduler *sched = sched_job->sched;
+
+        spin_lock(&sched->job_list_lock);
+        list_add(&sched_job->list, &sched->pending_list);
+        spin_unlock(&sched->job_list_lock);
+}
+
 static enum drm_gpu_sched_stat
 v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
                     u32 *timedout_ctca, u32 *timedout_ctra)
@@ -758,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
         u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
         u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
 
+        /* If the current address or return address have changed, then the GPU
+         * has probably made progress and we should delay the reset. This
+         * could fail if the GPU got in an infinite loop in the CL, but that
+         * is pretty unlikely outside of an i-g-t testcase.
+         */
         if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
                 *timedout_ctca = ctca;
                 *timedout_ctra = ctra;
+
+                v3d_sched_skip_reset(sched_job);
                 return DRM_GPU_SCHED_STAT_NOMINAL;
         }
 
@@ -800,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
         struct v3d_dev *v3d = job->base.v3d;
         u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver));
 
-        /* If we've made progress, skip reset and let the timer get
-         * rearmed.
+        /* If we've made progress, skip reset, add the job to the pending
+         * list, and let the timer get rearmed.
          */
         if (job->timedout_batches != batches) {
                 job->timedout_batches = batches;
+
+                v3d_sched_skip_reset(sched_job);
                 return DRM_GPU_SCHED_STAT_NOMINAL;
         }
 
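
Note: this is the "avoid memory leak in job handling" fix. The DRM scheduler removes a timed-out job from its pending_list before calling the driver's timedout_job handler; if the handler then notices the GPU made progress and skips the reset, the job used to end up on no list at all and was never freed. The new v3d_sched_skip_reset() puts it back on sched->pending_list under job_list_lock so the normal cleanup path frees it on completion. A toy model of why the re-add matters (plain C list bookkeeping, not scheduler code):

#include <stdio.h>
#include <stdlib.h>

struct job {
        struct job *next;
        const char *name;
        int done;
};

static struct job *pending;

static void push_pending(struct job *j) { j->next = pending; pending = j; }

static struct job *pop_pending(void)
{
        struct job *j = pending;
        if (j)
                pending = j->next;
        return j;
}

/* Only jobs still on the pending list can ever be freed here. */
static void cleanup_finished(void)
{
        struct job **p = &pending;
        while (*p) {
                struct job *j = *p;
                if (j->done) {
                        *p = j->next;
                        printf("freeing %s\n", j->name);
                        free(j);
                } else {
                        p = &j->next;
                }
        }
}

int main(void)
{
        struct job *j = malloc(sizeof(*j));
        *j = (struct job){ .name = "render", .done = 0 };
        push_pending(j);

        /* Timeout: the job is popped off the list before the handler runs. */
        struct job *timed_out = pop_pending();

        /* Handler sees progress and skips the reset: re-add it, as the fix does. */
        push_pending(timed_out);

        timed_out->done = 1;    /* the job eventually completes... */
        cleanup_finished();     /* ...and is freed because it is back on the list */
        return 0;
}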

include/drm/ttm/ttm_backup.h

Lines changed: 8 additions & 10 deletions
@@ -9,14 +9,12 @@
 #include <linux/mm_types.h>
 #include <linux/shmem_fs.h>
 
-struct ttm_backup;
-
 /**
  * ttm_backup_handle_to_page_ptr() - Convert handle to struct page pointer
  * @handle: The handle to convert.
  *
  * Converts an opaque handle received from the
- * struct ttm_backoup_ops::backup_page() function to an (invalid)
+ * ttm_backup_backup_page() function to an (invalid)
  * struct page pointer suitable for a struct page array.
 *
 * Return: An (invalid) struct page pointer.
@@ -45,8 +43,8 @@ static inline bool ttm_backup_page_ptr_is_handle(const struct page *page)
 *
 * Return: The handle that was previously used in
 * ttm_backup_handle_to_page_ptr() to obtain a struct page pointer, suitable
- * for use as argument in the struct ttm_backup_ops drop() or
- * copy_backed_up_page() functions.
+ * for use as argument in the struct ttm_backup_drop() or
+ * ttm_backup_copy_page() functions.
 */
 static inline unsigned long
 ttm_backup_page_ptr_to_handle(const struct page *page)
@@ -55,20 +53,20 @@ ttm_backup_page_ptr_to_handle(const struct page *page)
         return (unsigned long)page >> 1;
 }
 
-void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle);
+void ttm_backup_drop(struct file *backup, pgoff_t handle);
 
-int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
+int ttm_backup_copy_page(struct file *backup, struct page *dst,
                          pgoff_t handle, bool intr);
 
 s64
-ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
+ttm_backup_backup_page(struct file *backup, struct page *page,
                        bool writeback, pgoff_t idx, gfp_t page_gfp,
                        gfp_t alloc_gfp);
 
-void ttm_backup_fini(struct ttm_backup *backup);
+void ttm_backup_fini(struct file *backup);
 
 u64 ttm_backup_bytes_avail(void);
 
-struct ttm_backup *ttm_backup_shmem_create(loff_t size);
+struct file *ttm_backup_shmem_create(loff_t size);
 
 #endif
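
Note: the header also keeps the trick that lets a backup handle travel through a struct page * slot: ttm_backup_page_ptr_to_handle() recovers the handle with a right shift, and the surrounding helpers treat such a value as a deliberately invalid, tagged pointer. A standalone sketch of that kind of encoding (the bit-0 tag on the encode side is inferred from the ">> 1" shown above, not copied from the header):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stash a small integer handle in a pointer-sized slot by setting bit 0,
 * which a properly aligned struct page pointer never has set.  Inferred
 * scheme; the hunk above only shows the ">> 1" decode side. */
static uintptr_t handle_to_ptr(uintptr_t handle) { return (handle << 1) | 1; }
static int ptr_is_handle(uintptr_t ptr)          { return ptr & 1; }
static uintptr_t ptr_to_handle(uintptr_t ptr)    { return ptr >> 1; }

int main(void)
{
        for (uintptr_t handle = 1; handle <= 4; handle++) {
                uintptr_t ptr = handle_to_ptr(handle);
                assert(ptr_is_handle(ptr));
                assert(ptr_to_handle(ptr) == handle);
                printf("handle %lu <-> tagged ptr %#lx\n",
                       (unsigned long)handle, (unsigned long)ptr);
        }
        return 0;
}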

include/drm/ttm/ttm_tt.h

Lines changed: 1 addition & 1 deletion
@@ -118,7 +118,7 @@ struct ttm_tt {
          * ttm_tt_create() callback is responsible for assigning
          * this field.
          */
-        struct ttm_backup *backup;
+        struct file *backup;
         /**
          * @caching: The current caching state of the pages, see enum
          * ttm_caching.
