
Commit 28f587a

Merge tag 'drm-fixes-2025-03-07' of https://gitlab.freedesktop.org/drm/kernel
Pull drm fixes from Dave Airlie:
 "Fixes across the board, mostly xe and imagination with some amd and
  misc others. The xe fixes are mostly hmm related, though there are
  some others in there as well, nothing really stands out otherwise.

  The nouveau Kconfig to select FW_CACHE is in this, which we discussed
  a while back.

  nouveau:
   - rely on fw caching Kconfig fix

  imagination:
   - avoid deadlock on fence release
   - fix fence initialisation
   - fix timestamps firmware traces

  scheduler:
   - fix include guard

  bochs:
   - dpms fix

  i915:
   - bump max stream count to match pipes

  xe:
   - Remove double page flip on initial plane
   - Properly setup userptr pfn_flags_mask
   - Fix GT "for each engine" workarounds
   - Fix userptr races and missed validations
   - Userptr invalid page access fixes
   - Cleanup some style nits

  amdgpu:
   - Fix NULL check in DC code
   - SMU 14 fix

  amdkfd:
   - Fix NULL check in queue validation

  radeon:
   - RS400 HyperZ fix"

* tag 'drm-fixes-2025-03-07' of https://gitlab.freedesktop.org/drm/kernel: (22 commits)
  drm/bochs: Fix DPMS regression
  drm/xe/userptr: Unmap userptrs in the mmu notifier
  drm/xe/hmm: Don't dereference struct page pointers without notifier lock
  drm/xe/hmm: Style- and include fixes
  drm/xe: Add staging tree for VM binds
  drm/xe: Fix fault mode invalidation with unbind
  drm/xe/vm: Fix a misplaced #endif
  drm/xe/vm: Validate userptr during gpu vma prefetching
  drm/amd/pm: always allow ih interrupt from fw
  drm/radeon: Fix rs400_gpu_init for ATI mobility radeon Xpress 200M
  drm/amdkfd: Fix NULL Pointer Dereference in KFD queue
  drm/amd/display: Fix null check for pipe_ctx->plane_state in resource_build_scaling_params
  drm/xe: Fix GT "for each engine" workarounds
  drm/xe/userptr: properly setup pfn_flags_mask
  drm/i915/mst: update max stream count to match number of pipes
  drm/xe: Remove double pageflip
  drm/sched: Fix preprocessor guard
  drm/imagination: Fix timestamps in firmware traces
  drm/imagination: only init job done fences once
  drm/imagination: Hold drm_gem_gpuva lock for unmap
  ...
2 parents 0f52fd4 + c8bc662 commit 28f587a

26 files changed: +456, -197 lines


drivers/gpu/drm/amd/amdkfd/kfd_queue.c

Lines changed: 2 additions & 2 deletions
@@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
 	/* EOP buffer is not required for all ASICs */
 	if (properties->eop_ring_buffer_address) {
 		if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
-			pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
-				properties->eop_buf_bo->tbo.base.size,
+			pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+				properties->eop_ring_buffer_size,
 				topo_dev->node_props.eop_buffer_size);
 			err = -EINVAL;
 			goto out_err_unreserve;
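Note on the change above: the debug message now prints the size taken straight from the queue properties instead of dereferencing the EOP buffer object, which avoids touching a pointer that can be NULL on this path and also matches the 0x%x format to a u32. A minimal userspace sketch of the same idea (names simplified and assumed, not the kernel structures):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct buf_obj { size_t size; };              /* stand-in for the TTM BO */

struct queue_properties {
	uint32_t eop_ring_buffer_size;
	struct buf_obj *eop_buf_bo;           /* may still be NULL here */
};

static int validate_eop_size(const struct queue_properties *p, uint32_t node_size)
{
	if (p->eop_ring_buffer_size != node_size) {
		/* Log the u32 from the request itself; dereferencing
		 * p->eop_buf_bo at this point could be a NULL access. */
		fprintf(stderr,
			"queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
			p->eop_ring_buffer_size, node_size);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct queue_properties p = {
		.eop_ring_buffer_size = 0x1000,
		.eop_buf_bo = NULL,
	};

	return validate_eop_size(&p, 0x2000) ? 1 : 0;
}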

drivers/gpu/drm/amd/display/dc/core/dc_resource.c

Lines changed: 2 additions & 1 deletion
@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 	DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);

 	/* Invalid input */
-	if (!plane_state->dst_rect.width ||
+	if (!plane_state ||
+	    !plane_state->dst_rect.width ||
 	    !plane_state->dst_rect.height ||
 	    !plane_state->src_rect.width ||
 	    !plane_state->src_rect.height) {
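A note on the ordering: || short-circuits left to right in C, so placing the !plane_state test first guarantees the later dst_rect/src_rect dereferences are skipped when the pointer is NULL. A tiny standalone sketch (simplified types, not the DC structures):

#include <stdbool.h>
#include <stddef.h>

struct rect { int width, height; };
struct plane_state { struct rect dst_rect, src_rect; };

/* Returns true for invalid input; the NULL test must come first so the
 * member accesses are short-circuited away when ps is NULL. */
static bool invalid_input(const struct plane_state *ps)
{
	return !ps ||
	       !ps->dst_rect.width ||
	       !ps->dst_rect.height ||
	       !ps->src_rect.width ||
	       !ps->src_rect.height;
}

int main(void)
{
	return invalid_input(NULL) ? 0 : 1;   /* expect 0: NULL is rejected */
}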

drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c

Lines changed: 1 addition & 11 deletions
@@ -1895,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
 					       NULL);
 }

-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
-	int ret = 0;
-
-	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
-		ret = smu_v14_0_allow_ih_interrupt(smu);
-
-	return ret;
-}
-
 int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
 {
 	int ret = 0;
@@ -1916,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
 	if (ret)
 		return ret;

-	return smu_v14_0_process_pending_interrupt(smu);
+	return smu_v14_0_allow_ih_interrupt(smu);
 }

 int smu_v14_0_disable_thermal_alert(struct smu_context *smu)

drivers/gpu/drm/i915/display/intel_dp_mst.c

Lines changed: 2 additions & 1 deletion
@@ -1867,7 +1867,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
 	/* create encoders */
 	mst_stream_encoders_create(dig_port);
 	ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
-					   &intel_dp->aux, 16, 3, conn_base_id);
+					   &intel_dp->aux, 16,
+					   INTEL_NUM_PIPES(display), conn_base_id);
 	if (ret) {
 		intel_dp->mst_mgr.cbs = NULL;
 		return ret;

drivers/gpu/drm/imagination/pvr_fw_meta.c

Lines changed: 4 additions & 2 deletions
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
 static void
 pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
 {
-	pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
-		     fw_obj->fw_mm_node.size);
+	struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+	pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+			 fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
 }

 static bool

drivers/gpu/drm/imagination/pvr_fw_trace.c

Lines changed: 2 additions & 2 deletions
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
 	if (sf_id == ROGUE_FW_SF_LAST)
 		return -EINVAL;

-	timestamp = read_fw_trace(trace_seq_data, 1) |
-		    ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+	timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+		    read_fw_trace(trace_seq_data, 2);
 	timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
 		    ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
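For context, and reading the layout from the diff alone (an assumption, not stated elsewhere on this page): trace word 1 carries the high 32 bits of the timestamp and word 2 the low 32 bits, so the two halves have to be combined as (hi << 32) | lo rather than the reverse. A small standalone illustration:

#include <stdint.h>
#include <stdio.h>

/* Combine two 32-bit trace words into one 64-bit timestamp. */
static uint64_t assemble_timestamp(uint32_t word_hi, uint32_t word_lo)
{
	return ((uint64_t)word_hi << 32) | word_lo;
}

int main(void)
{
	/* Hypothetical trace words: high half first, low half second. */
	uint32_t hi = 0x00000012, lo = 0x34567890;

	printf("timestamp = 0x%016llx\n",
	       (unsigned long long)assemble_timestamp(hi, lo));
	return 0;
}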

drivers/gpu/drm/imagination/pvr_queue.c

Lines changed: 14 additions & 4 deletions
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
 	return PVR_DRIVER_NAME;
 }

+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+	struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+	pvr_context_put(fence->queue->ctx);
+	dma_fence_free(&fence->base);
+}
+
 static void pvr_queue_fence_release(struct dma_fence *f)
 {
 	struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+	struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;

-	pvr_context_put(fence->queue->ctx);
-	dma_fence_free(f);
+	queue_work(pvr_dev->sched_wq, &fence->release_work);
 }

 static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,

 	pvr_context_get(queue->ctx);
 	fence->queue = queue;
+	INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
 	dma_fence_init(&fence->base, fence_ops,
 		       &fence_ctx->lock, fence_ctx->id,
 		       atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
 static void
 pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
 {
-	pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
-			     &queue->job_fence_ctx);
+	if (!fence->ops)
+		pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+				     &queue->job_fence_ctx);
 }

 /**
drivers/gpu/drm/imagination/pvr_queue.h

Lines changed: 4 additions & 0 deletions
@@ -5,6 +5,7 @@
 #define PVR_QUEUE_H

 #include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>

 #include "pvr_cccb.h"
 #include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {

 	/** @queue: Queue that created this fence. */
 	struct pvr_queue *queue;
+
+	/** @release_work: Fence release work structure. */
+	struct work_struct release_work;
 };

 /**
drivers/gpu/drm/imagination/pvr_vm.c

Lines changed: 108 additions & 26 deletions
@@ -293,8 +293,9 @@ pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,

 static int
 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
-			  struct pvr_vm_context *vm_ctx, u64 device_addr,
-			  u64 size)
+			  struct pvr_vm_context *vm_ctx,
+			  struct pvr_gem_object *pvr_obj,
+			  u64 device_addr, u64 size)
 {
 	int err;

@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
 		goto err_bind_op_fini;
 	}

+	bind_op->pvr_obj = pvr_obj;
 	bind_op->vm_ctx = vm_ctx;
 	bind_op->device_addr = device_addr;
 	bind_op->size = size;
@@ -597,20 +599,6 @@ pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
 	return ERR_PTR(err);
 }

-/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
-	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
-			     vm_ctx->gpuvm_mgr.mm_range));
-}
-
 /**
  * pvr_vm_context_release() - Teardown a VM context.
  * @ref_count: Pointer to reference counter of the VM context.
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
 	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
 	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

-	/* Unmap operations don't have an object to lock. */
-	if (!pvr_obj)
-		return 0;
-
-	/* Acquire lock on the GEM being mapped. */
+	/* Acquire lock on the GEM object being mapped/unmapped. */
 	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
 }

@@ -772,8 +756,10 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
 }

 /**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
  * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
  * @device_addr: Virtual device address at the start of the target mapping.
  * @size: Size of the target mapping.
  *
@@ -784,9 +770,13 @@ pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
  *  * Any error encountered while performing internal operations required to
  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
  *    pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
  */
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+			struct pvr_gem_object *pvr_obj,
+			u64 device_addr, u64 size)
 {
 	struct pvr_vm_bind_op bind_op = {0};
 	struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 		},
 	};

-	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
-					    size);
+	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+					    device_addr, size);
 	if (err)
 		return err;

+	pvr_gem_object_get(pvr_obj);
+
 	err = drm_gpuvm_exec_lock(&vm_exec);
 	if (err)
 		goto err_cleanup;
@@ -818,6 +810,96 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
 	return err;
 }

+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+		 u64 device_addr, u64 size)
+{
+	int err;
+
+	mutex_lock(&vm_ctx->lock);
+	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+	mutex_unlock(&vm_ctx->lock);
+
+	return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ *  * 0 on success,
+ *  * Any error encountered by drm_gpuva_find,
+ *  * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+	struct pvr_gem_object *pvr_obj;
+	struct drm_gpuva *va;
+	int err;
+
+	mutex_lock(&vm_ctx->lock);
+
+	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+	if (va) {
+		pvr_obj = gem_to_pvr_gem(va->gem.obj);
+		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+					      va->va.addr, va->va.range);
+	} else {
+		err = -ENOENT;
+	}
+
+	mutex_unlock(&vm_ctx->lock);
+
+	return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+	mutex_lock(&vm_ctx->lock);
+
+	for (;;) {
+		struct pvr_gem_object *pvr_obj;
+		struct drm_gpuva *va;
+
+		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+					  vm_ctx->gpuvm_mgr.mm_start,
+					  vm_ctx->gpuvm_mgr.mm_range);
+		if (!va)
+			break;
+
+		pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+						va->va.addr, va->va.range));
+	}
+
+	mutex_unlock(&vm_ctx->lock);
+}
+
 /* Static data areas are determined by firmware. */
 static const struct drm_pvr_static_data_area static_data_areas[] = {
 	{
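The restructuring above keeps pvr_vm_unmap() as the address-only entry point, but it now resolves the backing GEM object via drm_gpuva_find() under vm_ctx->lock and then delegates to the object-aware locked helper, so the object can be locked and referenced for the duration of the unmap. A rough userspace analog of that "resolve under the lock, then reuse the locked helper" structure, with made-up names and a flat table standing in for the VA space:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t addr, size; int backing_obj; };

static pthread_mutex_t va_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mapping table[] = { { 0x1000, 0x2000, 42 } };
static const int table_len = 1;

/* Caller must hold va_lock; knows which object backs the range. */
static int unmap_obj_locked(int obj, uint64_t addr, uint64_t size)
{
	printf("unmapping obj %d at 0x%llx (+0x%llx)\n",
	       obj, (unsigned long long)addr, (unsigned long long)size);
	return 0;
}

/* Address-only entry point: look up the mapping under the same lock. */
static int unmap_by_addr(uint64_t addr, uint64_t size)
{
	int err = -2;  /* stand-in for -ENOENT */

	pthread_mutex_lock(&va_lock);
	for (int i = 0; i < table_len; i++) {
		if (table[i].addr == addr && table[i].size == size) {
			err = unmap_obj_locked(table[i].backing_obj,
					       table[i].addr, table[i].size);
			break;
		}
	}
	pthread_mutex_unlock(&va_lock);

	return err;
}

int main(void)
{
	return unmap_by_addr(0x1000, 0x2000) ? 1 : 0;
}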

drivers/gpu/drm/imagination/pvr_vm.h

Lines changed: 3 additions & 0 deletions
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
 int pvr_vm_map(struct pvr_vm_context *vm_ctx,
 	       struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
 	       u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+		     struct pvr_gem_object *pvr_obj,
+		     u64 device_addr, u64 size);
 int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
 void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
