@@ -125,7 +125,7 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
 /*
  * Mark the doorbell as unregistered and reset job queue pointers.
  * This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
  * will be registered again.
  */
 static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -239,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
 	return &fence->base;
 }
 
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
 {
 	struct ivpu_device *vdev = job->vdev;
-
-	kref_get(&job->ref);
-	*link = job;
-
-	ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
-	struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
-	struct ivpu_device *vdev = job->vdev;
 	u32 i;
 
+	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+		 job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
 	for (i = 0; i < job->bo_count; i++)
 		if (job->bos[i])
 			drm_gem_object_put(&job->bos[i]->base.base);
 
 	dma_fence_put(job->done_fence);
 	ivpu_file_priv_put(&job->file_priv);
-
-	ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
 	kfree(job);
-
-	/* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
-	ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
-	struct ivpu_device *vdev = job->vdev;
-
-	ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-	kref_put(&job->ref, job_release);
 }
 
 static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 {
 	struct ivpu_device *vdev = file_priv->vdev;
 	struct ivpu_job *job;
-	int ret;
-
-	ret = ivpu_rpm_get(vdev);
-	if (ret < 0)
-		return NULL;
 
 	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
 	if (!job)
-		goto err_rpm_put;
-
-	kref_init(&job->ref);
+		return NULL;
 
 	job->vdev = vdev;
 	job->engine_idx = engine_idx;
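The hunk above drops the kref-based lifetime (job_get()/job_release()/job_put()) in favour of a plain ivpu_job_create()/ivpu_job_destroy() pair with a single owner. For readers unfamiliar with what was removed, here is a generic, self-contained sketch of that kref pattern; struct thing and the thing_*() helpers are placeholder names, not driver code:

#include <linux/kref.h>
#include <linux/slab.h>

struct thing {
	struct kref ref;
};

/* Called by kref_put() when the last reference is dropped. */
static void thing_release(struct kref *ref)
{
	struct thing *t = container_of(ref, struct thing, ref);

	kfree(t);
}

static struct thing *thing_create(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (t)
		kref_init(&t->ref);	/* refcount starts at 1, owned by the creator */
	return t;
}

static void thing_get(struct thing *t)
{
	kref_get(&t->ref);		/* take an extra reference for another holder */
}

static void thing_put(struct thing *t)
{
	kref_put(&t->ref, thing_release);	/* release callback runs on the last put */
}

With a single owner of the job (submission and completion), this refcount bookkeeping becomes unnecessary, which is what the rename to ivpu_job_create()/ivpu_job_destroy() reflects.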
@@ -306,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 	job->file_priv = ivpu_file_priv_get(file_priv);
 
 	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
 	return job;
 
 err_free_job:
 	kfree(job);
-err_rpm_put:
-	ivpu_rpm_put(vdev);
 	return NULL;
 }
 
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
 	struct ivpu_job *job;
 
@@ -333,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 	ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
 		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
 
+	ivpu_job_destroy(job);
 	ivpu_stop_job_timeout_detection(vdev);
 
-	job_put(job);
+	ivpu_rpm_put(vdev);
 	return 0;
 }
 
@@ -345,64 +315,76 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
 	unsigned long id;
 
 	xa_for_each(&vdev->submitted_jobs_xa, id, job)
-		ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+		ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
 }
 
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
 {
 	struct ivpu_file_priv *file_priv = job->file_priv;
 	struct ivpu_device *vdev = job->vdev;
 	struct xa_limit job_id_range;
 	struct ivpu_cmdq *cmdq;
 	int ret;
 
+	ret = ivpu_rpm_get(vdev);
+	if (ret < 0)
+		return ret;
+
 	mutex_lock(&file_priv->lock);
 
 	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
 	if (!cmdq) {
-		ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
-			  file_priv->ctx.id, job->engine_idx);
+		ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
+				      file_priv->ctx.id, job->engine_idx);
 		ret = -EINVAL;
-		goto err_unlock;
+		goto err_unlock_file_priv;
 	}
 
 	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
 	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
 
-	job_get(job, &job);
-	ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+	xa_lock(&vdev->submitted_jobs_xa);
+	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
 	if (ret) {
-		ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
-		goto err_job_put;
+		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+			 file_priv->ctx.id);
+		ret = -EBUSY;
+		goto err_unlock_submitted_jobs_xa;
 	}
 
 	ret = ivpu_cmdq_push_job(cmdq, job);
 	if (ret)
-		goto err_xa_erase;
+		goto err_erase_xa;
 
 	ivpu_start_job_timeout_detection(vdev);
 
-	ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
-		 job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
-		 job->engine_idx, cmdq->jobq->header.tail);
-
-	if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
-		ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
 		cmdq->jobq->header.head = cmdq->jobq->header.tail;
 		wmb(); /* Flush WC buffer for jobq header */
 	} else {
 		ivpu_cmdq_ring_db(vdev, cmdq);
 	}
 
+	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+		 job->job_id, file_priv->ctx.id, job->engine_idx,
+		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+	xa_unlock(&vdev->submitted_jobs_xa);
+
 	mutex_unlock(&file_priv->lock);
+
+	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
 	return 0;
 
-err_xa_erase:
-	xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
-	job_put(job);
-err_unlock:
+err_erase_xa:
+	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+	xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
 	mutex_unlock(&file_priv->lock);
+	ivpu_rpm_put(vdev);
 	return ret;
 }
 
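Note how the rewritten submit path takes xa_lock() on submitted_jobs_xa and uses __xa_alloc()/__xa_erase(), so allocating the job id, pushing the job and ringing the doorbell appear atomic to the completion path. Below is a minimal sketch of that locked-XArray publish pattern; my_table, struct my_entry, my_publish() and my_kick_hw() are illustrative placeholders, not part of the driver:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_table);		/* XArray with id allocation enabled */

struct my_entry {
	u32 id;
};

static int my_kick_hw(struct my_entry *e)
{
	return 0;				/* stands in for "ring the doorbell" */
}

/* Insert @e and start it without letting a completion see a half-published entry. */
static int my_publish(struct my_entry *e, struct xa_limit range)
{
	int ret;

	xa_lock(&my_table);

	/* __xa_alloc() expects xa_lock held; it may drop and retake it to allocate memory. */
	ret = __xa_alloc(&my_table, &e->id, e, range, GFP_KERNEL);
	if (ret)
		goto err_unlock;

	ret = my_kick_hw(e);
	if (ret)
		goto err_erase;

	xa_unlock(&my_table);
	return 0;

err_erase:
	__xa_erase(&my_table, e->id);
err_unlock:
	xa_unlock(&my_table);
	return ret;
}

In the driver the same shape appears with submitted_jobs_xa, ivpu_cmdq_push_job() and ivpu_cmdq_ring_db(), and the err_erase_xa/err_unlock_submitted_jobs_xa labels mirror the two failure points.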
@@ -508,44 +490,47 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			   params->buffer_count * sizeof(u32));
 	if (ret) {
 		ret = -EFAULT;
-		goto free_handles;
+		goto err_free_handles;
 	}
 
 	if (!drm_dev_enter(&vdev->drm, &idx)) {
 		ret = -ENODEV;
-		goto free_handles;
+		goto err_free_handles;
 	}
 
 	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
 		 file_priv->ctx.id, params->buffer_count);
 
-	job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
 	if (!job) {
 		ivpu_err(vdev, "Failed to create job\n");
 		ret = -ENOMEM;
-		goto dev_exit;
+		goto err_exit_dev;
 	}
 
 	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
 					      params->commands_offset);
 	if (ret) {
-		ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
-		goto job_put;
+		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+		goto err_destroy_job;
 	}
 
-	ret = ivpu_direct_job_submission(job);
-	if (ret) {
-		dma_fence_signal(job->done_fence);
-		ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
-	}
+	ret = ivpu_job_submit(job);
+	if (ret)
+		goto err_signal_fence;
 
-job_put:
-	job_put(job);
-dev_exit:
 	drm_dev_exit(idx);
-free_handles:
 	kfree(buf_handles);
+	return ret;
 
+err_signal_fence:
+	dma_fence_signal(job->done_fence);
+err_destroy_job:
+	ivpu_job_destroy(job);
+err_exit_dev:
+	drm_dev_exit(idx);
+err_free_handles:
+	kfree(buf_handles);
 	return ret;
 }
 
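The ioctl rework above also switches from fall-through cleanup labels (job_put:, dev_exit:, free_handles:) to error-only err_* labels that the success path never reaches. A generic sketch of that unwind convention, with placeholder acquire_*/release_*() helpers that must be undone in reverse order on failure:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int setup_abc(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	return 0;			/* success: none of the err_* labels run */

err_release_b:
	release_b();
err_release_a:
	release_a();
	return ret;
}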
@@ -567,7 +552,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
 	}
 
 	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-	ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
 	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
 		ivpu_start_job_timeout_detection(vdev);
 }