@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/slab.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/videobuf2-dma-contig.h>
@@ -172,8 +173,6 @@ struct pispbe_node {
         struct mutex node_lock;
         /* vb2_queue lock */
         struct mutex queue_lock;
-        /* Protect pispbe_node->ready_queue and pispbe_buffer->ready_list */
-        spinlock_t ready_lock;
         struct list_head ready_queue;
         struct vb2_queue queue;
         struct v4l2_format format;
@@ -219,6 +218,9 @@ struct pispbe_hw_enables {

 /* Records a job configuration and memory addresses. */
 struct pispbe_job_descriptor {
+        struct list_head queue;
+        struct pispbe_buffer *buffers[PISPBE_NUM_NODES];
+        struct pispbe_node_group *node_group;
         dma_addr_t hw_dma_addrs[N_HW_ADDRESSES];
         struct pisp_be_tiles_config *config;
         struct pispbe_hw_enables hw_enables;
@@ -235,8 +237,10 @@ struct pispbe_dev {
         struct clk *clk;
         struct pispbe_node_group node_group[PISPBE_NUM_NODE_GROUPS];
         struct pispbe_job queued_job, running_job;
-        spinlock_t hw_lock; /* protects "hw_busy" flag and streaming_map */
+        /* protects "hw_busy" flag, streaming_map and job_queue */
+        spinlock_t hw_lock;
         bool hw_busy; /* non-zero if a job is queued or is being started */
+        struct list_head job_queue;
         int irq;
         u32 hw_version;
         u8 done, started;
@@ -460,43 +464,49 @@ static void pispbe_xlate_addrs(struct pispbe_job_descriptor *job,
  * For Output0, Output1, Tdn and Stitch, a buffer only needs to be
  * available if the blocks are enabled in the config.
  *
- * Needs to be called with hw_lock held.
+ * If all the buffers required to form a job are available, append the
+ * job descriptor to the job queue to be later queued to the HW.
  *
  * Returns 0 if a job has been successfully prepared, < 0 otherwise.
  */
-static int pispbe_prepare_job(struct pispbe_node_group *node_group,
-                              struct pispbe_job_descriptor *job)
+static int pispbe_prepare_job(struct pispbe_node_group *node_group)
 {
+        struct pispbe_job_descriptor __free(kfree) *job = NULL;
         struct pispbe_buffer *buf[PISPBE_NUM_NODES] = {};
         struct pispbe_dev *pispbe = node_group->pispbe;
+        unsigned int streaming_map;
         unsigned int config_index;
         struct pispbe_node *node;
-        unsigned long flags;

-        lockdep_assert_held(&pispbe->hw_lock);
+        lockdep_assert_irqs_enabled();

-        memset(job, 0, sizeof(struct pispbe_job_descriptor));
+        scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+                static const u32 mask = BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE);

-        if (((BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)) &
-             node_group->streaming_map) !=
-            (BIT(CONFIG_NODE) | BIT(MAIN_INPUT_NODE)))
-                return -ENODEV;
+                if ((node_group->streaming_map & mask) != mask)
+                        return -ENODEV;
+
+                /*
+                 * Take a copy of streaming_map: nodes activated after this
+                 * point are ignored when preparing this job.
+                 */
+                streaming_map = node_group->streaming_map;
+        }
+
+        job = kzalloc(sizeof(*job), GFP_KERNEL);
+        if (!job)
+                return -ENOMEM;

         node = &node_group->node[CONFIG_NODE];
-        spin_lock_irqsave(&node->ready_lock, flags);
         buf[CONFIG_NODE] = list_first_entry_or_null(&node->ready_queue,
                                                     struct pispbe_buffer,
                                                     ready_list);
-        if (buf[CONFIG_NODE]) {
-                list_del(&buf[CONFIG_NODE]->ready_list);
-                pispbe->queued_job.buf[CONFIG_NODE] = buf[CONFIG_NODE];
-        }
-        spin_unlock_irqrestore(&node->ready_lock, flags);
-
-        /* Exit early if no config buffer has been queued. */
         if (!buf[CONFIG_NODE])
                 return -ENODEV;

+        list_del(&buf[CONFIG_NODE]->ready_list);
+        job->buffers[CONFIG_NODE] = buf[CONFIG_NODE];
+
         config_index = buf[CONFIG_NODE]->vb.vb2_buf.index;
         job->config = &node_group->config[config_index];
         job->tiles = node_group->config_dma_addr +
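
Reviewer note, not part of the patch: scoped_guard(), from <linux/cleanup.h>, holds the lock for exactly the braced block and releases it on every exit path, including the early "return -ENODEV" above. pispbe_prepare_job() uses it to validate and snapshot streaming_map under hw_lock, then works on the private copy unlocked. A minimal sketch of the idiom, with a hypothetical struct foo and foo_snapshot_active() (illustration only, not driver code):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device state, for illustration only. */
struct foo {
        spinlock_t lock;        /* protects active_map */
        u32 active_map;
};

static int foo_snapshot_active(struct foo *f, u32 *snapshot)
{
        scoped_guard(spinlock_irq, &f->lock) {
                if (!f->active_map)
                        return -ENODEV; /* guard drops the lock here too */

                *snapshot = f->active_map;      /* copy under the lock */
        }

        /* Lock released; later changes to active_map are ignored. */
        return 0;
}

This is also why the patch swaps lockdep_assert_held() for lockdep_assert_irqs_enabled(): the function now takes hw_lock itself, briefly, instead of requiring the caller to hold it.
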
@@ -516,7 +526,7 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
                         continue;

                 buf[i] = NULL;
-                if (!(node_group->streaming_map & BIT(i)))
+                if (!(streaming_map & BIT(i)))
                         continue;

                 if ((!(rgb_en & PISP_BE_RGB_ENABLE_OUTPUT0) &&
@@ -543,25 +553,30 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
                 node = &node_group->node[i];

                 /* Pull a buffer from each V4L2 queue to form the queued job */
-                spin_lock_irqsave(&node->ready_lock, flags);
                 buf[i] = list_first_entry_or_null(&node->ready_queue,
                                                   struct pispbe_buffer,
                                                   ready_list);
                 if (buf[i]) {
                         list_del(&buf[i]->ready_list);
-                        pispbe->queued_job.buf[i] = buf[i];
+                        job->buffers[i] = buf[i];
                 }
-                spin_unlock_irqrestore(&node->ready_lock, flags);

                 if (!buf[i] && !ignore_buffers)
                         goto err_return_buffers;
         }

-        pispbe->queued_job.node_group = node_group;
+        job->node_group = node_group;

         /* Convert buffers to DMA addresses for the hardware */
         pispbe_xlate_addrs(job, buf, node_group);

+        scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+                list_add_tail(&job->queue, &pispbe->job_queue);
+        }
+
+        /* Set job to NULL to avoid automatic release due to __free(). */
+        job = NULL;
+
         return 0;

 err_return_buffers:
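
Reviewer note, again outside the patch: the __free(kfree) annotation on "job" (the kfree cleanup is defined via DEFINE_FREE() in <linux/slab.h>, hence the new include) frees the allocation automatically whenever the variable goes out of scope still non-NULL. That is why none of the error paths above need an explicit kfree(job), and why the success path must null the pointer once the list owns the descriptor. A sketch with a hypothetical struct bar and bar_enqueue() (illustration only):

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical queue entry, for illustration only. */
struct bar {
        struct list_head node;
        int payload;
};

static int bar_enqueue(struct list_head *bar_queue, int payload)
{
        struct bar *b __free(kfree) = kzalloc(sizeof(*b), GFP_KERNEL);

        if (!b)
                return -ENOMEM;

        if (payload < 0)
                return -EINVAL; /* b is kfree()d automatically */

        b->payload = payload;
        list_add_tail(&b->node, bar_queue);

        /* The queue owns the allocation now; disarm the auto-kfree(). */
        b = NULL;

        return 0;
}

<linux/cleanup.h> also offers no_free_ptr() to hand off ownership and disarm the cleanup in one step; this patch spells it out with the explicit assignment instead.
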
@@ -572,63 +587,51 @@ static int pispbe_prepare_job(struct pispbe_node_group *node_group,
                         continue;

                 /* Return the buffer to the ready_list queue */
-                spin_lock_irqsave(&n->ready_lock, flags);
                 list_add(&buf[i]->ready_list, &n->ready_queue);
-                spin_unlock_irqrestore(&n->ready_lock, flags);
         }

-        memset(&pispbe->queued_job, 0, sizeof(pispbe->queued_job));
-
         return -ENODEV;
 }

 static void pispbe_schedule(struct pispbe_dev *pispbe,
                             struct pispbe_node_group *node_group,
                             bool clear_hw_busy)
 {
-        struct pispbe_job_descriptor job;
-        unsigned long flags;
+        struct pispbe_job_descriptor *job;

-        spin_lock_irqsave(&pispbe->hw_lock, flags);
+        scoped_guard(spinlock_irqsave, &pispbe->hw_lock) {
+                if (clear_hw_busy)
+                        pispbe->hw_busy = false;

-        if (clear_hw_busy)
-                pispbe->hw_busy = false;
-
-        if (pispbe->hw_busy)
-                goto unlock_and_return;
+                if (pispbe->hw_busy)
+                        return;

-        for (unsigned int i = 0; i < PISPBE_NUM_NODE_GROUPS; i++) {
-                int ret;
+                job = list_first_entry_or_null(&pispbe->job_queue,
+                                               struct pispbe_job_descriptor,
+                                               queue);
+                if (!job)
+                        return;

-                /* Schedule jobs only for a specific group. */
-                if (node_group && &pispbe->node_group[i] != node_group)
+                if (node_group && job->node_group != node_group)
                         continue;

-                /*
-                 * Prepare a job for this group, if the group is not ready
-                 * continue and try with the next one.
-                 */
-                ret = pispbe_prepare_job(&pispbe->node_group[i], &job);
-                if (ret)
-                        continue;
-
-                /*
-                 * We can kick the job off without the hw_lock, as this can
-                 * never run again until hw_busy is cleared, which will happen
-                 * only when the following job has been queued and an interrupt
-                 * is rised.
-                 */
-                pispbe->hw_busy = true;
-                spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+                list_del(&job->queue);

-                pispbe_queue_job(pispbe, &job);
+                for (unsigned int i = 0; i < PISPBE_NUM_NODES; i++)
+                        pispbe->queued_job.buf[i] = job->buffers[i];
+                pispbe->queued_job.node_group = job->node_group;

-                return;
+                pispbe->hw_busy = true;
         }

-unlock_and_return:
-        /* No job has been queued, just release the lock and return. */
-        spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+        /*
+         * We can kick the job off without the hw_lock, as this can
+         * never run again until hw_busy is cleared, which will happen
+         * only when the following job has been queued and an interrupt
+         * is raised.
+         */
+        pispbe_queue_job(pispbe, job);
+        kfree(job);
 }

 static void pispbe_isr_jobdone(struct pispbe_dev *pispbe,
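
Reviewer note on the reworked scheduler, beyond what the in-code comment says: the job is popped and hw_busy is set inside the guard, but the hardware is programmed only after the guard has dropped hw_lock, which keeps the IRQ-disabled window short. The same shape, reduced to a hypothetical engine (struct eng, eng_kick() and hw_start() are made up, not driver API):

#include <linux/cleanup.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical engine and work item, for illustration only. */
struct eng {
        spinlock_t lock;        /* protects busy and pending */
        bool busy;
        struct list_head pending;
};

struct work_item {
        struct list_head queue;
};

/* Assumed to program the hardware; not a real API. */
void hw_start(struct work_item *w);

static void eng_kick(struct eng *e)
{
        struct work_item *w;

        scoped_guard(spinlock_irqsave, &e->lock) {
                if (e->busy)
                        return;

                w = list_first_entry_or_null(&e->pending,
                                             struct work_item, queue);
                if (!w)
                        return;

                list_del(&w->queue);
                /* Assume a completion IRQ later clears busy and re-kicks. */
                e->busy = true;
        }

        /* Lock dropped: program the hardware with interrupts enabled. */
        hw_start(w);
        kfree(w);
}

Because busy is set before the lock is released, any concurrent caller sees the engine busy and backs off; only the job-done path clears the flag and re-enters the scheduler.
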
@@ -881,18 +884,16 @@ static void pispbe_node_buffer_queue(struct vb2_buffer *buf)
         struct pispbe_node *node = vb2_get_drv_priv(buf->vb2_queue);
         struct pispbe_node_group *node_group = node->node_group;
         struct pispbe_dev *pispbe = node->node_group->pispbe;
-        unsigned long flags;

         dev_dbg(pispbe->dev, "%s: for node %s\n", __func__, NODE_NAME(node));
-        spin_lock_irqsave(&node->ready_lock, flags);
         list_add_tail(&buffer->ready_list, &node->ready_queue);
-        spin_unlock_irqrestore(&node->ready_lock, flags);

         /*
          * Every time we add a buffer, check if there's now some work for the hw
          * to do, but only for this client.
          */
-        pispbe_schedule(node_group->pispbe, node_group, false);
+        if (!pispbe_prepare_job(node_group))
+                pispbe_schedule(pispbe, node_group, false);
 }

 static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
@@ -901,35 +902,33 @@ static int pispbe_node_start_streaming(struct vb2_queue *q, unsigned int count)
         struct pispbe_node_group *node_group = node->node_group;
         struct pispbe_dev *pispbe = node_group->pispbe;
         struct pispbe_buffer *buf, *tmp;
-        unsigned long flags;
         int ret;

         ret = pm_runtime_resume_and_get(pispbe->dev);
         if (ret < 0)
                 goto err_return_buffers;

-        spin_lock_irqsave(&pispbe->hw_lock, flags);
-        node->node_group->streaming_map |= BIT(node->id);
-        node->node_group->sequence = 0;
-        spin_unlock_irqrestore(&pispbe->hw_lock, flags);
+        scoped_guard(spinlock_irq, &pispbe->hw_lock) {
+                node->node_group->streaming_map |= BIT(node->id);
+                node->node_group->sequence = 0;
+        }

         dev_dbg(pispbe->dev, "%s: for node %s (count %u)\n",
                 __func__, NODE_NAME(node), count);
         dev_dbg(pispbe->dev, "Nodes streaming for this group now 0x%x\n",
                 node->node_group->streaming_map);

         /* Maybe we're ready to run. */
-        pispbe_schedule(node_group->pispbe, node_group, false);
+        if (!pispbe_prepare_job(node_group))
+                pispbe_schedule(pispbe, node_group, false);

         return 0;

 err_return_buffers:
-        spin_lock_irqsave(&pispbe->hw_lock, flags);
         list_for_each_entry_safe(buf, tmp, &node->ready_queue, ready_list) {
                 list_del(&buf->ready_list);
                 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
         }
-        spin_unlock_irqrestore(&pispbe->hw_lock, flags);

         return ret;
 }
@@ -939,8 +938,9 @@ static void pispbe_node_stop_streaming(struct vb2_queue *q)
         struct pispbe_node *node = vb2_get_drv_priv(q);
         struct pispbe_node_group *node_group = node->node_group;
         struct pispbe_dev *pispbe = node_group->pispbe;
+        struct pispbe_job_descriptor *job, *temp;
         struct pispbe_buffer *buf;
-        unsigned long flags;
+        LIST_HEAD(tmp_list);

         /*
          * Now this is a bit awkward. In a simple M2M device we could just wait
952
952
* This may return buffers out of order.
953
953
*/
954
954
dev_dbg (pispbe -> dev , "%s: for node %s\n" , __func__ , NODE_NAME (node ));
955
- spin_lock_irqsave (& pispbe -> hw_lock , flags );
956
955
do {
957
- unsigned long flags1 ;
958
-
959
- spin_lock_irqsave (& node -> ready_lock , flags1 );
960
956
buf = list_first_entry_or_null (& node -> ready_queue ,
961
957
struct pispbe_buffer ,
962
958
ready_list );
963
959
if (buf ) {
964
960
list_del (& buf -> ready_list );
965
961
vb2_buffer_done (& buf -> vb .vb2_buf , VB2_BUF_STATE_ERROR );
966
962
}
967
- spin_unlock_irqrestore (& node -> ready_lock , flags1 );
968
963
} while (buf );
969
- spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
970
964
971
965
vb2_wait_for_all_buffers (& node -> queue );
972
966
973
- spin_lock_irqsave (& pispbe -> hw_lock , flags );
967
+ spin_lock_irq (& pispbe -> hw_lock );
974
968
node_group -> streaming_map &= ~BIT (node -> id );
975
- spin_unlock_irqrestore (& pispbe -> hw_lock , flags );
969
+
970
+ if (node_group -> streaming_map == 0 ) {
971
+ /*
972
+ * If all nodes have stopped streaming release all jobs
973
+ * without holding the lock.
974
+ */
975
+ list_splice_init (& pispbe -> job_queue , & tmp_list );
976
+ }
977
+ spin_unlock_irq (& pispbe -> hw_lock );
978
+
979
+ list_for_each_entry_safe (job , temp , & tmp_list , queue ) {
980
+ list_del (& job -> queue );
981
+ kfree (job );
982
+ }
976
983
977
984
pm_runtime_mark_last_busy (pispbe -> dev );
978
985
pm_runtime_put_autosuspend (pispbe -> dev );
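
Reviewer note: list_splice_init() detaches the whole job_queue onto the stack-local tmp_list in O(1) while hw_lock is held, so the kfree() loop runs with the lock released, exactly as the in-code comment intends. The drain idiom in isolation (struct item and drain_items() are placeholders, not driver code):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical list entry, for illustration only. */
struct item {
        struct list_head node;
};

static void drain_items(spinlock_t *lock, struct list_head *item_list)
{
        struct item *it, *tmp;
        LIST_HEAD(local);

        /* O(1) handover: item_list becomes empty, local takes its nodes. */
        spin_lock_irq(lock);
        list_splice_init(item_list, &local);
        spin_unlock_irq(lock);

        /* Free everything without the lock held. */
        list_for_each_entry_safe(it, tmp, &local, node) {
                list_del(&it->node);
                kfree(it);
        }
}

Draining only once streaming_map reaches zero matches the patch's rule that pending jobs are released when the last node of the group stops streaming.
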
@@ -1432,7 +1439,6 @@ static int pispbe_init_node(struct pispbe_node_group *node_group,
         mutex_init(&node->node_lock);
         mutex_init(&node->queue_lock);
         INIT_LIST_HEAD(&node->ready_queue);
-        spin_lock_init(&node->ready_lock);

         node->format.type = node->buf_type;
         pispbe_node_def_fmt(node);
@@ -1731,6 +1737,8 @@ static int pispbe_probe(struct platform_device *pdev)
         if (!pispbe)
                 return -ENOMEM;

+        INIT_LIST_HEAD(&pispbe->job_queue);
+
         dev_set_drvdata(&pdev->dev, pispbe);
         pispbe->dev = &pdev->dev;
         platform_set_drvdata(pdev, pispbe);