@@ -6728,39 +6728,38 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 	}
 
 	for_each_buffer_cpu(buffer, cpu) {
+		struct buffer_data_page *old_free_data_page;
+		struct list_head old_pages;
+		unsigned long flags;
 
 		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			continue;
 
 		cpu_buffer = buffer->buffers[cpu];
 
+		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
 		/* Clear the head bit to make the link list normal to read */
 		rb_head_page_deactivate(cpu_buffer);
 
-		/* Now walk the list and free all the old sub buffers */
-		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
-			list_del_init(&bpage->list);
-			free_buffer_page(bpage);
-		}
-		/* The above loop stopped an the last page needing to be freed */
-		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
-		free_buffer_page(bpage);
-
-		/* Free the current reader page */
-		free_buffer_page(cpu_buffer->reader_page);
+		/*
+		 * Collect buffers from the cpu_buffer pages list and the
+		 * reader_page on old_pages, so they can be freed later when not
+		 * under a spinlock. The pages list is a linked list with no
+		 * head, adding old_pages turns it into a regular list with
+		 * old_pages being the head.
+		 */
+		list_add(&old_pages, cpu_buffer->pages);
+		list_add(&cpu_buffer->reader_page->list, &old_pages);
 
 		/* One page was allocated for the reader page */
 		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
						     struct buffer_page, list);
 		list_del_init(&cpu_buffer->reader_page->list);
 
-		/* The cpu_buffer pages are a link list with no head */
+		/* Install the new pages, remove the head from the list */
 		cpu_buffer->pages = cpu_buffer->new_pages.next;
-		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
-		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
-
-		/* Clear the new_pages list */
-		INIT_LIST_HEAD(&cpu_buffer->new_pages);
+		list_del_init(&cpu_buffer->new_pages);
 
 		cpu_buffer->head_page
			= list_entry(cpu_buffer->pages, struct buffer_page, list);
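
The subtle step in this hunk is list_add(&old_pages, cpu_buffer->pages): the per-CPU pages list is circular with no sentinel head, so splicing a head node into the ring turns it into an ordinary list that standard iteration can walk to completion. Below is a minimal userspace sketch of that trick; the simplified list_head helpers and the page_stub type are stand-ins for the kernel's <linux/list.h> and struct buffer_page, not the real ring-buffer code.

#include <stdio.h>
#include <stddef.h>

/* Simplified copies of the kernel's list primitives. */
struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page_stub {		/* stand-in for struct buffer_page */
	int id;
	struct list_head list;
};

int main(void)
{
	struct page_stub pages[3];
	struct list_head old_pages;

	/* Build a headless ring: each node links to the next, and a bare
	 * pointer into the ring plays the role of cpu_buffer->pages. */
	for (int i = 0; i < 3; i++) {
		pages[i].id = i;
		pages[i].list.next = &pages[(i + 1) % 3].list;
		pages[i].list.prev = &pages[(i + 2) % 3].list;
	}
	struct list_head *ring = &pages[0].list;

	/* Splice a sentinel into the ring; old_pages is now a normal list
	 * head, so a standard walk terminates instead of looping forever. */
	list_add(&old_pages, ring);

	for (struct list_head *p = old_pages.next; p != &old_pages; p = p->next)
		printf("page %d\n", container_of(p, struct page_stub, list)->id);
	return 0;
}

The second list_add() in the diff then pushes the reader page onto the same headed list, and list_del_init(&cpu_buffer->new_pages) later performs the inverse operation, detaching the new_pages sentinel in one call instead of the four lines of manual pointer surgery it replaces.
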
@@ -6769,11 +6768,20 @@ int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
 		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
 		cpu_buffer->nr_pages_to_update = 0;
 
-		free_pages((unsigned long)cpu_buffer->free_page, old_order);
+		old_free_data_page = cpu_buffer->free_page;
 		cpu_buffer->free_page = NULL;
 
 		rb_head_page_activate(cpu_buffer);
 
+		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+		/* Free old sub buffers */
+		list_for_each_entry_safe(bpage, tmp, &old_pages, list) {
+			list_del_init(&bpage->list);
+			free_buffer_page(bpage);
+		}
+		free_pages((unsigned long)old_free_data_page, old_order);
+
 		rb_check_pages(cpu_buffer);
 	}
 
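Taken together, the two hunks are the familiar collect-under-lock, free-after-unlock pattern: reader_lock is a raw spinlock taken with interrupts disabled, so every free_buffer_page() and the final free_pages() on the saved old_free_data_page are deferred until the lock is released. A hedged userspace sketch of the same shape, using a pthread spinlock and a hypothetical singly linked node list rather than the kernel APIs:

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

static pthread_spinlock_t lock;
static struct node *live_list;	/* plays the role of cpu_buffer->pages */

/* Swap in a new list, but defer every free() until after the unlock:
 * releasing memory inside the critical section would hold the spinlock
 * (and, in the kernel case, keep interrupts disabled) far longer than
 * the pointer swap itself requires. */
static void replace_list(struct node *new_list)
{
	struct node *old, *next;

	pthread_spin_lock(&lock);
	old = live_list;		/* detach under the lock... */
	live_list = new_list;
	pthread_spin_unlock(&lock);

	while (old) {			/* ...free with the lock dropped */
		next = old->next;
		free(old);
		old = next;
	}
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	live_list = calloc(1, sizeof(struct node));
	replace_list(NULL);		/* the old node is freed outside the lock */
	pthread_spin_destroy(&lock);
	return 0;
}

In the diff, saving cpu_buffer->free_page into the old_free_data_page local serves exactly this purpose: the pointer is captured inside the critical section, and the matching free_pages() call runs only after raw_spin_unlock_irqrestore().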