@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		if (guid_equal(&channel->offermsg.offer.if_type,
+			       &newchannel->offermsg.offer.if_type) &&
+		    guid_equal(&channel->offermsg.offer.if_instance,
+			       &newchannel->offermsg.offer.if_instance)) {
+			fnew = false;
+			newchannel->primary_channel = channel;
+			break;
+		}
+	}
+
 	init_vp_index(newchannel);
 
 	/* Remember the channels that should be cleaned up upon suspend. */
@@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	 */
 	atomic_dec(&vmbus_connection.offer_in_progress);
 
-	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-		if (guid_equal(&channel->offermsg.offer.if_type,
-			       &newchannel->offermsg.offer.if_type) &&
-		    guid_equal(&channel->offermsg.offer.if_instance,
-			       &newchannel->offermsg.offer.if_instance)) {
-			fnew = false;
-			break;
-		}
-	}
-
 	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
@@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 		/*
 		 * Process the sub-channel.
 		 */
-		newchannel->primary_channel = channel;
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
 	}
 
@@ -683,6 +683,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 	queue_work(wq, &newchannel->add_channel_work);
 }
 
+/*
+ * Check if CPUs used by other channels of the same device.
+ * It should only be called by init_vp_index().
+ */
+static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn)
+{
+	struct vmbus_channel *primary = chn->primary_channel;
+	struct vmbus_channel *sc;
+
+	lockdep_assert_held(&vmbus_connection.channel_mutex);
+
+	if (!primary)
+		return false;
+
+	if (primary->target_cpu == cpu)
+		return true;
+
+	list_for_each_entry(sc, &primary->sc_list, sc_list)
+		if (sc != chn && sc->target_cpu == cpu)
+			return true;
+
+	return false;
+}
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
@@ -702,6 +726,7 @@ static int next_numa_node_id;
 static void init_vp_index(struct vmbus_channel *channel)
 {
 	bool perf_chn = hv_is_perf_channel(channel);
+	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
 	struct cpumask *alloced_mask;
 	u32 target_cpu;
@@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel)
 		return;
 	}
 
-	while (true) {
-		numa_node = next_numa_node_id++;
-		if (numa_node == nr_node_ids) {
-			next_numa_node_id = 0;
-			continue;
+	for (i = 1; i <= ncpu + 1; i++) {
+		while (true) {
+			numa_node = next_numa_node_id++;
+			if (numa_node == nr_node_ids) {
+				next_numa_node_id = 0;
+				continue;
+			}
+			if (cpumask_empty(cpumask_of_node(numa_node)))
+				continue;
+			break;
+		}
+		alloced_mask = &hv_context.hv_numa_map[numa_node];
+
+		if (cpumask_weight(alloced_mask) ==
+		    cpumask_weight(cpumask_of_node(numa_node))) {
+			/*
+			 * We have cycled through all the CPUs in the node;
+			 * reset the alloced map.
+			 */
+			cpumask_clear(alloced_mask);
 		}
-		if (cpumask_empty(cpumask_of_node(numa_node)))
-			continue;
-		break;
-	}
-	alloced_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_weight(alloced_mask) ==
-	    cpumask_weight(cpumask_of_node(numa_node))) {
-		/*
-		 * We have cycled through all the CPUs in the node;
-		 * reset the alloced map.
-		 */
-		cpumask_clear(alloced_mask);
-	}
+		cpumask_xor(available_mask, alloced_mask,
+			    cpumask_of_node(numa_node));
 
-	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
+		target_cpu = cpumask_first(available_mask);
+		cpumask_set_cpu(target_cpu, alloced_mask);
 
-	target_cpu = cpumask_first(available_mask);
-	cpumask_set_cpu(target_cpu, alloced_mask);
+		if (channel->offermsg.offer.sub_channel_index >= ncpu ||
+		    i > ncpu || !hv_cpuself_used(target_cpu, channel))
+			break;
+	}
 
 	channel->target_cpu = target_cpu;
 