@@ -80,6 +80,11 @@ struct virtnet_stat_desc {
	size_t offset;
};

+struct virtnet_sq_free_stats {
+	u64 packets;
+	u64 bytes;
+};
+
struct virtnet_sq_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
@@ -304,6 +309,12 @@ struct virtnet_info {
	/* Work struct for config space updates */
	struct work_struct config_work;

+	/* Work struct for setting rx mode */
+	struct work_struct rx_mode_work;
+
+	/* OK to queue work setting RX mode? */
+	bool rx_mode_work_enabled;
+
	/* Does the affinity hint is set for virtqueues? */
	bool affinity_hint_set;

@@ -366,6 +377,31 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
	return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+			    struct virtnet_sq_free_stats *stats)
+{
+	unsigned int len;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		++stats->packets;
+
+		if (!is_xdp_frame(ptr)) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			stats->bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			stats->bytes += xdp_get_frame_len(frame);
+			xdp_return_frame(frame);
+		}
+	}
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
@@ -447,6 +483,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
	spin_unlock_bh(&vi->refill_lock);
}

+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	vi->rx_mode_work_enabled = true;
+	rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	vi->rx_mode_work_enabled = false;
+	rtnl_unlock();
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
				    struct virtqueue *vq)
{
@@ -776,39 +826,21 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
	virtnet_rq_free_buf(vi, rq, buf);
}

-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
-	unsigned int len;
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(!is_xdp_frame(ptr))) {
-			struct sk_buff *skb = ptr;
-
-			pr_debug("Sent skb %p\n", skb);
+	struct virtnet_sq_free_stats stats = {0};

-			bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += xdp_get_frame_len(frame);
-			xdp_return_frame(frame);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, in_napi, &stats);

	/* Avoid overhead when no packets have been processed
	 * happens when called speculatively from start_xmit.
	 */
-	if (!packets)
+	if (!stats.packets)
		return;

	u64_stats_update_begin(&sq->stats.syncp);
-	u64_stats_add(&sq->stats.bytes, bytes);
-	u64_stats_add(&sq->stats.packets, packets);
+	u64_stats_add(&sq->stats.bytes, stats.bytes);
+	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_update_end(&sq->stats.syncp);
}

@@ -848,7 +880,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
		virtqueue_napi_schedule(&sq->napi, sq->vq);
	} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
		/* More just got used, free them then recheck. */
-		free_old_xmit_skbs(sq, false);
+		free_old_xmit(sq, false);
		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
			netif_start_subqueue(dev, qnum);
			virtqueue_disable_cb(sq->vq);
@@ -947,15 +979,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
			    int n, struct xdp_frame **frames, u32 flags)
{
	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_sq_free_stats stats = {0};
	struct receive_queue *rq = vi->rq;
	struct bpf_prog *xdp_prog;
	struct send_queue *sq;
-	unsigned int len;
-	int packets = 0;
-	int bytes = 0;
	int nxmit = 0;
	int kicks = 0;
-	void *ptr;
	int ret;
	int i;

@@ -974,20 +1003,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
	}

	/* Free up any pending old buffers before queueing new ones. */
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(is_xdp_frame(ptr))) {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += xdp_get_frame_len(frame);
-			xdp_return_frame(frame);
-		} else {
-			struct sk_buff *skb = ptr;
-
-			bytes += skb->len;
-			napi_consume_skb(skb, false);
-		}
-		packets++;
-	}
+	__free_old_xmit(sq, false, &stats);

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
@@ -1007,8 +1023,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
	}
out:
	u64_stats_update_begin(&sq->stats.syncp);
-	u64_stats_add(&sq->stats.bytes, bytes);
-	u64_stats_add(&sq->stats.packets, packets);
+	u64_stats_add(&sq->stats.bytes, stats.bytes);
+	u64_stats_add(&sq->stats.packets, stats.packets);
	u64_stats_add(&sq->stats.xdp_tx, n);
	u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
	u64_stats_add(&sq->stats.kicks, kicks);
@@ -2160,7 +2176,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)

		do {
			virtqueue_disable_cb(sq->vq);
-			free_old_xmit_skbs(sq, true);
+			free_old_xmit(sq, true);
		} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));

		if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
@@ -2308,7 +2324,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
	txq = netdev_get_tx_queue(vi->dev, index);
	__netif_tx_lock(txq, raw_smp_processor_id());
	virtqueue_disable_cb(sq->vq);
-	free_old_xmit_skbs(sq, true);
+	free_old_xmit(sq, true);

	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
		netif_tx_wake_queue(txq);
@@ -2398,7 +2414,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
		if (use_napi)
			virtqueue_disable_cb(sq->vq);

-		free_old_xmit_skbs(sq, false);
+		free_old_xmit(sq, false);

	} while (use_napi && kick &&
		 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
@@ -2550,8 +2566,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
-	       !virtqueue_is_broken(vi->cvq))
+	       !virtqueue_is_broken(vi->cvq)) {
+		cond_resched();
		cpu_relax();
+	}

	return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -2706,9 +2724,11 @@ static int virtnet_close(struct net_device *dev)
	return 0;
}

-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
{
-	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, rx_mode_work);
+	struct net_device *dev = vi->dev;
	struct scatterlist sg[2];
	struct virtio_net_ctrl_mac *mac_data;
	struct netdev_hw_addr *ha;
@@ -2721,6 +2741,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

+	rtnl_lock();
+
	vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
	vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

@@ -2738,14 +2760,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 vi->ctrl->allmulti ? "en" : "dis");

+	netif_addr_lock_bh(dev);
+
	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	mac_data = buf;
-	if (!buf)
+	if (!buf) {
+		netif_addr_unlock_bh(dev);
+		rtnl_unlock();
		return;
+	}

	sg_init_table(sg, 2);

@@ -2766,16 +2793,28 @@ static void virtnet_set_rx_mode(struct net_device *dev)
	netdev_for_each_mc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

+	netif_addr_unlock_bh(dev);
+
	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

+	rtnl_unlock();
+
	kfree(buf);
}

+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	if (vi->rx_mode_work_enabled)
+		schedule_work(&vi->rx_mode_work);
+}
+
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
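Note on the pattern above: .ndo_set_rx_mode can be invoked in atomic context (under the netdev address-list lock with BHs disabled), so the callback itself must not sleep; deferring to a work item lets the handler snapshot the address lists and then issue the potentially sleeping control-virtqueue commands from process context. Below is a minimal sketch of the same idiom for a hypothetical driver — the foo_ names are illustrative, not part of this patch; virtio-net additionally gates scheduling on rx_mode_work_enabled and takes rtnl in the handler.

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct net_device *dev;
	struct work_struct rx_mode_work;	/* INIT_WORK()'d at probe time */
};

/* Work handler: runs in process context, so it may sleep while
 * programming the device with the snapshotted filter state.
 */
static void foo_rx_mode_work(struct work_struct *work)
{
	struct foo_priv *priv = container_of(work, struct foo_priv, rx_mode_work);
	struct net_device *dev = priv->dev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(dev);		/* snapshot the UC/MC lists */
	netdev_for_each_mc_addr(ha, dev) {
		/* copy ha->addr into a driver-local buffer here */
	}
	netif_addr_unlock_bh(dev);

	/* ... push the snapshot to the device, possibly sleeping ... */
}

/* .ndo_set_rx_mode: may run with BHs disabled, so only defer. */
static void foo_set_rx_mode(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	schedule_work(&priv->rx_mode_work);
}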
@@ -3856,6 +3895,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);
+	disable_rx_mode_work(vi);
+	flush_work(&vi->rx_mode_work);

	netif_tx_lock_bh(vi->dev);
	netif_device_detach(vi->dev);
@@ -3878,6 +3919,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
	virtio_device_ready(vdev);

	enable_delayed_refill(vi);
+	enable_rx_mode_work(vi);

	if (netif_running(vi->dev)) {
		err = virtnet_open(vi->dev);
@@ -4676,6 +4718,7 @@ static int virtnet_probe(struct virtio_device *vdev)
	vdev->priv = vi;

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+	INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
	spin_lock_init(&vi->refill_lock);

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4798,6 +4841,8 @@ static int virtnet_probe(struct virtio_device *vdev)
	if (vi->has_rss || vi->has_rss_hash_report)
		virtnet_init_default_rss(vi);

+	enable_rx_mode_work(vi);
+
	/* serialize netdev register + virtio_device_ready() with ndo_open() */
	rtnl_lock();

@@ -4895,6 +4940,8 @@ static void virtnet_remove(struct virtio_device *vdev)

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);
+	disable_rx_mode_work(vi);
+	flush_work(&vi->rx_mode_work);

	unregister_netdev(vi->dev);

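For readability, the lifecycle ordering that the probe/freeze/restore/remove hunks above establish for the deferred rx-mode work is condensed into one illustrative fragment; the surrounding function bodies are elided and the rationale is carried in the comments (names are taken from the patch itself).

/* virtnet_probe(): initialize the handler, then allow scheduling
 * before the netdev is registered.
 */
INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
	/* ... */
enable_rx_mode_work(vi);		/* rx_mode_work_enabled = true, under rtnl */

/* virtnet_freeze_down() / virtnet_remove(): forbid queuing new work
 * first, then wait for any handler that is already running, so
 * nothing touches the device after this point.
 */
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);

/* virtnet_restore_up(): re-allow deferral before the device is reopened. */
enable_rx_mode_work(vi);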