@@ -67,6 +67,7 @@ static LIST_HEAD(vector_devices);
 static int driver_registered;
 
 static void vector_eth_configure(int n, struct arglist *def);
+static int vector_mmsg_rx(struct vector_private *vp, int budget);
 
 /* Argument accessors to set variables (and/or set default values)
  * mtu, buffer sizing, default headroom, etc
@@ -77,7 +78,6 @@ static void vector_eth_configure(int n, struct arglist *def);
 #define DEFAULT_VECTOR_SIZE 64
 #define TX_SMALL_PACKET 128
 #define MAX_IOV_SIZE (MAX_SKB_FRAGS + 1)
-#define MAX_ITERATIONS 64
 
 static const struct {
 	const char string[ETH_GSTRING_LEN];
@@ -458,7 +458,6 @@ static int vector_send(struct vector_queue *qi)
 					vp->estats.tx_queue_running_average =
 						(vp->estats.tx_queue_running_average + result) >> 1;
 				}
-				netif_trans_update(qi->dev);
 				netif_wake_queue(qi->dev);
 				/* if TX is busy, break out of the send loop,
 				 * poll write IRQ will reschedule xmit for us
@@ -470,8 +469,6 @@ static int vector_send(struct vector_queue *qi)
 			}
 		}
 		spin_unlock(&qi->head_lock);
-	} else {
-		tasklet_schedule(&vp->tx_poll);
 	}
 	return queue_depth;
 }
@@ -608,7 +605,7 @@ static struct vector_queue *create_queue(
 
 	/*
 	 * We do not use the RX queue as a proper wraparound queue for now
-	 * This is not necessary because the consumption via netif_rx()
+	 * This is not necessary because the consumption via napi_gro_receive()
 	 * happens in-line. While we can try using the return code of
 	 * netif_rx() for flow control there are no drivers doing this today.
 	 * For this RX specific use we ignore the tail/head locks and
@@ -896,7 +893,7 @@ static int vector_legacy_rx(struct vector_private *vp)
 			skb->protocol = eth_type_trans(skb, skb->dev);
 			vp->dev->stats.rx_bytes += skb->len;
 			vp->dev->stats.rx_packets++;
-			netif_rx(skb);
+			napi_gro_receive(&vp->napi, skb);
 		} else {
 			dev_kfree_skb_irq(skb);
 		}
@@ -955,7 +952,7 @@ static int writev_tx(struct vector_private *vp, struct sk_buff *skb)
  * mmsg vector matched to an skb vector which we prepared earlier.
  */
 
-static int vector_mmsg_rx(struct vector_private *vp)
+static int vector_mmsg_rx(struct vector_private *vp, int budget)
 {
 	int packet_count, i;
 	struct vector_queue *qi = vp->rx_queue;
@@ -972,6 +969,9 @@ static int vector_mmsg_rx(struct vector_private *vp)
 
 	/* Fire the Lazy Gun - get as many packets as we can in one go. */
 
+	if (budget > qi->max_depth)
+		budget = qi->max_depth;
+
 	packet_count = uml_vector_recvmmsg(
 		vp->fds->rx_fd, qi->mmsg_vector, qi->max_depth, 0);
 
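For reference, the batching above ultimately rests on the host's recvmmsg(2) syscall, which a helper such as uml_vector_recvmmsg() is assumed to wrap. Below is a minimal user-space sketch of that pattern, not driver code; the BATCH and FRAME constants and the batch_receive() name are invented for illustration.

/* Hypothetical sketch of a recvmmsg(2) batch receive; not part of the patch. */
#define _GNU_SOURCE
#include <sys/socket.h>
#include <string.h>

#define BATCH 64		/* invented, mirrors DEFAULT_VECTOR_SIZE */
#define FRAME 1536		/* invented per-packet buffer size */

static int batch_receive(int fd)
{
	static char bufs[BATCH][FRAME];
	struct mmsghdr msgs[BATCH];
	struct iovec iovs[BATCH];
	int i, n;

	memset(msgs, 0, sizeof(msgs));
	for (i = 0; i < BATCH; i++) {
		iovs[i].iov_base = bufs[i];
		iovs[i].iov_len  = FRAME;
		msgs[i].msg_hdr.msg_iov    = &iovs[i];
		msgs[i].msg_hdr.msg_iovlen = 1;
	}

	/* One syscall fills up to BATCH datagrams; msg_len records each size. */
	n = recvmmsg(fd, msgs, BATCH, MSG_DONTWAIT, NULL);
	return n;	/* -1 on error, otherwise number of packets received */
}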
@@ -1021,7 +1021,7 @@ static int vector_mmsg_rx(struct vector_private *vp)
 				 */
 				vp->dev->stats.rx_bytes += skb->len;
 				vp->dev->stats.rx_packets++;
-				netif_rx(skb);
+				napi_gro_receive(&vp->napi, skb);
 			} else {
 				/* Overlay header too short to do anything - discard.
 				 * We can actually keep this skb and reuse it,
@@ -1044,23 +1044,6 @@ static int vector_mmsg_rx(struct vector_private *vp)
 	return packet_count;
 }
 
-static void vector_rx(struct vector_private *vp)
-{
-	int err;
-	int iter = 0;
-
-	if ((vp->options & VECTOR_RX) > 0)
-		while (((err = vector_mmsg_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-			iter++;
-	else
-		while (((err = vector_legacy_rx(vp)) > 0) && (iter < MAX_ITERATIONS))
-			iter++;
-	if ((err != 0) && net_ratelimit())
-		netdev_err(vp->dev, "vector_rx: error(%d)\n", err);
-	if (iter == MAX_ITERATIONS)
-		netdev_err(vp->dev, "vector_rx: device stuck, remote end may have closed the connection\n");
-}
-
 static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct vector_private *vp = netdev_priv(dev);
@@ -1085,25 +1068,15 @@ static int vector_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_sent_queue(vp->dev, skb->len);
 	queue_depth = vector_enqueue(vp->tx_queue, skb);
 
-	/* if the device queue is full, stop the upper layers and
-	 * flush it.
-	 */
-
-	if (queue_depth >= vp->tx_queue->max_depth - 1) {
-		vp->estats.tx_kicks++;
-		netif_stop_queue(dev);
-		vector_send(vp->tx_queue);
-		return NETDEV_TX_OK;
-	}
-	if (netdev_xmit_more()) {
+	if (queue_depth < vp->tx_queue->max_depth && netdev_xmit_more()) {
 		mod_timer(&vp->tl, vp->coalesce);
 		return NETDEV_TX_OK;
+	} else {
+		queue_depth = vector_send(vp->tx_queue);
+		if (queue_depth > 0)
+			napi_schedule(&vp->napi);
 	}
-	if (skb->len < TX_SMALL_PACKET) {
-		vp->estats.tx_kicks++;
-		vector_send(vp->tx_queue);
-	} else
-		tasklet_schedule(&vp->tx_poll);
+
 	return NETDEV_TX_OK;
 }
 
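The rewritten xmit path batches while netdev_xmit_more() reports more packets in flight and the queue still has room, and otherwise flushes immediately, handing any leftover work to NAPI. A condensed, hypothetical sketch of that decision follows; my_priv, my_enqueue() and my_flush_queue() are invented stand-ins for the driver's vector_private, vector_enqueue() and vector_send(), and the timer arming uses the generic jiffies form rather than the driver's own expiry value.

/* Sketch only: the transmit decision, with invented stand-in helpers. */
#include <linux/netdevice.h>
#include <linux/timer.h>

struct my_priv {
	struct napi_struct napi;
	struct timer_list coalesce_timer;
	unsigned long coalesce;		/* coalescing delay in jiffies */
	int max_depth;
};

static int my_enqueue(struct my_priv *priv, struct sk_buff *skb)
{
	return 0;	/* stand-in: the real driver adds the skb to its TX ring */
}

static int my_flush_queue(struct my_priv *priv)
{
	return 0;	/* stand-in: the real driver runs its sendmmsg path */
}

static netdev_tx_t sketch_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	int depth = my_enqueue(priv, skb);

	if (depth < priv->max_depth && netdev_xmit_more()) {
		/* More skbs are coming and there is room: keep batching and
		 * let a short coalescing timer trigger the flush.
		 */
		mod_timer(&priv->coalesce_timer, jiffies + priv->coalesce);
		return NETDEV_TX_OK;
	}

	/* Last skb of the burst (or queue full): flush now and hand any
	 * leftovers to the NAPI poll loop.
	 */
	if (my_flush_queue(priv) > 0)
		napi_schedule(&priv->napi);
	return NETDEV_TX_OK;
}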
@@ -1114,7 +1087,7 @@ static irqreturn_t vector_rx_interrupt(int irq, void *dev_id)
 
 	if (!netif_running(dev))
 		return IRQ_NONE;
-	vector_rx(vp);
+	napi_schedule(&vp->napi);
 	return IRQ_HANDLED;
 
 }
@@ -1133,8 +1106,7 @@ static irqreturn_t vector_tx_interrupt(int irq, void *dev_id)
 	 * tweaking the IRQ mask less costly
 	 */
 
-	if (vp->in_write_poll)
-		tasklet_schedule(&vp->tx_poll);
+	napi_schedule(&vp->napi);
 	return IRQ_HANDLED;
 
 }
@@ -1161,7 +1133,8 @@ static int vector_net_close(struct net_device *dev)
 		um_free_irq(vp->tx_irq, dev);
 		vp->tx_irq = 0;
 	}
-	tasklet_kill(&vp->tx_poll);
+	napi_disable(&vp->napi);
+	netif_napi_del(&vp->napi);
 	if (vp->fds->rx_fd > 0) {
 		if (vp->bpf)
 			uml_vector_detach_bpf(vp->fds->rx_fd, vp->bpf);
@@ -1193,15 +1166,32 @@ static int vector_net_close(struct net_device *dev)
 	return 0;
 }
 
-/* TX tasklet */
-
-static void vector_tx_poll(struct tasklet_struct *t)
+static int vector_poll(struct napi_struct *napi, int budget)
 {
-	struct vector_private *vp = from_tasklet(vp, t, tx_poll);
+	struct vector_private *vp = container_of(napi, struct vector_private, napi);
+	int work_done = 0;
+	int err;
+	bool tx_enqueued = false;
 
-	vp->estats.tx_kicks++;
-	vector_send(vp->tx_queue);
+	if ((vp->options & VECTOR_TX) != 0)
+		tx_enqueued = (vector_send(vp->tx_queue) > 0);
+	if ((vp->options & VECTOR_RX) > 0)
+		err = vector_mmsg_rx(vp, budget);
+	else {
+		err = vector_legacy_rx(vp);
+		if (err > 0)
+			err = 1;
+	}
+	if (err > 0)
+		work_done += err;
+
+	if (tx_enqueued || err > 0)
+		napi_schedule(napi);
+	if (work_done < budget)
+		napi_complete_done(napi, work_done);
+	return work_done;
 }
+
 static void vector_reset_tx(struct work_struct *work)
 {
 	struct vector_private *vp =
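The new vector_poll() follows the standard NAPI contract: do at most 'budget' worth of RX work per invocation, call napi_complete_done() only when the budget was not exhausted, and reschedule itself while TX or RX work remains. Below is a minimal, generic sketch of that contract, using hypothetical my_priv and my_rx_one() stand-ins rather than the vector driver's own helpers.

/* Generic NAPI poll sketch; my_priv and my_rx_one() are invented. */
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
};

static struct sk_buff *my_rx_one(struct my_priv *priv)
{
	return NULL;	/* stand-in: the real driver pulls a frame from its host fd */
}

static int sketch_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	/* Consume at most 'budget' packets per invocation. */
	while (work_done < budget) {
		struct sk_buff *skb = my_rx_one(priv);

		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	/* Finishing under budget means the queue ran dry: complete NAPI so
	 * interrupt-driven operation resumes. Returning work_done == budget
	 * keeps the device on the poll list.
	 */
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

This is also why vector_poll() calls napi_schedule() explicitly when TX packets were flushed or RX made progress: it keeps itself polled until both directions are idle.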
@@ -1265,6 +1255,9 @@ static int vector_net_open(struct net_device *dev)
 		goto out_close;
 	}
 
+	netif_napi_add(vp->dev, &vp->napi, vector_poll, get_depth(vp->parsed));
+	napi_enable(&vp->napi);
+
 	/* READ IRQ */
 	err = um_request_irq(
 		irq_rr + VECTOR_BASE_IRQ, vp->fds->rx_fd,
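This registration pairs with the napi_disable()/netif_napi_del() calls added to vector_net_close() above. A minimal sketch of that open/close lifecycle follows, using the same four-argument netif_napi_add() form the patch itself uses; my_priv, my_poll() and MY_NAPI_WEIGHT are hypothetical stand-ins for the driver's vector_poll() and get_depth(vp->parsed).

/* Sketch of the NAPI register/enable and disable/delete pairing. */
#include <linux/netdevice.h>

#define MY_NAPI_WEIGHT 64	/* invented poll weight */

struct my_priv {
	struct napi_struct napi;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	napi_complete_done(napi, 0);	/* stand-in poll; see the sketch above */
	return 0;
}

static int sketch_open(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, my_poll, MY_NAPI_WEIGHT);
	napi_enable(&priv->napi);
	netif_start_queue(dev);
	return 0;
}

static int sketch_stop(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);	/* waits for any in-flight poll to finish */
	netif_napi_del(&priv->napi);
	return 0;
}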
@@ -1306,15 +1299,15 @@ static int vector_net_open(struct net_device *dev)
 		uml_vector_attach_bpf(vp->fds->rx_fd, vp->bpf);
 
 	netif_start_queue(dev);
+	vector_reset_stats(vp);
 
 	/* clear buffer - it can happen that the host side of the interface
 	 * is full when we get here. In this case, new data is never queued,
 	 * SIGIOs never arrive, and the net never works.
 	 */
 
-	vector_rx(vp);
+	napi_schedule(&vp->napi);
 
-	vector_reset_stats(vp);
 	vdevice = find_device(vp->unit);
 	vdevice->opened = 1;
 
@@ -1543,15 +1536,16 @@ static const struct net_device_ops vector_netdev_ops = {
 #endif
 };
 
-
 static void vector_timer_expire(struct timer_list *t)
 {
 	struct vector_private *vp = from_timer(vp, t, tl);
 
 	vp->estats.tx_kicks++;
-	vector_send(vp->tx_queue);
+	napi_schedule(&vp->napi);
 }
 
+
+
 static void vector_eth_configure(
 		int n,
 		struct arglist *def
@@ -1634,7 +1628,6 @@ static void vector_eth_configure(
 	});
 
 	dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
-	tasklet_setup(&vp->tx_poll, vector_tx_poll);
 	INIT_WORK(&vp->reset_tx, vector_reset_tx);
 
 	timer_setup(&vp->tl, vector_timer_expire, 0);