
Commit d95fcdf

Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio updates from Michael Tsirkin:

 - Per vq sizes in vdpa

 - Info query for block devices support in vdpa

 - DMA sync callbacks in vduse

 - Fixes, cleanups

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (35 commits)
  virtio_net: rename free_old_xmit_skbs to free_old_xmit
  virtio_net: unify the code for recycling the xmit ptr
  virtio-net: add cond_resched() to the command waiting loop
  virtio-net: convert rx mode setting to use workqueue
  virtio: packed: fix unmap leak for indirect desc table
  vDPA: report virtio-blk flush info to user space
  vDPA: report virtio-block read-only info to user space
  vDPA: report virtio-block write zeroes configuration to user space
  vDPA: report virtio-block discarding configuration to user space
  vDPA: report virtio-block topology info to user space
  vDPA: report virtio-block MQ info to user space
  vDPA: report virtio-block max segments in a request to user space
  vDPA: report virtio-block block-size to user space
  vDPA: report virtio-block max segment size to user space
  vDPA: report virtio-block capacity to user space
  virtio: make virtio_bus const
  vdpa: make vdpa_bus const
  vDPA/ifcvf: implement vdpa_config_ops.get_vq_num_min
  vDPA/ifcvf: get_max_vq_size to return max size
  virtio_vdpa: create vqs with the actual size
  ...
2 parents 0815d5c + 5da7137 commit d95fcdf

24 files changed: +521 additions, -82 deletions
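A note on one of the virtio-net fixes pulled in here ('add cond_resched() to the command waiting loop'): virtnet_send_command() busy-waits for the device to consume the control-virtqueue buffer, and a bare cpu_relax() spin can monopolize the CPU when the host side is slow to answer. The actual hunk appears in drivers/net/virtio_net.c below; as a hedged illustration only, here is a userspace analogue of the same loop, with sched_yield() standing in for the kernel's cond_resched() (all names here are invented for the demo, none are driver code):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool reply_ready;

/* stands in for the hypervisor completing the ctrl request */
static void *device_side(void *arg)
{
        (void)arg;
        atomic_store(&reply_ready, true);
        return NULL;
}

static void wait_for_ctrl_reply(void)
{
        /* kernel loop below does: cond_resched(); cpu_relax(); */
        while (!atomic_load(&reply_ready))
                sched_yield();  /* yield instead of spinning hot */
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, device_side, NULL);
        wait_for_ctrl_reply();
        pthread_join(t, NULL);
        puts("ctrl command acknowledged");
        return 0;
}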

drivers/net/virtio_net.c

Lines changed: 99 additions & 52 deletions
@@ -80,6 +80,11 @@ struct virtnet_stat_desc {
        size_t offset;
 };
 
+struct virtnet_sq_free_stats {
+       u64 packets;
+       u64 bytes;
+};
+
 struct virtnet_sq_stats {
        struct u64_stats_sync syncp;
        u64_stats_t packets;
@@ -304,6 +309,12 @@ struct virtnet_info {
        /* Work struct for config space updates */
        struct work_struct config_work;
 
+       /* Work struct for setting rx mode */
+       struct work_struct rx_mode_work;
+
+       /* OK to queue work setting RX mode? */
+       bool rx_mode_work_enabled;
+
        /* Does the affinity hint is set for virtqueues? */
        bool affinity_hint_set;
 
@@ -366,6 +377,31 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
        return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
 }
 
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+                           struct virtnet_sq_free_stats *stats)
+{
+       unsigned int len;
+       void *ptr;
+
+       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+               ++stats->packets;
+
+               if (!is_xdp_frame(ptr)) {
+                       struct sk_buff *skb = ptr;
+
+                       pr_debug("Sent skb %p\n", skb);
+
+                       stats->bytes += skb->len;
+                       napi_consume_skb(skb, in_napi);
+               } else {
+                       struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+                       stats->bytes += xdp_get_frame_len(frame);
+                       xdp_return_frame(frame);
+               }
+       }
+}
+
 /* Converting between virtqueue no. and kernel tx/rx queue no.
  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
  */
@@ -447,6 +483,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
        spin_unlock_bh(&vi->refill_lock);
 }
 
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+       rtnl_lock();
+       vi->rx_mode_work_enabled = true;
+       rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+       rtnl_lock();
+       vi->rx_mode_work_enabled = false;
+       rtnl_unlock();
+}
+
 static void virtqueue_napi_schedule(struct napi_struct *napi,
                                    struct virtqueue *vq)
 {
@@ -776,39 +826,21 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
        virtnet_rq_free_buf(vi, rq, buf);
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
 {
-       unsigned int len;
-       unsigned int packets = 0;
-       unsigned int bytes = 0;
-       void *ptr;
-
-       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               if (likely(!is_xdp_frame(ptr))) {
-                       struct sk_buff *skb = ptr;
-
-                       pr_debug("Sent skb %p\n", skb);
+       struct virtnet_sq_free_stats stats = {0};
 
-                       bytes += skb->len;
-                       napi_consume_skb(skb, in_napi);
-               } else {
-                       struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-                       bytes += xdp_get_frame_len(frame);
-                       xdp_return_frame(frame);
-               }
-               packets++;
-       }
+       __free_old_xmit(sq, in_napi, &stats);
 
        /* Avoid overhead when no packets have been processed
         * happens when called speculatively from start_xmit.
         */
-       if (!packets)
+       if (!stats.packets)
                return;
 
        u64_stats_update_begin(&sq->stats.syncp);
-       u64_stats_add(&sq->stats.bytes, bytes);
-       u64_stats_add(&sq->stats.packets, packets);
+       u64_stats_add(&sq->stats.bytes, stats.bytes);
+       u64_stats_add(&sq->stats.packets, stats.packets);
        u64_stats_update_end(&sq->stats.syncp);
 }
 
@@ -848,7 +880,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
                virtqueue_napi_schedule(&sq->napi, sq->vq);
        } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                /* More just got used, free them then recheck. */
-               free_old_xmit_skbs(sq, false);
+               free_old_xmit(sq, false);
                if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
                        netif_start_subqueue(dev, qnum);
                        virtqueue_disable_cb(sq->vq);
@@ -947,15 +979,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
                            int n, struct xdp_frame **frames, u32 flags)
 {
        struct virtnet_info *vi = netdev_priv(dev);
+       struct virtnet_sq_free_stats stats = {0};
        struct receive_queue *rq = vi->rq;
        struct bpf_prog *xdp_prog;
        struct send_queue *sq;
-       unsigned int len;
-       int packets = 0;
-       int bytes = 0;
        int nxmit = 0;
        int kicks = 0;
-       void *ptr;
        int ret;
        int i;
 
@@ -974,20 +1003,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
 
        /* Free up any pending old buffers before queueing new ones. */
-       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-               if (likely(is_xdp_frame(ptr))) {
-                       struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-                       bytes += xdp_get_frame_len(frame);
-                       xdp_return_frame(frame);
-               } else {
-                       struct sk_buff *skb = ptr;
-
-                       bytes += skb->len;
-                       napi_consume_skb(skb, false);
-               }
-               packets++;
-       }
+       __free_old_xmit(sq, false, &stats);
 
        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
@@ -1007,8 +1023,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
        }
 out:
        u64_stats_update_begin(&sq->stats.syncp);
-       u64_stats_add(&sq->stats.bytes, bytes);
-       u64_stats_add(&sq->stats.packets, packets);
+       u64_stats_add(&sq->stats.bytes, stats.bytes);
+       u64_stats_add(&sq->stats.packets, stats.packets);
        u64_stats_add(&sq->stats.xdp_tx, n);
        u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
        u64_stats_add(&sq->stats.kicks, kicks);
@@ -2160,7 +2176,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 
        do {
                virtqueue_disable_cb(sq->vq);
-               free_old_xmit_skbs(sq, true);
+               free_old_xmit(sq, true);
        } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
 
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
@@ -2308,7 +2324,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
        txq = netdev_get_tx_queue(vi->dev, index);
        __netif_tx_lock(txq, raw_smp_processor_id());
        virtqueue_disable_cb(sq->vq);
-       free_old_xmit_skbs(sq, true);
+       free_old_xmit(sq, true);
 
        if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
                netif_tx_wake_queue(txq);
@@ -2398,7 +2414,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
                if (use_napi)
                        virtqueue_disable_cb(sq->vq);
 
-               free_old_xmit_skbs(sq, false);
+               free_old_xmit(sq, false);
 
        } while (use_napi && kick &&
                 unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
@@ -2550,8 +2566,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
         * into the hypervisor, so the request should be handled immediately.
         */
        while (!virtqueue_get_buf(vi->cvq, &tmp) &&
-              !virtqueue_is_broken(vi->cvq))
+              !virtqueue_is_broken(vi->cvq)) {
+               cond_resched();
                cpu_relax();
+       }
 
        return vi->ctrl->status == VIRTIO_NET_OK;
 }
@@ -2706,9 +2724,11 @@ static int virtnet_close(struct net_device *dev)
        return 0;
 }
 
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
 {
-       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtnet_info *vi =
+               container_of(work, struct virtnet_info, rx_mode_work);
+       struct net_device *dev = vi->dev;
        struct scatterlist sg[2];
        struct virtio_net_ctrl_mac *mac_data;
        struct netdev_hw_addr *ha;
@@ -2721,6 +2741,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
                return;
 
+       rtnl_lock();
+
        vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
        vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
 
@@ -2738,14 +2760,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         vi->ctrl->allmulti ? "en" : "dis");
 
+       netif_addr_lock_bh(dev);
+
        uc_count = netdev_uc_count(dev);
        mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
        buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
                      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
        mac_data = buf;
-       if (!buf)
+       if (!buf) {
+               netif_addr_unlock_bh(dev);
+               rtnl_unlock();
                return;
+       }
 
        sg_init_table(sg, 2);
 
@@ -2766,16 +2793,28 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        netdev_for_each_mc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
+       netif_addr_unlock_bh(dev);
+
        sg_set_buf(&sg[1], mac_data,
                   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
                dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
 
+       rtnl_unlock();
+
        kfree(buf);
 }
 
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       if (vi->rx_mode_work_enabled)
+               schedule_work(&vi->rx_mode_work);
+}
+
 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
                                   __be16 proto, u16 vid)
 {
@@ -3856,6 +3895,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 
        /* Make sure no work handler is accessing the device */
        flush_work(&vi->config_work);
+       disable_rx_mode_work(vi);
+       flush_work(&vi->rx_mode_work);
 
        netif_tx_lock_bh(vi->dev);
        netif_device_detach(vi->dev);
@@ -3878,6 +3919,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
        virtio_device_ready(vdev);
 
        enable_delayed_refill(vi);
+       enable_rx_mode_work(vi);
 
        if (netif_running(vi->dev)) {
                err = virtnet_open(vi->dev);
@@ -4676,6 +4718,7 @@ static int virtnet_probe(struct virtio_device *vdev)
        vdev->priv = vi;
 
        INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+       INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
        spin_lock_init(&vi->refill_lock);
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4798,6 +4841,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        if (vi->has_rss || vi->has_rss_hash_report)
                virtnet_init_default_rss(vi);
 
+       enable_rx_mode_work(vi);
+
        /* serialize netdev register + virtio_device_ready() with ndo_open() */
        rtnl_lock();
 
@@ -4895,6 +4940,8 @@ static void virtnet_remove(struct virtio_device *vdev)
 
        /* Make sure no work handler is accessing the device. */
        flush_work(&vi->config_work);
+       disable_rx_mode_work(vi);
+       flush_work(&vi->rx_mode_work);
 
        unregister_netdev(vi->dev);
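The bulk of the virtio_net.c diff implements the 'convert rx mode setting to use workqueue' commit: ndo_set_rx_mode is called in a context that must not sleep, while the control-virtqueue commands now may (via the cond_resched() added above), so the real work moves into vi->rx_mode_work and virtnet_set_rx_mode() only schedules it. The rx_mode_work_enabled gate plus flush_work() in freeze/remove ensures no handler touches a dying device. A minimal sketch of that gate-and-flush discipline in plain C with pthreads, not kernel code (all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_enabled;   /* plays the role of vi->rx_mode_work_enabled */
static pthread_t worker;
static bool worker_live;

static void *rx_mode_work(void *arg)
{
        (void)arg;
        puts("applying rx mode (the driver sends ctrl-vq commands here)");
        return NULL;
}

/* ndo_set_rx_mode analogue: must not block, so it only schedules the
 * work, and only while the gate is open. */
static void set_rx_mode(void)
{
        pthread_mutex_lock(&gate_lock);
        if (work_enabled && !worker_live) {
                pthread_create(&worker, NULL, rx_mode_work, NULL);
                worker_live = true;
        }
        pthread_mutex_unlock(&gate_lock);
}

/* freeze/remove analogue: close the gate FIRST, then wait out any
 * in-flight work, mirroring disable_rx_mode_work() + flush_work(). */
static void disable_and_flush(void)
{
        pthread_mutex_lock(&gate_lock);
        work_enabled = false;
        pthread_mutex_unlock(&gate_lock);

        if (worker_live) {
                pthread_join(worker, NULL);
                worker_live = false;
        }
}

int main(void)
{
        work_enabled = true;   /* probe/restore: enable_rx_mode_work() */
        set_rx_mode();         /* scheduled and runs */
        disable_and_flush();   /* no handler can touch the device now */
        set_rx_mode();         /* silently dropped: gate is closed */
        return 0;
}

The ordering matters: the gate is closed before the flush, so nothing can re-arm the work between the two steps.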

drivers/vdpa/alibaba/eni_vdpa.c

Lines changed: 8 additions & 0 deletions
@@ -254,6 +254,13 @@ static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
        return vp_legacy_get_queue_size(ldev, 0);
 }
 
+static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+       struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+       return vp_legacy_get_queue_size(ldev, qid);
+}
+
 static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
                                 struct vdpa_vq_state *state)
 {
@@ -416,6 +423,7 @@ static const struct vdpa_config_ops eni_vdpa_ops = {
        .reset          = eni_vdpa_reset,
        .get_vq_num_max = eni_vdpa_get_vq_num_max,
        .get_vq_num_min = eni_vdpa_get_vq_num_min,
+       .get_vq_size    = eni_vdpa_get_vq_size,
        .get_vq_state   = eni_vdpa_get_vq_state,
        .set_vq_state   = eni_vdpa_set_vq_state,
        .set_vq_cb      = eni_vdpa_set_vq_cb,
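eni_vdpa simply forwards the new get_vq_size op to vp_legacy_get_queue_size() with the caller's qid, where get_vq_num_max/min hardcode queue 0. As a hedged sketch of why a per-queue query helps a consumer such as virtio_vdpa ('create vqs with the actual size'), here is illustrative C with toy names, not the vdpa core's real fallback logic:

#include <stdint.h>
#include <stdio.h>

/* A toy config-ops table: get_vq_size is optional, like a backend
 * that predates the new op. */
struct config_ops {
        uint16_t (*get_vq_num_max)(void);
        uint16_t (*get_vq_size)(uint16_t qid);   /* may be NULL */
};

/* Consumer side: prefer the exact per-queue size, fall back to the
 * device-wide maximum otherwise. */
static uint16_t vq_size_for(const struct config_ops *ops, uint16_t qid)
{
        if (ops->get_vq_size)
                return ops->get_vq_size(qid);
        return ops->get_vq_num_max();
}

/* toy backend: queue 0 holds 256 entries, the others 128 */
static uint16_t toy_max(void)          { return 256; }
static uint16_t toy_size(uint16_t qid) { return qid == 0 ? 256 : 128; }

int main(void)
{
        struct config_ops ops = {
                .get_vq_num_max = toy_max,
                .get_vq_size    = toy_size,
        };

        for (uint16_t q = 0; q < 3; q++)
                printf("vq %u -> %u entries\n", q, vq_size_for(&ops, q));
        return 0;
}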

drivers/vdpa/ifcvf/ifcvf_base.c

Lines changed: 5 additions & 6 deletions
@@ -69,20 +69,19 @@ static int ifcvf_read_config_range(struct pci_dev *dev,
        return 0;
 }
 
-static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
 {
        u16 queue_size;
 
+       if (qid >= hw->nr_vring)
+               return 0;
+
        vp_iowrite16(qid, &hw->common_cfg->queue_select);
        queue_size = vp_ioread16(&hw->common_cfg->queue_size);
 
        return queue_size;
 }
 
-/* This function returns the max allowed safe size for
- * all virtqueues. It is the minimal size that can be
- * suppprted by all virtqueues.
- */
 u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
 {
        u16 queue_size, max_size, qid;
@@ -94,7 +93,7 @@ u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
                if (!queue_size)
                        continue;
 
-               max_size = min(queue_size, max_size);
+               max_size = max(queue_size, max_size);
        }
 
        return max_size;
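This change does two things: ifcvf_get_vq_size() loses its static (so the vdpa ops can use it directly) and gains a qid bounds check, and ifcvf_get_max_vq_size() now folds with max() instead of min(), matching its name; the stale "minimal size" comment is dropped accordingly. A self-contained illustration of the behavioral difference (standalone C with made-up data, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Fold non-zero queue sizes with max(), as the fixed helper does. */
static uint16_t max_vq_size(const uint16_t *sizes, int n)
{
        uint16_t max_size = 0;

        for (int qid = 0; qid < n; qid++) {
                if (!sizes[qid])
                        continue;   /* zero = queue unavailable, skip */
                if (sizes[qid] > max_size)
                        max_size = sizes[qid];
        }
        return max_size;
}

int main(void)
{
        /* hypothetical device: three queues, one disabled */
        uint16_t sizes[] = { 256, 0, 512 };

        /* folding with min() over the non-zero entries would settle
         * on 256; the max() fold returns 512, as the name promises */
        printf("max vq size: %u\n", max_vq_size(sizes, 3));
        return 0;
}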
