Skip to content

Commit c7ec4f2

Browse files
jbeulich authored and jgross1 committed
xen-netback: don't produce zero-size SKB frags
While frontends may submit zero-size requests (wasting a precious slot), core networking code as of at least 3ece782 ("sock: skb_copy_ubufs support for compound pages") can't deal with SKBs when they have all zero-size fragments. Respond to empty requests right when populating fragments; all further processing is fragment based and hence won't encounter these empty requests anymore. In a way this should have been that way from the beginning: When no data is to be transferred for a particular request, there's not even a point in validating the respective grant ref. That's no different from e.g. passing NULL into memcpy() when at the same time the size is 0. This is XSA-448 / CVE-2023-46838. Cc: stable@vger.kernel.org Signed-off-by: Jan Beulich <jbeulich@suse.com> Reviewed-by: Juergen Gross <jgross@suse.com> Reviewed-by: Paul Durrant <paul@xen.org>
1 parent 0dd3ee3 commit c7ec4f2

File tree

1 file changed

+38
-6
lines changed

1 file changed

+38
-6
lines changed

drivers/net/xen-netback/netback.c

Lines changed: 38 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
463463
}
464464

465465
for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
466-
shinfo->nr_frags++, gop++, nr_slots--) {
466+
nr_slots--) {
467+
if (unlikely(!txp->size)) {
468+
unsigned long flags;
469+
470+
spin_lock_irqsave(&queue->response_lock, flags);
471+
make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
472+
push_tx_responses(queue);
473+
spin_unlock_irqrestore(&queue->response_lock, flags);
474+
++txp;
475+
continue;
476+
}
477+
467478
index = pending_index(queue->pending_cons++);
468479
pending_idx = queue->pending_ring[index];
469480
xenvif_tx_create_map_op(queue, pending_idx, txp,
470481
txp == first ? extra_count : 0, gop);
471482
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
483+
++shinfo->nr_frags;
484+
++gop;
472485

473486
if (txp == first)
474487
txp = txfrags;
@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
481494
shinfo = skb_shinfo(nskb);
482495
frags = shinfo->frags;
483496

484-
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
485-
shinfo->nr_frags++, txp++, gop++) {
497+
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
498+
if (unlikely(!txp->size)) {
499+
unsigned long flags;
500+
501+
spin_lock_irqsave(&queue->response_lock, flags);
502+
make_tx_response(queue, txp, 0,
503+
XEN_NETIF_RSP_OKAY);
504+
push_tx_responses(queue);
505+
spin_unlock_irqrestore(&queue->response_lock,
506+
flags);
507+
continue;
508+
}
509+
486510
index = pending_index(queue->pending_cons++);
487511
pending_idx = queue->pending_ring[index];
488512
xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
489513
gop);
490514
frag_set_pending_idx(&frags[shinfo->nr_frags],
491515
pending_idx);
516+
++shinfo->nr_frags;
517+
++gop;
492518
}
493519

494-
skb_shinfo(skb)->frag_list = nskb;
495-
} else if (nskb) {
520+
if (shinfo->nr_frags) {
521+
skb_shinfo(skb)->frag_list = nskb;
522+
nskb = NULL;
523+
}
524+
}
525+
526+
if (nskb) {
496527
/* A frag_list skb was allocated but it is no longer needed
497-
* because enough slots were converted to copy ops above.
528+
* because enough slots were converted to copy ops above or some
529+
* were empty.
498530
*/
499531
kfree_skb(nskb);
500532
}

0 commit comments

Comments (0)