@@ -283,11 +283,11 @@ __skb_insert_after(struct sk_buff *skb, struct sk_buff *nskb)
 static int
 __extend_pgfrags(struct sk_buff *skb_head, struct sk_buff *skb, int from, int n)
 {
-	int i, n_shift, n_excess = 0;
 	struct skb_shared_info *si = skb_shinfo(skb);
+	int i, n_shift, n_excess = 0, tail_frags = si->nr_frags - from;
 
 	BUG_ON((n <= 0) || (n > 2));
-	BUG_ON(from > si->nr_frags);
+	BUG_ON(tail_frags < 0);
 
 	/* No room for @n extra page fragments in the SKB. */
 	if (si->nr_frags + n > MAX_SKB_FRAGS) {
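Note on the check above: the new BUG_ON() is an equivalent rewrite of the old one, since for non-negative @from, from > si->nr_frags holds exactly when tail_frags = si->nr_frags - from is negative. A minimal user-space sketch of that equivalence (standalone illustration, not code from the patch):

#include <assert.h>

/* Old check (from > nr_frags) and new check (tail_frags < 0) agree. */
static void check(int nr_frags, int from)
{
	int tail_frags = nr_frags - from;

	assert((from > nr_frags) == (tail_frags < 0));
}

int main(void)
{
	for (int nr_frags = 0; nr_frags <= 17; ++nr_frags)
		for (int from = 0; from <= nr_frags + 1; ++from)
			check(nr_frags, from);
	return 0;
}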
@@ -326,23 +326,36 @@ __extend_pgfrags(struct sk_buff *skb_head, struct sk_buff *skb, int from, int n)
 		}
 
 		/* No fragments to shift. */
-		if (from == si->nr_frags)
+		if (!tail_frags)
 			return 0;
 
-		/* Shift @n_excess number of page fragments to new SKB. */
-		for (i = n_excess - 1; i >= 0; --i) {
+		/*
+		 * Move @n_excess page fragments to the next/new SKB. We
+		 * must move @n_excess fragments there, except the @n
+		 * fragments being inserted, so we move the last
+		 * @n_excess fragments: no more than @tail_frags, and
+		 * no more than @n_excess itself (at most @n_excess
+		 * fragments can be moved).
+		 */
+		for (i = n_excess - 1; i >= max(n_excess - tail_frags, 0); --i) {
			f = &si->frags[MAX_SKB_FRAGS - n + i];
			skb_shinfo(nskb)->frags[i] = *f;
			e_size += skb_frag_size(f);
		}
		ss_skb_adjust_data_len(skb, -e_size);
		ss_skb_adjust_data_len(nskb, e_size);
 	}
-
-	/* Make room for @n page fragments in the SKB. */
-	n_shift = si->nr_frags - from - n_excess;
-	BUG_ON(n_shift < 0);
-	if (n_shift)
+	/*
+	 * Make room for @n page fragments in the current SKB. We must
+	 * shift @tail_frags fragments inside the current skb, except
+	 * those moved to the next/new skb above; with too few
+	 * @tail_frags and/or too big @n values, @n_shift becomes
+	 * negative, but since the maximum @n value is not greater
+	 * than 2, the minimum @n_shift value is not less than -1.
+	 */
+	n_shift = tail_frags - n_excess;
+	BUG_ON(n_shift + 1 < 0);
+	if (n_shift > 0)
 		memmove(&si->frags[from + n],
 			&si->frags[from], n_shift * sizeof(skb_frag_t));
 	si->nr_frags += n - n_excess;
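To see how the new loop bound and the relaxed BUG_ON() fit together, the sketch below models the index arithmetic in user space. It assumes @n_excess is the overflow count computed in the elided part of the function (si->nr_frags + n - MAX_SKB_FRAGS when positive) and MAX_SKB_FRAGS == 17 as in mainline Linux with 4K pages; the names mirror the patch, but this is an illustration, not code from it:

#include <stdio.h>

#define MAX_SKB_FRAGS	17	/* assumed mainline value for 4K pages */

static int max(int a, int b) { return a > b ? a : b; }

static void model(int nr_frags, int from, int n)
{
	/* Assumed overflow count from the elided block. */
	int n_excess = max(nr_frags + n - MAX_SKB_FRAGS, 0);
	int tail_frags = nr_frags - from;
	/* The move loop runs i = n_excess - 1 down to
	 * max(n_excess - tail_frags, 0), inclusive. */
	int moved = n_excess - max(n_excess - tail_frags, 0);
	int n_shift = tail_frags - n_excess;

	printf("nr_frags=%2d from=%2d n=%d: moved=%d memmoved=%d n_shift=%d\n",
	       nr_frags, from, n, moved, max(n_shift, 0), n_shift);
}

int main(void)
{
	model(16, 10, 2);	/* enough tail room: 1 moved, 5 shifted */
	model(17, 16, 2);	/* tail_frags < n_excess: the old code hit
				 * BUG_ON(n_shift < 0) here; now 1 fragment
				 * moves and n_shift == -1 is tolerated */
	model(17, 17, 1);	/* tail_frags == 0: the early return fires,
				 * and the loop bound gives 0 iterations */
	return 0;
}

The second case is exactly what the stricter old arithmetic rejected: @from lands inside the region whose fragments overflow, so fewer than @n_excess tail fragments exist to move, and n_shift legitimately becomes -1 (one of the inserted fragments itself spills into the next/new skb).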
@@ -570,9 +583,9 @@ __split_pgfrag_add(struct sk_buff *skb_head, struct sk_buff *skb, int i, int off
 	skb_frag_size_sub(frag, tail_len);
 
 	/* Make the fragment with the tail part. */
-	i = (i + 2) % MAX_SKB_FRAGS;
-	__skb_fill_page_desc(skb_dst, i, skb_frag_page(frag),
-			     frag->page_offset + off, tail_len);
+	__skb_fill_page_desc(skb_dst, (i + 2) % MAX_SKB_FRAGS,
+			     skb_frag_page(frag), frag->page_offset + off,
+			     tail_len);
 	__skb_frag_ref(frag);
 
 	/* Adjust SKB data lengths. */
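The change above computes the destination slot inline instead of overwriting @i, so @i keeps naming the original fragment for the code that follows. A small sketch of the wrap-around, again assuming MAX_SKB_FRAGS == 17 as in mainline Linux (the wrapped slots presumably belong to the next/new skb chosen as @skb_dst):

#include <stdio.h>

#define MAX_SKB_FRAGS	17	/* assumed mainline value for 4K pages */

int main(void)
{
	/* The tail descriptor lands two slots after fragment @i,
	 * wrapping past the end of the frags[] array. */
	for (int i = 14; i < MAX_SKB_FRAGS; ++i)
		printf("frag %2d -> tail slot %2d\n",
		       i, (i + 2) % MAX_SKB_FRAGS);
	return 0;
}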