@@ -329,7 +329,14 @@ __extend_pgfrags(struct sk_buff *skb_head, struct sk_buff *skb, int from, int n)
329
329
if (!tail_frags )
330
330
return 0 ;
331
331
332
- /* Shift @n_excess number of page fragments to new SKB. */
332
+ /*
333
+ * Move @n_excess number of page fragments to new SKB. We
334
+ * must move @n_excess fragments to next/new skb, except
335
+ * those which we are inserting (@n fragments) - so we
336
+ * must move last @n_excess fragments: not more than
337
+ * @tail_frags, and not more than @n_excess itself
338
+ * (maximum @n_excess fragments can be moved).
339
+ */
333
340
for (i = n_excess - 1 ; i >= max (n_excess - tail_frags , 0 ); -- i ) {
334
341
f = & si -> frags [MAX_SKB_FRAGS - n + i ];
335
342
skb_shinfo (nskb )-> frags [i ] = * f ;
@@ -339,9 +346,12 @@ __extend_pgfrags(struct sk_buff *skb_head, struct sk_buff *skb, int from, int n)
339
346
ss_skb_adjust_data_len (nskb , e_size );
340
347
}
341
348
/*
342
- * Make room for @n page fragments in the SKB. Considering maximum @n
343
- * value must be not greater than 2, the minimum @n_shift value must
344
- * be not less than -1.
349
+ * Make room for @n page fragments in current SKB. We must shift
350
+ * @tail_frags fragments inside current skb, except those which we
351
+ * moved to next/new skb (above); in case of too small @tail_frags
352
+ * and/or too big @n values, the value of @n_shift will be negative,
353
+ * but considering maximum @n value must be not greater than 2, the
354
+ * minimum @n_shift value must be not less than -1.
345
355
*/
346
356
n_shift = tail_frags - n_excess ;
347
357
BUG_ON (n_shift + 1 < 0 );
0 commit comments