
Commit 5d4acb6

goldsteinn authored and torvalds committed
x86/csum: Remove unnecessary odd handling
The special case for odd-aligned buffers is unnecessary and mostly just adds overhead. Aligned buffers are the expected case, and even for unaligned buffers the only case that was helped is a buffer that is 1 byte off from word alignment, which is about 1/7 of the cases. Overall it seems highly unlikely to be worth the extra branch.

The special case was left in the previous performance-improvement patch because I was erroneously comparing the exact output of `csum_partial(...)`; really we only need `csum_fold(csum_partial(...))` to match, so it is safe to remove.

All csum kunit tests pass.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Laight <david.laight@aculab.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 5eff55d commit 5d4acb6
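
Context for the `csum_fold()` reference in the message: only the folded 16-bit checksum has to stay stable, not the intermediate 32-bit value returned by `csum_partial()`. A minimal user-space sketch of such a fold (illustrative only; the names and types below are assumptions, not the kernel's exact implementation):

#include <stdint.h>

/*
 * Illustrative fold of a 32-bit partial checksum into the final 16-bit
 * ones'-complement value, in the spirit of csum_fold().
 */
static uint16_t fold_csum32(uint32_t sum)
{
	/* Add the two 16-bit halves; repeat once to absorb the carry. */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;	/* the final checksum is the complement */
}

Per the message above, both the old and the new finalize paths only have to agree after this folding step, which is what makes dropping the odd-alignment fixup safe.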

File tree: 1 file changed, +4 −32 lines


arch/x86/lib/csum-partial_64.c

Lines changed: 4 additions & 32 deletions
@@ -11,26 +11,9 @@
 #include <asm/checksum.h>
 #include <asm/word-at-a-time.h>
 
-static inline unsigned short from32to16(unsigned a)
+static inline __wsum csum_finalize_sum(u64 temp64)
 {
-	unsigned short b = a >> 16;
-	asm("addw %w2,%w0\n\t"
-	    "adcw $0,%w0\n"
-	    : "=r" (b)
-	    : "0" (b), "r" (a));
-	return b;
-}
-
-static inline __wsum csum_tail(u64 temp64, int odd)
-{
-	unsigned int result;
-
-	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
-	if (unlikely(odd)) {
-		result = from32to16(result);
-		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-	}
-	return (__force __wsum)result;
+	return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
 }
 
 /*
@@ -47,17 +30,6 @@ static inline __wsum csum_tail(u64 temp64, int odd)
 __wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	u64 temp64 = (__force u64)sum;
-	unsigned odd;
-
-	odd = 1 & (unsigned long) buff;
-	if (unlikely(odd)) {
-		if (unlikely(len == 0))
-			return sum;
-		temp64 = ror32((__force u32)sum, 8);
-		temp64 += (*(unsigned char *)buff << 8);
-		len--;
-		buff++;
-	}
 
 	/*
 	 * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
@@ -73,7 +45,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 		    "adcq $0,%[res]"
 		    : [res] "+r"(temp64)
 		    : [src] "r"(buff), "m"(*(const char(*)[40])buff));
-		return csum_tail(temp64, odd);
+		return csum_finalize_sum(temp64);
 	}
 	if (unlikely(len >= 64)) {
 		/*
@@ -143,7 +115,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 		: [res] "+r"(temp64)
 		: [trail] "r"(trail));
 	}
-	return csum_tail(temp64, odd);
+	return csum_finalize_sum(temp64);
 }
 EXPORT_SYMBOL(csum_partial);
 

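The new `csum_finalize_sum()` folds the 64-bit accumulator with a single rotate-and-add instead of `add32_with_carry()` followed by the odd-byte fixup. A small user-space sketch of why the two folds agree on the remaining even-and-odd-agnostic path (with `ror64()` and the old addl/adcl behavior modeled by local helpers; this is an assumption-laden illustration, not kernel code):

#include <assert.h>
#include <stdint.h>

/* Rotate right by n bits (0 < n < 64), standing in for the kernel's ror64(). */
static uint64_t ror64(uint64_t v, unsigned n)
{
	return (v >> n) | (v << (64 - n));
}

/* New finalize: one 64-bit add of the rotated value, keep the top half. */
static uint32_t finalize_rotate(uint64_t t)
{
	return (uint32_t)((t + ror64(t, 32)) >> 32);
}

/* Old finalize (even-aligned path): models add32_with_carry(t >> 32, t). */
static uint32_t finalize_adc(uint64_t t)
{
	uint64_t s = (t >> 32) + (uint32_t)t;	/* 32-bit halves added in 64 bits */
	return (uint32_t)(s + (s >> 32));	/* add the carry back in (end-around) */
}

int main(void)
{
	uint64_t samples[] = {
		0, 1, 0xffffffffull, 0x123456789abcdef0ull,
		0xffffffff00000001ull, 0xffffffffffffffffull,
	};
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		assert(finalize_rotate(samples[i]) == finalize_adc(samples[i]));
	return 0;
}

Adding the 32-bit-rotated copy leaves "low half + high half", plus the carry out of that 32-bit addition, in the upper 32 bits of the 64-bit sum, which is the same end-around-carry result the old addl/adcl sequence produced.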