Skip to content

Commit a476aae

Browse files
committed
x86/csum: clean up `csum_partial' further
Commit 688eb81 ("x86/csum: Improve performance of `csum_partial`") ended up improving the code generation for the IP csum calculations, and in particular special-casing the 40-byte case that is a hot case for IPv6 headers. It then had _another_ special case for the 64-byte unrolled loop, which did two chains of 32-byte blocks, which allows modern CPUs to improve performance by doing the chains in parallel thanks to renaming the carry flag. This just unifies the special cases and combines them into one single helper for the 40-byte csum case, and replaces the 64-byte case with an 80-byte case that just does that single helper twice. It avoids having all these different versions of inline assembly, and actually improved performance further in my tests. There was never anything magical about the 64-byte unrolled case, even though it happens to be a common size (and typically is the cacheline size). Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
1 parent 5d4acb6 commit a476aae

File tree

1 file changed

+37
-44
lines changed

1 file changed

+37
-44
lines changed

arch/x86/lib/csum-partial_64.c

Lines changed: 37 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,20 @@ static inline __wsum csum_finalize_sum(u64 temp64)
1616
return (__force __wsum)((temp64 + ror64(temp64, 32)) >> 32);
1717
}
1818

19+
static inline unsigned long update_csum_40b(unsigned long sum, const unsigned long m[5])
20+
{
21+
asm("addq %1,%0\n\t"
22+
"adcq %2,%0\n\t"
23+
"adcq %3,%0\n\t"
24+
"adcq %4,%0\n\t"
25+
"adcq %5,%0\n\t"
26+
"adcq $0,%0"
27+
:"+r" (sum)
28+
:"m" (m[0]), "m" (m[1]), "m" (m[2]),
29+
"m" (m[3]), "m" (m[4]));
30+
return sum;
31+
}
32+
1933
/*
2034
* Do a checksum on an arbitrary memory area.
2135
* Returns a 32bit checksum.
@@ -31,52 +45,31 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
3145
{
3246
u64 temp64 = (__force u64)sum;
3347

34-
/*
35-
* len == 40 is the hot case due to IPv6 headers, but annotating it likely()
36-
* has noticeable negative affect on codegen for all other cases with
37-
* minimal performance benefit here.
38-
*/
39-
if (len == 40) {
40-
asm("addq 0*8(%[src]),%[res]\n\t"
41-
"adcq 1*8(%[src]),%[res]\n\t"
42-
"adcq 2*8(%[src]),%[res]\n\t"
43-
"adcq 3*8(%[src]),%[res]\n\t"
44-
"adcq 4*8(%[src]),%[res]\n\t"
45-
"adcq $0,%[res]"
46-
: [res] "+r"(temp64)
47-
: [src] "r"(buff), "m"(*(const char(*)[40])buff));
48-
return csum_finalize_sum(temp64);
48+
/* Do two 40-byte chunks in parallel to get better ILP */
49+
if (likely(len >= 80)) {
50+
u64 temp64_2 = 0;
51+
do {
52+
temp64 = update_csum_40b(temp64, buff);
53+
temp64_2 = update_csum_40b(temp64_2, buff + 40);
54+
buff += 80;
55+
len -= 80;
56+
} while (len >= 80);
57+
58+
asm("addq %1,%0\n\t"
59+
"adcq $0,%0"
60+
:"+r" (temp64): "r" (temp64_2));
4961
}
50-
if (unlikely(len >= 64)) {
51-
/*
52-
* Extra accumulators for better ILP in the loop.
53-
*/
54-
u64 tmp_accum, tmp_carries;
5562

56-
asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
57-
"xorl %k[tmp_carries],%k[tmp_carries]\n\t"
58-
"subl $64, %[len]\n\t"
59-
"1:\n\t"
60-
"addq 0*8(%[src]),%[res]\n\t"
61-
"adcq 1*8(%[src]),%[res]\n\t"
62-
"adcq 2*8(%[src]),%[res]\n\t"
63-
"adcq 3*8(%[src]),%[res]\n\t"
64-
"adcl $0,%k[tmp_carries]\n\t"
65-
"addq 4*8(%[src]),%[tmp_accum]\n\t"
66-
"adcq 5*8(%[src]),%[tmp_accum]\n\t"
67-
"adcq 6*8(%[src]),%[tmp_accum]\n\t"
68-
"adcq 7*8(%[src]),%[tmp_accum]\n\t"
69-
"adcl $0,%k[tmp_carries]\n\t"
70-
"addq $64, %[src]\n\t"
71-
"subl $64, %[len]\n\t"
72-
"jge 1b\n\t"
73-
"addq %[tmp_accum],%[res]\n\t"
74-
"adcq %[tmp_carries],%[res]\n\t"
75-
"adcq $0,%[res]"
76-
: [tmp_accum] "=&r"(tmp_accum),
77-
[tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
78-
[len] "+r"(len), [src] "+r"(buff)
79-
: "m"(*(const char *)buff));
63+
/*
64+
* len == 40 is the hot case due to IPv6 headers, so return
65+
* early for that exact case without checking the tail bytes.
66+
*/
67+
if (len >= 40) {
68+
temp64 = update_csum_40b(temp64, buff);
69+
len -= 40;
70+
if (!len)
71+
return csum_finalize_sum(temp64);
72+
buff += 40;
8073
}
8174

8275
if (len & 32) {

0 commit comments

Comments
 (0)