From f4a35328936859dea25c35bfbd2f100d59d73853 Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Sun, 16 Feb 2025 00:44:05 +0000
Subject: [PATCH 1/3] pp_reverse - chunk-at-a-time string reversal

The performance characteristics of string reversal in blead are very
variable, depending upon the capabilities of the C compiler. Some
compilers are able to vectorize some cases for better performance.

This commit introduces explicit reversal and swapping of whole
registers at a time, which all builds seem to benefit from. The
`_swab_xx_` macros for doing this already exist in perl.h; using them
for this purpose was inspired by
https://dev.to/wunk/fast-array-reversal-with-simd-j3p

The bit shifting done by these macros should be portable and
reasonably performant if not optimised further, but it is likely that
they will be optimised to bswap, rev, or movbe instructions.

Some performance comparisons:

1. Large string reversal, with different source & destination buffers

       my $x = "X"x(1024*1000*10);
       my $y;
       for (0..1_000) { $y = reverse $x }

   gcc blead:
             2,388.30 msec task-clock        #    0.993 CPUs utilized
       10,574,195,388      cycles            #    4.427 GHz
       61,520,672,268      instructions      #    5.82  insn per cycle
       10,255,049,869      branches          #    4.294 G/sec

   clang blead:
               688.37 msec task-clock        #    0.946 CPUs utilized
        3,161,754,439      cycles            #    4.593 GHz
        8,986,420,860      instructions      #    2.84  insn per cycle
          324,734,391      branches          #  471.745 M/sec

   gcc patched:
               408.39 msec task-clock        #    0.936 CPUs utilized
        1,617,273,653      cycles            #    3.960 GHz
        6,422,991,675      instructions      #    3.97  insn per cycle
          644,856,283      branches          #    1.579 G/sec

   clang patched:
               397.61 msec task-clock        #    0.924 CPUs utilized
        1,655,838,316      cycles            #    4.165 GHz
        5,782,487,237      instructions      #    3.49  insn per cycle
          324,586,437      branches          #  816.350 M/sec

2. Large string reversal, but reversing the buffer in-place

       my $x = "X"x(1024*1000*10);
       my $y;
       for (0..1_000) { $y = reverse "foo",$x }

   gcc blead:
             6,038.06 msec task-clock        #    0.996 CPUs utilized
       27,109,273,840      cycles            #    4.490 GHz
       41,987,097,139      instructions      #    1.55  insn per cycle
        5,211,350,347      branches          #  863.083 M/sec

   clang blead:
             5,815.86 msec task-clock        #    0.995 CPUs utilized
       26,962,768,616      cycles            #    4.636 GHz
       47,111,208,664      instructions      #    1.75  insn per cycle
        5,211,117,921      branches          #  896.018 M/sec

   gcc patched:
             1,003.49 msec task-clock        #    0.999 CPUs utilized
        4,298,242,624      cycles            #    4.283 GHz
        7,387,822,303      instructions      #    1.72  insn per cycle
          725,892,855      branches          #  723.367 M/sec

   clang patched:
               970.78 msec task-clock        #    0.973 CPUs utilized
        4,436,489,695      cycles            #    4.570 GHz
        8,028,374,567      instructions      #    1.81  insn per cycle
          725,867,979      branches          #  747.713 M/sec

3. Short string reversal, different source & destination
   (checking performance on smaller string reversals - note: this one
   is very variable due to noise)

       my $x = "1234567";
       my $y;
       for (0..10_000_000) { $y = reverse $x }

   gcc blead:
               401.20 msec task-clock        #    0.916 CPUs utilized
        1,672,263,966      cycles            #    4.168 GHz
        5,564,078,603      instructions      #    3.33  insn per cycle
        1,250,983,219      branches          #    3.118 G/sec

   clang blead:
               380.58 msec task-clock        #    0.998 CPUs utilized
        1,615,634,265      cycles            #    4.245 GHz
        5,583,854,366      instructions      #    3.46  insn per cycle
        1,300,935,443      branches          #    3.418 G/sec

   gcc patched:
               381.62 msec task-clock        #    0.999 CPUs utilized
        1,566,807,988      cycles            #    4.106 GHz
        5,474,069,670      instructions      #    3.49  insn per cycle
        1,240,983,221      branches          #    3.252 G/sec

   clang patched:
               346.21 msec task-clock        #    0.999 CPUs utilized
        1,600,780,787      cycles            #    4.624 GHz
        5,493,773,623      instructions      #    3.43  insn per cycle
        1,270,915,076      branches          #    3.671 G/sec
---
 pp.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 91 insertions(+), 10 deletions(-)
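As a plain-C illustration of the technique the hunks below implement: take a
whole register's worth of bytes from each end of the string, byte-reverse each
chunk, and store each chunk at the opposite end, closing in on the middle.
What follows is a minimal standalone sketch, not the patch code itself:
swab64 and reverse_copy are invented names standing in for perl.h's _swab_64_
and the pp_reverse changes, and it uses memcpy for the loads and stores so
the sketch stays alignment- and aliasing-safe.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Portable shift-based byteswap in the spirit of the _swab_xx_
     * macros; compilers typically lower this to a single
     * bswap/rev/movbe instruction. */
    static uint64_t swab64(uint64_t x) {
        x = ((x & 0x00ff00ff00ff00ffULL) << 8)  | ((x >> 8)  & 0x00ff00ff00ff00ffULL);
        x = ((x & 0x0000ffff0000ffffULL) << 16) | ((x >> 16) & 0x0000ffff0000ffffULL);
        return (x << 32) | (x >> 32);
    }

    /* Reverse src[0..len-1] into dst (non-overlapping buffers):
     * byte-reverse an 8-byte chunk from each end and store it at
     * the opposite end. The patch additionally drains 4- and 2-byte
     * chunks before falling back to single bytes. */
    static void reverse_copy(char *dst, const char *src, size_t len) {
        size_t i = 0, j = len;
        while (j - i >= 16) {
            uint64_t back, front;
            memcpy(&back,  src + j - 8, 8);
            memcpy(&front, src + i,     8);
            back  = swab64(back);
            front = swab64(front);
            memcpy(dst + i,     &back,  8);
            memcpy(dst + j - 8, &front, 8);
            i += 8;
            j -= 8;
        }
        while (i < j) {                  /* remaining middle bytes */
            dst[i] = src[j - 1];
            dst[j - 1] = src[i];
            i++; j--;
        }
    }

    int main(void) {
        const char *s = "chunk-at-a-time reversal";
        char out[32] = {0};
        reverse_copy(out, s, strlen(s));
        puts(out);                       /* lasrever emit-a-ta-knuhc */
        return 0;
    }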
diff --git a/pp.c b/pp.c
index 5c39bbf540f2..9429a27a5eb6 100644
--- a/pp.c
+++ b/pp.c
@@ -6529,7 +6529,6 @@ PP(pp_unshift)
     return NORMAL;
 }
 
-
 PP_wrapped(pp_reverse, 0, 1)
 {
     dSP; dMARK;
@@ -6679,10 +6678,50 @@ PP_wrapped(pp_reverse, 0, 1)
             }
         }
     } else {
+        STRLEN i = 0;
+        STRLEN j = len;
         char * outp= SvPVX(TARG);
-        const char *p = src + len;
-        while (p != src)
-            *outp++ = *--p;
+        /* Take a chunk of bytes from the front and from the
+         * back, reverse the bytes in each and swap the
+         * chunks over. This should have generally good
+         * performance but is also likely to be optimised
+         * into bswap instructions by the compiler.
+         */
+#ifdef HAS_QUAD
+        while (j - i >= 16) {
+            *(U64 *)(outp + i) = _swab_64_( *(U64 *)(src + j - 8) );
+            *(U64 *)(outp + j - 8) = _swab_64_( *(U64 *)(src + i) );
+            i += 8;
+            j -= 8;
+        }
+
+        if (j - i >= 8) {
+            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
+            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            i += 4;
+            j -= 4;
+        }
+#else
+        while (j - i >= 8) {
+            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
+            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            i += 4;
+            j -= 4;
+        }
+#endif
+        if (j - i >= 4) {
+            *(U16 *)(outp + i) = _swab_16_( *(U16 *)(src + j - 2) );
+            *(U16 *)(outp + j - 2) = _swab_16_( *(U16 *)(src + i) );
+            i += 2;
+            j -= 2;
+        }
+
+        /* Swap any remaining bytes one by one. */
+        while (i < j) {
+            outp[i] = src[j - 1];
+            outp[j - 1] = src[i];
+            i++; j--;
+        }
     }
     RETURN;
 }
@@ -6695,8 +6734,8 @@ PP_wrapped(pp_reverse, 0, 1)
 
     if (len > 1) {
         /* The traditional way, operate on the current byte buffer */
-        char *down;
         if (DO_UTF8(TARG)) {    /* first reverse each character */
+            char *down;
             U8* s = (U8*)SvPVX(TARG);
             const U8* send = (U8*)(s + len);
             while (s < send) {
@@ -6720,11 +6759,53 @@ PP_wrapped(pp_reverse, 0, 1)
             }
             up = SvPVX(TARG);
         }
-        down = SvPVX(TARG) + len - 1;
-        while (down > up) {
-            const char tmp = *up;
-            *up++ = *down;
-            *down-- = tmp;
+        STRLEN i = 0;
+        STRLEN j = len;
+        /* Reverse the buffer in place, in chunks where possible */
+#ifdef HAS_QUAD
+        while (j - i >= 16) {
+            U64 lchunk = _swab_64_( *(U64 *)(up + j - 8) );
+            U64 rchunk = _swab_64_( *(U64 *)(up + i) );
+            *(U64 *)(up + i) = lchunk;
+            *(U64 *)(up + j - 8) = rchunk;
+            i += 8;
+            j -= 8;
+        }
+
+        if (j - i >= 8) {
+            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
+            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
+            *(U32 *)(up + i) = lchunk;
+            *(U32 *)(up + j - 4) = rchunk;
+            i += 4;
+            j -= 4;
+        }
+#else
+        while (j - i >= 8) {
+            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
+            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
+            *(U32 *)(up + i) = lchunk;
+            *(U32 *)(up + j - 4) = rchunk;
+            i += 4;
+            j -= 4;
+        }
+#endif
+        if (j - i >= 4) {
+            U16 lchunk = _swab_16_( *(U16 *)(up + j - 2) );
+            U16 rchunk = _swab_16_( *(U16 *)(up + i) );
+            *(U16 *)(up + i) = lchunk;
+            *(U16 *)(up + j - 2) = rchunk;
+            i += 2;
+            j -= 2;
+        }
+
+        /* Finally, swap any remaining bytes one-by-one. */
+        while (i < j) {
+            unsigned char tmp = up[i];
+            up[i] = up[j - 1];
+            up[j - 1] = tmp;
+            i++;
+            j--;
+        }
     }
     (void)SvPOK_only_UTF8(TARG);
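A quick way to check that the chunked logic above is byte-exact is to compare
it against the naive loop it replaces, over every length small enough to
exercise all of the 16/8/4/2/1-byte tail combinations. A sketch of such a
harness, reusing the reverse_copy and swab64 functions from the sketch above
(the bound of 64 is arbitrary):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    /* reverse_copy() and swab64() as sketched earlier */

    int main(void) {
        char src[64], out[64], ref[64];
        for (size_t len = 0; len <= sizeof src; len++) {
            for (size_t k = 0; k < len; k++)
                src[k] = (char)('!' + k);      /* distinct, printable bytes */
            reverse_copy(out, src, len);
            for (size_t k = 0; k < len; k++)
                ref[k] = src[len - 1 - k];     /* naive reference reversal */
            assert(memcmp(out, ref, len) == 0);
        }
        puts("all lengths ok");
        return 0;
    }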
From 8a7804bcb9cea5c8d959bf8d5ea1e2cae4f3c84f Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Mon, 16 Jun 2025 10:32:13 +0000
Subject: [PATCH 2/3] memcpy and intermediates

---
 pp.c | 78 +++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 24 deletions(-)
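The subject is terse, but the change is easy to motivate: dereferencing an
arbitrary char * position in the buffer through a cast to U64 *, U32 *, or
U16 * is undefined behaviour when the address is misaligned, and violates
strict aliasing even when it is not. Copying each chunk through memcpy into a
correctly typed intermediate is the well-defined idiom, and because the size
is a compile-time constant, gcc and clang fold each call into a single
(possibly unaligned) load or store rather than a library call. A minimal
sketch of the idiom (load64 and store64 are illustrative names, not from the
patch):

    #include <stdint.h>
    #include <string.h>

    /* Well-defined for any alignment and any effective type at p;
     * with a constant size the optimiser emits one 8-byte move. */
    static uint64_t load64(const void *p) {
        uint64_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }

    static void store64(void *p, uint64_t v) {
        memcpy(p, &v, sizeof v);
    }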
diff --git a/pp.c b/pp.c
index 9429a27a5eb6..24cb19d5135d 100644
--- a/pp.c
+++ b/pp.c
@@ -6680,6 +6680,8 @@ PP_wrapped(pp_reverse, 0, 1)
     } else {
         STRLEN i = 0;
         STRLEN j = len;
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
         char * outp= SvPVX(TARG);
         /* Take a chunk of bytes from the front and from the
          * back, reverse the bytes in each and swap the
          * chunks over. This should have generally good
          * performance but is also likely to be optimised
          * into bswap instructions by the compiler.
          */
 #ifdef HAS_QUAD
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            *(U64 *)(outp + i) = _swab_64_( *(U64 *)(src + j - 8) );
-            *(U64 *)(outp + j - 8) = _swab_64_( *(U64 *)(src + i) );
+            memcpy(&u64_1, src + j - 8, 8);
+            memcpy(&u64_2, src + i, 8);
+            u64_1 = _swab_64_(u64_1);
+            u64_2 = _swab_64_(u64_2);
+            memcpy(outp + j - 8, &u64_2, 8);
+            memcpy(outp + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
-            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            memcpy(&u32_1, src + j - 4, 4);
+            memcpy(&u32_2, src + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(outp + j - 4, &u32_2, 4);
+            memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
-            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            memcpy(&u32_1, src + j - 4, 4);
+            memcpy(&u32_2, src + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(outp + j - 4, &u32_2, 4);
+            memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            *(U16 *)(outp + i) = _swab_16_( *(U16 *)(src + j - 2) );
-            *(U16 *)(outp + j - 2) = _swab_16_( *(U16 *)(src + i) );
+            memcpy(&u16_1, src + j - 2, 2);
+            memcpy(&u16_2, src + i, 2);
+            u16_1 = _swab_16_(u16_1);
+            u16_2 = _swab_16_(u16_2);
+            memcpy(outp + j - 2, &u16_2, 2);
+            memcpy(outp + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }
@@ -6761,40 +6780,51 @@ PP_wrapped(pp_reverse, 0, 1)
         }
         STRLEN i = 0;
         STRLEN j = len;
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
         /* Reverse the buffer in place, in chunks where possible */
 #ifdef HAS_QUAD
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            U64 lchunk = _swab_64_( *(U64 *)(up + j - 8) );
-            U64 rchunk = _swab_64_( *(U64 *)(up + i) );
-            *(U64 *)(up + i) = lchunk;
-            *(U64 *)(up + j - 8) = rchunk;
+            memcpy(&u64_1, up + j - 8, 8);
+            memcpy(&u64_2, up + i, 8);
+            u64_1 = _swab_64_(u64_1);
+            u64_2 = _swab_64_(u64_2);
+            memcpy(up + j - 8, &u64_2, 8);
+            memcpy(up + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
-            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
-            *(U32 *)(up + i) = lchunk;
-            *(U32 *)(up + j - 4) = rchunk;
+            memcpy(&u32_1, up + j - 4, 4);
+            memcpy(&u32_2, up + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(up + j - 4, &u32_2, 4);
+            memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
-            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
-            *(U32 *)(up + i) = lchunk;
-            *(U32 *)(up + j - 4) = rchunk;
+            memcpy(&u32_1, up + j - 4, 4);
+            memcpy(&u32_2, up + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(up + j - 4, &u32_2, 4);
+            memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            U16 lchunk = _swab_16_( *(U16 *)(up + j - 2) );
-            U16 rchunk = _swab_16_( *(U16 *)(up + i) );
-            *(U16 *)(up + i) = lchunk;
-            *(U16 *)(up + j - 2) = rchunk;
+            memcpy(&u16_1, up + j - 2, 2);
+            memcpy(&u16_2, up + i, 2);
+            u16_1 = _swab_16_(u16_1);
+            u16_2 = _swab_16_(u16_2);
+            memcpy(up + j - 2, &u16_2, 2);
+            memcpy(up + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }

From 67d79fed92f352ed9d807b90b413c4f9c94d085f Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Mon, 16 Jun 2025 11:46:43 +0000
Subject: [PATCH 3/3] squashme

---
 pp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/pp.c b/pp.c
index 24cb19d5135d..4d7a6221e78d 100644
--- a/pp.c
+++ b/pp.c
@@ -6680,8 +6680,8 @@ PP_wrapped(pp_reverse, 0, 1)
     } else {
         STRLEN i = 0;
         STRLEN j = len;
-        U32 u32_1, u32_2;
-        U16 u16_1, u16_2;
+        uint32_t u32_1, u32_2;
+        uint16_t u16_1, u16_2;
         char * outp= SvPVX(TARG);
         /* Take a chunk of bytes from the front and from the
          * back, reverse the bytes in each and swap the
          * chunks over. This should have generally good
          * performance but is also likely to be optimised
          * into bswap instructions by the compiler.
          */
@@ -6690,7 +6690,7 @@ PP_wrapped(pp_reverse, 0, 1)
          * into bswap instructions by the compiler.
          */
 #ifdef HAS_QUAD
-        U64 u64_1, u64_2;
+        uint64_t u64_1, u64_2;
         while (j - i >= 16) {
             memcpy(&u64_1, src + j - 8, 8);
             memcpy(&u64_2, src + i, 8);
@@ -6780,11 +6780,11 @@ PP_wrapped(pp_reverse, 0, 1)
         }
         STRLEN i = 0;
         STRLEN j = len;
-        U32 u32_1, u32_2;
-        U16 u16_1, u16_2;
+        uint32_t u32_1, u32_2;
+        uint16_t u16_1, u16_2;
         /* Reverse the buffer in place, in chunks where possible */
 #ifdef HAS_QUAD
-        U64 u64_1, u64_2;
+        uint64_t u64_1, u64_2;
         while (j - i >= 16) {
             memcpy(&u64_1, up + j - 8, 8);
             memcpy(&u64_2, up + i, 8);