Skip to content

Commit f4a3532

Browse files
committed
pp_reverse - chunk-at-a-time string reversal
The performance characteristics of string reversal in blead is very variable depending upon the capabilities of the C compiler. Some compilers are able to vectorize some cases for better performance. This commit introduces explicit reversal and swapping of whole registers at a time, which all builds seem to be able to benefit from. The `_swab_xx_` macros for doing this already exist in perl.h, using them for this purpose was inspired by https://dev.to/wunk/fast-array-reversal-with-simd-j3p The bit shifting done by these macros should be portable and reasonably performant if not optimised further, but it is likely that they will be optimised to bswap, rev, movbe instructions. Some performance comparisons: 1. Large string reversal, with different source & destination buffers my $x = "X"x(1024*1000*10); my $y; for (0..1_000) { $y = reverse $x } gcc blead: 2,388.30 msec task-clock # 0.993 CPUs utilized 10,574,195,388 cycles # 4.427 GHz 61,520,672,268 instructions # 5.82 insn per cycle 10,255,049,869 branches # 4.294 G/sec clang blead: 688.37 msec task-clock # 0.946 CPUs utilized 3,161,754,439 cycles # 4.593 GHz 8,986,420,860 instructions # 2.84 insn per cycle 324,734,391 branches # 471.745 M/sec gcc patched: 408.39 msec task-clock # 0.936 CPUs utilized 1,617,273,653 cycles # 3.960 GHz 6,422,991,675 instructions # 3.97 insn per cycle 644,856,283 branches # 1.579 G/sec clang patched: 397.61 msec task-clock # 0.924 CPUs utilized 1,655,838,316 cycles # 4.165 GHz 5,782,487,237 instructions # 3.49 insn per cycle 324,586,437 branches # 816.350 M/sec 2. 
Large string reversal, but reversing the buffer in-place my $x = "X"x(1024*1000*10); my $y; for (0..1_000) { $y = reverse "foo",$x } gcc blead: 6,038.06 msec task-clock # 0.996 CPUs utilized 27,109,273,840 cycles # 4.490 GHz 41,987,097,139 instructions # 1.55 insn per cycle 5,211,350,347 branches # 863.083 M/sec clang blead: 5,815.86 msec task-clock # 0.995 CPUs utilized 26,962,768,616 cycles # 4.636 GHz 47,111,208,664 instructions # 1.75 insn per cycle 5,211,117,921 branches # 896.018 M/sec gcc patched: 1,003.49 msec task-clock # 0.999 CPUs utilized 4,298,242,624 cycles # 4.283 GHz 7,387,822,303 instructions # 1.72 insn per cycle 725,892,855 branches # 723.367 M/sec clang patched: 970.78 msec task-clock # 0.973 CPUs utilized 4,436,489,695 cycles # 4.570 GHz 8,028,374,567 instructions # 1.81 insn per cycle 725,867,979 branches # 747.713 M/sec 3. Short string reversal, different source & destination (checking performance on smaller string reversals - note: this one's very variable due to noise) my $x = "1234567"; my $y; for (0..10_000_000) { $y = reverse $x } gcc blead: 401.20 msec task-clock # 0.916 CPUs utilized 1,672,263,966 cycles # 4.168 GHz 5,564,078,603 instructions # 3.33 insn per cycle 1,250,983,219 branches # 3.118 G/sec clang blead: 380.58 msec task-clock # 0.998 CPUs utilized 1,615,634,265 cycles # 4.245 GHz 5,583,854,366 instructions # 3.46 insn per cycle 1,300,935,443 branches # 3.418 G/sec gcc patched: 381.62 msec task-clock # 0.999 CPUs utilized 1,566,807,988 cycles # 4.106 GHz 5,474,069,670 instructions # 3.49 insn per cycle 1,240,983,221 branches # 3.252 G/sec clang patched: 346.21 msec task-clock # 0.999 CPUs utilized 1,600,780,787 cycles # 4.624 GHz 5,493,773,623 instructions # 3.43 insn per cycle 1,270,915,076 branches # 3.671 G/sec
1 parent 6a4f62c commit f4a3532

File tree

1 file changed

+91
-10
lines changed

1 file changed

+91
-10
lines changed

pp.c

Lines changed: 91 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -6529,7 +6529,6 @@ PP(pp_unshift)
65296529
return NORMAL;
65306530
}
65316531

6532-
65336532
PP_wrapped(pp_reverse, 0, 1)
65346533
{
65356534
dSP; dMARK;
@@ -6679,10 +6678,50 @@ PP_wrapped(pp_reverse, 0, 1)
66796678
}
66806679
}
66816680
} else {
6681+
STRLEN i = 0;
6682+
STRLEN j = len;
66826683
char * outp= SvPVX(TARG);
6683-
const char *p = src + len;
6684-
while (p != src)
6685-
*outp++ = *--p;
6684+
/* Take a chunk of bytes from the front and from the
6685+
* back, reverse the bytes in each and swap the
6686+
* chunks over. This should have generally good
6687+
* performance but also is likely to be optimised
6688+
* into bswap instructions by the compiler.
6689+
*/
6690+
#ifdef HAS_QUAD
6691+
while (j - i >= 16) {
6692+
*(U64 *)(outp + i) = _swab_64_( *(U64 *)(src + j - 8) );
6693+
*(U64 *)(outp + j - 8) = _swab_64_( *(U64 *)(src + i) );
6694+
i += 8;
6695+
j -= 8;
6696+
}
6697+
6698+
if (j - i >= 8) {
6699+
*(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
6700+
*(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
6701+
i += 4;
6702+
j -= 4;
6703+
}
6704+
#else
6705+
while (j - i >= 8) {
6706+
*(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
6707+
*(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
6708+
i += 4;
6709+
j -= 4;
6710+
}
6711+
#endif
6712+
if (j - i >= 4) {
6713+
*(U16 *)(outp + i) = _swab_16_( *(U16 *)(src + j - 2) );
6714+
*(U16 *)(outp + j - 2) = _swab_16_( *(U16 *)(src + i) );
6715+
i += 2;
6716+
j -= 2;
6717+
}
6718+
6719+
/* Swap any remaining bytes one by one. */
6720+
while (i < j) {
6721+
outp[i] = src[j - 1];
6722+
outp[j - 1] = src[i];
6723+
i++; j--;
6724+
}
66866725
}
66876726
RETURN;
66886727
}
@@ -6695,8 +6734,8 @@ PP_wrapped(pp_reverse, 0, 1)
66956734

66966735
if (len > 1) {
66976736
/* The traditional way, operate on the current byte buffer */
6698-
char *down;
66996737
if (DO_UTF8(TARG)) { /* first reverse each character */
6738+
char *down;
67006739
U8* s = (U8*)SvPVX(TARG);
67016740
const U8* send = (U8*)(s + len);
67026741
while (s < send) {
@@ -6720,11 +6759,53 @@ PP_wrapped(pp_reverse, 0, 1)
67206759
}
67216760
up = SvPVX(TARG);
67226761
}
6723-
down = SvPVX(TARG) + len - 1;
6724-
while (down > up) {
6725-
const char tmp = *up;
6726-
*up++ = *down;
6727-
*down-- = tmp;
6762+
STRLEN i = 0;
6763+
STRLEN j = len;
6764+
/* Reverse the buffer in place, in chunks where possible */
6765+
#ifdef HAS_QUAD
6766+
while (j - i >= 16) {
6767+
U64 lchunk = _swab_64_( *(U64 *)(up + j - 8) );
6768+
U64 rchunk = _swab_64_( *(U64 *)(up + i) );
6769+
*(U64 *)(up + i) = lchunk;
6770+
*(U64 *)(up + j - 8) = rchunk;
6771+
i += 8;
6772+
j -= 8;
6773+
}
6774+
6775+
if (j - i >= 8) {
6776+
U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
6777+
U32 rchunk = _swab_32_( *(U32 *)(up + i) );
6778+
*(U32 *)(up + i) = lchunk;
6779+
*(U32 *)(up + j - 4) = rchunk;
6780+
i += 4;
6781+
j -= 4;
6782+
}
6783+
#else
6784+
while (j - i >= 8) {
6785+
U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
6786+
U32 rchunk = _swab_32_( *(U32 *)(up + i) );
6787+
*(U32 *)(up + i) = lchunk;
6788+
*(U32 *)(up + j - 4) = rchunk;
6789+
i += 4;
6790+
j -= 4;
6791+
}
6792+
#endif
6793+
if (j - i >= 4) {
6794+
U16 lchunk = _swab_16_( *(U16 *)(up + j - 2) );
6795+
U16 rchunk = _swab_16_( *(U16 *)(up + i) );
6796+
*(U16 *)(up + i) = lchunk;
6797+
*(U16 *)(up + j - 2) = rchunk;
6798+
i += 2;
6799+
j -= 2;
6800+
}
6801+
6802+
/* Finally, swap any remaining bytes one-by-one. */
6803+
while (i < j) {
6804+
unsigned char tmp = up[i];
6805+
up[i] = up[j - 1];
6806+
up[j - 1] = tmp;
6807+
i++;
6808+
j--;
67286809
}
67296810
}
67306811
(void)SvPOK_only_UTF8(TARG);

0 commit comments

Comments
 (0)