From f4a35328936859dea25c35bfbd2f100d59d73853 Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Sun, 16 Feb 2025 00:44:05 +0000
Subject: [PATCH 1/4] pp_reverse - chunk-at-a-time string reversal

The performance characteristics of string reversal in blead are very
variable, depending upon the capabilities of the C compiler. Some
compilers are able to vectorize some cases for better performance.

This commit introduces explicit reversal and swapping of whole registers
at a time, which all builds seem to benefit from. The `_swab_xx_` macros
for doing this already exist in perl.h; using them for this purpose was
inspired by https://dev.to/wunk/fast-array-reversal-with-simd-j3p

The bit shifting done by these macros should be portable and reasonably
performant even if not optimised further, but it is likely that they
will be optimised into bswap, rev, or movbe instructions.

Some performance comparisons:

1. Large string reversal, with different source & destination buffers

   my $x = "X"x(1024*1000*10);
   my $y;
   for (0..1_000) { $y = reverse $x }

   gcc blead:
          2,388.30 msec task-clock    # 0.993 CPUs utilized
     10,574,195,388     cycles        # 4.427 GHz
     61,520,672,268     instructions  # 5.82  insn per cycle
     10,255,049,869     branches      # 4.294 G/sec

   clang blead:
            688.37 msec task-clock    # 0.946 CPUs utilized
      3,161,754,439     cycles        # 4.593 GHz
      8,986,420,860     instructions  # 2.84  insn per cycle
        324,734,391     branches      # 471.745 M/sec

   gcc patched:
            408.39 msec task-clock    # 0.936 CPUs utilized
      1,617,273,653     cycles        # 3.960 GHz
      6,422,991,675     instructions  # 3.97  insn per cycle
        644,856,283     branches      # 1.579 G/sec

   clang patched:
            397.61 msec task-clock    # 0.924 CPUs utilized
      1,655,838,316     cycles        # 4.165 GHz
      5,782,487,237     instructions  # 3.49  insn per cycle
        324,586,437     branches      # 816.350 M/sec

2. Large string reversal, reversing the buffer in-place

   my $x = "X"x(1024*1000*10);
   my $y;
   for (0..1_000) { $y = reverse "foo",$x }

   gcc blead:
          6,038.06 msec task-clock    # 0.996 CPUs utilized
     27,109,273,840     cycles        # 4.490 GHz
     41,987,097,139     instructions  # 1.55  insn per cycle
      5,211,350,347     branches      # 863.083 M/sec

   clang blead:
          5,815.86 msec task-clock    # 0.995 CPUs utilized
     26,962,768,616     cycles        # 4.636 GHz
     47,111,208,664     instructions  # 1.75  insn per cycle
      5,211,117,921     branches      # 896.018 M/sec

   gcc patched:
          1,003.49 msec task-clock    # 0.999 CPUs utilized
      4,298,242,624     cycles        # 4.283 GHz
      7,387,822,303     instructions  # 1.72  insn per cycle
        725,892,855     branches      # 723.367 M/sec

   clang patched:
            970.78 msec task-clock    # 0.973 CPUs utilized
      4,436,489,695     cycles        # 4.570 GHz
      8,028,374,567     instructions  # 1.81  insn per cycle
        725,867,979     branches      # 747.713 M/sec

3. Short string reversal, different source & destination
   (checking performance on smaller string reversals - note: this one
   is very variable due to noise)

   my $x = "1234567";
   my $y;
   for (0..10_000_000) { $y = reverse $x }

   gcc blead:
            401.20 msec task-clock    # 0.916 CPUs utilized
      1,672,263,966     cycles        # 4.168 GHz
      5,564,078,603     instructions  # 3.33  insn per cycle
      1,250,983,219     branches      # 3.118 G/sec

   clang blead:
            380.58 msec task-clock    # 0.998 CPUs utilized
      1,615,634,265     cycles        # 4.245 GHz
      5,583,854,366     instructions  # 3.46  insn per cycle
      1,300,935,443     branches      # 3.418 G/sec

   gcc patched:
            381.62 msec task-clock    # 0.999 CPUs utilized
      1,566,807,988     cycles        # 4.106 GHz
      5,474,069,670     instructions  # 3.49  insn per cycle
      1,240,983,221     branches      # 3.252 G/sec

   clang patched:
            346.21 msec task-clock    # 0.999 CPUs utilized
      1,600,780,787     cycles        # 4.624 GHz
      5,493,773,623     instructions  # 3.43  insn per cycle
      1,270,915,076     branches      # 3.671 G/sec
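
To make the approach concrete outside of the perl core, here is a
minimal standalone sketch of the same chunk-at-a-time idea. It is
illustrative only: reverse_chunked and bswap64 are hypothetical names,
with the shift-based bswap64 standing in for perl.h's _swab_64_, and it
uses memcpy for the loads and stores (a discipline the later commits in
this series also adopt) where this first patch uses direct (U64 *)
casts. The 32- and 16-bit steps of the patch are omitted for brevity.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Shift-based full byte reverse of a 64-bit value, equivalent in
     * effect to perl.h's _swab_64_; compilers typically lower this to
     * a single bswap/rev/movbe instruction. */
    static uint64_t bswap64(uint64_t x) {
        x = ( x << 32) | (x >> 32);
        x = ((x & 0x0000FFFF0000FFFFULL) << 16) | ((x >> 16) & 0x0000FFFF0000FFFFULL);
        x = ((x & 0x00FF00FF00FF00FFULL) <<  8) | ((x >>  8) & 0x00FF00FF00FF00FFULL);
        return x;
    }

    /* Reverse len bytes from src into dst (distinct buffers): byte-swap
     * an 8-byte chunk from each end, store each at the opposite end,
     * then finish the remaining middle bytes one by one. */
    static void reverse_chunked(char *dst, const char *src, size_t len) {
        size_t i = 0, j = len;
        while (j - i >= 16) {
            uint64_t back, front;
            memcpy(&back, src + j - 8, 8);
            memcpy(&front, src + i, 8);
            back = bswap64(back);
            front = bswap64(front);
            memcpy(dst + i, &back, 8);      /* reversed tail chunk to the front */
            memcpy(dst + j - 8, &front, 8); /* reversed head chunk to the back */
            i += 8;
            j -= 8;
        }
        while (i < j) {                     /* at most 15 middle bytes remain */
            dst[i] = src[j - 1];
            dst[j - 1] = src[i];
            i++; j--;
        }
    }

    int main(void) {
        const char src[] = "Hello, chunked reversal!";
        char dst[sizeof src];
        reverse_chunked(dst, src, sizeof src - 1);
        dst[sizeof src - 1] = '\0';
        puts(dst); /* prints "!lasrever deknuhc ,olleH" */
        return 0;
    }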
---
 pp.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 91 insertions(+), 10 deletions(-)

diff --git a/pp.c b/pp.c
index 5c39bbf540f2..9429a27a5eb6 100644
--- a/pp.c
+++ b/pp.c
@@ -6529,7 +6529,6 @@ PP(pp_unshift)
     return NORMAL;
 }
 
-
 PP_wrapped(pp_reverse, 0, 1)
 {
     dSP; dMARK;
@@ -6679,10 +6678,50 @@ PP_wrapped(pp_reverse, 0, 1)
             }
         }
     } else {
+        STRLEN i = 0;
+        STRLEN j = len;
         char * outp= SvPVX(TARG);
-        const char *p = src + len;
-        while (p != src)
-            *outp++ = *--p;
+        /* Take a chunk of bytes from the front and from the
+         * back, reverse the bytes in each and swap the
+         * chunks over. This should have generally good
+         * performance but also is likely to be optimised
+         * into bswap instructions by the compiler.
+         */
+#ifdef HAS_QUAD
+        while (j - i >= 16) {
+            *(U64 *)(outp + i) = _swab_64_( *(U64 *)(src + j - 8) );
+            *(U64 *)(outp + j - 8) = _swab_64_( *(U64 *)(src + i) );
+            i += 8;
+            j -= 8;
+        }
+
+        if (j - i >= 8) {
+            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
+            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            i += 4;
+            j -= 4;
+        }
+#else
+        while (j - i >= 8) {
+            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
+            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            i += 4;
+            j -= 4;
+        }
+#endif
+        if (j - i >= 4) {
+            *(U16 *)(outp + i) = _swab_16_( *(U16 *)(src + j - 2) );
+            *(U16 *)(outp + j - 2) = _swab_16_( *(U16 *)(src + i) );
+            i += 2;
+            j -= 2;
+        }
+
+        /* Swap any remaining bytes one by one. */
+        while (i < j) {
+            outp[i] = src[j - 1];
+            outp[j - 1] = src[i];
+            i++; j--;
+        }
     }
     RETURN;
 }
@@ -6695,8 +6734,8 @@ PP_wrapped(pp_reverse, 0, 1)
 
     if (len > 1) {
         /* The traditional way, operate on the current byte buffer */
-        char *down;
         if (DO_UTF8(TARG)) {    /* first reverse each character */
+            char *down;
             U8* s = (U8*)SvPVX(TARG);
             const U8* send = (U8*)(s + len);
             while (s < send) {
@@ -6720,11 +6759,53 @@
             }
             up = SvPVX(TARG);
         }
-        down = SvPVX(TARG) + len - 1;
-        while (down > up) {
-            const char tmp = *up;
-            *up++ = *down;
-            *down-- = tmp;
+        STRLEN i = 0;
+        STRLEN j = len;
+        /* Reverse the buffer in place, in chunks where possible */
+#ifdef HAS_QUAD
+        while (j - i >= 16) {
+            U64 lchunk = _swab_64_( *(U64 *)(up + j - 8) );
+            U64 rchunk = _swab_64_( *(U64 *)(up + i) );
+            *(U64 *)(up + i) = lchunk;
+            *(U64 *)(up + j - 8) = rchunk;
+            i += 8;
+            j -= 8;
+        }
+
+        if (j - i >= 8) {
+            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
+            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
+            *(U32 *)(up + i) = lchunk;
+            *(U32 *)(up + j - 4) = rchunk;
+            i += 4;
+            j -= 4;
+        }
+#else
+        while (j - i >= 8) {
+            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
+            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
+            *(U32 *)(up + i) = lchunk;
+            *(U32 *)(up + j - 4) = rchunk;
+            i += 4;
+            j -= 4;
+        }
+#endif
+        if (j - i >= 4) {
+            U16 lchunk = _swab_16_( *(U16 *)(up + j - 2) );
+            U16 rchunk = _swab_16_( *(U16 *)(up + i) );
+            *(U16 *)(up + i) = lchunk;
+            *(U16 *)(up + j - 2) = rchunk;
+            i += 2;
+            j -= 2;
+        }
+
+        /* Finally, swap any remaining bytes one-by-one. */
+        while (i < j) {
+            unsigned char tmp = up[i];
+            up[i] = up[j - 1];
+            up[j - 1] = tmp;
+            i++;
+            j--;
         }
     }
     (void)SvPOK_only_UTF8(TARG);

From 8a7804bcb9cea5c8d959bf8d5ea1e2cae4f3c84f Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Mon, 16 Jun 2025 10:32:13 +0000
Subject: [PATCH 2/4] memcpy and intermediates

---
 pp.c | 78 +++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 24 deletions(-)
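
This commit swaps the direct *(U64 *) style loads and stores for
memcpy() calls through local intermediate variables. The motivation,
sketched below with hypothetical helper names: dereferencing a cast
char-buffer pointer risks undefined behaviour under C's alignment and
effective-type (strict aliasing) rules, whereas a fixed-size memcpy
through a local is well-defined, and mainstream compilers compile it to
the same single load or store instruction.

    #include <stdint.h>
    #include <string.h>

    /* Well-defined unaligned 64-bit load: no alignment or aliasing
     * assumptions, yet typically compiles to a single 8-byte mov/ldr. */
    static uint64_t load_u64(const void *p) {
        uint64_t v;
        memcpy(&v, p, sizeof v);
        return v;
    }

    /* The matching well-defined unaligned 64-bit store. */
    static void store_u64(void *p, uint64_t v) {
        memcpy(p, &v, sizeof v);
    }

With helpers like these, a `*(U64 *)(outp + i) = ...` becomes
`store_u64(outp + i, ...)` with, on mainstream compilers, no change in
the generated code.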
diff --git a/pp.c b/pp.c
index 9429a27a5eb6..24cb19d5135d 100644
--- a/pp.c
+++ b/pp.c
@@ -6680,6 +6680,8 @@ PP_wrapped(pp_reverse, 0, 1)
     } else {
         STRLEN i = 0;
         STRLEN j = len;
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
         char * outp= SvPVX(TARG);
         /* Take a chunk of bytes from the front and from the
          * back, reverse the bytes in each and swap the
          * chunks over. This should have generally good
          * performance but also is likely to be optimised
          * into bswap instructions by the compiler.
          */
 #ifdef HAS_QUAD
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            *(U64 *)(outp + i) = _swab_64_( *(U64 *)(src + j - 8) );
-            *(U64 *)(outp + j - 8) = _swab_64_( *(U64 *)(src + i) );
+            memcpy(&u64_1, src + j - 8, 8);
+            memcpy(&u64_2, src + i, 8);
+            u64_1 = _swab_64_(u64_1);
+            u64_2 = _swab_64_(u64_2);
+            memcpy(outp + j - 8, &u64_2, 8);
+            memcpy(outp + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
-            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            memcpy(&u32_1, src + j - 4, 4);
+            memcpy(&u32_2, src + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(outp + j - 4, &u32_2, 4);
+            memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            *(U32 *)(outp + i) = _swab_32_( *(U32 *)(src + j - 4) );
-            *(U32 *)(outp + j - 4) = _swab_32_( *(U32 *)(src + i) );
+            memcpy(&u32_1, src + j - 4, 4);
+            memcpy(&u32_2, src + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(outp + j - 4, &u32_2, 4);
+            memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            *(U16 *)(outp + i) = _swab_16_( *(U16 *)(src + j - 2) );
-            *(U16 *)(outp + j - 2) = _swab_16_( *(U16 *)(src + i) );
+            memcpy(&u16_1, src + j - 2, 2);
+            memcpy(&u16_2, src + i, 2);
+            u16_1 = _swab_16_(u16_1);
+            u16_2 = _swab_16_(u16_2);
+            memcpy(outp + j - 2, &u16_2, 2);
+            memcpy(outp + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }
@@ -6761,40 +6780,51 @@ PP_wrapped(pp_reverse, 0, 1)
         }
         STRLEN i = 0;
         STRLEN j = len;
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
         /* Reverse the buffer in place, in chunks where possible */
 #ifdef HAS_QUAD
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            U64 lchunk = _swab_64_( *(U64 *)(up + j - 8) );
-            U64 rchunk = _swab_64_( *(U64 *)(up + i) );
-            *(U64 *)(up + i) = lchunk;
-            *(U64 *)(up + j - 8) = rchunk;
+            memcpy(&u64_1, up + j - 8, 8);
+            memcpy(&u64_2, up + i, 8);
+            u64_1 = _swab_64_(u64_1);
+            u64_2 = _swab_64_(u64_2);
+            memcpy(up + j - 8, &u64_2, 8);
+            memcpy(up + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
-            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
-            *(U32 *)(up + i) = lchunk;
-            *(U32 *)(up + j - 4) = rchunk;
+            memcpy(&u32_1, up + j - 4, 4);
+            memcpy(&u32_2, up + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(up + j - 4, &u32_2, 4);
+            memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            U32 lchunk = _swab_32_( *(U32 *)(up + j - 4) );
-            U32 rchunk = _swab_32_( *(U32 *)(up + i) );
-            *(U32 *)(up + i) = lchunk;
-            *(U32 *)(up + j - 4) = rchunk;
+            memcpy(&u32_1, up + j - 4, 4);
+            memcpy(&u32_2, up + i, 4);
+            u32_1 = _swab_32_(u32_1);
+            u32_2 = _swab_32_(u32_2);
+            memcpy(up + j - 4, &u32_2, 4);
+            memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            U16 lchunk = _swab_16_( *(U16 *)(up + j - 2) );
-            U16 rchunk = _swab_16_( *(U16 *)(up + i) );
-            *(U16 *)(up + i) = lchunk;
-            *(U16 *)(up + j - 2) = rchunk;
+            memcpy(&u16_1, up + j - 2, 2);
+            memcpy(&u16_2, up + i, 2);
+            u16_1 = _swab_16_(u16_1);
+            u16_2 = _swab_16_(u16_2);
+            memcpy(up + j - 2, &u16_2, 2);
+            memcpy(up + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }

From 67d79fed92f352ed9d807b90b413c4f9c94d085f Mon Sep 17 00:00:00 2001
From: Richard Leach
Date: Mon, 16 Jun 2025 11:46:43 +0000
Subject: [PATCH 3/4] squashme

---
 pp.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/pp.c b/pp.c
index 24cb19d5135d..4d7a6221e78d 100644
--- a/pp.c
+++ b/pp.c
@@ -6680,8 +6680,8 @@ PP_wrapped(pp_reverse, 0, 1)
     } else {
         STRLEN i = 0;
         STRLEN j = len;
-        U32 u32_1, u32_2;
-        U16 u16_1, u16_2;
+        uint32_t u32_1, u32_2;
+        uint16_t u16_1, u16_2;
         char * outp= SvPVX(TARG);
         /* Take a chunk of bytes from the front and from the
          * back, reverse the bytes in each and swap the
@@ -6690,7 +6690,7 @@ PP_wrapped(pp_reverse, 0, 1)
          * into bswap instructions by the compiler.
          */
 #ifdef HAS_QUAD
-        U64 u64_1, u64_2;
+        uint64_t u64_1, u64_2;
         while (j - i >= 16) {
             memcpy(&u64_1, src + j - 8, 8);
             memcpy(&u64_2, src + i, 8);
@@ -6780,11 +6780,11 @@ PP_wrapped(pp_reverse, 0, 1)
         }
         STRLEN i = 0;
         STRLEN j = len;
-        U32 u32_1, u32_2;
-        U16 u16_1, u16_2;
+        uint32_t u32_1, u32_2;
+        uint16_t u16_1, u16_2;
         /* Reverse the buffer in place, in chunks where possible */
 #ifdef HAS_QUAD
-        U64 u64_1, u64_2;
+        uint64_t u64_1, u64_2;
         while (j - i >= 16) {
             memcpy(&u64_1, up + j - 8, 8);
             memcpy(&u64_2, up + i, 8);
From 6b302174419dc1c5da48d06cf74480603b813b53 Mon Sep 17 00:00:00 2001
From: bulk88
Date: Tue, 17 Jun 2025 06:02:16 -0400
Subject: [PATCH 4/4] MSVC compat fixes for intrinsics-enabled
 4/8-bytes-at-a-time pp_reverse

---
 perl.h |  20 +++--
 pp.c   | 169 ++++++++++++++++++++++++++++++++++-----------------------
 2 files changed, 116 insertions(+), 73 deletions(-)
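
A note on what this commit is working around: cl.exe does not reliably
recognise shift-based byte swaps, so the `_swab_xx_` macros are mapped
onto MSVC's `_byteswap_*` intrinsics instead. For readers outside the
perl core, the general shape of such a shim is sketched below. The
`my_bswap*` names are hypothetical, and the assumption that only MSVC
and GCC/Clang-style compilers need special-casing is this sketch's, not
the patch's; the patch itself keeps `_swab_xx_` as the generic fallback.

    #include <stdint.h>
    #ifdef _MSC_VER
    #  include <stdlib.h>  /* _byteswap_ushort / _byteswap_ulong / _byteswap_uint64 */
    #  define my_bswap16(x) _byteswap_ushort(x)
    #  define my_bswap32(x) _byteswap_ulong(x)
    #  define my_bswap64(x) _byteswap_uint64(x)
    #elif defined(__GNUC__) || defined(__clang__)
    #  define my_bswap16(x) __builtin_bswap16(x)
    #  define my_bswap32(x) __builtin_bswap32(x)
    #  define my_bswap64(x) __builtin_bswap64(x)
    #else
    /* Generic shift-based fallback, in the style of perl.h's _swab_xx_
     * macros; the 32- and 64-bit variants are elided for brevity. */
    #  define my_bswap16(x) ((uint16_t)(((uint16_t)(x) >> 8) | ((uint16_t)(x) << 8)))
    #endif

The commit also routes the fixed-size copies through a forced-inline
S_memcpy wrapper plus `#pragma intrinsic(memcpy)`, apparently so that
cl.exe expands them inline rather than emitting calls to the CRT
memcpy.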
diff --git a/perl.h b/perl.h
index 535a80fb4376..8c8ff4dd4d49 100644
--- a/perl.h
+++ b/perl.h
@@ -1452,18 +1452,26 @@ Use C<UINTMAX_C> to get the largest type available on the platform.
 
 =cut
 */
 #ifndef UINT16_C
-# if INTSIZE >= 2
-#   define UINT16_C(x) ((U16_TYPE)x##U)
+# ifdef _MSC_VER
+#   define UINT16_C(x) ((U16TYPE)x##ui16)
 # else
-#   define UINT16_C(x) ((U16_TYPE)x##UL)
+#   if INTSIZE >= 2
+#     define UINT16_C(x) ((U16TYPE)x##U)
+#   else
+#     define UINT16_C(x) ((U16TYPE)x##UL)
+#   endif
 # endif
 #endif
 
 #ifndef UINT32_C
-# if INTSIZE >= 4
-#   define UINT32_C(x) ((U32_TYPE)x##U)
+# ifdef _MSC_VER
+#   define UINT32_C(x) ((U32TYPE)x##ui32)
 # else
-#   define UINT32_C(x) ((U32_TYPE)x##UL)
+#   if INTSIZE >= 4
+#     define UINT32_C(x) ((U32TYPE)x##U)
+#   else
+#     define UINT32_C(x) ((U32TYPE)x##UL)
+#   endif
 # endif
 #endif

diff --git a/pp.c b/pp.c
index 4d7a6221e78d..507eb3e07bce 100644
--- a/pp.c
+++ b/pp.c
@@ -6529,6 +6529,20 @@ PP(pp_unshift)
     return NORMAL;
 }
 
+#ifdef _MSC_VER
+# pragma intrinsic(_byteswap_ushort, _byteswap_ulong, _byteswap_uint64)
+# define S_bswap16(_x) _byteswap_ushort(_x)
+# define S_bswap32(_x) _byteswap_ulong(_x)
+# define S_bswap64(_x) _byteswap_uint64(_x)
+PERL_STATIC_FORCE_INLINE void *
+    S_memcpy(void *dest, const void *src, size_t count);
+#else
+# define S_bswap16(_x) _swab_16_(_x)
+# define S_bswap32(_x) _swab_32_(_x)
+# define S_bswap64(_x) _swab_64_(_x)
+# define S_memcpy(_d,_s,_n) memcpy((_d),(_s),(_n))
+#endif
+
 PP_wrapped(pp_reverse, 0, 1)
 {
     dSP; dMARK;
@@ -6554,15 +6568,17 @@ PP_wrapped(pp_reverse, 0, 1)
             SV *begin, *end;
 
             if (can_preserve) {
-                if (!av_exists(av, i)) {
-                    if (av_exists(av, j)) {
+                bool exi = av_exists(av, i);
+                bool exj = av_exists(av, j);
+                if (!exi) {
+                    if (exj) {
                         SV *sv = av_delete(av, j, 0);
                         begin = *av_fetch(av, i, TRUE);
                         sv_setsv_mg(begin, sv);
                     }
                     continue;
                 }
-                else if (!av_exists(av, j)) {
+                else if (!exj) {
                     SV *sv = av_delete(av, i, 0);
                     end = *av_fetch(av, j, TRUE);
                     sv_setsv_mg(end, sv);
@@ -6643,18 +6659,19 @@ PP_wrapped(pp_reverse, 0, 1)
      * in a single pass, rather than 2-3 passes. */
 
     const char * src = SvPV_const(src_sv, len);
+    U8* dd;
 
     /* Prepare the TARG. */
-    if (SvTYPE(TARG) < SVt_PV) {
+    if (SvTHINKFIRST(TARG))
+        SV_CHECK_THINKFIRST_COW_DROP(TARG); /* Drops any buffer or RV */
+    if (SvTYPE(TARG) < SVt_PV)
         SvUPGRADE(TARG, SvTYPE(src_sv)); /* No buffer allocation here */
-    } else if(SvTHINKFIRST(TARG)) {
-        SV_CHECK_THINKFIRST_COW_DROP(TARG); /* Drops any buffer */
-    }
-    SvSETMAGIC(TARG);
-    SvGROW(TARG, len + 1);
+    else /* can't have SMG if < PVMG, SvROK/SvAMAGIC doesn't apply */
+        SvSETMAGIC(TARG);
+    dd = (U8*)SvGROW(TARG, len + 1);
     SvCUR_set(TARG, len);
     SvPOK_only(TARG);
-    *SvEND(TARG) = '\0';
+    dd[len] = '\0';
 
     if (SvTAINTED(src_sv))
         SvTAINT(TARG);
@@ -6663,9 +6680,9 @@ PP_wrapped(pp_reverse, 0, 1)
             SvUTF8_on(TARG);
 
             const U8* s = (const U8*)src;
-            U8* dd = (U8*)(SvPVX(TARG) + len);
             const U8* send = (const U8*)(s + len);
             int bytes = 0;
+            dd = dd + len;
             while (s < send) {
                 bytes = UTF8SKIP(s);
                 if (bytes == 1) {
@@ -6680,9 +6697,9 @@ PP_wrapped(pp_reverse, 0, 1)
             }
         }
     } else {
         STRLEN i = 0;
         STRLEN j = len;
-        uint32_t u32_1, u32_2;
-        uint16_t u16_1, u16_2;
-        char * outp= SvPVX(TARG);
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
+        char * outp = NUM2PTR(char*,dd);
         /* Take a chunk of bytes from the front and from the
          * back, reverse the bytes in each and swap the
          * chunks over. This should have generally good
@@ -6690,47 +6707,47 @@ PP_wrapped(pp_reverse, 0, 1)
          * into bswap instructions by the compiler.
          */
 #ifdef HAS_QUAD
-        uint64_t u64_1, u64_2;
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            memcpy(&u64_1, src + j - 8, 8);
-            memcpy(&u64_2, src + i, 8);
-            u64_1 = _swab_64_(u64_1);
-            u64_2 = _swab_64_(u64_2);
-            memcpy(outp + j - 8, &u64_2, 8);
-            memcpy(outp + i, &u64_1, 8);
+            S_memcpy(&u64_1, src + j - 8, 8);
+            S_memcpy(&u64_2, src + i, 8);
+            u64_1 = S_bswap64(u64_1);
+            u64_2 = S_bswap64(u64_2);
+            S_memcpy(outp + j - 8, &u64_2, 8);
+            S_memcpy(outp + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            memcpy(&u32_1, src + j - 4, 4);
-            memcpy(&u32_2, src + i, 4);
-            u32_1 = _swab_32_(u32_1);
-            u32_2 = _swab_32_(u32_2);
-            memcpy(outp + j - 4, &u32_2, 4);
-            memcpy(outp + i, &u32_1, 4);
+            S_memcpy(&u32_1, src + j - 4, 4);
+            S_memcpy(&u32_2, src + i, 4);
+            u32_1 = S_bswap32(u32_1);
+            u32_2 = S_bswap32(u32_2);
+            S_memcpy(outp + j - 4, &u32_2, 4);
+            S_memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            memcpy(&u32_1, src + j - 4, 4);
-            memcpy(&u32_2, src + i, 4);
-            u32_1 = _swab_32_(u32_1);
-            u32_2 = _swab_32_(u32_2);
-            memcpy(outp + j - 4, &u32_2, 4);
-            memcpy(outp + i, &u32_1, 4);
+            S_memcpy(&u32_1, src + j - 4, 4);
+            S_memcpy(&u32_2, src + i, 4);
+            u32_1 = S_bswap32(u32_1);
+            u32_2 = S_bswap32(u32_2);
+            S_memcpy(outp + j - 4, &u32_2, 4);
+            S_memcpy(outp + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            memcpy(&u16_1, src + j - 2, 2);
-            memcpy(&u16_2, src + i, 2);
-            u16_1 = _swab_16_(u16_1);
-            u16_2 = _swab_16_(u16_2);
-            memcpy(outp + j - 2, &u16_2, 2);
-            memcpy(outp + i, &u16_1, 2);
+            S_memcpy(&u16_1, src + j - 2, 2);
+            S_memcpy(&u16_2, src + i, 2);
+            u16_1 = S_bswap16(u16_1);
+            u16_2 = S_bswap16(u16_2);
+            S_memcpy(outp + j - 2, &u16_2, 2);
+            S_memcpy(outp + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }
@@ -6755,7 +6772,8 @@ PP_wrapped(pp_reverse, 0, 1)
         /* The traditional way, operate on the current byte buffer */
         if (DO_UTF8(TARG)) {    /* first reverse each character */
             char *down;
-            U8* s = (U8*)SvPVX(TARG);
+            assert(SvPVX(TARG) == up);
+            U8* s = (U8*)up;
             const U8* send = (U8*)(s + len);
             while (s < send) {
                 if (UTF8_IS_INVARIANT(*s)) {
@@ -6780,51 +6798,51 @@ PP_wrapped(pp_reverse, 0, 1)
         }
         STRLEN i = 0;
         STRLEN j = len;
-        uint32_t u32_1, u32_2;
-        uint16_t u16_1, u16_2;
+        U32 u32_1, u32_2;
+        U16 u16_1, u16_2;
         /* Reverse the buffer in place, in chunks where possible */
 #ifdef HAS_QUAD
-        uint64_t u64_1, u64_2;
+        U64 u64_1, u64_2;
         while (j - i >= 16) {
-            memcpy(&u64_1, up + j - 8, 8);
-            memcpy(&u64_2, up + i, 8);
-            u64_1 = _swab_64_(u64_1);
-            u64_2 = _swab_64_(u64_2);
-            memcpy(up + j - 8, &u64_2, 8);
-            memcpy(up + i, &u64_1, 8);
+            S_memcpy(&u64_1, up + j - 8, 8);
+            S_memcpy(&u64_2, up + i, 8);
+            u64_1 = S_bswap64(u64_1);
+            u64_2 = S_bswap64(u64_2);
+            S_memcpy(up + j - 8, &u64_2, 8);
+            S_memcpy(up + i, &u64_1, 8);
             i += 8;
             j -= 8;
         }
 
         if (j - i >= 8) {
-            memcpy(&u32_1, up + j - 4, 4);
-            memcpy(&u32_2, up + i, 4);
-            u32_1 = _swab_32_(u32_1);
-            u32_2 = _swab_32_(u32_2);
-            memcpy(up + j - 4, &u32_2, 4);
-            memcpy(up + i, &u32_1, 4);
+            S_memcpy(&u32_1, up + j - 4, 4);
+            S_memcpy(&u32_2, up + i, 4);
+            u32_1 = S_bswap32(u32_1);
+            u32_2 = S_bswap32(u32_2);
+            S_memcpy(up + j - 4, &u32_2, 4);
+            S_memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #else
         while (j - i >= 8) {
-            memcpy(&u32_1, up + j - 4, 4);
-            memcpy(&u32_2, up + i, 4);
-            u32_1 = _swab_32_(u32_1);
-            u32_2 = _swab_32_(u32_2);
-            memcpy(up + j - 4, &u32_2, 4);
-            memcpy(up + i, &u32_1, 4);
+            S_memcpy(&u32_1, up + j - 4, 4);
+            S_memcpy(&u32_2, up + i, 4);
+            u32_1 = S_bswap32(u32_1);
+            u32_2 = S_bswap32(u32_2);
+            S_memcpy(up + j - 4, &u32_2, 4);
+            S_memcpy(up + i, &u32_1, 4);
             i += 4;
             j -= 4;
         }
 #endif
         if (j - i >= 4) {
-            memcpy(&u16_1, up + j - 2, 2);
-            memcpy(&u16_2, up + i, 2);
-            u16_1 = _swab_16_(u16_1);
-            u16_2 = _swab_16_(u16_2);
-            memcpy(up + j - 2, &u16_2, 2);
-            memcpy(up + i, &u16_1, 2);
+            S_memcpy(&u16_1, up + j - 2, 2);
+            S_memcpy(&u16_2, up + i, 2);
+            u16_1 = S_bswap16(u16_1);
+            u16_2 = S_bswap16(u16_2);
+            S_memcpy(up + j - 2, &u16_2, 2);
+            S_memcpy(up + i, &u16_1, 2);
             i += 2;
             j -= 2;
         }
@@ -6843,6 +6861,11 @@ PP_wrapped(pp_reverse, 0, 1)
     RETURN;
 }
 
+#undef S_memcpy
+#undef S_bswap16
+#undef S_bswap32
+#undef S_bswap64
+
 PP_wrapped(pp_split, (
         (PL_op->op_private & OPpSPLIT_ASSIGN)
      && (PL_op->op_flags & OPf_STACKED))
@@ -8179,6 +8202,18 @@ PP(pp_is_tainted)
     return NORMAL;
 }
 
+#ifdef _MSC_VER
+/* this pragma can't be push/pop-ed vs whatever the cmd line to cl.exe was */
+# pragma intrinsic(memcpy)
+
+void *
+S_memcpy(void *dest, const void *src, size_t count)
+{
+    return memcpy(dest, src, count);
+}
+
+#endif
+
 /*
  * ex: set ts=8 sts=4 sw=4 et:
  */
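
As a final aside, the chunked in-place reversal is easy to sanity-check
against a naive byte swap. The standalone harness below is not part of
the series; it assumes a GCC/Clang-style compiler for
__builtin_bswap64, keeps only the 8-byte step for brevity, and
exercises every length up to 100, covering the chunked path, the odd
tails, and the empty and single-byte cases.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Reference implementation: swap bytes one at a time. */
    static void reverse_naive(char *p, size_t len) {
        size_t i = 0, j = len;
        while (i < j) {
            char t = p[i];
            p[i] = p[j - 1];
            p[j - 1] = t;
            i++; j--;
        }
    }

    /* In-place chunked reversal mirroring the structure of the patch
     * (8-byte chunks only; the 4- and 2-byte steps are left out). */
    static void reverse_chunked(char *p, size_t len) {
        size_t i = 0, j = len;
        while (j - i >= 16) {
            uint64_t back, front;
            memcpy(&back, p + j - 8, 8);
            memcpy(&front, p + i, 8);
            back = __builtin_bswap64(back);
            front = __builtin_bswap64(front);
            memcpy(p + i, &back, 8);
            memcpy(p + j - 8, &front, 8);
            i += 8;
            j -= 8;
        }
        reverse_naive(p + i, j - i);  /* at most 15 bytes remain */
    }

    int main(void) {
        char a[128], b[128];
        for (size_t len = 0; len <= 100; len++) {
            for (size_t k = 0; k < len; k++)
                a[k] = b[k] = (char)('!' + (k % 90));
            reverse_naive(a, len);
            reverse_chunked(b, len);
            assert(memcmp(a, b, len) == 0);
        }
        return 0;
    }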