This repository was archived by the owner on Oct 12, 2022. It is now read-only.

Commit 0fd4364

[dmd-cxx] Backport intrinsics modules from master, couple more Solaris fixes. (#3366)
merged-on-behalf-of: Iain Buclaw <ibuclaw@users.noreply.github.com>
1 parent 9d0c836 commit 0fd4364

File tree

13 files changed: +1218 -741 lines changed

mak/COPY

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ COPY=\
     $(IMPDIR)\core\simd.d \
     $(IMPDIR)\core\time.d \
     $(IMPDIR)\core\vararg.d \
+    $(IMPDIR)\core\volatile.d \
     \
     $(IMPDIR)\core\internal\abort.d \
     $(IMPDIR)\core\internal\arrayop.d \

mak/SRCS

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@ SRCS=\
     src\core\simd.d \
     src\core\time.d \
     src\core\vararg.d \
+    src\core\volatile.d \
     \
     src\core\internal\abort.d \
     src\core\internal\arrayop.d \

src/core/bitop.d

Lines changed: 69 additions & 76 deletions
@@ -267,7 +267,7 @@ unittest
  * (No longer an intrisic - the compiler recognizes the patterns
  * in the body.)
  */
-int bt(in size_t* p, size_t bitnum) pure @system
+int bt(const scope size_t* p, size_t bitnum) pure @system
 {
     static if (size_t.sizeof == 8)
         return ((p[bitnum >> 6] & (1L << (bitnum & 63)))) != 0;
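
Aside (not part of the diff): `bt` tests a single bit in an array of size_t words and pairs with `bts`/`btr` from the same module. A minimal usage sketch against the updated `const scope` signature:

```d
import core.bitop : bt, btr, bts;

@system unittest
{
    // A 128-bit bitmap stored as size_t words; bt indexes bits across words.
    size_t[16 / size_t.sizeof] bits;

    bts(bits.ptr, 65);              // set bit 65
    assert(bt(bits.ptr, 65) != 0);  // read it back through bt
    assert(bt(bits.ptr, 64) == 0);  // the neighbouring bit stays clear

    btr(bits.ptr, 65);              // clear it again
    assert(bt(bits.ptr, 65) == 0);
}
```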
@@ -494,26 +494,62 @@ struct BitRange
     testIt(100, 6, 45, 89, 92, 99);
 }

+/**
+ * Swaps bytes in a 2 byte ushort.
+ * Params:
+ *      x = value
+ * Returns:
+ *      `x` with bytes swapped
+ */
+pragma(inline, false)
+ushort byteswap(ushort x) pure
+{
+    /* Calling it bswap(ushort) would break existing code that calls bswap(uint).
+     *
+     * This pattern is meant to be recognized by the dmd code generator.
+     * Don't change it without checking that an XCH instruction is still
+     * used to implement it.
+     * Inlining may also throw it off.
+     */
+    return cast(ushort) (((x >> 8) & 0xFF) | ((x << 8) & 0xFF00u));
+}
+
+///
+unittest
+{
+    assert(byteswap(cast(ushort)0xF234) == 0x34F2);
+    static ushort xx = 0xF234;
+    assert(byteswap(xx) == 0x34F2);
+}
+
 /**
  * Swaps bytes in a 4 byte uint end-to-end, i.e. byte 0 becomes
  * byte 3, byte 1 becomes byte 2, byte 2 becomes byte 1, byte 3
  * becomes byte 0.
  */
 uint bswap(uint v) pure;

+///
+unittest
+{
+    assert(bswap(0x01020304u) == 0x04030201u);
+    static uint xx = 0x10203040u;
+    assert(bswap(xx) == 0x40302010u);
+}
+
 /**
  * Swaps bytes in an 8 byte ulong end-to-end, i.e. byte 0 becomes
  * byte 7, byte 1 becomes byte 6, etc.
+ * This is meant to be recognized by the compiler as an intrinsic.
  */
-ulong bswap(ulong v) pure
-{
-    auto sv = Split64(v);
-
-    const temp = sv.lo;
-    sv.lo = bswap(sv.hi);
-    sv.hi = bswap(temp);
+ulong bswap(ulong v) pure;

-    return (cast(ulong) sv.hi << 32) | sv.lo;
+///
+unittest
+{
+    assert(bswap(0x01020304_05060708uL) == 0x08070605_04030201uL);
+    static ulong xx = 0x10203040_50607080uL;
+    assert(bswap(xx) == 0x80706050_40302010uL);
 }

 version (DigitalMars) version (AnyX86) @system // not pure
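
Aside (not part of the diff): with `bswap(ulong)` now declared as a compiler-recognized intrinsic and `byteswap(ushort)` added, a typical use is converting between host and big-endian byte order. A minimal sketch; the `hostToNetwork` helper is illustrative and not part of druntime:

```d
import core.bitop : bswap, byteswap;

// Illustrative helper: convert a 32-bit value between host byte order and
// big-endian ("network") byte order.
uint hostToNetwork(uint x) pure
{
    version (LittleEndian)
        return bswap(x);   // a single byte-swap instruction where recognized
    else
        return x;          // big-endian hosts already match network order
}

unittest
{
    // All three swaps are involutions: applying one twice restores the input.
    assert(byteswap(byteswap(cast(ushort) 0xBEEF)) == 0xBEEF);
    assert(bswap(bswap(0xDEADBEEFu)) == 0xDEADBEEFu);
    assert(bswap(bswap(0x0123_4567_89AB_CDEFuL)) == 0x0123_4567_89AB_CDEFuL);

    version (LittleEndian)
        assert(hostToNetwork(0x7F00_0001u) == 0x0100_007Fu);
}
```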
@@ -722,57 +758,14 @@ version (DigitalMars) version (AnyX86)
 }


-/*************************************
- * Read/write value from/to the memory location indicated by ptr.
- *
- * These functions are recognized by the compiler, and calls to them are guaranteed
- * to not be removed (as dead assignment elimination or presumed to have no effect)
- * or reordered in the same thread.
- *
- * These reordering guarantees are only made with regards to other
- * operations done through these functions; the compiler is free to reorder regular
- * loads/stores with regards to loads/stores done through these functions.
- *
- * This is useful when dealing with memory-mapped I/O (MMIO) where a store can
- * have an effect other than just writing a value, or where sequential loads
- * with no intervening stores can retrieve
- * different values from the same location due to external stores to the location.
- *
- * These functions will, when possible, do the load/store as a single operation. In
- * general, this is possible when the size of the operation is less than or equal to
- * $(D (void*).sizeof), although some targets may support larger operations. If the
- * load/store cannot be done as a single operation, multiple smaller operations will be used.
- *
- * These are not to be conflated with atomic operations. They do not guarantee any
- * atomicity. This may be provided by coincidence as a result of the instructions
- * used on the target, but this should not be relied on for portable programs.
- * Further, no memory fences are implied by these functions.
- * They should not be used for communication between threads.
- * They may be used to guarantee a write or read cycle occurs at a specified address.
- */
-
-ubyte volatileLoad(ubyte * ptr);
-ushort volatileLoad(ushort* ptr); /// ditto
-uint volatileLoad(uint * ptr); /// ditto
-ulong volatileLoad(ulong * ptr); /// ditto
-
-void volatileStore(ubyte * ptr, ubyte value); /// ditto
-void volatileStore(ushort* ptr, ushort value); /// ditto
-void volatileStore(uint * ptr, uint value); /// ditto
-void volatileStore(ulong * ptr, ulong value); /// ditto
-
-@system unittest
+deprecated("volatileLoad has been moved to core.volatile. Use core.volatile.volatileLoad instead.")
 {
-    alias TT(T...) = T;
+    public import core.volatile : volatileLoad;
+}

-    foreach (T; TT!(ubyte, ushort, uint, ulong))
-    {
-        T u;
-        T* p = &u;
-        volatileStore(p, 1);
-        T r = volatileLoad(p);
-        assert(r == u);
-    }
+deprecated("volatileStore has been moved to core.volatile. Use core.volatile.volatileStore instead.")
+{
+    public import core.volatile : volatileStore;
 }

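Aside (not part of the diff): after this change `core.bitop` only re-exports the two functions through deprecated public imports, so new code should import `core.volatile` directly. A minimal round-trip sketch, mirroring the unittest that moved out of this file:

```d
import core.volatile : volatileLoad, volatileStore;

@system unittest
{
    // Each call is a real load/store that the compiler will not elide or
    // reorder relative to other volatile accesses in this thread.
    // (For actual MMIO the pointer would be a fixed device-register address.)
    uint u;
    volatileStore(&u, 0xDEADBEEFu);
    assert(volatileLoad(&u) == 0xDEADBEEFu);
}
```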

@@ -954,51 +947,51 @@ version (D_InlineAsm_X86_64)
  * Bitwise rotate `value` left (`rol`) or right (`ror`) by
  * `count` bit positions.
  */
-pure T rol(T)(in T value, in uint count)
+pure T rol(T)(const T value, const uint count)
     if (__traits(isIntegral, T) && __traits(isUnsigned, T))
 {
     assert(count < 8 * T.sizeof);
-    return cast(T) ((value << count) | (value >> (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
 }
 /// ditto
-pure T ror(T)(in T value, in uint count)
+pure T ror(T)(const T value, const uint count)
     if (__traits(isIntegral, T) && __traits(isUnsigned, T))
 {
     assert(count < 8 * T.sizeof);
-    return cast(T) ((value >> count) | (value << (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
 }
 /// ditto
-pure T rol(uint count, T)(in T value)
+pure T rol(uint count, T)(const T value)
     if (__traits(isIntegral, T) && __traits(isUnsigned, T))
 {
     static assert(count < 8 * T.sizeof);
-    return cast(T) ((value << count) | (value >> (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
 }
 /// ditto
-pure T ror(uint count, T)(in T value)
+pure T ror(uint count, T)(const T value)
     if (__traits(isIntegral, T) && __traits(isUnsigned, T))
 {
     static assert(count < 8 * T.sizeof);
-    return cast(T) ((value >> count) | (value << (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
 }

 ///
 unittest
 {
-    ubyte a = 0b10101010U;
-    ulong b = ulong.max;
+    ubyte a = 0b11110000U;
+    ulong b = ~1UL;

-    assert(rol(a, 1) == 0b01010101);
-    assert(ror(a, 1) == 0b01010101);
-    assert(rol(a, 3) == 0b01010101);
-    assert(ror(a, 3) == 0b01010101);
+    assert(rol(a, 1) == 0b11100001);
+    assert(ror(a, 1) == 0b01111000);
+    assert(rol(a, 3) == 0b10000111);
+    assert(ror(a, 3) == 0b00011110);

     assert(rol(a, 0) == a);
     assert(ror(a, 0) == a);

-    assert(rol(b, 63) == ulong.max);
-    assert(ror(b, 63) == ulong.max);
+    assert(rol(b, 63) == ~(1UL << 63));
+    assert(ror(b, 63) == ~2UL);

-    assert(rol!3(a) == 0b01010101);
-    assert(ror!3(a) == 0b01010101);
+    assert(rol!3(a) == 0b10000111);
+    assert(ror!3(a) == 0b00011110);
 }
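
Aside (not part of the diff): the run-time-count and compile-time-count overloads are interchangeable, and `ror` undoes `rol` for matching counts. A small sketch against the updated signatures; counts start at 1 so the zero-count edge case is left to the unittest above:

```d
import core.bitop : rol, ror;

unittest
{
    immutable uint x = 0xC0DE_F00Du;

    // ror undoes rol (and vice versa) for matching counts.
    foreach (uint n; 1 .. 32)
    {
        assert(ror(rol(x, n), n) == x);
        assert(rol(ror(x, n), n) == x);
    }

    // The compile-time-count overloads agree with the run-time-count ones.
    assert(rol!5(x) == rol(x, 5));
    assert(ror!5(x) == ror(x, 5));
}
```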

0 commit comments
