@@ -267,7 +267,7 @@ unittest
 * (No longer an intrinsic - the compiler recognizes the patterns
 * in the body.)
 */
-int bt(in size_t* p, size_t bitnum) pure @system
+int bt(const scope size_t* p, size_t bitnum) pure @system
{
    static if (size_t.sizeof == 8)
        return ((p[bitnum >> 6] & (1L << (bitnum & 63)))) != 0;
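For reference, a minimal usage sketch (the array values are illustrative, not from the patch): `bt` tests bit `bitnum` in the array of `size_t` words starting at `p`, and the new `const scope` qualifier documents that the pointer is only read and never escaped.

@system unittest
{
    // Hypothetical data: bit 1 of word 0 and bit 8 of word 1 are set.
    size_t[2] array = [2, 0x100];
    assert(bt(array.ptr, 1) != 0);
    assert(bt(array.ptr, 2) == 0);
    assert(bt(array.ptr, size_t.sizeof * 8 + 8) != 0);
}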
@@ -494,26 +494,62 @@ struct BitRange
    testIt(100, 6, 45, 89, 92, 99);
}

+/**
+ * Swaps bytes in a 2 byte ushort.
+ * Params:
+ *    x = value
+ * Returns:
+ *    `x` with bytes swapped
+ */
+pragma(inline, false)
+ushort byteswap(ushort x) pure
+{
+    /* Calling it bswap(ushort) would break existing code that calls bswap(uint).
+     *
+     * This pattern is meant to be recognized by the dmd code generator.
+     * Don't change it without checking that an XCHG instruction is still
+     * used to implement it.
+     * Inlining may also throw it off.
+     */
+    return cast(ushort) (((x >> 8) & 0xFF) | ((x << 8) & 0xFF00u));
+}
+
+///
+unittest
+{
+    assert(byteswap(cast(ushort)0xF234) == 0x34F2);
+    static ushort xx = 0xF234;
+    assert(byteswap(xx) == 0x34F2);
+}
+
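As an aside, the recognized pattern is just a 16-bit rotation by 8, so swapping twice is the identity; a property check along these lines (illustrative, not part of the patch) could accompany the unittest above:

unittest
{
    // Two byte swaps cancel out; 0xABCD is an arbitrary test value.
    ushort x = 0xABCD;
    assert(byteswap(x) == 0xCDAB);
    assert(byteswap(byteswap(x)) == x);
}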
/**
 * Swaps bytes in a 4 byte uint end-to-end, i.e. byte 0 becomes
 * byte 3, byte 1 becomes byte 2, byte 2 becomes byte 1, byte 3
 * becomes byte 0.
 */
uint bswap(uint v) pure;

+///
+unittest
+{
+    assert(bswap(0x01020304u) == 0x04030201u);
+    static uint xx = 0x10203040u;
+    assert(bswap(xx) == 0x40302010u);
+}
+
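For readers wondering what the intrinsic computes, a portable mask-and-shift formulation follows; `bswapPortable` is a hypothetical name for illustration, not the actual implementation (the compiler lowers `bswap` to a single BSWAP instruction on x86).

uint bswapPortable(uint v) pure
{
    // Move each byte to its mirrored position.
    return ((v & 0x000000FF) << 24) |
           ((v & 0x0000FF00) <<  8) |
           ((v & 0x00FF0000) >>  8) |
           ((v & 0xFF000000) >> 24);
}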
/**
 * Swaps bytes in an 8 byte ulong end-to-end, i.e. byte 0 becomes
 * byte 7, byte 1 becomes byte 6, etc.
+ * This is meant to be recognized by the compiler as an intrinsic.
 */
-ulong bswap(ulong v) pure
-{
-    auto sv = Split64(v);
-
-    const temp = sv.lo;
-    sv.lo = bswap(sv.hi);
-    sv.hi = bswap(temp);
-
-    return (cast(ulong) sv.hi << 32) | sv.lo;
+ulong bswap(ulong v) pure;
+
+///
+unittest
+{
+    assert(bswap(0x01020304_05060708uL) == 0x08070605_04030201uL);
+    static ulong xx = 0x10203040_50607080uL;
+    assert(bswap(xx) == 0x80706050_40302010uL);
}
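The deleted body above decomposed the 64-bit swap into two 32-bit swaps via the internal Split64 helper. The same logic with plain shifts, as a sketch of what any software fallback must do (`bswapSoftware` is a hypothetical name, not part of the patch):

ulong bswapSoftware(ulong v) pure
{
    // Swap each 32-bit half, then exchange the halves.
    const uint lo = cast(uint) v;
    const uint hi = cast(uint) (v >> 32);
    return (cast(ulong) bswap(lo) << 32) | bswap(hi);
}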

version (DigitalMars) version (AnyX86) @system // not pure
@@ -722,57 +758,14 @@ version (DigitalMars) version (AnyX86)
}

-/* ************************************
- * Read/write value from/to the memory location indicated by ptr.
- *
- * These functions are recognized by the compiler, and calls to them are guaranteed
- * to not be removed (as dead assignment elimination or presumed to have no effect)
- * or reordered in the same thread.
- *
- * These reordering guarantees are only made with regards to other
- * operations done through these functions; the compiler is free to reorder regular
- * loads/stores with regards to loads/stores done through these functions.
- *
- * This is useful when dealing with memory-mapped I/O (MMIO) where a store can
- * have an effect other than just writing a value, or where sequential loads
- * with no intervening stores can retrieve
- * different values from the same location due to external stores to the location.
- *
- * These functions will, when possible, do the load/store as a single operation. In
- * general, this is possible when the size of the operation is less than or equal to
- * $(D (void*).sizeof), although some targets may support larger operations. If the
- * load/store cannot be done as a single operation, multiple smaller operations will be used.
- *
- * These are not to be conflated with atomic operations. They do not guarantee any
- * atomicity. This may be provided by coincidence as a result of the instructions
- * used on the target, but this should not be relied on for portable programs.
- * Further, no memory fences are implied by these functions.
- * They should not be used for communication between threads.
- * They may be used to guarantee a write or read cycle occurs at a specified address.
- */
-
-ubyte  volatileLoad(ubyte*  ptr);
-ushort volatileLoad(ushort* ptr); /// ditto
-uint   volatileLoad(uint*   ptr); /// ditto
-ulong  volatileLoad(ulong*  ptr); /// ditto
-
-void volatileStore(ubyte*  ptr, ubyte  value); /// ditto
-void volatileStore(ushort* ptr, ushort value); /// ditto
-void volatileStore(uint*   ptr, uint   value); /// ditto
-void volatileStore(ulong*  ptr, ulong  value); /// ditto
-
-@system unittest
-{
-    alias TT(T...) = T;
-
-    foreach (T; TT!(ubyte, ushort, uint, ulong))
-    {
-        T u;
-        T* p = &u;
-        volatileStore(p, 1);
-        T r = volatileLoad(p);
-        assert(r == u);
-    }
-}
+deprecated("volatileLoad has been moved to core.volatile. Use core.volatile.volatileLoad instead.")
+{
+    public import core.volatile : volatileLoad;
+}
+
+deprecated("volatileStore has been moved to core.volatile. Use core.volatile.volatileStore instead.")
+{
+    public import core.volatile : volatileStore;
+}
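Callers keep compiling through the deprecated public imports above, but new code should import from the new module directly. A minimal migration sketch for an MMIO-style read-modify-write (the function and register names are illustrative, not from the patch):

import core.volatile : volatileLoad, volatileStore;

void toggleStatusBit(ubyte* mmioReg) @system
{
    // Each access is guaranteed to be performed and kept in order
    // relative to other volatile accesses in this thread.
    ubyte v = volatileLoad(mmioReg);
    volatileStore(mmioReg, cast(ubyte)(v ^ 0x01));
}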
@@ -954,51 +947,51 @@ version (D_InlineAsm_X86_64)
 * Bitwise rotate `value` left (`rol`) or right (`ror`) by
 * `count` bit positions.
 */
-pure T rol(T)(in T value, in uint count)
+pure T rol(T)(const T value, const uint count)
    if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
    assert(count < 8 * T.sizeof);
-    return cast(T) ((value << count) | (value >> (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
}
/// ditto
-pure T ror(T)(in T value, in uint count)
+pure T ror(T)(const T value, const uint count)
    if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
    assert(count < 8 * T.sizeof);
-    return cast(T) ((value >> count) | (value << (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
}
/// ditto
-pure T rol(uint count, T)(in T value)
+pure T rol(uint count, T)(const T value)
    if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
    static assert(count < 8 * T.sizeof);
-    return cast(T) ((value << count) | (value >> (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value << count) | (value >> (T.sizeof * 8 - count)));
}
/// ditto
-pure T ror(uint count, T)(in T value)
+pure T ror(uint count, T)(const T value)
    if (__traits(isIntegral, T) && __traits(isUnsigned, T))
{
    static assert(count < 8 * T.sizeof);
-    return cast(T) ((value >> count) | (value << (-count & (T.sizeof * 8 - 1))));
+    return cast(T) ((value >> count) | (value << (T.sizeof * 8 - count)));
}

///
unittest
{
-    ubyte a = 0b10101010U;
-    ulong b = ulong.max;
+    ubyte a = 0b11110000U;
+    ulong b = ~1UL;

-    assert(rol(a, 1) == 0b01010101);
-    assert(ror(a, 1) == 0b01010101);
-    assert(rol(a, 3) == 0b01010101);
-    assert(ror(a, 3) == 0b01010101);
+    assert(rol(a, 1) == 0b11100001);
+    assert(ror(a, 1) == 0b01111000);
+    assert(rol(a, 3) == 0b10000111);
+    assert(ror(a, 3) == 0b00011110);

    assert(rol(a, 0) == a);
    assert(ror(a, 0) == a);

-    assert(rol(b, 63) == ulong.max);
-    assert(ror(b, 63) == ulong.max);
+    assert(rol(b, 63) == ~(1UL << 63));
+    assert(ror(b, 63) == ~2UL);

-    assert(rol!3(a) == 0b01010101);
-    assert(ror!3(a) == 0b01010101);
+    assert(rol!3(a) == 0b10000111);
+    assert(ror!3(a) == 0b00011110);
}
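A note on the changed formula: for `0 < count < bits` the old masked complement `-count & (T.sizeof * 8 - 1)` and the new subtractive complement `T.sizeof * 8 - count` agree; they differ only at `count == 0`, where the masked form shifts by zero while the subtractive form produces a full-width shift. Presumably the subtractive shape is what the compiler's rol/ror pattern matching expects. A quick check of the equivalence (illustrative, not from the patch):

unittest
{
    // The two complements agree for every nonzero in-range count.
    enum uint bits = uint.sizeof * 8;
    foreach (uint count; 1 .. bits)
        assert((-count & (bits - 1)) == bits - count);
}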