@@ -11,6 +11,25 @@ module core.internal.hash;
11
11
12
12
import core.internal.convert ;
13
13
14
// If true, ensure that positive zero and negative zero have the same hash.
// typeid(float).getHash does this, but historically hashOf(float) did not.
private enum floatCoalesceZeroes = false;

// If true, ensure that all NaNs of the same floating-point type have the same hash.
// typeid(float).getHash does not do this, but historically hashOf(float) did.
private enum floatCoalesceNaNs = true;

// BUG: if either of the above is true then no struct or array that contains the
// representation of a floating-point number should be hashed with `bytesHash`,
// but this is currently disregarded.
// Verify that the coalescing policies declared above actually hold for hashOf.
@nogc nothrow pure @safe unittest
{
    static if (floatCoalesceZeroes)
        assert(hashOf(+0.0) == hashOf(-0.0)); // Same hash for +0.0 and -0.0.
    static if (floatCoalesceNaNs)
        assert(hashOf(double.nan) == hashOf(-double.nan)); // Same hash for different NaNs.
}
14
33
// enum hash. CTFE depends on base type
15
34
size_t hashOf (T)(auto ref T val, size_t seed = 0 ) if (is (T == enum ))
16
35
{
@@ -86,12 +105,66 @@ size_t hashOf(T)(auto ref T val, size_t seed = 0) if (!is(T == enum) && __traits
86
105
{
87
106
// Floating-point path: canonicalize the value according to the
// floatCoalesceZeroes / floatCoalesceNaNs policy flags, then hash its
// bit pattern.
static if (__traits(isFloating, val))
88
107
{
89
- T data = (val != val) ? T.nan : val;
90
- return bytesHashAlignedBy! T(toUbyte(data), seed);
108
+ static if (floatCoalesceZeroes || floatCoalesceNaNs)
109
+ {
110
+ auto data = val;
111
+ // Zero coalescing not supported for deprecated complex types.
112
+ static if (floatCoalesceZeroes && is (typeof (data = 0 )))
113
+ if (data == 0 ) data = 0 ; // +0.0 and -0.0 become the same.
114
+ static if (floatCoalesceNaNs)
115
+ if (data != data) data = T.nan; // All NaN patterns become the same.
116
+ }
117
+ else
118
+ {
119
// No canonicalization requested: hash the value's bits as-is.
+ alias data = val;
120
+ }
121
+
122
// If the canonicalized value has exactly the size and precision of float
// or double, reinterpret its bit pattern as a same-sized unsigned integer
// and recurse into the fast integral path below.
+ static if (T.mant_dig == float .mant_dig && T.sizeof == uint .sizeof)
123
+ return hashOf (* cast (const uint * ) &data, seed);
124
+ else static if (T.mant_dig == double .mant_dig && T.sizeof == ulong .sizeof)
125
+ return hashOf (* cast (const ulong * ) &data, seed);
126
+ else
127
// Other layouts (e.g. 80-bit real, complex types) fall back to
// byte-wise hashing of the canonicalized representation.
+ return bytesHashAlignedBy! T(toUbyte(data), seed);
91
128
}
92
129
else
93
130
{
94
- return bytesHashAlignedBy! T(toUbyte(val), seed);
131
// Integrals no larger than a word: mix with a single MurmurHash3-style
// round instead of hashing the bytes.
+ static if (T.sizeof <= size_t .sizeof && __traits(isIntegral, T))
132
+ {
133
+ static if (size_t .sizeof < ulong .sizeof)
134
+ {
135
+ // MurmurHash3 32-bit single round
136
+ enum uint c1 = 0xcc9e2d51 ;
137
+ enum uint c2 = 0x1b873593 ;
138
+ enum uint c3 = 0xe6546b64 ;
139
+ enum uint r1 = 15 ;
140
+ enum uint r2 = 13 ;
141
+ }
142
+ else
143
+ {
144
+ // Half of MurmurHash3 64-bit single round
145
+ // (omits second interleaved update)
146
+ enum ulong c1 = 0x87c37b91114253d5 ;
147
+ enum ulong c2 = 0x4cf5ad432745937f ;
148
+ enum ulong c3 = 0x52dce729 ;
149
+ enum uint r1 = 31 ;
150
+ enum uint r2 = 27 ;
151
+ }
152
// Multiply, rotate, mix in the seed, rotate again, finalize.
// (>>> is D's unsigned shift, so the rotations are logical.)
+ auto h = c1 * val;
153
+ h = (h << r1) | (h >>> (typeof (h).sizeof * 8 - r1));
154
+ h = (h * c2) ^ seed;
155
+ h = (h << r2) | (h >>> (typeof (h).sizeof * 8 - r2));
156
+ return h * 5 + c3;
157
+ }
158
// Integrals wider than a word (e.g. ulong on a 32-bit target): hash one
// word-sized chunk at a time, chaining each result through the seed.
+ else static if (T.sizeof > size_t .sizeof && __traits(isIntegral, T))
159
+ {
160
+ static foreach (i; 0 .. T.sizeof / size_t .sizeof)
161
+ seed = hashOf(cast (size_t ) (val >>> (size_t .sizeof * 8 * i)), seed);
162
+ return seed;
163
+ }
164
// Any remaining scalar kind falls back to byte-wise hashing.
// NOTE(review): presumably pointers and character types land here — confirm
// against the template constraint, which is truncated in this view.
+ else
165
+ {
166
+ return bytesHashAlignedBy! T(toUbyte(val), seed);
167
+ }
95
168
}
96
169
}
97
170
0 commit comments