@@ -12,50 +12,7 @@ module core.atomic;

import core.internal.atomic;
import core.internal.attributes : betterC;
-
-version (D_InlineAsm_X86)
-{
-    version = AsmX86;
-    version = AsmX86_32;
-    enum has64BitXCHG = false;
-    enum has64BitCAS = true;
-    enum has128BitCAS = false;
-}
-else version (D_InlineAsm_X86_64)
-{
-    version = AsmX86;
-    version = AsmX86_64;
-    enum has64BitXCHG = true;
-    enum has64BitCAS = true;
-    enum has128BitCAS = true;
-}
-else
-{
-    enum has64BitXCHG = false;
-    enum has64BitCAS = false;
-    enum has128BitCAS = false;
-}
-
-version (AsmX86)
-{
-    // NOTE: Strictly speaking, the x86 supports atomic operations on
-    //       unaligned values. However, this is far slower than the
-    //       common case, so such behavior should be prohibited.
-    private bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
-    {
-        return atomicPtrIsProperlyAligned(&val);
-    }
-
-    private bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
-    {
-        // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
-        //       4 byte alignment, so use size_t as the align type here.
-        static if (T.sizeof > size_t.sizeof)
-            return cast(size_t)ptr % size_t.sizeof == 0;
-        else
-            return cast(size_t)ptr % T.sizeof == 0;
-    }
-}
+import core.internal.traits : hasUnsharedIndirections;

/**
 * Specifies the memory ordering semantics of an atomic operation.
@@ -109,7 +66,8 @@ enum MemoryOrder
 * Returns:
 *  The value of 'val'.
 */
-TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref const shared T val) pure nothrow @nogc @trusted
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref T val) pure nothrow @nogc @trusted
+    if (!is(T == shared U, U) && !is(T == shared inout U, U) && !is(T == shared const U, U))
{
    static if (__traits(isFloating, T))
    {
@@ -118,10 +76,30 @@ TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref const shared T
        return *cast(T*)&r;
    }
    else
-    {
-        T r = core.internal.atomic.atomicLoad!ms(cast(T*)&val);
-        return *cast(TailShared!T*)&r;
-    }
+        return core.internal.atomic.atomicLoad!ms(&val);
+}
+
+/// Ditto
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val) pure nothrow @nogc @trusted
+    if (!hasUnsharedIndirections!T)
+{
+    import core.internal.traits : hasUnsharedIndirections;
+    static assert(!hasUnsharedIndirections!T, "Copying `shared " ~ T.stringof ~ "` would violate shared.");
+
+    return atomicLoad!ms(*cast(T*)&val);
+}
+
+/// Ditto
+TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val) pure nothrow @nogc @trusted
+    if (hasUnsharedIndirections!T)
+{
+    // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
+    // this is here because code exists in the wild that does this...
+
+    import core.lifetime : move;
+
+    T r = core.internal.atomic.atomicLoad!ms(cast(T*)&val);
+    return move(*cast(TailShared!T*)&r);
}

/**
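Note (editorial, not part of the patch): a minimal usage sketch of the new `atomicLoad` overload set shown above, assuming only the signatures in this hunk. The shared overload applies to values with no unshared indirections; the unshared overload is new.

@safe unittest
{
    shared int counter = 5;
    assert(atomicLoad(counter) == 5);                   // shared overload, no unshared indirections
    assert(atomicLoad!(MemoryOrder.acq)(counter) == 5); // explicit memory ordering

    int local = 7;
    assert(atomicLoad(local) == 7);                     // new overload for unshared lvalues
}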
@@ -134,8 +112,8 @@ TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)(ref const shared T
 *  val    = The target variable.
 *  newval = The value to store.
 */
-void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
-    if (__traits(compiles, { val = newval; }))
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref T val, V newval) pure nothrow @nogc @trusted
+    if (__traits(compiles, { val = newval; }) && !is(T == shared S, S) && !is(V == shared U, U))
{
    static if (__traits(isFloating, T))
    {
@@ -144,7 +122,31 @@ void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V ne
        core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&newval);
    }
    else
-        core.internal.atomic.atomicStore!ms(cast(T*)&val, newval);
+        core.internal.atomic.atomicStore!ms(&val, newval);
+}
+
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V newval) pure nothrow @nogc @trusted
+    if (__traits(compiles, { val = newval; }) && !is(T == class))
+{
+    static if (is(V == shared U, U))
+        alias Thunk = U;
+    else
+    {
+        import core.internal.traits : hasUnsharedIndirections;
+        static assert(!hasUnsharedIndirections!V, "Copying unshared argument `newval` to shared `val` would violate shared.");
+        alias Thunk = V;
+    }
+    atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
+}
+
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, shared V newval) pure nothrow @nogc @trusted
+    if (is(T == class))
+{
+    static assert(is(V : T), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
+
+    core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
}

/**
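Note (editorial, not part of the patch): a brief usage sketch covering the three `atomicStore` overloads above; the class `C` is a hypothetical type used purely for illustration.

@safe unittest
{
    shared int flag;
    atomicStore(flag, 1);                    // shared, non-class overload
    assert(atomicLoad(flag) == 1);

    int local;
    atomicStore!(MemoryOrder.rel)(local, 2); // new overload for unshared lvalues

    static class C { }
    shared C obj;
    atomicStore(obj, new shared C);          // class overload requires a shared rhs
}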
@@ -158,7 +160,7 @@ void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)(ref shared T val, V ne
 * Returns:
 *  The value held previously by `val`.
 */
-TailShared!(T) atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
+TailShared!T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
    if (__traits(isIntegral, T))
in (atomicValueIsProperlyAligned(val))
{
@@ -176,7 +178,7 @@ in (atomicValueIsProperlyAligned(val))
 * Returns:
 *  The value held previously by `val`.
 */
-TailShared!(T) atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
+TailShared!T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)(ref shared T val, size_t mod) pure nothrow @nogc @trusted
    if (__traits(isIntegral, T))
in (atomicValueIsProperlyAligned(val))
{
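Note (editorial, not part of the patch): both fetch operations return the value held before the update, as documented above. A minimal sketch:

@safe unittest
{
    shared uint n = 10;
    assert(atomicFetchAdd(n, 3) == 10); // returns the previous value
    assert(atomicLoad(n) == 13);
    assert(atomicFetchSub(n, 3) == 13);
    assert(atomicLoad(n) == 10);
}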
@@ -323,7 +325,6 @@ void atomicFence() nothrow @nogc @safe
    core.internal.atomic.atomicFence();
}

-
/**
 * Performs the binary operation 'op' on val using 'mod' as the modifier.
 *
@@ -385,11 +386,54 @@ in (atomicValueIsProperlyAligned(val))
    }
}

+
+version (X86)
+{
+    version = IsX86;
+    enum has64BitXCHG = false;
+    enum has64BitCAS = true;
+    enum has128BitCAS = false;
+}
+else version (X86_64)
+{
+    version = IsX86;
+    enum has64BitXCHG = true;
+    enum has64BitCAS = true;
+    enum has128BitCAS = true;
+}
+else
+{
+    enum has64BitXCHG = false;
+    enum has64BitCAS = false;
+    enum has128BitCAS = false;
+}
+
private
{
+    version (IsX86)
+    {
+        // NOTE: Strictly speaking, the x86 supports atomic operations on
+        //       unaligned values. However, this is far slower than the
+        //       common case, so such behavior should be prohibited.
+        bool atomicValueIsProperlyAligned(T)(ref T val) pure nothrow @nogc @trusted
+        {
+            return atomicPtrIsProperlyAligned(&val);
+        }
+
+        bool atomicPtrIsProperlyAligned(T)(T* ptr) pure nothrow @nogc @safe
+        {
+            // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
+            //       4 byte alignment, so use size_t as the align type here.
+            static if (T.sizeof > size_t.sizeof)
+                return cast(size_t)ptr % size_t.sizeof == 0;
+            else
+                return cast(size_t)ptr % T.sizeof == 0;
+        }
+    }
+
    template IntForFloat(F)
+        if (__traits(isFloating, F))
    {
-        static assert(__traits(isFloating, F), "Not a floating point type: " ~ F.stringof);
        static if (F.sizeof == 4)
            alias IntForFloat = uint;
        else static if (F.sizeof == 8)
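Note (editorial, not part of the patch): `IntForFloat` exists so that `float` and `double` values can be funnelled through the integer load/store primitives as raw bit patterns, as the `cast(IntTy*)` code above already does. A small sketch of that reinterpretation:

unittest
{
    float f = 1.0f;
    uint bits = *cast(uint*)&f;   // same reinterpretation atomicStore performs via IntForFloat
    assert(bits == 0x3F80_0000);  // IEEE 754 bit pattern of 1.0f
}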
@@ -398,6 +442,34 @@ private
            static assert(false, "Invalid floating point type: " ~ F.stringof ~ ", only support `float` and `double`.");
    }

+    template IntForStruct(S)
+        if (is(S == struct))
+    {
+        static if (S.sizeof == 1)
+            alias IntForStruct = ubyte;
+        else static if (S.sizeof == 2)
+            alias IntForStruct = ushort;
+        else static if (S.sizeof == 4)
+            alias IntForStruct = uint;
+        else static if (S.sizeof == 8)
+            alias IntForStruct = ulong;
+        else static if (S.sizeof == 16)
+            alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
+        else
+            static assert(ValidateStruct!S);
+    }
+
+    template ValidateStruct(S)
+        if (is(S == struct))
+    {
+        import core.internal.traits : hasElaborateAssign;
+
+        static assert(S.sizeof <= size_t.sizeof * 2 && (S.sizeof & (S.sizeof - 1)) == 0, S.stringof ~ " has invalid size for atomic operations.");
+        static assert(!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
+
+        enum ValidateStruct = true;
+    }
+
    // TODO: it'd be nice if we had @trusted scopes; we could remove this...
    bool casByRef(T, V1, V2)(ref T value, V1 ifThis, V2 writeThis) pure nothrow @nogc @trusted
    {
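Note (editorial, not part of the patch): `ValidateStruct` only accepts structs whose size is a power of two no larger than two machine words and which have no elaborate assignment. A sketch of the size rule with made-up struct names:

unittest
{
    struct Ok  { int x, y; }      // 8 bytes: power of two, maps to ulong
    struct Too { short a, b, c; } // 6 bytes: not a power of two, would be rejected

    static assert((Ok.sizeof  & (Ok.sizeof  - 1)) == 0);
    static assert((Too.sizeof & (Too.sizeof - 1)) != 0);
}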
@@ -795,7 +867,7 @@ version (unittest)
        assert(atomicOp!"+="(i8, 8) == 13);
        assert(atomicOp!"+="(i16, 8) == 14);
        assert(atomicOp!"+="(i32, 8) == 15);
-        version (AsmX86_64)
+        version (D_LP64)
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
@@ -819,7 +891,7 @@ version (unittest)
        assert(atomicOp!"-="(i8, 1) == 4);
        assert(atomicOp!"-="(i16, 1) == 5);
        assert(atomicOp!"-="(i32, 1) == 6);
-        version (AsmX86_64)
+        version (D_LP64)
        {
            shared ulong u64 = 4;
            shared long i64 = 8;
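Note (editorial, not part of the patch): the switch from `version (AsmX86_64)` to `version (D_LP64)` gates the 64-bit test blocks on pointer width rather than on the inline-asm backend. The same pattern in isolation, assuming only that `D_LP64` is set on 64-bit targets:

unittest
{
    version (D_LP64)
    {
        shared ulong u64 = 4;
        assert(atomicOp!"+="(u64, 8) == 12); // 64-bit atomics exercised only on 64-bit targets
    }
}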