
Commit 9183a7d

Better handling of various permutations of shared-ness.

1 parent 4120ca7

2 files changed: +131 −59

src/core/atomic.d

Lines changed: 130 additions & 58 deletions
@@ -12,50 +12,7 @@ module core.atomic;
 
 import core.internal.atomic;
 import core.internal.attributes : betterC;
-
-version (D_InlineAsm_X86)
-{
-    version = AsmX86;
-    version = AsmX86_32;
-    enum has64BitXCHG = false;
-    enum has64BitCAS = true;
-    enum has128BitCAS = false;
-}
-else version (D_InlineAsm_X86_64)
-{
-    version = AsmX86;
-    version = AsmX86_64;
-    enum has64BitXCHG = true;
-    enum has64BitCAS = true;
-    enum has128BitCAS = true;
-}
-else
-{
-    enum has64BitXCHG = false;
-    enum has64BitCAS = false;
-    enum has128BitCAS = false;
-}
-
-version (AsmX86)
-{
-    // NOTE: Strictly speaking, the x86 supports atomic operations on
-    //       unaligned values. However, this is far slower than the
-    //       common case, so such behavior should be prohibited.
-    private bool atomicValueIsProperlyAligned(T)( ref T val ) pure nothrow @nogc @trusted
-    {
-        return atomicPtrIsProperlyAligned(&val);
-    }
-
-    private bool atomicPtrIsProperlyAligned(T)( T* ptr ) pure nothrow @nogc @safe
-    {
-        // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
-        //       4 byte alignment, so use size_t as the align type here.
-        static if ( T.sizeof > size_t.sizeof )
-            return cast(size_t)ptr % size_t.sizeof == 0;
-        else
-            return cast(size_t)ptr % T.sizeof == 0;
-    }
-}
+import core.internal.traits : hasUnsharedIndirections;
 
 /**
  * Specifies the memory ordering semantics of an atomic operation.
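
The deleted platform blocks reappear further down the module; the one real addition here is the import of core.internal.traits : hasUnsharedIndirections, on which the new atomicLoad/atomicStore constraints are built. Roughly, the trait reports whether copying a value of the type would leak unshared references out of shared data; a small sketch of the distinction it draws (the example structs are illustrative, not from this commit):

    import core.internal.traits : hasUnsharedIndirections;

    struct Flat    { int x; }           // no indirections: a copy is just bits
    struct Leaky   { int* p; }          // unshared pointer: a copy would leak unshared access
    struct Guarded { shared(int)* p; }  // the pointee is itself shared: safe to copy

    static assert(!hasUnsharedIndirections!Flat);
    static assert( hasUnsharedIndirections!Leaky);
    static assert(!hasUnsharedIndirections!Guarded);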
@@ -109,7 +66,8 @@ enum MemoryOrder
  * Returns:
  *  The value of 'val'.
  */
-TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T val ) pure nothrow @nogc @trusted
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref T val ) pure nothrow @nogc @trusted
+if ( !is( T == shared U, U ) && !is( T == shared inout U, U ) && !is( T == shared const U, U ) )
 {
     static if ( __traits(isFloating, T) )
     {
@@ -118,10 +76,30 @@ TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T
         return *cast(T*)&r;
     }
     else
-    {
-        T r = core.internal.atomic.atomicLoad!ms(cast(T*)&val);
-        return *cast(TailShared!T*)&r;
-    }
+        return core.internal.atomic.atomicLoad!ms(&val);
+}
+
+/// Ditto
+T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val ) pure nothrow @nogc @trusted
+if ( !hasUnsharedIndirections!T )
+{
+    import core.internal.traits : hasUnsharedIndirections;
+    static assert(!hasUnsharedIndirections!T, "Copying `shared " ~ T.stringof ~ "` would violate shared.");
+
+    return atomicLoad!ms(*cast(T*)&val);
+}
+
+/// Ditto
+TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val ) pure nothrow @nogc @trusted
+if ( hasUnsharedIndirections!T )
+{
+    // HACK: DEPRECATE THIS FUNCTION, IT IS INVALID TO DO ATOMIC LOAD OF SHARED CLASS
+    // this is here because code exists in the wild that does this...
+
+    import core.lifetime : move;
+
+    T r = core.internal.atomic.atomicLoad!ms(cast(T*)&val);
+    return move(*cast(TailShared!T*)&r);
 }
 
 /**
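
The load path is now split three ways by shared-ness: plain unshared values, shared values that can be copied out whole, and (kept only for legacy code) shared values with unshared indirections, which still go through TailShared. A minimal usage sketch under the new overload set (class C is illustrative):

    import core.atomic;

    shared int counter;
    int a = atomicLoad(counter);                  // shared overload: int has no indirections

    int local;
    int b = atomicLoad!(MemoryOrder.acq)(local);  // new unshared overload

    class C { int x; }
    shared C obj;
    auto r = atomicLoad(obj);                     // legacy TailShared path, flagged for deprecation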
@@ -134,8 +112,8 @@ TailShared!T atomicLoad(MemoryOrder ms = MemoryOrder.seq, T)( ref const shared T
  *  val = The target variable.
  *  newval = The value to store.
  */
-void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref shared T val, V newval ) pure nothrow @nogc @trusted
-if ( __traits( compiles, { val = newval; } ) )
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref T val, V newval ) pure nothrow @nogc @trusted
+if ( __traits( compiles, { val = newval; } ) && !is(T == shared S, S) && !is(V == shared U, U) )
 {
     static if ( __traits(isFloating, T) )
     {
@@ -144,7 +122,31 @@ void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref shared T val, V ne
         core.internal.atomic.atomicStore!ms(cast(IntTy*)&val, *cast(IntTy*)&newval);
     }
     else
-        core.internal.atomic.atomicStore!ms(cast(T*)&val, newval);
+        core.internal.atomic.atomicStore!ms(&val, newval);
+}
+
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref shared T val, V newval ) pure nothrow @nogc @trusted
+if ( __traits( compiles, { val = newval; } ) && !is( T == class ) )
+{
+    static if ( is ( V == shared U, U ) )
+        alias Thunk = U;
+    else
+    {
+        import core.internal.traits : hasUnsharedIndirections;
+        static assert(!hasUnsharedIndirections!V, "Copying unshared argument `newval` to shared `val` would violate shared.");
+        alias Thunk = V;
+    }
+    atomicStore!ms(*cast(T*)&val, *cast(Thunk*)&newval);
+}
+
+/// Ditto
+void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref shared T val, shared V newval ) pure nothrow @nogc @trusted
+if ( is( T == class ) )
+{
+    static assert ( is ( V : T ), "Can't assign `newval` of type `shared " ~ V.stringof ~ "` to `shared " ~ T.stringof ~ "`.");
+
+    core.internal.atomic.atomicStore!ms(cast(T*)&val, cast(V)newval);
 }
 
 /**
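
The store side mirrors the same split: an unshared overload, a shared overload whose Thunk alias either strips shared from newval or proves the copy harmless, and a class overload that insists both references are shared. A hedged usage sketch (Node is illustrative):

    import core.atomic;

    shared int flag;
    atomicStore(flag, 1);                 // shared target, unshared scalar argument

    int plain;
    atomicStore(plain, 2);                // new unshared overload

    class Node { int v; }
    shared Node head;
    atomicStore(head, new shared(Node));  // class overload: newval must itself be shared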
@@ -158,7 +160,7 @@ void atomicStore(MemoryOrder ms = MemoryOrder.seq, T, V)( ref shared T val, V ne
  * Returns:
  *  The value held previously by `val`.
  */
-TailShared!(T) atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
+TailShared!T atomicFetchAdd(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
 if ( __traits(isIntegral, T) )
 in ( atomicValueIsProperlyAligned(val) )
 {
@@ -176,7 +178,7 @@ in ( atomicValueIsProperlyAligned(val) )
  * Returns:
  *  The value held previously by `val`.
  */
-TailShared!(T) atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
+TailShared!T atomicFetchSub(MemoryOrder ms = MemoryOrder.seq, T)( ref shared T val, size_t mod ) pure nothrow @nogc @trusted
 if ( __traits(isIntegral, T) )
 in ( atomicValueIsProperlyAligned(val) )
 {
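
Only the return-type spelling changes here (TailShared!(T) to TailShared!T), but the contract both functions document is worth keeping in mind: they return the value held before the modification. A minimal sketch:

    import core.atomic;

    shared int count = 10;
    assert(atomicFetchAdd(count, 5) == 10); // old value comes back...
    assert(atomicLoad(count) == 15);        // ...and the new value is visible afterwards
    assert(atomicFetchSub(count, 5) == 15);
    assert(atomicLoad(count) == 10);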
@@ -323,7 +325,6 @@ void atomicFence() nothrow @nogc @safe
     core.internal.atomic.atomicFence();
 }
 
-
 /**
  * Performs the binary operation 'op' on val using 'mod' as the modifier.
  *
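
atomicFence itself is untouched here (only a stray blank line goes away). For context, a sketch of the publish pattern its full barrier supports (an illustrative pairing, not from this commit):

    import core.atomic;

    shared int  data;
    shared bool ready;

    // writer side
    atomicStore!(MemoryOrder.raw)(data, 42);
    atomicFence();                              // orders the data store before the flag store
    atomicStore!(MemoryOrder.raw)(ready, true);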
@@ -385,11 +386,54 @@ in ( atomicValueIsProperlyAligned( val ) )
     }
 }
 
+
+version (X86)
+{
+    version = IsX86;
+    enum has64BitXCHG = false;
+    enum has64BitCAS = true;
+    enum has128BitCAS = false;
+}
+else version (X86_64)
+{
+    version = IsX86;
+    enum has64BitXCHG = true;
+    enum has64BitCAS = true;
+    enum has128BitCAS = true;
+}
+else
+{
+    enum has64BitXCHG = false;
+    enum has64BitCAS = false;
+    enum has128BitCAS = false;
+}
+
 private
 {
+    version (IsX86)
+    {
+        // NOTE: Strictly speaking, the x86 supports atomic operations on
+        //       unaligned values. However, this is far slower than the
+        //       common case, so such behavior should be prohibited.
+        bool atomicValueIsProperlyAligned(T)( ref T val ) pure nothrow @nogc @trusted
+        {
+            return atomicPtrIsProperlyAligned(&val);
+        }
+
+        bool atomicPtrIsProperlyAligned(T)( T* ptr ) pure nothrow @nogc @safe
+        {
+            // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
+            //       4 byte alignment, so use size_t as the align type here.
+            static if ( T.sizeof > size_t.sizeof )
+                return cast(size_t)ptr % size_t.sizeof == 0;
+            else
+                return cast(size_t)ptr % T.sizeof == 0;
+        }
+    }
+
     template IntForFloat(F)
+    if (__traits(isFloating, F))
     {
-        static assert ( __traits(isFloating, F), "Not a floating point type: " ~ F.stringof );
         static if ( F.sizeof == 4 )
             alias IntForFloat = uint;
         else static if ( F.sizeof == 8 )
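
The platform enums and alignment helpers return here, below the public API: they now key off the architecture versions X86/X86_64 instead of the inline-asm backend versions, and drop their private qualifiers because they sit inside a private block. The IntForFloat template next to them maps a floating-point type to a same-size unsigned integer; a sketch of the mapping, only meaningful inside the module since the template is private:

    // Inside core.atomic these hold by construction:
    static assert(is(IntForFloat!float  == uint));
    static assert(is(IntForFloat!double == ulong));

    // ...and this is the pun the floating-point branches of atomicLoad/atomicStore rely on:
    double d = 1.5;
    ulong bits = *cast(IntForFloat!double*)&d;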
@@ -398,6 +442,34 @@ private
             static assert ( false, "Invalid floating point type: " ~ F.stringof ~ ", only support `float` and `double`." );
     }
 
+    template IntForStruct(S)
+    if (is(S == struct))
+    {
+        static if ( S.sizeof == 1 )
+            alias IntForStruct = ubyte;
+        else static if ( S.sizeof == 2 )
+            alias IntForStruct = ushort;
+        else static if ( S.sizeof == 4 )
+            alias IntForStruct = uint;
+        else static if ( S.sizeof == 8 )
+            alias IntForStruct = ulong;
+        else static if ( S.sizeof == 16 )
+            alias IntForStruct = ulong[2]; // TODO: what's the best type here? slice/delegates pass in registers...
+        else
+            static assert (ValidateStruct!S);
+    }
+
+    template ValidateStruct(S)
+    if (is(S == struct))
+    {
+        import core.internal.traits : hasElaborateAssign;
+
+        static assert (S.sizeof <= size_t.sizeof * 2 && (S.sizeof & (S.sizeof - 1)) == 0, S.stringof ~ " has invalid size for atomic operations.");
+        static assert (!hasElaborateAssign!S, S.stringof ~ " may not have an elaborate assignment when used with atomic operations.");
+
+        enum ValidateStruct = true;
+    }
+
     // TODO: it'd be nice if we had @trusted scopes; we could remove this...
     bool casByRef(T,V1,V2)( ref T value, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
     {
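
IntForStruct extends the same idea to POD structs, mapping them to a same-size unsigned integer so they can ride the scalar atomic paths, while ValidateStruct rejects sizes that are not a power of two (or exceed two words) and types with elaborate assignment. A sketch of what they accept and reject, again only valid inside the module (the example structs are hypothetical):

    struct Pair { int a, b; }  // 8 bytes, plain assignment: rides the ulong path
    static assert(is(IntForStruct!Pair == ulong));

    struct Odd { byte[3] b; }  // 3 bytes: not a power of two, so ValidateStruct fires
    static assert(!__traits(compiles, IntForStruct!Odd));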
@@ -795,7 +867,7 @@ version (unittest)
         assert(atomicOp!"+="(i8, 8) == 13);
         assert(atomicOp!"+="(i16, 8) == 14);
         assert(atomicOp!"+="(i32, 8) == 15);
-        version (AsmX86_64)
+        version (D_LP64)
         {
             shared ulong u64 = 4;
             shared long i64 = 8;
@@ -819,7 +891,7 @@ version (unittest)
         assert(atomicOp!"-="(i8, 1) == 4);
         assert(atomicOp!"-="(i16, 1) == 5);
        assert(atomicOp!"-="(i32, 1) == 6);
-        version (AsmX86_64)
+        version (D_LP64)
         {
             shared ulong u64 = 4;
             shared long i64 = 8;
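
Switching the test gates from version (AsmX86_64) to version (D_LP64) keys them off pointer width rather than the x86-64 inline-asm backend, so any 64-bit target exercises the 64-bit paths; a minimal sketch of the gating:

    version (D_LP64)
    {
        // Runs on any target with 64-bit pointers (x86_64, AArch64, ...),
        // not only builds using the x86-64 inline-assembly backend.
        shared ulong u64 = 4;
        assert(atomicOp!"+="(u64, 8) == 12);
    }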

src/core/internal/atomic.d

Lines changed: 1 addition & 1 deletion
@@ -47,7 +47,7 @@ private
     enum SizedReg(int reg, T = size_t) = registerNames[reg][RegIndex!T];
 }
 
-T atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(T* src) pure nothrow @nogc @trusted
+inout(T) atomicLoad(MemoryOrder order = MemoryOrder.seq, T)(inout(T)* src) pure nothrow @nogc @trusted
 if (CanCAS!T)
 {
     static assert(order != MemoryOrder.rel, "invalid MemoryOrder for atomicLoad()");
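
Widening the internal atomicLoad to inout(T)* lets a single implementation serve mutable, const, and immutable sources, which is what the qualifier-agnostic public overloads above forward to. A hedged sketch of what the inout signature admits (this is an internal API, shown for illustration only):

    int       m = 1;
    const int c = 2;

    assert(atomicLoad(&m) == 1); // inout binds mutable:  returns int
    assert(atomicLoad(&c) == 2); // inout binds const:    returns const(int)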
