Skip to content

Commit 1a81340

Browse files
committed
Add test cases
1 parent d3ed860 commit 1a81340

File tree

2 files changed

+259
-0
lines changed

2 files changed

+259
-0
lines changed
Lines changed: 257 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,257 @@
1+
// ignore-windows: Concurrency on Windows is not supported yet.
2+
// compile-flags: -Zmiri-ignore-leaks -Zmiri-disable-stacked-borrows
3+
4+
// Weak memory emulation tests. All of the following test if
5+
// our weak memory emulation produces any inconsistent execution outcomes
6+
//
7+
// Due to the random nature of choosing valid stores, it is always
8+
// possible that our tests spuriously succeed: even though our weak
9+
// memory emulation code has incorrectly identified a store in
10+
// modification order as being valid, it may be never chosen by
11+
// the RNG and never observed in our tests.
12+
//
13+
// To mitigate this, each test is run enough times such that the chance
14+
// of spurious success is very low. These tests never spuriously fail.
15+
//
16+
// Note that we can't effectively test whether our weak memory emulation
17+
// can produce *all* consistent execution outcomes. This may be possible
18+
// if Miri's scheduler is sufficiently random and explores all possible
19+
// interleavings of our small test cases after a reasonable number of runs.
20+
// However, since Miri's scheduler is not even pre-emptive, there will
21+
// always be possible interleavings (and possible execution outcomes),
22+
// that can never be observed regardless of how weak memory emulation is
23+
// implemented.
24+
25+
// Test cases and their consistent outcomes are from
26+
// http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/
27+
// Based on
28+
// M. Batty, S. Owens, S. Sarkar, P. Sewell and T. Weber,
29+
// "Mathematizing C++ concurrency", ACM SIGPLAN Notices, vol. 46, no. 1, pp. 55-66, 2011.
30+
// Available: https://ss265.host.cs.st-andrews.ac.uk/papers/n3132.pdf.
31+
32+
use std::sync::atomic::Ordering::*;
33+
use std::sync::atomic::{fence, AtomicUsize};
34+
use std::thread::{spawn, yield_now};
35+
36+
// Wrapper that unconditionally marks its payload as `Send`/`Sync` so raw
// pointers can be captured by `spawn`ed closures in these tests.
#[derive(Copy, Clone)]
struct EvilSend<T>(pub T);

// SAFETY: deliberately unsound in general (hence the name); the tests
// using this are responsible for synchronizing all cross-thread accesses
// themselves (e.g. via release/acquire pairs).
unsafe impl<T> Send for EvilSend<T> {}
unsafe impl<T> Sync for EvilSend<T> {}
42+
// We can't use `static` items because each test function is run multiple
// times per process and every run needs a fresh atomic; leaking a boxed
// atomic gives us a fresh `&'static` on every call instead.
fn static_atomic(val: usize) -> &'static AtomicUsize {
    let cell: &'static AtomicUsize = Box::leak(Box::new(AtomicUsize::new(val)));
    // Workaround: an explicit store puts the initialisation value into the
    // store buffer as well.
    cell.store(val, Relaxed);
    cell
}
50+
51+
// Spins (yielding each iteration) until an Acquire load of `loc` observes
// the pre-determined value `val`, then returns `val`.
fn acquires_value(loc: &AtomicUsize, val: usize) -> usize {
    loop {
        if loc.load(Acquire) == val {
            break val;
        }
        yield_now();
    }
}
58+
59+
// Like `acquires_value`, but polls with Relaxed loads: spins until `loc`
// is observed to hold `val` without using an Acquire ordering.
fn reads_value(loc: &AtomicUsize, val: usize) -> usize {
    loop {
        match loc.load(Relaxed) {
            v if v == val => return val,
            _ => yield_now(),
        }
    }
}
65+
66+
// https://plv.mpi-sws.org/scfix/paper.pdf
// 2.2 Second Problem: SC Fences are Too Weak
// RWC+syncs: three threads with relaxed accesses separated by SC fences.
// Once thread 2 has observed x == 1, the paper's strengthened SC-fence
// semantics rule out both fenced loads reading 0, i.e. (b, c) == (0, 0)
// is an inconsistent outcome.
fn test_rwc_syncs() {
    /*
    int main() {
        atomic_int x = 0;
        atomic_int y = 0;

        {{{ x.store(1,mo_relaxed);
        ||| { r1=x.load(mo_relaxed).readsvalue(1);
              fence(mo_seq_cst);
              r2=y.load(mo_relaxed); }
        ||| { y.store(1,mo_relaxed);
              fence(mo_seq_cst);
              r3=x.load(mo_relaxed); }
        }}}
        return 0;
    }
    */
    let x = static_atomic(0);
    let y = static_atomic(0);

    // Writer: a single relaxed store to x.
    let j1 = spawn(move || {
        x.store(1, Relaxed);
    });

    // Waits (relaxed, no synchronization) until x == 1, then fence + read y.
    let j2 = spawn(move || {
        reads_value(&x, 1);
        fence(SeqCst);
        y.load(Relaxed)
    });

    // Stores y, then fence + read x.
    let j3 = spawn(move || {
        y.store(1, Relaxed);
        fence(SeqCst);
        x.load(Relaxed)
    });

    j1.join().unwrap();
    let b = j2.join().unwrap();
    let c = j3.join().unwrap();

    // (0, 0) would mean both fenced loads missed the other thread's store.
    assert_ne!((b, c), (0, 0));
}
110+
111+
// CoRR: read-read coherence. Two reads of the same location that are
// ordered by happens-before must observe x's modification order
// consistently (the later read may not see an earlier value).
fn test_corr() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    // Writer: x's modification order is 0 -> 1 -> 2.
    let j1 = spawn(move || {
        x.store(1, Relaxed);
        x.store(2, Relaxed);
    });

    let j2 = spawn(move || {
        let r2 = x.load(Relaxed); // -------------------------------------+
        y.store(1, Release); // ---------------------+                    |
        r2 //                                        |                    |
    }); //                                           |                    |
    //                                               |synchronizes-with   |happens-before
    let j3 = spawn(move || { //                      |                    |
        acquires_value(&y, 1); // <------------------+                    |
        x.load(Relaxed) // <----------------------------------------------+
        // The two reads on x are ordered by hb, so they cannot observe values
        // differently from the modification order. If the first read observed
        // 2, then the second read must observe 2 as well.
    });

    j1.join().unwrap();
    let r2 = j2.join().unwrap();
    let r3 = j3.join().unwrap();
    if r2 == 2 {
        assert_eq!(r3, 2);
    }
}
141+
142+
// WRC: write-to-read causality. Release/acquire synchronization composes
// transitively: j3 acquires y == 1, which j2 released after acquiring
// x == 1, so j3's final load must also observe x == 1.
fn test_wrc() {
    let x = static_atomic(0);
    let y = static_atomic(0);

    let j1 = spawn(move || {
        x.store(1, Release); // ---------------------+---------------------+
    }); //                                           |                     |
    //                                               |synchronizes-with    |
    let j2 = spawn(move || { //                      |                     |
        acquires_value(&x, 1); // <------------------+                     |
        y.store(1, Release); // ---------------------+                     |happens-before
    }); //                                           |                     |
    //                                               |synchronizes-with    |
    let j3 = spawn(move || { //                      |                     |
        acquires_value(&y, 1); // <------------------+                     |
        x.load(Relaxed) // <-----------------------------------------------+
    });

    j1.join().unwrap();
    j2.join().unwrap();
    let r3 = j3.join().unwrap();

    assert_eq!(r3, 1);
}
166+
167+
// Classic message passing: a non-atomic write published by a Release
// store must be visible after the corresponding Acquire load observes
// the flag. The raw pointer is smuggled across threads via `EvilSend`.
fn test_message_passing() {
    let mut var = 0u32;
    let ptr = &mut var as *mut u32;
    let x = EvilSend(ptr);
    let y = static_atomic(0);

    let j1 = spawn(move || {
        unsafe { *x.0 = 1 }; // -----------------------------------------+
        y.store(1, Release); // ---------------------+                   |
    }); //                                           |                   |
    //                                               |synchronizes-with  | happens-before
    let j2 = spawn(move || { //                      |                   |
        acquires_value(&y, 1); // <------------------+                   |
        unsafe { *x.0 } // <---------------------------------------------+
    });

    j1.join().unwrap();
    let r2 = j2.join().unwrap();

    assert_eq!(r2, 1);
}
188+
189+
// LB+acq_rel+acq_rel
// Load buffering: each thread loads one location (Acquire) and then
// stores 1 to the other (Release). With acquire/release orderings, both
// loads observing the other thread's store — (r1, r2) == (1, 1) — is not
// a consistent outcome.
fn test_load_buffering_acq_rel() {
    let x = static_atomic(0);
    let y = static_atomic(0);
    let j1 = spawn(move || {
        let r1 = x.load(Acquire);
        y.store(1, Release);
        r1
    });

    let j2 = spawn(move || {
        let r2 = y.load(Acquire);
        x.store(1, Release);
        r2
    });

    let r1 = j1.join().unwrap();
    let r2 = j2.join().unwrap();

    // 3 consistent outcomes: (0,0), (0,1), (1,0)
    assert_ne!((r1, r2), (1, 1));
}
211+
212+
// Accesses to the same atomic from both spawned threads and the spawning
// thread itself. Thread creation and join induce happens-before edges,
// so the final read is ordered after both stores and must observe the
// last value in modification order (2).
fn test_mixed_access() {
    /*
    int main() {
      atomic_int x = 0;
      {{{
        x.store(1, mo_relaxed);
      }}}

      x.store(2, mo_relaxed);

      {{{
        r1 = x.load(mo_relaxed);
      }}}

      return 0;
    }
    */
    let x = static_atomic(0);

    // Store 1 on a thread that is fully joined before anything else runs.
    spawn(move || {
        x.store(1, Relaxed);
    })
    .join()
    .unwrap();

    x.store(2, Relaxed);

    // Spawned after the store of 2, so it must read 2.
    let r2 = spawn(move || x.load(Relaxed)).join().unwrap();

    assert_eq!(r2, 2);
}
243+
244+
pub fn main() {
245+
// TODO: does this make chances of spurious success
246+
// "sufficiently low"? This also takes a long time to run,
247+
// prehaps each function should be its own test case so they
248+
// can be run in parallel
249+
for _ in 0..500 {
250+
test_mixed_access();
251+
test_load_buffering_acq_rel();
252+
test_message_passing();
253+
test_wrc();
254+
test_corr();
255+
test_rwc_syncs();
256+
}
257+
}
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
warning: thread support is experimental and incomplete: weak memory effects are not emulated.
2+

0 commit comments

Comments
 (0)