Commit 4bde81c

Import test
1 parent 6994570 commit 4bde81c

File tree: 1 file changed (+312, −0 lines)

tests/thread_safety_test.rs

Lines changed: 312 additions & 0 deletions
@@ -0,0 +1,312 @@
//! Thread safety stress test for SieveCache implementations.
//!
//! This test verifies that the thread-safe implementations of SieveCache
//! properly handle concurrent operations without data races or other
//! thread safety issues.

use sieve_cache::{ShardedSieveCache, SyncSieveCache};
use std::sync::{Arc, Barrier};
use std::thread;
use std::time::Duration;

const NUM_THREADS: usize = 8;
const OPERATIONS_PER_THREAD: usize = 5000;
const CACHE_CAPACITY: usize = 1000;
const SHARDED_CACHE_SHARDS: usize = 16;

/// Test concurrent operations on SyncSieveCache
#[test]
fn test_sync_cache_concurrent_operations() {
    let cache = Arc::new(SyncSieveCache::new(CACHE_CAPACITY).unwrap());
    let barrier = Arc::new(Barrier::new(NUM_THREADS));

    // Pre-populate the cache with some values
    for i in 0..100 {
        cache.insert(format!("init_key{}", i), i);
    }

    let mut handles = Vec::with_capacity(NUM_THREADS);

    for thread_id in 0..NUM_THREADS {
        let cache_clone = Arc::clone(&cache);
        let barrier_clone = Arc::clone(&barrier);

        let handle = thread::spawn(move || {
            // Wait for all threads to be ready
            barrier_clone.wait();

            for i in 0..OPERATIONS_PER_THREAD {
                let op = i % 5;
                let key = format!("key{}_{}", thread_id, i % 200);

                match op {
                    0 => {
                        // Insert operation
                        cache_clone.insert(key, i);
                    }
                    1 => {
                        // Get operation
                        let _ = cache_clone.get(&key);
                    }
                    2 => {
                        // Remove operation
                        let _ = cache_clone.remove(&key);
                    }
                    3 => {
                        // get_mut operation
                        cache_clone.get_mut(&key, |value| {
                            *value += 1;
                        });
                    }
                    4 => {
                        // Roughly every 1000 iterations, use one of the more complex
                        // operations. This arm only runs when i % 5 == 4, so the
                        // remainder checked here must also be congruent to 4 for the
                        // branch to fire at all.
                        if i % 1000 == 4 {
                            match i % 3 {
                                0 => {
                                    // for_each_value
                                    cache_clone.for_each_value(|value| {
                                        *value += 1;
                                    });
                                }
                                1 => {
                                    // for_each_entry
                                    cache_clone.for_each_entry(|(key, value)| {
                                        if key.contains("_50") {
                                            *value *= 2;
                                        }
                                    });
                                }
                                2 => {
                                    // Use the new batch retain operation occasionally
                                    cache_clone.retain_batch(|key, _| {
                                        !key.contains(&format!("_{}", thread_id))
                                    });
                                }
                                _ => unreachable!(),
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
        });

        handles.push(handle);
    }

    // Wait for all threads to complete
    for handle in handles {
        handle.join().unwrap();
    }

    // Verify the cache is still functional
    let len = cache.len();
    println!("SyncSieveCache final size: {}", len);

    // Insert and retrieve a final value to confirm functionality
    cache.insert("final_key".to_string(), 999);
    assert_eq!(cache.get(&"final_key".to_string()), Some(999));
}

/// Test concurrent operations on ShardedSieveCache
#[test]
fn test_sharded_cache_concurrent_operations() {
    let cache =
        Arc::new(ShardedSieveCache::with_shards(CACHE_CAPACITY, SHARDED_CACHE_SHARDS).unwrap());
    let barrier = Arc::new(Barrier::new(NUM_THREADS));

    // Pre-populate the cache with some values
    for i in 0..100 {
        cache.insert(format!("init_key{}", i), i);
    }

    let mut handles = Vec::with_capacity(NUM_THREADS);

    for thread_id in 0..NUM_THREADS {
        let cache_clone = Arc::clone(&cache);
        let barrier_clone = Arc::clone(&barrier);

        let handle = thread::spawn(move || {
            // Wait for all threads to be ready
            barrier_clone.wait();

            for i in 0..OPERATIONS_PER_THREAD {
                let op = i % 5;
                let key = format!("key{}_{}", thread_id, i % 200);

                match op {
                    0 => {
                        // Insert operation
                        cache_clone.insert(key, i);
                    }
                    1 => {
                        // Get operation
                        let _ = cache_clone.get(&key);
                    }
                    2 => {
                        // Remove operation
                        let _ = cache_clone.remove(&key);
                    }
                    3 => {
                        // get_mut operation
                        cache_clone.get_mut(&key, |value| {
                            *value += 1;
                        });
                    }
                    4 => {
                        // Roughly every 1000 iterations, use one of the more complex
                        // operations. As in the sync test, the remainder checked here
                        // must be congruent to 4 so it can coincide with op == 4.
                        if i % 1000 == 4 {
                            match i % 3 {
                                0 => {
                                    // for_each_value
                                    cache_clone.for_each_value(|value| {
                                        *value += 1;
                                    });
                                }
                                1 => {
                                    // for_each_entry
                                    cache_clone.for_each_entry(|(key, value)| {
                                        if key.contains("_50") {
                                            *value *= 2;
                                        }
                                    });
                                }
                                2 => {
                                    // retain
                                    cache_clone
                                        .retain(|key, _| !key.contains(&format!("_{}", thread_id)));
                                }
                                _ => unreachable!(),
                            }
                        }
                    }
                    _ => unreachable!(),
                }
            }
        });

        handles.push(handle);
    }

    // Wait for all threads to complete
    for handle in handles {
        handle.join().unwrap();
    }

    // Verify the cache is still functional
    let len = cache.len();
    println!("ShardedSieveCache final size: {}", len);

    // Insert and retrieve a final value to confirm functionality
    cache.insert("final_key".to_string(), 999);
    assert_eq!(cache.get(&"final_key".to_string()), Some(999));
}

/// Test specific race conditions that could occur in the retain operation
#[test]
fn test_retain_race_conditions() {
    let cache = Arc::new(SyncSieveCache::new(CACHE_CAPACITY).unwrap());

    // Pre-populate the cache with values
    for i in 0..500 {
        cache.insert(format!("key{}", i), i);
    }

    // Thread 1: Continuously modifies values
    let cache_clone1 = Arc::clone(&cache);
    let modifier = thread::spawn(move || {
        for i in 0..50 {
            // Modify existing values
            for j in 0..500 {
                let key = format!("key{}", j);
                cache_clone1.get_mut(&key, |value| {
                    *value += 1;
                });
            }
            // Add new values
            for j in 0..10 {
                let new_key = format!("new_key{}_{}", i, j);
                cache_clone1.insert(new_key, j);
            }
            thread::sleep(Duration::from_millis(1));
        }
    });

    // Thread 2: Performs retain operations while Thread 1 is modifying
    let cache_clone2 = Arc::clone(&cache);
    let retainer = thread::spawn(move || {
        for i in 0..10 {
            // Use standard retain
            if i % 2 == 0 {
                cache_clone2.retain(|key, value| {
                    // Keep only even-valued entries and keys not containing "new"
                    (*value % 2 == 0) || !key.contains("new")
                });
            } else {
                // Use batch retain
                cache_clone2.retain_batch(|key, value| {
                    // Keep only odd-valued entries and keys not containing "new"
                    (*value % 2 == 1) || !key.contains("new")
                });
            }
            thread::sleep(Duration::from_millis(5));
        }
    });

    // Wait for both threads to finish
    modifier.join().unwrap();
    retainer.join().unwrap();

    // Verify the cache is still functional
    let len = cache.len();
    println!("Cache size after retain race test: {}", len);

    // Insert and retrieve one more value to confirm functionality
    cache.insert("after_race_test".to_string(), 1000);
    assert_eq!(cache.get(&"after_race_test".to_string()), Some(1000));
}

/// Test with_lock operations for deadlock prevention
#[test]
fn test_with_lock_operation() {
    let cache = Arc::new(SyncSieveCache::new(100).unwrap());
    cache.insert("key1".to_string(), 1);

    // Test basic with_lock operation
    cache.with_lock(|inner_cache| {
        inner_cache.insert("key2".to_string(), 2);
    });

    assert!(cache.len() > 0);
}

/// Test nested get inside get_mut for deadlock prevention
#[test]
fn test_nested_get_during_mut() {
    let cache = Arc::new(SyncSieveCache::new(100).unwrap());
    cache.insert("key1".to_string(), 1);
    cache.insert("key2".to_string(), 2);

    // Simple nested get operation from within get_mut
    cache.get_mut(&"key1".to_string(), |val| {
        *val += 1;
        // This would deadlock if our implementation were flawed
        let _ = cache.get(&"key2".to_string());
    });

    assert!(cache.len() > 0);
}

/// Test simple get_mut operation for deadlock prevention
#[test]
fn test_get_mut_operation() {
    let cache = Arc::new(SyncSieveCache::new(100).unwrap());
    cache.insert("key1".to_string(), 1);

    // Simple get_mut operation
    cache.get_mut(&"key1".to_string(), |val| {
        *val += 1;
    });

    assert!(cache.len() > 0);
}
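
Since the file is added under tests/thread_safety_test.rs, it builds as a standard Cargo integration test; assuming the usual project layout, running only this stress test should work with:

    cargo test --test thread_safety_test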
