
Commit 5f07d97

Commit message: up

1 parent f700461 commit 5f07d97

4 files changed: 301 additions & 172 deletions


Cargo.toml

Lines changed: 27 additions & 5 deletions

```diff
@@ -1,25 +1,47 @@
 [package]
 name = "sieve-cache"
-version = "0.2.1"
+version = "1.0.0"
 edition = "2021"
-description = "SIEVE cache replacement policy"
+description = "SIEVE cache replacement policy with thread-safe wrappers"
 homepage = "https://github.com/jedisct1/rust-sieve-cache"
 repository = "https://github.com/jedisct1/rust-sieve-cache"
-keywords = ["cache", "replacement", "sieve"]
+documentation = "https://docs.rs/sieve-cache"
+keywords = ["cache", "replacement", "sieve", "thread-safe", "concurrent"]
 license = "MIT"
 authors = ["Frank Denis <github@pureftpd.org>"]
-categories = ["algorithms", "caching"]
+categories = ["algorithms", "caching", "concurrency", "data-structures"]
 readme = "README.md"
+rust-version = "1.60.0"
+
+[features]
+default = ["sync", "sharded"]
+sync = []
+sharded = []
+doctest = ["sync", "sharded"]
+
+[dependencies]
 
 [dev-dependencies]
-criterion = "0.5"
+criterion = { version = "0.5", features = ["html_reports"] }
 rand = "0.9.0"
 rand_distr = "0.5.1"
 
 [[bench]]
 name = "criterion"
 harness = false
 
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
 [profile.release]
 panic = "abort"
 opt-level = 3
+lto = true
+codegen-units = 1
+strip = true
+
+[profile.bench]
+opt-level = 3
+debug = true
+lto = "thin"
```

README.md

Lines changed: 27 additions & 0 deletions

```diff
@@ -67,6 +67,10 @@ let evicted = cache.evict(); // Returns and removes a value that wasn't recentl
 
 ## Thread-Safe Implementations
 
+These implementations are available when using the appropriate feature flags:
+- `SyncSieveCache` is available with the `sync` feature (enabled by default)
+- `ShardedSieveCache` is available with the `sharded` feature (enabled by default)
+
 ### `SyncSieveCache` - Basic Thread-Safe Cache
 
 For concurrent access from multiple threads, you can use the `SyncSieveCache` wrapper, which provides thread safety with a single global lock:
@@ -190,6 +194,29 @@ The `ShardedSieveCache` divides the cache into multiple independent segments (sh
 
 This design significantly reduces lock contention when operations are distributed across different keys, making it ideal for high-concurrency workloads.
 
+## Feature Flags
+
+This crate provides the following feature flags to control which implementations are available:
+
+- `sync`: Enables the thread-safe `SyncSieveCache` implementation (enabled by default)
+- `sharded`: Enables the sharded `ShardedSieveCache` implementation (enabled by default)
+
+If you only need specific implementations, you can select just the features you need:
+
+```toml
+# Only use the core implementation
+sieve-cache = { version = "1.0.0", default-features = false }
+
+# Only use the core and sync implementations
+sieve-cache = { version = "1.0.0", default-features = false, features = ["sync"] }
+
+# Only use the core and sharded implementations
+sieve-cache = { version = "1.0.0", default-features = false, features = ["sharded"] }
+
+# For documentation tests to work correctly
+sieve-cache = { version = "1.0.0", features = ["doctest"] }
+```
+
 ## Performance Considerations
 
 Choosing the right cache implementation depends on your workload:
```
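For context, the wrapper's `insert` and `get` take `&self` (see the `CacheInterface` adapters in benches/criterion.rs below), so a `SyncSieveCache` can be shared across threads behind a plain `Arc`. A minimal sketch, assuming only the constructor and the two methods the benchmarks exercise:

```rust
use sieve_cache::SyncSieveCache;
use std::sync::Arc;
use std::thread;

fn main() {
    // Capacity 1024 is an arbitrary example value; the constructor is
    // fallible, hence the `unwrap()` (as in the benchmark code).
    let cache = Arc::new(SyncSieveCache::new(1024).unwrap());

    let handles: Vec<_> = (0..4)
        .map(|t: u64| {
            let cache = Arc::clone(&cache);
            thread::spawn(move || {
                // `insert` and `get` take `&self`; the wrapper's single
                // global lock provides the synchronization.
                cache.insert(t, t * 10);
                assert_eq!(cache.get(&t), Some(t * 10));
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}
```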

benches/criterion.rs

Lines changed: 143 additions & 121 deletions

```diff
@@ -4,10 +4,16 @@ extern crate criterion;
 use criterion::{black_box, BatchSize, Criterion};
 use rand::prelude::*;
 use rand_distr::{Distribution, Normal};
-use sieve_cache::{ShardedSieveCache, SieveCache, SyncSieveCache};
+use sieve_cache::SieveCache;
 use std::sync::Arc;
 use std::thread;
 
+#[cfg(feature = "sync")]
+use sieve_cache::SyncSieveCache;
+
+#[cfg(feature = "sharded")]
+use sieve_cache::ShardedSieveCache;
+
 /// Benchmark sequential access patterns with the base SieveCache implementation.
 ///
 /// This benchmark measures the performance of sequential insert and get operations
@@ -108,146 +114,162 @@ fn bench_composite_normal(c: &mut Criterion) {
     });
 }
 
-/// Benchmark comparing different thread-safe cache implementations in a high-concurrency scenario.
-///
-/// This benchmark measures the performance difference between:
-/// 1. SyncSieveCache - using a single mutex for the entire cache
-/// 2. ShardedSieveCache - using multiple mutexes (default 16 shards)
-/// 3. ShardedSieveCache with 32 shards - higher shard count
-///
-/// The test simulates multiple threads performing random operations (inserts and lookups)
-/// concurrently, which should highlight the benefits of the sharded approach in
-/// reducing lock contention.
-fn bench_concurrent_access(c: &mut Criterion) {
-    let mut group = c.benchmark_group("concurrent_access");
-    group.sample_size(10); // Reduce sample size for these expensive benchmarks
-
-    // Set up benchmark parameters
-    const CACHE_SIZE: usize = 10000;
-    const NUM_THREADS: usize = 8;
-    const OPS_PER_THREAD: usize = 1000;
-
-    // Generic benchmark function to reduce code duplication
-    let run_concurrent_benchmark = |cache: Arc<dyn CacheInterface<u64, u64>>| {
-        let mut handles = Vec::with_capacity(NUM_THREADS);
-
-        for thread_id in 0..NUM_THREADS {
-            let cache_clone = Arc::clone(&cache);
-            let handle = thread::spawn(move || {
-                // Use a seeded RNG for reproducibility, with different seeds per thread
-                let mut rng = StdRng::seed_from_u64(thread_id as u64);
-
-                for i in 0..OPS_PER_THREAD {
-                    // Use a key range that creates some contention but also some distribution
-                    let key = rng.random_range(0..1000);
-
-                    // Mix operations: 40% inserts, 60% reads
-                    if i % 10 < 4 {
-                        black_box(cache_clone.insert(key, key));
-                    } else {
-                        black_box(cache_clone.get(&key));
-                    }
-                }
-            });
-            handles.push(handle);
-        }
+// Interface trait to allow treating both cache implementations uniformly
+trait CacheInterface<K, V>: Send + Sync {
+    fn insert(&self, key: K, value: V) -> bool;
+    fn get(&self, key: &K) -> Option<V>;
+}
 
-        for handle in handles {
-            handle.join().unwrap();
-        }
-    };
+// Only compile concurrent benchmark when both thread-safe implementations are available
+#[cfg(all(feature = "sync", feature = "sharded"))]
+mod concurrent_benchmarks {
+    use super::*;
 
-    // Benchmark with SyncSieveCache (single mutex)
-    group.bench_function("sync_cache", |b| {
-        b.iter_batched(
-            || {
-                // Setup for each iteration
-                Arc::new(SyncSieveCacheAdapter(
-                    SyncSieveCache::new(CACHE_SIZE).unwrap(),
-                ))
-            },
-            |cache| run_concurrent_benchmark(cache),
-            BatchSize::SmallInput,
-        );
-    });
+    /// Benchmark comparing different thread-safe cache implementations in a high-concurrency scenario.
+    ///
+    /// This benchmark measures the performance difference between:
+    /// 1. SyncSieveCache - using a single mutex for the entire cache
+    /// 2. ShardedSieveCache - using multiple mutexes (default 16 shards)
+    /// 3. ShardedSieveCache with 32 shards - higher shard count
+    ///
+    /// The test simulates multiple threads performing random operations (inserts and lookups)
+    /// concurrently, which should highlight the benefits of the sharded approach in
+    /// reducing lock contention.
+    pub fn bench_concurrent_access(c: &mut Criterion) {
+        let mut group = c.benchmark_group("concurrent_access");
+        group.sample_size(10); // Reduce sample size for these expensive benchmarks
 
-    // Benchmark with ShardedSieveCache (default: 16 mutexes)
-    group.bench_function("sharded_cache_16_shards", |b| {
-        b.iter_batched(
-            || {
-                // Setup for each iteration
-                Arc::new(ShardedSieveCacheAdapter(
-                    ShardedSieveCache::new(CACHE_SIZE).unwrap(),
-                ))
-            },
-            |cache| run_concurrent_benchmark(cache),
-            BatchSize::SmallInput,
-        );
-    });
+        // Set up benchmark parameters
+        const CACHE_SIZE: usize = 10000;
+        const NUM_THREADS: usize = 8;
+        const OPS_PER_THREAD: usize = 1000;
 
-    // Benchmark with different shard counts
-    group.bench_function("sharded_cache_32_shards", |b| {
-        b.iter_batched(
-            || {
-                // Setup for each iteration
-                Arc::new(ShardedSieveCacheAdapter(
-                    ShardedSieveCache::with_shards(CACHE_SIZE, 32).unwrap(),
-                ))
-            },
-            |cache| run_concurrent_benchmark(cache),
-            BatchSize::SmallInput,
-        );
-    });
+        // Generic benchmark function to reduce code duplication
+        let run_concurrent_benchmark = |cache: Arc<dyn CacheInterface<u64, u64>>| {
+            let mut handles = Vec::with_capacity(NUM_THREADS);
 
-    group.finish();
-}
+            for thread_id in 0..NUM_THREADS {
+                let cache_clone = Arc::clone(&cache);
+                let handle = thread::spawn(move || {
+                    // Use a seeded RNG for reproducibility, with different seeds per thread
+                    let mut rng = StdRng::seed_from_u64(thread_id as u64);
 
-// Interface trait to allow treating both cache implementations uniformly
-trait CacheInterface<K, V>: Send + Sync {
-    fn insert(&self, key: K, value: V) -> bool;
-    fn get(&self, key: &K) -> Option<V>;
-}
+                    for i in 0..OPS_PER_THREAD {
+                        // Use a key range that creates some contention but also some distribution
+                        let key = rng.random_range(0..1000);
 
-// Adapter for SyncSieveCache
-struct SyncSieveCacheAdapter<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync>(
-    SyncSieveCache<K, V>,
-);
+                        // Mix operations: 40% inserts, 60% reads
+                        if i % 10 < 4 {
+                            black_box(cache_clone.insert(key, key));
+                        } else {
+                            black_box(cache_clone.get(&key));
+                        }
+                    }
+                });
+                handles.push(handle);
+            }
 
-impl<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync> CacheInterface<K, V>
-    for SyncSieveCacheAdapter<K, V>
-{
-    fn insert(&self, key: K, value: V) -> bool {
-        self.0.insert(key, value)
-    }
+            for handle in handles {
+                handle.join().unwrap();
+            }
+        };
+
+        // Benchmark with SyncSieveCache (single mutex)
+        group.bench_function("sync_cache", |b| {
+            b.iter_batched(
+                || {
+                    // Setup for each iteration
+                    Arc::new(SyncSieveCacheAdapter(
+                        SyncSieveCache::new(CACHE_SIZE).unwrap(),
+                    ))
+                },
+                |cache| run_concurrent_benchmark(cache),
+                BatchSize::SmallInput,
+            );
+        });
+
+        // Benchmark with ShardedSieveCache (default: 16 mutexes)
+        group.bench_function("sharded_cache_16_shards", |b| {
+            b.iter_batched(
+                || {
+                    // Setup for each iteration
+                    Arc::new(ShardedSieveCacheAdapter(
+                        ShardedSieveCache::new(CACHE_SIZE).unwrap(),
+                    ))
+                },
+                |cache| run_concurrent_benchmark(cache),
+                BatchSize::SmallInput,
+            );
+        });
 
-    fn get(&self, key: &K) -> Option<V> {
-        self.0.get(key)
+        // Benchmark with different shard counts
+        group.bench_function("sharded_cache_32_shards", |b| {
+            b.iter_batched(
+                || {
+                    // Setup for each iteration
+                    Arc::new(ShardedSieveCacheAdapter(
+                        ShardedSieveCache::with_shards(CACHE_SIZE, 32).unwrap(),
+                    ))
+                },
+                |cache| run_concurrent_benchmark(cache),
+                BatchSize::SmallInput,
+            );
+        });
+
+        group.finish();
     }
-}
 
-// Adapter for ShardedSieveCache
-struct ShardedSieveCacheAdapter<
-    K: Eq + std::hash::Hash + Clone + Send + Sync,
-    V: Clone + Send + Sync,
->(ShardedSieveCache<K, V>);
-
-impl<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync> CacheInterface<K, V>
-    for ShardedSieveCacheAdapter<K, V>
-{
-    fn insert(&self, key: K, value: V) -> bool {
-        self.0.insert(key, value)
+    // Adapter for SyncSieveCache
+    struct SyncSieveCacheAdapter<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync>(
+        SyncSieveCache<K, V>,
+    );
+
+    impl<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync> CacheInterface<K, V>
+        for SyncSieveCacheAdapter<K, V>
+    {
+        fn insert(&self, key: K, value: V) -> bool {
+            self.0.insert(key, value)
+        }
+
+        fn get(&self, key: &K) -> Option<V> {
+            self.0.get(key)
+        }
     }
 
-    fn get(&self, key: &K) -> Option<V> {
-        self.0.get(key)
+    // Adapter for ShardedSieveCache
+    struct ShardedSieveCacheAdapter<
+        K: Eq + std::hash::Hash + Clone + Send + Sync,
+        V: Clone + Send + Sync,
+    >(ShardedSieveCache<K, V>);
+
+    impl<K: Eq + std::hash::Hash + Clone + Send + Sync, V: Clone + Send + Sync> CacheInterface<K, V>
+        for ShardedSieveCacheAdapter<K, V>
+    {
+        fn insert(&self, key: K, value: V) -> bool {
+            self.0.insert(key, value)
+        }
+
+        fn get(&self, key: &K) -> Option<V> {
+            self.0.get(key)
+        }
     }
 }
 
+#[cfg(all(feature = "sync", feature = "sharded"))]
 criterion_group!(
     benches,
     bench_sequence,
     bench_composite,
     bench_composite_normal,
-    bench_concurrent_access
+    concurrent_benchmarks::bench_concurrent_access
 );
+
+#[cfg(not(all(feature = "sync", feature = "sharded")))]
+criterion_group!(
    benches,
+    bench_sequence,
+    bench_composite,
+    bench_composite_normal
+);
+
 criterion_main!(benches);
```
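The `#[cfg]` gates above mean the bench target still compiles under reduced feature sets, e.g. `cargo bench --no-default-features --features sync`; only the concurrent comparison drops out of the `criterion_group!`. For reference, a minimal sketch of the two `ShardedSieveCache` constructors the benchmark exercises, reusing its capacity and shard-count values:

```rust
use sieve_cache::ShardedSieveCache;

fn main() {
    // `new` uses the default shard count (16, per the benchmark comments).
    let cache: ShardedSieveCache<u64, u64> = ShardedSieveCache::new(10_000).unwrap();

    // `with_shards` sets an explicit shard count, as in `sharded_cache_32_shards`.
    let cache32: ShardedSieveCache<u64, u64> =
        ShardedSieveCache::with_shards(10_000, 32).unwrap();

    // `insert` and `get` take `&self`, matching the `CacheInterface` adapters.
    cache.insert(1, 100);
    assert_eq!(cache.get(&1), Some(100));
    cache32.insert(2, 200);
    assert_eq!(cache32.get(&2), Some(200));
}
```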
