1
1
use oorandom:: Rand64 ;
2
2
use parking_lot:: Mutex ;
3
3
use std:: fmt:: Debug ;
4
- use std:: sync:: atomic:: AtomicUsize ;
4
+ use std:: sync:: atomic:: AtomicU16 ;
5
5
use std:: sync:: atomic:: Ordering ;
6
6
use triomphe:: Arc ;
7
7
/// A fixed-capacity LRU cache of `Node`s. The entry list is segmented
/// into three zones — green (hottest), yellow, red (coldest) — and
/// eviction victims are drawn from the red zone.
pub(crate) struct Lru<Node>
where
    Node: LruNode,
{
    // Size of the green zone, stored atomically alongside the mutex.
    // NOTE(review): only its initialization to 0 is visible in this
    // chunk — presumably it mirrors `LruData::end_green_zone` so it can
    // be read without locking; confirm against the rest of the file.
    green_zone: AtomicU16,
    // All list state (zone boundaries, RNG, entries), behind a mutex.
    data: Mutex<LruData<Node>>,
}
26
26
27
27
/// Interior state of the LRU list. `entries` is partitioned into three
/// contiguous index zones: green is `0..end_green_zone`, yellow is
/// `end_green_zone..end_yellow_zone`, and red is
/// `end_yellow_zone..end_red_zone` (see the `*_zone` accessors).
#[derive(Debug)]
struct LruData<Node> {
    // Exclusive upper bound of the red zone; doubles as the total
    // capacity (inserts only push while `entries.len() < end_red_zone`).
    end_red_zone: u16,
    // Exclusive upper bound of the yellow zone.
    end_yellow_zone: u16,
    // Exclusive upper bound of the green zone.
    end_green_zone: u16,
    // RNG used by `pick_index` to choose a random slot within a zone.
    rng: Rand64,
    // The cached nodes, ordered green | yellow | red by index.
    entries: Vec<Arc<Node>>,
}
@@ -39,9 +39,9 @@ pub(crate) trait LruNode: Sized + Debug {
39
39
40
40
/// A node's slot within the LRU `entries` vector, stored on the node
/// itself so it can be found (and moved) in O(1).
#[derive(Debug)]
pub(crate) struct LruIndex {
    /// Index in the appropriate LRU list, or `u16::MAX` if not a
    /// member.
    index: AtomicU16,
}
46
46
47
47
impl < Node > Default for Lru < Node >
@@ -68,12 +68,12 @@ where
68
68
69
69
#[ cfg_attr( not( test) , allow( dead_code) ) ]
70
70
fn with_seed ( seed : & str ) -> Self {
71
- Lru { green_zone : AtomicUsize :: new ( 0 ) , data : Mutex :: new ( LruData :: with_seed ( seed) ) }
71
+ Lru { green_zone : AtomicU16 :: new ( 0 ) , data : Mutex :: new ( LruData :: with_seed ( seed) ) }
72
72
}
73
73
74
74
/// Adjust the total number of nodes permitted to have a value at
75
75
/// once. If `len` is zero, this disables LRU caching completely.
76
- pub ( crate ) fn set_lru_capacity ( & self , len : usize ) {
76
+ pub ( crate ) fn set_lru_capacity ( & self , len : u16 ) {
77
77
let mut data = self . data . lock ( ) ;
78
78
79
79
// We require each zone to have at least 1 slot. Therefore,
@@ -143,23 +143,24 @@ where
143
143
LruData { end_yellow_zone : 0 , end_green_zone : 0 , end_red_zone : 0 , entries : Vec :: new ( ) , rng }
144
144
}
145
145
146
- fn green_zone ( & self ) -> std:: ops:: Range < usize > {
146
+ fn green_zone ( & self ) -> std:: ops:: Range < u16 > {
147
147
0 ..self . end_green_zone
148
148
}
149
149
150
- fn yellow_zone ( & self ) -> std:: ops:: Range < usize > {
150
+ fn yellow_zone ( & self ) -> std:: ops:: Range < u16 > {
151
151
self . end_green_zone ..self . end_yellow_zone
152
152
}
153
153
154
- fn red_zone ( & self ) -> std:: ops:: Range < usize > {
154
+ fn red_zone ( & self ) -> std:: ops:: Range < u16 > {
155
155
self . end_yellow_zone ..self . end_red_zone
156
156
}
157
157
158
- fn resize ( & mut self , len_green_zone : usize , len_yellow_zone : usize , len_red_zone : usize ) {
158
+ fn resize ( & mut self , len_green_zone : u16 , len_yellow_zone : u16 , len_red_zone : u16 ) {
159
159
self . end_green_zone = len_green_zone;
160
160
self . end_yellow_zone = self . end_green_zone + len_yellow_zone;
161
161
self . end_red_zone = self . end_yellow_zone + len_red_zone;
162
- let entries = std:: mem:: replace ( & mut self . entries , Vec :: with_capacity ( self . end_red_zone ) ) ;
162
+ let entries =
163
+ std:: mem:: replace ( & mut self . entries , Vec :: with_capacity ( self . end_red_zone as usize ) ) ;
163
164
164
165
tracing:: debug!( "green_zone = {:?}" , self . green_zone( ) ) ;
165
166
tracing:: debug!( "yellow_zone = {:?}" , self . yellow_zone( ) ) ;
@@ -207,7 +208,7 @@ where
207
208
208
209
// Easy case: we still have capacity. Push it, and then promote
209
210
// it up to the appropriate zone.
210
- let len = self . entries . len ( ) ;
211
+ let len = self . entries . len ( ) as u16 ;
211
212
if len < self . end_red_zone {
212
213
self . entries . push ( node. clone ( ) ) ;
213
214
node. lru_index ( ) . store ( len) ;
@@ -218,7 +219,7 @@ where
218
219
// Harder case: no capacity. Create some by evicting somebody from red
219
220
// zone and then promoting.
220
221
let victim_index = self . pick_index ( self . red_zone ( ) ) ;
221
- let victim_node = std:: mem:: replace ( & mut self . entries [ victim_index] , node. clone ( ) ) ;
222
+ let victim_node = std:: mem:: replace ( & mut self . entries [ victim_index as usize ] , node. clone ( ) ) ;
222
223
tracing:: debug!( "evicting red node {:?} from {}" , victim_node, victim_index) ;
223
224
victim_node. lru_index ( ) . clear ( ) ;
224
225
self . promote_red_to_green ( node, victim_index) ;
@@ -231,7 +232,7 @@ where
231
232
///
232
233
/// NB: It is not required that `node.lru_index()` is up-to-date
233
234
/// when entering this method.
234
- fn promote_red_to_green ( & mut self , node : & Arc < Node > , red_index : usize ) {
235
+ fn promote_red_to_green ( & mut self , node : & Arc < Node > , red_index : u16 ) {
235
236
debug_assert ! ( self . red_zone( ) . contains( & red_index) ) ;
236
237
237
238
// Pick a yellow at random and switch places with it.
@@ -242,12 +243,12 @@ where
242
243
let yellow_index = self . pick_index ( self . yellow_zone ( ) ) ;
243
244
tracing:: debug!(
244
245
"demoting yellow node {:?} from {} to red at {}" ,
245
- self . entries[ yellow_index] ,
246
+ self . entries[ yellow_index as usize ] ,
246
247
yellow_index,
247
248
red_index,
248
249
) ;
249
- self . entries . swap ( yellow_index, red_index) ;
250
- self . entries [ red_index] . lru_index ( ) . store ( red_index) ;
250
+ self . entries . swap ( yellow_index as usize , red_index as usize ) ;
251
+ self . entries [ red_index as usize ] . lru_index ( ) . store ( red_index) ;
251
252
252
253
// Now move ourselves up into the green zone.
253
254
self . promote_yellow_to_green ( node, yellow_index) ;
@@ -259,51 +260,51 @@ where
259
260
///
260
261
/// NB: It is not required that `node.lru_index()` is up-to-date
261
262
/// when entering this method.
262
- fn promote_yellow_to_green ( & mut self , node : & Arc < Node > , yellow_index : usize ) {
263
+ fn promote_yellow_to_green ( & mut self , node : & Arc < Node > , yellow_index : u16 ) {
263
264
debug_assert ! ( self . yellow_zone( ) . contains( & yellow_index) ) ;
264
265
265
266
// Pick a yellow at random and switch places with it.
266
267
let green_index = self . pick_index ( self . green_zone ( ) ) ;
267
268
tracing:: debug!(
268
269
"demoting green node {:?} from {} to yellow at {}" ,
269
- self . entries[ green_index] ,
270
+ self . entries[ green_index as usize ] ,
270
271
green_index,
271
272
yellow_index
272
273
) ;
273
- self . entries . swap ( green_index, yellow_index) ;
274
- self . entries [ yellow_index] . lru_index ( ) . store ( yellow_index) ;
274
+ self . entries . swap ( green_index as usize , yellow_index as usize ) ;
275
+ self . entries [ yellow_index as usize ] . lru_index ( ) . store ( yellow_index) ;
275
276
node. lru_index ( ) . store ( green_index) ;
276
277
277
278
tracing:: debug!( "promoted {:?} to green index {}" , node, green_index) ;
278
279
}
279
280
280
- fn pick_index ( & mut self , zone : std:: ops:: Range < usize > ) -> usize {
281
- let end_index = std:: cmp:: min ( zone. end , self . entries . len ( ) ) ;
282
- self . rng . rand_range ( zone. start as u64 ..end_index as u64 ) as usize
281
+ fn pick_index ( & mut self , zone : std:: ops:: Range < u16 > ) -> u16 {
282
+ let end_index = std:: cmp:: min ( zone. end , self . entries . len ( ) as u16 ) ;
283
+ self . rng . rand_range ( zone. start as u64 ..end_index as u64 ) as u16
283
284
}
284
285
}
285
286
286
287
impl Default for LruIndex {
287
288
fn default ( ) -> Self {
288
- Self { index : AtomicUsize :: new ( usize :: MAX ) }
289
+ Self { index : AtomicU16 :: new ( u16 :: MAX ) }
289
290
}
290
291
}
291
292
292
293
impl LruIndex {
    /// Current slot in the LRU list; `u16::MAX` means "not a member".
    fn load(&self) -> u16 {
        self.index.load(Ordering::Acquire) // see note on ordering below
    }

    /// Records the slot this node now occupies.
    fn store(&self, value: u16) {
        self.index.store(value, Ordering::Release) // see note on ordering below
    }

    /// Marks this node as no longer belonging to any LRU list.
    fn clear(&self) {
        self.store(u16::MAX);
    }

    /// Whether this node currently occupies a slot in an LRU list.
    fn is_in_lru(&self) -> bool {
        self.load() != u16::MAX
    }
}
309
310
0 commit comments