@@ -1,4 +1,3 @@
-use core_affinity::CoreId;
 use crossbeam_queue::ArrayQueue;
 use itertools::Itertools;
 use spacetimedb_paths::server::{ConfigToml, LogsDir};
@@ -16,6 +15,8 @@ use tracing_subscriber::{reload, EnvFilter};
 use crate::config::{ConfigFile, LogConfig};
 use crate::util::jobs::JobCores;
 
+pub use core_affinity::CoreId;
+
 pub struct TracingOptions {
     pub config: LogConfig,
     /// Whether or not to periodically reload the log config in the background.
@@ -196,6 +197,13 @@ pub struct CoreReservations {
     ///
     /// Default: 1/8
     pub rayon: f64,
+    /// Cores to reserve for IRQ handling.
+    ///
+    /// This will be the first `n` [`CoreId`]s in the list.
+    /// Only make use of this if you're configuring the machine for IRQ pinning!
+    ///
+    /// Default: 2
+    pub irq: usize,
     /// Extra reserved cores.
     ///
     /// If greater than zero, this many cores will be reserved _before_
@@ -211,6 +219,7 @@ impl Default for CoreReservations {
             databases: 1.0 / 8.0,
             tokio_workers: 4.0 / 8.0,
             rayon: 1.0 / 8.0,
+            irq: 2,
             reserved: 0,
         }
     }
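With these defaults, two cores are carved off for IRQ handling before any of the fractional splits. A minimal sketch of overriding that on a host without IRQ pinning (hypothetical call site, using struct-update syntax; not part of this diff):

```rust
// Sketch only: keep the fractional defaults but skip the IRQ reservation
// on machines that aren't configured for IRQ pinning.
let reservations = CoreReservations {
    irq: 0,
    ..CoreReservations::default()
};
```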
@@ -221,13 +230,15 @@ impl CoreReservations {
     ///
     /// Returns the allocated cores in the order:
     ///
+    /// - irq
     /// - reserved
     /// - databases
     /// - tokio_workers
     /// - rayon
     ///
     /// Left public for testing and debugging purposes.
-    pub fn apply(&self, cores: &mut Vec<CoreId>) -> [Vec<CoreId>; 4] {
+    pub fn apply(&self, cores: &mut Vec<CoreId>) -> [Vec<CoreId>; 5] {
+        let irq = cores.drain(..self.irq).collect_vec();
         let reserved = cores.drain(..self.reserved).collect_vec();
 
         let total = cores.len() as f64;
@@ -240,7 +251,7 @@ impl CoreReservations {
         let tokio_workers = claim(cores, frac(self.tokio_workers)).collect_vec();
         let rayon = claim(cores, frac(self.rayon)).collect_vec();
 
-        [reserved, databases, tokio_workers, rayon]
+        [irq, reserved, databases, tokio_workers, rayon]
     }
 }
 
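To make the new return shape concrete, here is a rough usage sketch (hypothetical call site; the exact sizes of the fractional buckets depend on `frac`/`claim`, which this diff doesn't show):

```rust
// Sketch: partition the host's cores with the defaults (irq: 2, reserved: 0).
let mut cores = Cores::get_core_ids().expect("not enough cores");
let [irq, reserved, _databases, _tokio_workers, _rayon] =
    CoreReservations::default().apply(&mut cores);

// The first two CoreIds land in `irq`; actually pinning NIC interrupt
// handlers to them is left to whoever configures the machine.
assert_eq!(irq.len(), 2);
assert!(reserved.is_empty());
```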
@@ -273,7 +284,7 @@ impl Cores {
     fn get(reservations: CoreReservations) -> Option<Self> {
         let mut cores = Self::get_core_ids()?;
 
-        let [reserved, databases, tokio_workers, rayon] = reservations.apply(&mut cores);
+        let [_irq, reserved, databases, tokio_workers, rayon] = reservations.apply(&mut cores);
 
         let reserved = (!reserved.is_empty()).then(|| reserved.into());
         let databases = databases.into_iter().collect::<JobCores>();
@@ -306,20 +317,12 @@ impl Cores {
 
     /// Get the cores of the local host, as reported by the operating system.
     ///
-    /// Cores 0 and 1 are not included in the returned vec, as we reserve them
-    /// for the operating system.
-    ///
-    /// Returns `None` if `num_cpus - 2` is less than 8.
+    /// Returns `None` if `num_cpus` is less than 8.
     /// If `Some` is returned, the `Vec` is non-empty.
     pub fn get_core_ids() -> Option<Vec<CoreId>> {
         let cores = core_affinity::get_core_ids()
             .filter(|cores| cores.len() >= 10)?
             .into_iter()
-            // We reserve the first two cores for the OS.
-            // This allows us to pin interrupt handlers (IRQs) to these cores,
-            // particularly those for incoming network traffic,
-            // preventing them from preempting the main reducer threads.
-            .filter(|core_id| core_id.id > 1)
             .collect_vec();
 
         (!cores.is_empty()).then_some(cores)
@@ -328,14 +331,14 @@ impl Cores {
 
 #[derive(Default)]
 pub struct TokioCores {
-    workers: Option<Vec<CoreId>>,
+    pub workers: Option<Vec<CoreId>>,
     // For blocking threads, we don't want to limit them to a specific number
     // and pin them to their own cores - they're supposed to run concurrently
     // with each other. However, `core_affinity` doesn't support affinity masks,
     // so we just use the Linux-specific API, since this is only a slight boost
     // and we don't care enough about performance on other platforms.
     #[cfg(target_os = "linux")]
-    blocking: Option<nix::sched::CpuSet>,
+    pub blocking: Option<nix::sched::CpuSet>,
 }
 
 impl TokioCores {
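Since `workers` and `blocking` are now `pub`, code outside this module can build or inspect the layout directly. A small sketch (illustrative names only; the real wiring in `impl TokioCores` isn't shown here):

```rust
// Sketch: construct a TokioCores by hand; `worker_core_ids` is illustrative.
// The cfg-gated `blocking` field is filled in via Default.
let tokio_cores = TokioCores {
    workers: Some(worker_core_ids),
    ..TokioCores::default()
};
if let Some(workers) = &tokio_cores.workers {
    println!("pinning {} tokio worker threads", workers.len());
}
```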