@@ -26,7 +26,9 @@ pub struct TlsEntry<'tcx> {
 
 #[derive(Clone, Debug)]
 struct RunningDtorsState {
-    /// The last TlsKey used to retrieve a TLS destructor.
+    /// The last TlsKey used to retrieve a TLS destructor. `None` means that we
+    /// have not tried to retrieve a TLS destructor yet or that we already tried
+    /// all keys.
     last_dtor_key: Option<TlsKey>,
 }
 
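The documented `None`-vs-exhausted semantics make `last_dtor_key` a resumable cursor over the key space: each scheduling step fetches the first key strictly greater than the last one handled. Below is a minimal standalone sketch of that pattern over a `BTreeMap`; the `u128` key alias and the `next_key` helper are invented for illustration and are not Miri's actual `fetch_tls_dtor`.

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

type TlsKey = u128; // stand-in for Miri's TlsKey

/// Yield the smallest key strictly greater than `last`, or the smallest
/// key overall when `last` is `None`. Returns `None` once exhausted.
fn next_key(keys: &BTreeMap<TlsKey, &'static str>, last: Option<TlsKey>) -> Option<TlsKey> {
    let lower = match last {
        None => Bound::Unbounded,      // first call: start at the beginning
        Some(k) => Bound::Excluded(k), // resume strictly after the last key
    };
    keys.range((lower, Bound::Unbounded)).next().map(|(k, _)| *k)
}

fn main() {
    let keys: BTreeMap<TlsKey, &'static str> =
        [(1, "a"), (3, "b"), (7, "c")].into_iter().collect();
    let mut last = None;
    while let Some(k) = next_key(&keys, last) {
        println!("scheduling dtor for key {k}");
        last = Some(k);
    }
    // `last` is now `Some(7)`; every further call returns `None`,
    // which is the "already tried all keys" state described above.
}
```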
@@ -40,7 +42,7 @@ pub struct TlsData<'tcx> {
 
     /// A single per thread destructor of the thread local storage (that's how
     /// things work on macOS) with a data argument.
-    thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
+    macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
 
     /// State for currently running TLS dtors. If this map contains a key for a
     /// specific thread, it means that we are in the "destruct" phase, during
@@ -53,7 +55,7 @@ impl<'tcx> Default for TlsData<'tcx> {
         TlsData {
             next_key: 1, // start with 1 as we must not use 0 on Windows
             keys: Default::default(),
-            thread_dtors: Default::default(),
+            macos_thread_dtors: Default::default(),
             dtors_running: Default::default(),
         }
     }
@@ -143,7 +145,7 @@ impl<'tcx> TlsData<'tcx> {
             // UB, according to libstd docs.
             throw_ub_format!("setting thread's local storage destructor while destructors are already running");
         }
-        if self.thread_dtors.insert(thread, (dtor, data)).is_some() {
+        if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
             throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported");
         }
         Ok(())
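The two guards above mirror the `_tlv_atexit`-style model: registration is rejected once destructors are running, and at most one thread-wide dtor per thread is supported. A self-contained sketch of the same checks, with invented stand-in types (Miri's real `ty::Instance` and `Scalar` are interpreter-internal):

```rust
use std::collections::BTreeMap;

type ThreadId = u32; // stand-in for Miri's ThreadId
type Dtor = fn(u64); // dtor taking its data argument
type Data = u64;     // stands in for the Scalar data pointer

struct Tls {
    macos_thread_dtors: BTreeMap<ThreadId, (Dtor, Data)>,
    dtors_running: bool, // simplified to a single flag for the sketch
}

impl Tls {
    fn set_macos_thread_dtor(
        &mut self,
        thread: ThreadId,
        dtor: Dtor,
        data: Data,
    ) -> Result<(), &'static str> {
        if self.dtors_running {
            // Mirrors the UB check: registering during destruction is rejected.
            return Err("setting dtor while dtors are running");
        }
        if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
            // Mirrors the unsupported case: at most one dtor per thread.
            return Err("more than one dtor per thread is not supported");
        }
        Ok(())
    }
}

fn main() {
    fn dtor(data: u64) { println!("dtor({data})"); }
    let mut tls = Tls { macos_thread_dtors: BTreeMap::new(), dtors_running: false };
    assert!(tls.set_macos_thread_dtor(1, dtor, 42).is_ok());
    assert!(tls.set_macos_thread_dtor(1, dtor, 43).is_err()); // second dtor rejected
}
```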
@@ -186,6 +188,7 @@ impl<'tcx> TlsData<'tcx> {
         match data.entry(thread_id) {
             Entry::Occupied(entry) => {
                 if let Some(dtor) = dtor {
+                    // Set TLS data to NULL, and call dtor with old value.
                     let data_scalar = entry.remove();
                     let ret = Some((*dtor, data_scalar, key));
                     return ret;
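The new comment documents POSIX pthread destructor semantics: the slot is reset to NULL before the destructor runs, and the destructor receives the old value as its argument. A hedged sketch of that remove-then-return shape, with stand-in types that are not Miri's:

```rust
use std::collections::BTreeMap;

type ThreadId = u32; // stand-in types for illustration
type Value = u64;    // stands in for the stored pointer

/// Clear the slot and hand back the old value for the dtor call.
/// Removing the entry models the "set to NULL" step: a later read of
/// the slot (e.g. by the dtor itself) observes NULL, as POSIX requires.
fn fetch_and_clear(slots: &mut BTreeMap<ThreadId, Value>, thread: ThreadId) -> Option<Value> {
    slots.remove(&thread)
}

fn main() {
    let mut slots = BTreeMap::from([(1u32, 0xdead_beef_u64)]);
    if let Some(old) = fetch_and_clear(&mut slots, 1) {
        println!("calling dtor with old value {old:#x}");
    }
    assert!(slots.get(&1).is_none()); // the slot now reads as NULL
}
```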
@@ -204,6 +207,8 @@ impl<'tcx> TlsData<'tcx> {
         if self.dtors_running.contains_key(&thread) {
             true
         } else {
+            // We need to guard this `insert` with a check because otherwise
+            // we would risk overwriting `last_dtor_key` with `None`.
             self.dtors_running.insert(
                 thread,
                 RunningDtorsState { last_dtor_key: None }
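The guard exists because this function may be called again while a thread is already mid-destruction; an unconditional `insert` would reset the `last_dtor_key` cursor and restart destructor iteration. A standalone sketch of the same invariant using the single-lookup `Entry` form (stand-in types, invented function name):

```rust
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;

type ThreadId = u32;
type TlsKey = u128;

#[derive(Clone, Debug)]
struct RunningDtorsState {
    last_dtor_key: Option<TlsKey>,
}

/// Returns `true` if dtors for `thread` were already running.
fn set_dtors_running(
    dtors_running: &mut BTreeMap<ThreadId, RunningDtorsState>,
    thread: ThreadId,
) -> bool {
    // "Insert only if absent" in one lookup, so an in-progress
    // `last_dtor_key` can never be clobbered with `None`.
    match dtors_running.entry(thread) {
        Entry::Occupied(_) => true,
        Entry::Vacant(slot) => {
            slot.insert(RunningDtorsState { last_dtor_key: None });
            false
        }
    }
}

fn main() {
    let mut running = BTreeMap::new();
    assert!(!set_dtors_running(&mut running, 1)); // first call inserts
    running.get_mut(&1).unwrap().last_dtor_key = Some(5); // progress is made
    assert!(set_dtors_running(&mut running, 1)); // second call preserves it
    assert_eq!(running[&1].last_dtor_key, Some(5));
}
```

The `contains_key` guard in the diff expresses the same thing with two lookups; the `Entry` form is merely the single-lookup equivalent.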
@@ -259,7 +264,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
         let thread_id = this.get_active_thread()?;
-        if let Some((instance, data)) = this.machine.tls.thread_dtors.remove(&thread_id) {
+        if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
             trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
 
             let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
@@ -283,7 +288,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
     /// Schedule a pthread TLS destructor. Returns `true` if it found
     /// a destructor to schedule, and `false` otherwise.
-    fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx, bool> {
+    fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
         let active_thread = this.get_active_thread()?;
 
@@ -329,33 +334,43 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     ///
     /// FIXME: we do not yet support deallocation of thread local statics.
     /// Issue: https://github.com/rust-lang/miri/issues/1369
+    ///
+    /// Note: we consistently run TLS destructors for all threads, including the
+    /// main thread. However, it is not clear that we should run the TLS
+    /// destructors for the main thread. See issue:
+    /// https://github.com/rust-lang/rust/issues/28129.
     fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let active_thread = this.get_active_thread()?;
 
-        let scheduled_next = if this.tcx.sess.target.target.target_os == "windows" {
-            if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
+        if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
+            // This is the first time we got asked to schedule a destructor.
+            // The Windows schedule destructor function must be called exactly
+            // once, which is why it is in this block.
+            if this.tcx.sess.target.target.target_os == "windows" {
+                // On Windows, we signal that the thread has quit by starting
+                // the relevant function, reenabling the thread, and going back
+                // to the scheduler.
                 this.schedule_windows_tls_dtors()?;
-                true
-            } else {
-                false
-            }
-        } else {
-            this.machine.tls.set_dtors_running_for_thread(active_thread);
-            // The macOS thread wide destructor runs "before any TLS slots get
-            // freed", so do that first.
-            if this.schedule_macos_tls_dtor()? {
-                true
-            } else {
-                this.schedule_pthread_tls_dtors()?
+                return Ok(())
             }
-        };
-
-        if !scheduled_next {
-            // No dtors scheduled means that we are finished. Delete the
-            // remaining TLS entries.
-            this.machine.tls.delete_all_thread_tls(active_thread);
         }
+        // The macOS thread-wide destructor runs "before any TLS slots get
+        // freed", so do that first.
+        if this.schedule_macos_tls_dtor()? {
+            // We have scheduled a macOS dtor to run on the thread. Execute it
+            // to completion and come back here. Scheduling a destructor
+            // removes it, so we will not enter this branch again.
+            return Ok(())
+        }
+        if this.schedule_next_pthread_tls_dtor()? {
+            // We have scheduled a pthread destructor and removed it from the
+            // destructors list. Run it to completion and come back here.
+            return Ok(())
+        }
+
+        // All dtors done!
+        this.machine.tls.delete_all_thread_tls(active_thread);
 
         Ok(())
     }
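Taken together, the rewrite turns this function into a re-entrant state machine: each call schedules at most one destructor and returns early, and the scheduler re-invokes it after that destructor has run to completion. A minimal standalone model of that loop follows; every type and name in it is invented for the sketch, and the Windows branch is elided to a comment.

```rust
#[derive(Default)]
struct ThreadDtors {
    first_call_done: bool,
    macos_dtor: Option<&'static str>,
    pthread_dtors: Vec<&'static str>,
}

/// One scheduling step: returns the next dtor to run, or `None` when done.
fn schedule_next(t: &mut ThreadDtors) -> Option<&'static str> {
    if !t.first_call_done {
        t.first_call_done = true;
        // (On Windows, the one-shot dtor function would be scheduled here,
        // exactly once, mirroring the `set_dtors_running_for_thread` guard.)
    }
    // The macOS thread-wide dtor runs before any TLS slots get freed.
    if let Some(d) = t.macos_dtor.take() {
        return Some(d); // `take` ensures it is never scheduled twice
    }
    // Then pthread dtors, one per call, until none remain.
    t.pthread_dtors.pop()
}

fn main() {
    let mut t = ThreadDtors {
        macos_dtor: Some("macos_dtor"),
        pthread_dtors: vec!["pthread_dtor_b", "pthread_dtor_a"],
        ..Default::default()
    };
    // The interpreter's scheduler plays the role of this loop: run the
    // scheduled dtor to completion, then ask for the next one.
    while let Some(dtor) = schedule_next(&mut t) {
        println!("running {dtor}");
    }
    println!("all dtors done; TLS entries can be deleted");
}
```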