Commit 46b0317

Author: Vytautas Astrauskas (committed)

Improve code readability and comments.

1 parent 3b58541 · commit 46b0317
File tree: 5 files changed (+50, -33 lines)

src/eval.rs

Lines changed: 3 additions & 0 deletions
@@ -211,6 +211,9 @@ pub fn eval_main<'tcx>(tcx: TyCtxt<'tcx>, main_id: DefId, config: MiriConfig) ->
                 assert!(ecx.step()?, "a terminated thread was scheduled for execution");
             }
             SchedulingAction::ExecuteDtors => {
+                // This will either enable the thread again (so we go back
+                // to `ExecuteStep`), or determine that this thread is done
+                // for good.
                 ecx.schedule_next_tls_dtor_for_active_thread()?;
             }
             SchedulingAction::Stop => {
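
The new comment describes the interpreter's top-level state machine: scheduling a TLS destructor either re-enables the thread (so the loop goes back to `ExecuteStep`) or lets the scheduler retire it. As a rough, self-contained illustration of that driver-loop shape only (toy types of my own, not Miri's actual `eval_main` or interpreter context):

```rust
// Toy sketch: `ExecuteDtors` keeps being scheduled until no TLS destructor
// is left, at which point the scheduler moves to `Stop`.
enum SchedulingAction {
    ExecuteStep,
    ExecuteDtors,
    Stop,
}

struct ToyMachine {
    steps_left: u32, // stand-in for "statements left to execute"
    dtors_left: u32, // stand-in for "TLS destructors not yet run"
}

impl ToyMachine {
    fn schedule(&self) -> SchedulingAction {
        if self.steps_left > 0 {
            SchedulingAction::ExecuteStep
        } else if self.dtors_left > 0 {
            SchedulingAction::ExecuteDtors
        } else {
            SchedulingAction::Stop
        }
    }

    // Mirrors the role of `schedule_next_tls_dtor_for_active_thread`:
    // scheduling a destructor consumes it, so this arm is re-entered once
    // per destructor and then the next `schedule()` returns `Stop`.
    fn schedule_next_tls_dtor(&mut self) {
        self.dtors_left -= 1;
    }
}

fn main() {
    let mut m = ToyMachine { steps_left: 3, dtors_left: 2 };
    loop {
        match m.schedule() {
            SchedulingAction::ExecuteStep => m.steps_left -= 1,
            SchedulingAction::ExecuteDtors => m.schedule_next_tls_dtor(),
            SchedulingAction::Stop => break,
        }
    }
}
```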

src/shims/sync.rs

Lines changed: 1 addition & 1 deletion
@@ -555,7 +555,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         // result, not only readers can starve writers, but also writers can
         // starve readers.
         if let Some(_writer) = this.unblock_some_thread(writer_blockset)? {
-            rwlock_set_writers(this, rwlock_op, Scalar::from_u32(1))?;
+            assert_eq!(writers, 1);
         } else {
             rwlock_set_writers(this, rwlock_op, Scalar::from_u32(0))?;
             let mut readers = 0;
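
This hunk swaps a redundant write for an assertion: when a blocked writer is woken and handed the lock, the emulated rwlock's writer count should already be 1, so the code now documents that invariant instead of re-establishing it. A minimal sketch of the idea under that reading (a `ToyRwLock` of my own; the real shim reads `writers` out of the emulated `pthread_rwlock_t` state):

```rust
struct ToyRwLock {
    writers: u32,              // 0 = unlocked, 1 = write-locked
    blocked_writers: Vec<u32>, // thread ids queued for the write lock
}

impl ToyRwLock {
    /// Called when a writer releases the lock.
    fn write_unlock(&mut self) {
        if let Some(_writer) = self.blocked_writers.pop() {
            // Hand-off: the count was 1 for the releasing writer and stays 1
            // for the woken one, so asserting records the invariant instead
            // of redundantly re-writing the same value.
            assert_eq!(self.writers, 1);
        } else {
            // Nobody is waiting to write: the lock really becomes free.
            self.writers = 0;
        }
    }
}

fn main() {
    let mut lock = ToyRwLock { writers: 1, blocked_writers: vec![7] };
    lock.write_unlock(); // wakes thread 7; writer count stays 1
    assert_eq!(lock.writers, 1);
    lock.write_unlock(); // no waiters left; lock becomes free
    assert_eq!(lock.writers, 0);
}
```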

src/shims/thread.rs

Lines changed: 2 additions & 2 deletions
@@ -25,7 +25,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
         let thread_info_place = this.deref_operand(thread)?;
         this.write_scalar(
-            Scalar::from_uint(new_thread_id.to_u128(), thread_info_place.layout.size),
+            Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
             thread_info_place.into(),
         )?;
 
@@ -83,7 +83,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         let this = self.eval_context_mut();
 
         let thread_id = this.get_active_thread()?;
-        this.write_scalar(Scalar::from_uint(thread_id.to_u128(), dest.layout.size), dest)
+        this.write_scalar(Scalar::from_uint(thread_id.to_u32(), dest.layout.size), dest)
     }
 
     fn prctl(
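
Both hunks rely on the same observation: a `ThreadId` is a `u32` internally, and the scalar write zero-extends it to whatever size the destination field has, so the fallible `u128` round-trip was unnecessary. A self-contained sketch of that pattern (plain Rust; `write_uint` is a hypothetical stand-in for the size-directed scalar write, not rustc's `Scalar` API):

```rust
#[derive(Clone, Copy)]
struct ThreadId(u32);

impl ThreadId {
    fn to_u32(self) -> u32 {
        self.0 // infallible: the newtype already stores a `u32`
    }
}

/// Zero-extend `value` into a little-endian buffer of `size` bytes,
/// mimicking a scalar write sized by the destination layout.
fn write_uint(value: u128, size: usize, dest: &mut [u8]) {
    assert!(size <= 16 && dest.len() >= size);
    dest[..size].copy_from_slice(&value.to_le_bytes()[..size]);
}

fn main() {
    let new_thread_id = ThreadId(7);
    // E.g. a 64-bit `pthread_t` field: the `u32` widens losslessly.
    let mut pthread_t_field = [0u8; 8];
    write_uint(u128::from(new_thread_id.to_u32()), 8, &mut pthread_t_field);
    assert_eq!(u64::from_le_bytes(pthread_t_field), 7);
}
```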

src/shims/tls.rs

Lines changed: 41 additions & 26 deletions
@@ -26,7 +26,9 @@ pub struct TlsEntry<'tcx> {
 
 #[derive(Clone, Debug)]
 struct RunningDtorsState {
-    /// The last TlsKey used to retrieve a TLS destructor.
+    /// The last TlsKey used to retrieve a TLS destructor. `None` means that we
+    /// have not tried to retrieve a TLS destructor yet or that we already tried
+    /// all keys.
     last_dtor_key: Option<TlsKey>,
 }
 
@@ -40,7 +42,7 @@ pub struct TlsData<'tcx> {
 
     /// A single per thread destructor of the thread local storage (that's how
    /// things work on macOS) with a data argument.
-    thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
+    macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,
 
     /// State for currently running TLS dtors. If this map contains a key for a
     /// specific thread, it means that we are in the "destruct" phase, during
@@ -53,7 +55,7 @@ impl<'tcx> Default for TlsData<'tcx> {
         TlsData {
             next_key: 1, // start with 1 as we must not use 0 on Windows
             keys: Default::default(),
-            thread_dtors: Default::default(),
+            macos_thread_dtors: Default::default(),
             dtors_running: Default::default(),
         }
     }
@@ -143,7 +145,7 @@ impl<'tcx> TlsData<'tcx> {
             // UB, according to libstd docs.
             throw_ub_format!("setting thread's local storage destructor while destructors are already running");
         }
-        if self.thread_dtors.insert(thread, (dtor, data)).is_some() {
+        if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
             throw_unsup_format!("setting more than one thread local storage destructor for the same thread is not supported");
         }
         Ok(())
@@ -186,6 +188,7 @@ impl<'tcx> TlsData<'tcx> {
         match data.entry(thread_id) {
             Entry::Occupied(entry) => {
                 if let Some(dtor) = dtor {
+                    // Set TLS data to NULL, and call dtor with old value.
                     let data_scalar = entry.remove();
                     let ret = Some((*dtor, data_scalar, key));
                     return ret;
@@ -204,6 +207,8 @@ impl<'tcx> TlsData<'tcx> {
         if self.dtors_running.contains_key(&thread) {
             true
         } else {
+            // We need to guard this `insert` with a check because otherwise we
+            // would risk to overwrite `last_dtor_key` with `None`.
             self.dtors_running.insert(
                 thread,
                 RunningDtorsState { last_dtor_key: None }
@@ -259,7 +264,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
         let thread_id = this.get_active_thread()?;
-        if let Some((instance, data)) = this.machine.tls.thread_dtors.remove(&thread_id) {
+        if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
             trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);
 
             let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
@@ -283,7 +288,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 
     /// Schedule a pthread TLS destructor. Returns `true` if found
     /// a destructor to schedule, and `false` otherwise.
-    fn schedule_pthread_tls_dtors(&mut self) -> InterpResult<'tcx, bool> {
+    fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
         let this = self.eval_context_mut();
         let active_thread = this.get_active_thread()?;
 
@@ -329,33 +334,43 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
     ///
     /// FIXME: we do not support yet deallocation of thread local statics.
     /// Issue: https://github.com/rust-lang/miri/issues/1369
+    ///
+    /// Note: we consistently run TLS destructors for all threads, including the
+    /// main thread. However, it is not clear that we should run the TLS
+    /// destructors for the main thread. See issue:
+    /// https://github.com/rust-lang/rust/issues/28129.
     fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         let active_thread = this.get_active_thread()?;
 
-        let scheduled_next = if this.tcx.sess.target.target.target_os == "windows" {
-            if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
+        if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
+            // This is the first time we got asked to schedule a destructor. The
+            // Windows schedule destructor function must be called exactly once,
+            // this is why it is in this block.
+            if this.tcx.sess.target.target.target_os == "windows" {
+                // On Windows, we signal that the thread quit by starting the
+                // relevant function, reenabling the thread, and going back to
+                // the scheduler.
                 this.schedule_windows_tls_dtors()?;
-                true
-            } else {
-                false
-            }
-        } else {
-            this.machine.tls.set_dtors_running_for_thread(active_thread);
-            // The macOS thread wide destructor runs "before any TLS slots get
-            // freed", so do that first.
-            if this.schedule_macos_tls_dtor()? {
-                true
-            } else {
-                this.schedule_pthread_tls_dtors()?
+                return Ok(())
             }
-        };
-
-        if !scheduled_next {
-            // No dtors scheduled means that we are finished. Delete the
-            // remaining TLS entries.
-            this.machine.tls.delete_all_thread_tls(active_thread);
         }
+        // The macOS thread wide destructor runs "before any TLS slots get
+        // freed", so do that first.
+        if this.schedule_macos_tls_dtor()? {
+            // We have scheduled a MacOS dtor to run on the thread. Execute it
+            // to completion and come back here. Scheduling a destructor
+            // destroys it, so we will not enter this branch again.
+            return Ok(())
+        }
+        if this.schedule_next_pthread_tls_dtor()? {
+            // We have scheduled a pthread destructor and removed it from the
+            // destructors list. Run it to completion and come back here.
+            return Ok(())
+        }
+
+        // All dtors done!
+        this.machine.tls.delete_all_thread_tls(active_thread);
 
         Ok(())
     }
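
Much of this restructuring hinges on `set_dtors_running_for_thread` acting as a per-thread once-guard that never clobbers in-progress state: per the hunk above, it returns `true` when destructors are already running, so the first-time work sits behind a negated check. A simplified, runnable sketch of that contract (types flattened to plain integers; the real code lives in Miri's `TlsData`):

```rust
use std::collections::HashMap;

type ThreadId = u32;
type TlsKey = u128;

#[derive(Default)]
struct RunningDtorsState {
    last_dtor_key: Option<TlsKey>,
}

#[derive(Default)]
struct TlsData {
    dtors_running: HashMap<ThreadId, RunningDtorsState>,
}

impl TlsData {
    /// Returns `true` if dtors for `thread` are already running.
    fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
        if self.dtors_running.contains_key(&thread) {
            // Already in the "destruct" phase; leave `last_dtor_key` alone.
            true
        } else {
            // Guarded insert: an unconditional `insert` would overwrite
            // `last_dtor_key` with `None` and restart the destructor walk.
            self.dtors_running.insert(thread, RunningDtorsState::default());
            false
        }
    }
}

fn main() {
    let mut tls = TlsData::default();
    assert!(!tls.set_dtors_running_for_thread(0)); // first call: fresh state
    tls.dtors_running.get_mut(&0).unwrap().last_dtor_key = Some(1);
    assert!(tls.set_dtors_running_for_thread(0)); // later calls: no reset
    assert_eq!(tls.dtors_running[&0].last_dtor_key, Some(1));
}
```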

src/thread.rs

Lines changed: 3 additions & 4 deletions
@@ -2,7 +2,6 @@
 
 use std::cell::RefCell;
 use std::convert::TryFrom;
-use std::convert::TryInto;
 use std::num::{NonZeroU32, TryFromIntError};
 
 use log::trace;
@@ -36,8 +35,8 @@ pub struct ThreadId(u32);
 const MAIN_THREAD: ThreadId = ThreadId(0);
 
 impl ThreadId {
-    pub fn to_u128(self) -> u128 {
-        self.0.try_into().unwrap()
+    pub fn to_u32(self) -> u32 {
+        self.0
     }
 }
 
@@ -362,7 +361,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
     fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
         // Check whether the thread has **just** terminated (`check_terminated`
         // checks whether the thread has popped all its stack and if yes, sets
-        // the thread state to terminated.)
+        // the thread state to terminated).
         if self.threads[self.active_thread].check_terminated() {
             // Check if we need to unblock any threads.
             for (i, thread) in self.threads.iter_enumerated_mut() {
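
The repaired comment describes a two-step dance in `schedule`: `check_terminated` flips a thread that has emptied its stack into the terminated state, and only on that just-terminated transition are joining threads woken. A toy sketch of that shape (simplified states of my own, not Miri's real `ThreadManager` or `Thread`):

```rust
#[derive(PartialEq)]
enum ThreadState {
    Enabled,
    BlockedOnJoin(usize), // waiting on the thread with this index
    Terminated,
}

struct Thread {
    state: ThreadState,
    stack_depth: usize, // stand-in for "frames left on this thread's stack"
}

impl Thread {
    /// If the thread has popped all its stack, set its state to terminated
    /// and report that it has *just* terminated.
    fn check_terminated(&mut self) -> bool {
        if self.state == ThreadState::Enabled && self.stack_depth == 0 {
            self.state = ThreadState::Terminated;
            true
        } else {
            false
        }
    }
}

fn schedule(threads: &mut [Thread], active: usize) {
    if threads[active].check_terminated() {
        // Check if we need to unblock any threads joining the active one.
        for thread in threads.iter_mut() {
            if thread.state == ThreadState::BlockedOnJoin(active) {
                thread.state = ThreadState::Enabled;
            }
        }
    }
}

fn main() {
    let mut threads = vec![
        Thread { state: ThreadState::Enabled, stack_depth: 0 },
        Thread { state: ThreadState::BlockedOnJoin(0), stack_depth: 3 },
    ];
    schedule(&mut threads, 0);
    assert!(threads[1].state == ThreadState::Enabled);
}
```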
