
Commit a3a4e86

Add can_fail flag to Unit that allows compilation to proceed even if
specific units fail. All Docscrape units now have can_fail = true to avoid stopping Rustdoc if an example fails to scrape.
1 parent d583b21 commit a3a4e86
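
The mechanism, in outline: each unit carries a `can_fail` flag, per-unit success is recorded in a shared map, and a fallible unit whose dependencies failed is skipped instead of aborting the whole build. Below is a minimal, self-contained sketch of that interaction; `UnitId`, `Scheduler`, and `is_skipped` are hypothetical names, not Cargo's actual types.

// Minimal sketch (not Cargo's code): a shared map records whether each
// unit succeeded, and a fallible unit whose dependencies failed is
// skipped instead of failing the whole build.
use std::collections::{HashMap, HashSet};
use std::sync::{Arc, Mutex};

type UnitId = u32;

struct Scheduler {
    // Dependencies of each unit.
    deps: HashMap<UnitId, HashSet<UnitId>>,
    // Units that may fail without failing the whole build.
    can_fail: HashSet<UnitId>,
    // Shared record of per-unit success, filled in as jobs finish.
    completed: Arc<Mutex<HashMap<UnitId, bool>>>,
}

impl Scheduler {
    /// Returns true if `unit` should be skipped because it is allowed
    /// to fail and at least one of its dependencies already failed.
    fn is_skipped(&self, unit: UnitId) -> bool {
        if !self.can_fail.contains(&unit) {
            return false;
        }
        let completed = self.completed.lock().unwrap();
        self.deps[&unit]
            .iter()
            .any(|dep| !completed.get(dep).copied().unwrap_or(false))
    }
}

fn main() {
    // Unit 1 built fine; unit 2 (say, an example that failed to compile
    // during scraping) did not.
    let completed = Arc::new(Mutex::new(HashMap::from([(1, true), (2, false)])));
    let sched = Scheduler {
        deps: HashMap::from([(3, HashSet::from([1, 2]))]),
        can_fail: HashSet::from([3]),
        completed,
    };
    // Unit 3 (e.g. a Docscrape unit) is fallible and depends on the
    // failed unit 2, so it is recorded as failed and skipped.
    assert!(sched.is_skipped(3));
}

In the actual change, the map is keyed by each unit's `Metadata` and lives in `Context::completed_units`, as the diffs below show.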

10 files changed: +209 -15 lines

src/cargo/core/compiler/context/mod.rs

Lines changed: 8 additions & 0 deletions
@@ -77,6 +77,13 @@ pub struct Context<'a, 'cfg> {
     /// Map of Doc/Docscrape units to metadata for their -Cmetadata flag.
     /// See Context::find_metadata_units for more details.
     pub metadata_for_doc_units: HashMap<Unit, Metadata>,
+
+    /// Map that tracks whether a unit completed successfully. Used in conjunction
+    /// with the `Unit::can_fail` flag, so jobs can dynamically track at runtime
+    /// whether their dependencies succeeded or failed. Currently used for
+    /// the Rustdoc scrape-examples feature to allow Rustdoc to proceed even if
+    /// examples fail to scrape.
+    pub completed_units: Arc<Mutex<HashMap<Metadata, bool>>>,
 }
 
 impl<'a, 'cfg> Context<'a, 'cfg> {
@@ -115,6 +122,7 @@ impl<'a, 'cfg> Context<'a, 'cfg> {
             rustc_clients: HashMap::new(),
             lto: HashMap::new(),
             metadata_for_doc_units: HashMap::new(),
+            completed_units: Arc::new(Mutex::new(HashMap::new())),
         })
     }
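
The new `completed_units` field is an `Arc<Mutex<HashMap<Metadata, bool>>>` because it is cloned into the per-unit `Work` closures and updated from whichever thread finishes the job. A small sketch of that shared-map pattern, with hypothetical names (`Meta`, the worker closure) standing in for Cargo's types:

// Sketch of the shared-completion-map pattern: an Arc<Mutex<HashMap>>
// cloned into each worker so results can be recorded from any thread.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::thread;

type Meta = u64;

fn main() {
    let completed: Arc<Mutex<HashMap<Meta, bool>>> = Arc::new(Mutex::new(HashMap::new()));

    let handles: Vec<_> = (0..4u64)
        .map(|meta| {
            let completed = Arc::clone(&completed);
            thread::spawn(move || {
                // Pretend odd-numbered units fail (e.g. an example that
                // does not compile while being scraped).
                let success = meta % 2 == 0;
                completed.lock().unwrap().insert(meta, success);
            })
        })
        .collect();
    for h in handles {
        h.join().unwrap();
    }

    // A later consumer (e.g. the Doc unit) can check which of its
    // dependencies actually succeeded.
    let completed = completed.lock().unwrap();
    assert_eq!(completed.get(&0), Some(&true));
    assert_eq!(completed.get(&1), Some(&false));
}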

src/cargo/core/compiler/job_queue.rs

Lines changed: 41 additions & 1 deletion
@@ -126,6 +126,7 @@ struct DrainState<'cfg> {
     total_units: usize,
 
     queue: DependencyQueue<Unit, Artifact, Job>,
+    dep_map: HashMap<Unit, HashSet<(Unit, Artifact)>>,
     messages: Arc<Queue<Message>>,
     /// Diagnostic deduplication support.
     diag_dedupe: DiagDedupe<'cfg>,
@@ -508,6 +509,12 @@ impl<'cfg> JobQueue<'cfg> {
         let progress = Progress::with_style("Building", ProgressStyle::Ratio, cx.bcx.config);
         let state = DrainState {
             total_units: self.queue.len(),
+            dep_map: self
+                .queue
+                .dep_map()
+                .iter()
+                .map(|(unit, (deps, _))| (unit.clone(), deps.clone()))
+                .collect(),
             queue: self.queue,
             // 100 here is somewhat arbitrary. It is a few screenfulls of
             // output, and hopefully at most a few megabytes of memory for
@@ -578,6 +585,32 @@ impl<'cfg> DrainState<'cfg> {
         // start requesting job tokens. Each job after the first needs to
         // request a token.
         while let Some((unit, job)) = self.queue.dequeue() {
+            // First, we handle the special case of fallible units. If
+            // this unit is allowed to fail, and any one of its dependencies
+            // has failed, then we should immediately mark it as failed and
+            // skip executing it.
+            if unit.can_fail {
+                let mut completed_units = cx.completed_units.lock().unwrap();
+                let failed_deps = self.dep_map[&unit]
+                    .iter()
+                    .filter(|(dep_unit, _)| {
+                        let dep_meta = cx.files().metadata(dep_unit);
+                        !completed_units[&dep_meta]
+                    })
+                    .map(|(_, artifact)| artifact)
+                    .collect::<HashSet<_>>();
+                if !failed_deps.is_empty() {
+                    // TODO: should put a warning here saying which units were skipped
+                    // due to failed dependencies.
+                    for artifact in failed_deps {
+                        self.queue.finish(&unit, artifact);
+                    }
+                    let unit_meta = cx.files().metadata(&unit);
+                    completed_units.insert(unit_meta, false);
+                    continue;
+                }
+            }
+
             self.pending_queue.push((unit, job));
             if self.active.len() + self.pending_queue.len() > 1 {
                 jobserver_helper.request_token();
@@ -713,7 +746,8 @@ impl<'cfg> DrainState<'cfg> {
         };
         debug!("end ({:?}): {:?}", unit, result);
         match result {
-            Ok(()) => self.finish(id, &unit, artifact, cx)?,
+            Ok(()) => self.finish(id, &unit, artifact, cx, true)?,
+            Err(_) if unit.can_fail => self.finish(id, &unit, artifact, cx, false)?,
             Err(error) => {
                 let msg = "The following warnings were emitted during compilation:";
                 self.emit_warnings(Some(msg), &unit, cx)?;
@@ -1161,6 +1195,7 @@ impl<'cfg> DrainState<'cfg> {
         unit: &Unit,
         artifact: Artifact,
         cx: &mut Context<'_, '_>,
+        success: bool,
     ) -> CargoResult<()> {
         if unit.mode.is_run_custom_build() && unit.show_warnings(cx.bcx.config) {
             self.emit_warnings(None, unit, cx)?;
@@ -1170,6 +1205,11 @@ impl<'cfg> DrainState<'cfg> {
             Artifact::All => self.timings.unit_finished(id, unlocked),
             Artifact::Metadata => self.timings.unit_rmeta_finished(id, unlocked),
         }
+        cx.completed_units
+            .lock()
+            .unwrap()
+            .insert(cx.files().metadata(unit), success);
+
         Ok(())
     }
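
Note that a skipped fallible unit still calls `self.queue.finish(&unit, artifact)`: dependents are only dequeued once all of their dependency edges have been reported finished, so even a unit that is skipped (or that failed but was allowed to) has to be marked done. A simplified stand-in for that bookkeeping, with a hypothetical `MiniQueue` rather than Cargo's `DependencyQueue`:

// Simplified stand-in for the dependency bookkeeping: each node counts
// outstanding dependency edges, and finishing a node (built, failed-but-
// fallible, or skipped) unblocks its dependents.
use std::collections::HashMap;

struct MiniQueue {
    // node -> number of dependency edges still outstanding
    pending: HashMap<&'static str, usize>,
    // node -> nodes that depend on it
    dependents: HashMap<&'static str, Vec<&'static str>>,
}

impl MiniQueue {
    /// Mark `node` as done and return the dependents that became ready.
    fn finish(&mut self, node: &'static str) -> Vec<&'static str> {
        let mut ready = Vec::new();
        for dependent in self.dependents.get(node).cloned().unwrap_or_default() {
            let count = self.pending.get_mut(dependent).unwrap();
            *count -= 1;
            if *count == 0 {
                ready.push(dependent);
            }
        }
        ready
    }
}

fn main() {
    let mut q = MiniQueue {
        pending: HashMap::from([("doc", 1)]),
        dependents: HashMap::from([("scrape", vec!["doc"])]),
    };
    // Even if the "scrape" unit was skipped because its example failed,
    // finishing it is what lets the "doc" unit be scheduled at all.
    assert_eq!(q.finish("scrape"), vec!["doc"]);
}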

src/cargo/core/compiler/mod.rs

Lines changed: 42 additions & 13 deletions
@@ -22,13 +22,13 @@ mod unit;
 pub mod unit_dependencies;
 pub mod unit_graph;
 
-use std::collections::HashSet;
+use std::collections::{HashMap, HashSet};
 use std::env;
 use std::ffi::{OsStr, OsString};
 use std::fs::{self, File};
 use std::io::{BufRead, Write};
 use std::path::{Path, PathBuf};
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
 
 use anyhow::{Context as _, Error};
 use lazycell::LazyCell;
@@ -263,6 +263,7 @@ fn rustc(cx: &mut Context<'_, '_>, unit: &Unit, exec: &Arc<dyn Executor>) -> Car
     let script_metadata = cx.find_build_script_metadata(unit);
     let is_local = unit.is_local();
     let artifact = unit.artifact;
+    let completed_units = Arc::clone(&cx.completed_units);
 
     return Ok(Work::new(move |state| {
         // Artifacts are in a different location than typical units,
@@ -292,7 +293,13 @@ fn rustc(cx: &mut Context<'_, '_>, unit: &Unit, exec: &Arc<dyn Executor>) -> Car
                 )?;
                 add_plugin_deps(&mut rustc, &script_outputs, &build_scripts, &root_output)?;
             }
-            add_custom_flags(&mut rustc, &script_outputs, script_metadata)?;
+            add_custom_flags(
+                &mut rustc,
+                &script_outputs,
+                script_metadata,
+                completed_units,
+                None,
+            )?;
         }
 
         for output in outputs.iter() {
@@ -639,9 +646,9 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
     let metadata = cx.metadata_for_doc_units[unit];
     rustdoc.arg("-C").arg(format!("metadata={}", metadata));
 
-    let scrape_output_path = |unit: &Unit| -> CargoResult<PathBuf> {
+    let scrape_output_path = |unit: &Unit| -> PathBuf {
         let output_dir = cx.files().deps_dir(unit);
-        Ok(output_dir.join(format!("{}.examples", unit.buildkey())))
+        output_dir.join(format!("{}.examples", unit.buildkey()))
     };
 
     if unit.mode.is_doc_scrape() {
@@ -651,7 +658,7 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
 
         rustdoc
            .arg("--scrape-examples-output-path")
-            .arg(scrape_output_path(unit)?);
+            .arg(scrape_output_path(unit));
 
         // Only scrape example for items from crates in the workspace, to reduce generated file size
         for pkg in cx.bcx.ws.members() {
@@ -664,18 +671,24 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
                 rustdoc.arg("--scrape-examples-target-crate").arg(name);
             }
         }
-    } else if cx.bcx.scrape_units.len() > 0 && cx.bcx.ws.unit_needs_doc_scrape(unit) {
+    }
+
+    let scrape_units = if cx.bcx.scrape_units.len() > 0 && cx.bcx.ws.unit_needs_doc_scrape(unit) {
         // We only pass scraped examples to packages in the workspace
         // since examples are only coming from reverse-dependencies of workspace packages
 
         rustdoc.arg("-Zunstable-options");
 
-        for scrape_unit in &cx.bcx.scrape_units {
-            rustdoc
-                .arg("--with-examples")
-                .arg(scrape_output_path(scrape_unit)?);
-        }
-    }
+        Some(
+            cx.bcx
+                .scrape_units
+                .iter()
+                .map(|unit| (cx.files().metadata(unit), scrape_output_path(unit)))
+                .collect::<HashMap<_, _>>(),
+        )
+    } else {
+        None
+    };
 
     build_deps_args(&mut rustdoc, cx, unit)?;
     rustdoc::add_root_urls(cx, unit, &mut rustdoc)?;
@@ -693,11 +706,14 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
     let target = Target::clone(&unit.target);
     let mut output_options = OutputOptions::new(cx, unit);
     let script_metadata = cx.find_build_script_metadata(unit);
+    let completed_units = Arc::clone(&cx.completed_units);
     Ok(Work::new(move |state| {
         add_custom_flags(
             &mut rustdoc,
             &build_script_outputs.lock().unwrap(),
             script_metadata,
+            completed_units,
+            scrape_units,
         )?;
         let crate_dir = doc_dir.join(&crate_name);
         if crate_dir.exists() {
@@ -706,6 +722,7 @@ fn rustdoc(cx: &mut Context<'_, '_>, unit: &Unit) -> CargoResult<Work> {
             debug!("removing pre-existing doc directory {:?}", crate_dir);
             paths::remove_dir_all(crate_dir)?;
         }
+
         state.running(&rustdoc);
 
         rustdoc
@@ -1165,6 +1182,8 @@ fn add_custom_flags(
     cmd: &mut ProcessBuilder,
     build_script_outputs: &BuildScriptOutputs,
     metadata: Option<Metadata>,
+    completed_units: Arc<Mutex<HashMap<Metadata, bool>>>,
+    scrape_units: Option<HashMap<Metadata, PathBuf>>,
 ) -> CargoResult<()> {
     if let Some(metadata) = metadata {
         if let Some(output) = build_script_outputs.get(metadata) {
@@ -1183,6 +1202,16 @@ fn add_custom_flags(
         }
     }
 
+    if let Some(scrape_units) = scrape_units {
+        let completed_units = completed_units.lock().unwrap();
+        for (_, output_path) in scrape_units
+            .iter()
+            .filter(|(buildkey, _)| completed_units[*buildkey])
+        {
+            cmd.arg("--with-examples").arg(output_path);
+        }
+    }
+
     Ok(())
 }
src/cargo/core/compiler/standard_lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,7 @@ pub fn generate_std_roots(
217217
/*is_std*/ true,
218218
/*dep_hash*/ 0,
219219
IsArtifact::No,
220+
false,
220221
));
221222
}
222223
}

src/cargo/core/compiler/unit.rs

Lines changed: 4 additions & 0 deletions
@@ -72,6 +72,7 @@ pub struct UnitInner {
     /// This value initially starts as 0, and then is filled in via a
     /// second-pass after all the unit dependencies have been computed.
     pub dep_hash: u64,
+    pub can_fail: bool,
 }
 
 impl UnitInner {
@@ -141,6 +142,7 @@ impl fmt::Debug for Unit {
             .field("artifact", &self.artifact.is_true())
             .field("is_std", &self.is_std)
             .field("dep_hash", &self.dep_hash)
+            .field("can_fail", &self.can_fail)
             .finish()
     }
 }
@@ -184,6 +186,7 @@ impl UnitInterner {
         is_std: bool,
         dep_hash: u64,
         artifact: IsArtifact,
+        can_fail: bool,
     ) -> Unit {
         let target = match (is_std, target.kind()) {
             // This is a horrible hack to support build-std. `libstd` declares
@@ -216,6 +219,7 @@ impl UnitInterner {
             is_std,
             dep_hash,
             artifact,
+            can_fail,
         });
         Unit { inner }
     }

src/cargo/core/compiler/unit_dependencies.rs

Lines changed: 1 addition & 0 deletions
@@ -888,6 +888,7 @@ fn new_unit_dep_with_profile(
         state.is_std,
         /*dep_hash*/ 0,
         artifact.map_or(IsArtifact::No, |_| IsArtifact::Yes),
+        parent.can_fail,
     );
     Ok(UnitDep {
         unit,

src/cargo/core/workspace.rs

Lines changed: 1 addition & 1 deletion
@@ -1517,7 +1517,7 @@ impl<'cfg> Workspace<'cfg> {
         // (not documented) or proc macros (have no scrape-able exports). Additionally,
         // naively passing a proc macro's unit_for to new_unit_dep will currently cause
         // Cargo to panic, see issue #10545.
-        self.is_member(&unit.pkg) && !unit.target.for_host()
+        self.is_member(&unit.pkg) && !unit.target.for_host() && unit.mode.is_doc()
     }
 }

src/cargo/ops/cargo_compile.rs

Lines changed: 3 additions & 0 deletions
@@ -1069,6 +1069,7 @@ fn generate_targets(
                 /*is_std*/ false,
                 /*dep_hash*/ 0,
                 IsArtifact::No,
+                mode.is_doc_scrape(),
             );
             units.insert(unit);
         }
@@ -1631,6 +1632,7 @@ fn traverse_and_share(
         unit.is_std,
         new_dep_hash,
         unit.artifact,
+        unit.can_fail,
     );
     assert!(memo.insert(unit.clone(), new_unit.clone()).is_none());
     new_graph.entry(new_unit.clone()).or_insert(new_deps);
@@ -1872,6 +1874,7 @@ fn override_rustc_crate_types(
             unit.is_std,
             unit.dep_hash,
            unit.artifact,
+            unit.can_fail,
         )
     };
     units[0] = match unit.target.kind() {

src/cargo/util/dependency_queue.rs

Lines changed: 4 additions & 0 deletions
@@ -170,6 +170,10 @@ impl<N: Hash + Eq + Clone, E: Eq + Hash + Clone, V> DependencyQueue<N, E, V> {
         self.dep_map.len()
     }
 
+    pub fn dep_map(&self) -> &HashMap<N, (HashSet<(N, E)>, V)> {
+        &self.dep_map
+    }
+
     /// Indicate that something has finished.
     ///
     /// Calling this function indicates that the `node` has produced `edge`. All
