From 15a17ee2667df7a76ce407e477180865be5ddf6d Mon Sep 17 00:00:00 2001
From: Camille GILLOT
Date: Sun, 1 Nov 2020 16:55:13 +0100
Subject: [PATCH 01/14] Refactor try_mark_previous_green.

---
 .../rustc_query_system/src/dep_graph/graph.rs | 237 +++++++++---------
 1 file changed, 115 insertions(+), 122 deletions(-)

diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 0f25572170f53..f3d74829a8beb 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -626,6 +626,114 @@ impl<K: DepKind> DepGraph<K> {
         }
     }
 
+    fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
+        &self,
+        tcx: Ctxt,
+        data: &DepGraphData<K>,
+        parent_dep_node_index: SerializedDepNodeIndex,
+        dep_node: &DepNode<K>,
+    ) -> Option<()> {
+        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);
+
+        match dep_dep_node_color {
+            Some(DepNodeColor::Green(_)) => {
+                // This dependency has been marked as green before, we are
+                // still fine and can continue with checking the other
+                // dependencies.
+                debug!(
+                    "try_mark_parent_green({:?}) --- found dependency {:?} to be immediately green",
+                    dep_node, dep_dep_node,
+                );
+                return Some(());
+            }
+            Some(DepNodeColor::Red) => {
+                // We found a dependency the value of which has changed
+                // compared to the previous compilation session. We cannot
+                // mark the DepNode as green and also don't need to bother
+                // with checking any of the other dependencies.
+                debug!(
+                    "try_mark_parent_green({:?}) - END - dependency {:?} was immediately red",
+                    dep_node, dep_dep_node,
+                );
+                return None;
+            }
+            None => {}
+        }
+
+        // We don't know the state of this dependency. If it isn't
+        // an eval_always node, let's try to mark it green recursively.
+        debug!(
+            "try_mark_parent_green({:?}) --- state of dependency {:?} ({}) \
+             is unknown, trying to mark it green",
+            dep_node, dep_dep_node, dep_dep_node.hash,
+        );
+
+        let node_index =
+            self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
+        if node_index.is_some() {
+            debug!(
+                "try_mark_parent_green({:?}) --- managed to MARK dependency {:?} as green",
+                dep_node, dep_dep_node
+            );
+            return Some(());
+        }
+
+        // We failed to mark it green, so we try to force the query.
+        debug!(
+            "try_mark_parent_green({:?}) --- trying to force dependency {:?}",
+            dep_node, dep_dep_node
+        );
+        if !tcx.try_force_from_dep_node(dep_dep_node) {
+            // The DepNode could not be forced.
+            debug!(
+                "try_mark_parent_green({:?}) - END - dependency {:?} could not be forced",
+                dep_node, dep_dep_node
+            );
+            return None;
+        }
+
+        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+
+        match dep_dep_node_color {
+            Some(DepNodeColor::Green(_)) => {
+                debug!(
+                    "try_mark_parent_green({:?}) --- managed to FORCE dependency {:?} to green",
+                    dep_node, dep_dep_node
+                );
+                return Some(());
+            }
+            Some(DepNodeColor::Red) => {
+                debug!(
+                    "try_mark_parent_green({:?}) - END - dependency {:?} was red after forcing",
+                    dep_node, dep_dep_node
+                );
+                return None;
+            }
+            None => {}
+        }
+
+        if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
+            panic!("try_mark_parent_green() - Forcing the DepNode should have set its color")
+        }
+
+        // If the query we just forced has resulted in
+        // some kind of compilation error, we cannot rely on
+        // the dep-node color having been properly updated.
+        // This means that the query system has reached an
+        // invalid state.
We let the compiler continue (by + // returning `None`) so it can emit error messages + // and wind down, but rely on the fact that this + // invalid state will not be persisted to the + // incremental compilation cache because of + // compilation errors being present. + debug!( + "try_mark_parent_green({:?}) - END - dependency {:?} resulted in compilation error", + dep_node, dep_dep_node + ); + return None; + } + /// Try to mark a dep-node which existed in the previous compilation session as green. fn try_mark_previous_green>( &self, @@ -634,6 +742,11 @@ impl DepGraph { prev_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option { + // We never try to mark eval_always nodes as green + if dep_node.kind.is_eval_always() { + return None; + } + debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); #[cfg(not(parallel_compiler))] @@ -642,131 +755,12 @@ impl DepGraph { debug_assert!(data.colors.get(prev_dep_node_index).is_none()); } - // We never try to mark eval_always nodes as green - debug_assert!(!dep_node.kind.is_eval_always()); - debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); for &dep_dep_node_index in prev_deps { - let dep_dep_node_color = data.colors.get(dep_dep_node_index); - - match dep_dep_node_color { - Some(DepNodeColor::Green(_)) => { - // This dependency has been marked as green before, we are - // still fine and can continue with checking the other - // dependencies. - debug!( - "try_mark_previous_green({:?}) --- found dependency {:?} to \ - be immediately green", - dep_node, - data.previous.index_to_node(dep_dep_node_index) - ); - } - Some(DepNodeColor::Red) => { - // We found a dependency the value of which has changed - // compared to the previous compilation session. We cannot - // mark the DepNode as green and also don't need to bother - // with checking any of the other dependencies. - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} was \ - immediately red", - dep_node, - data.previous.index_to_node(dep_dep_node_index) - ); - return None; - } - None => { - let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); - - // We don't know the state of this dependency. If it isn't - // an eval_always node, let's try to mark it green recursively. - if !dep_dep_node.kind.is_eval_always() { - debug!( - "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \ - is unknown, trying to mark it green", - dep_node, dep_dep_node, dep_dep_node.hash, - ); - - let node_index = self.try_mark_previous_green( - tcx, - data, - dep_dep_node_index, - dep_dep_node, - ); - if node_index.is_some() { - debug!( - "try_mark_previous_green({:?}) --- managed to MARK \ - dependency {:?} as green", - dep_node, dep_dep_node - ); - continue; - } - } - - // We failed to mark it green, so we try to force the query. 
- debug!( - "try_mark_previous_green({:?}) --- trying to force \ - dependency {:?}", - dep_node, dep_dep_node - ); - if tcx.try_force_from_dep_node(dep_dep_node) { - let dep_dep_node_color = data.colors.get(dep_dep_node_index); - - match dep_dep_node_color { - Some(DepNodeColor::Green(_)) => { - debug!( - "try_mark_previous_green({:?}) --- managed to \ - FORCE dependency {:?} to green", - dep_node, dep_dep_node - ); - } - Some(DepNodeColor::Red) => { - debug!( - "try_mark_previous_green({:?}) - END - \ - dependency {:?} was red after forcing", - dep_node, dep_dep_node - ); - return None; - } - None => { - if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() { - panic!( - "try_mark_previous_green() - Forcing the DepNode \ - should have set its color" - ) - } else { - // If the query we just forced has resulted in - // some kind of compilation error, we cannot rely on - // the dep-node color having been properly updated. - // This means that the query system has reached an - // invalid state. We let the compiler continue (by - // returning `None`) so it can emit error messages - // and wind down, but rely on the fact that this - // invalid state will not be persisted to the - // incremental compilation cache because of - // compilation errors being present. - debug!( - "try_mark_previous_green({:?}) - END - \ - dependency {:?} resulted in compilation error", - dep_node, dep_dep_node - ); - return None; - } - } - } - } else { - // The DepNode could not be forced. - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} \ - could not be forced", - dep_node, dep_dep_node - ); - return None; - } - } - } + self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? } // If we got here without hitting a `return` that means that all @@ -790,8 +784,7 @@ impl DepGraph { #[cfg(not(parallel_compiler))] debug_assert!( data.colors.get(prev_dep_node_index).is_none(), - "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ - insertion for {:?}", + "try_mark_previous_green({:?}) - duplicate DepNodeColor insertion", dep_node ); From a82dc1dce6939d041faaa255523c2e05a7a955cd Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 16 Feb 2021 23:12:45 +0100 Subject: [PATCH 02/14] Reimplement the dep_graph. --- .../src/persist/dirty_clean.rs | 28 +- .../rustc_incremental/src/persist/load.rs | 10 +- .../rustc_middle/src/dep_graph/dep_node.rs | 2 +- compiler/rustc_middle/src/dep_graph/mod.rs | 1 - compiler/rustc_middle/src/ty/query/mod.rs | 2 + .../src/ty/query/on_disk_cache.rs | 13 +- compiler/rustc_query_impl/src/lib.rs | 1 + compiler/rustc_query_impl/src/plumbing.rs | 16 +- .../rustc_query_system/src/dep_graph/graph.rs | 962 ++---------------- .../rustc_query_system/src/dep_graph/mod.rs | 6 +- .../rustc_query_system/src/dep_graph/prev.rs | 61 -- .../src/dep_graph/serialized.rs | 531 ++++++++-- .../rustc_query_system/src/query/plumbing.rs | 16 +- 13 files changed, 593 insertions(+), 1056 deletions(-) delete mode 100644 compiler/rustc_query_system/src/dep_graph/prev.rs diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs index 8a83149d73235..73207089831cf 100644 --- a/compiler/rustc_incremental/src/persist/dirty_clean.rs +++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs @@ -14,7 +14,6 @@ //! the required condition is not met. 
use rustc_ast::{self as ast, Attribute, NestedMetaItem}; -use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; @@ -22,7 +21,7 @@ use rustc_hir::intravisit; use rustc_hir::itemlikevisit::ItemLikeVisitor; use rustc_hir::Node as HirNode; use rustc_hir::{ImplItemKind, ItemKind as HirItem, TraitItemKind}; -use rustc_middle::dep_graph::{label_strs, DepNode, DepNodeExt}; +use rustc_middle::dep_graph::{label_strs, DepNode, DepNodeColor, DepNodeExt}; use rustc_middle::hir::map::Map; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::{sym, Symbol}; @@ -391,10 +390,7 @@ impl DirtyCleanVisitor<'tcx> { fn assert_dirty(&self, item_span: Span, dep_node: DepNode) { debug!("assert_dirty({:?})", dep_node); - let current_fingerprint = self.get_fingerprint(&dep_node); - let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); - - if current_fingerprint == prev_fingerprint { + if self.tcx.dep_graph.node_color(&dep_node) == Some(DepNodeColor::Green) { let dep_node_str = self.dep_node_str(&dep_node); self.tcx .sess @@ -402,28 +398,10 @@ impl DirtyCleanVisitor<'tcx> { } } - fn get_fingerprint(&self, dep_node: &DepNode) -> Option { - if self.tcx.dep_graph.dep_node_exists(dep_node) { - let dep_node_index = self.tcx.dep_graph.dep_node_index_of(dep_node); - Some(self.tcx.dep_graph.fingerprint_of(dep_node_index)) - } else { - None - } - } - fn assert_clean(&self, item_span: Span, dep_node: DepNode) { debug!("assert_clean({:?})", dep_node); - let current_fingerprint = self.get_fingerprint(&dep_node); - let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); - - // if the node wasn't previously evaluated and now is (or vice versa), - // then the node isn't actually clean or dirty. - if (current_fingerprint == None) ^ (prev_fingerprint == None) { - return; - } - - if current_fingerprint != prev_fingerprint { + if self.tcx.dep_graph.node_color(&dep_node) == Some(DepNodeColor::Red) { let dep_node_str = self.dep_node_str(&dep_node); self.tcx .sess diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs index 2b5649bb0594f..9274eb13a2867 100644 --- a/compiler/rustc_incremental/src/persist/load.rs +++ b/compiler/rustc_incremental/src/persist/load.rs @@ -2,7 +2,7 @@ use rustc_data_structures::fx::FxHashMap; use rustc_hir::definitions::Definitions; -use rustc_middle::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId}; +use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId}; use rustc_middle::ty::query::OnDiskCache; use rustc_serialize::opaque::Decoder; use rustc_serialize::Decodable as RustcDecodable; @@ -22,8 +22,8 @@ pub enum LoadResult { Error { message: String }, } -impl LoadResult<(PreviousDepGraph, WorkProductMap)> { - pub fn open(self, sess: &Session) -> (PreviousDepGraph, WorkProductMap) { +impl LoadResult<(SerializedDepGraph, WorkProductMap)> { + pub fn open(self, sess: &Session) -> (SerializedDepGraph, WorkProductMap) { match self { LoadResult::Error { message } => { sess.warn(&message); @@ -84,7 +84,7 @@ impl MaybeAsync { } } -pub type DepGraphFuture = MaybeAsync>; +pub type DepGraphFuture = MaybeAsync>; /// Launch a thread and load the dependency graph in the background. 
pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { @@ -185,7 +185,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { let dep_graph = SerializedDepGraph::decode(&mut decoder) .expect("Error reading cached dep-graph"); - LoadResult::Ok { data: (PreviousDepGraph::new(dep_graph), prev_work_products) } + LoadResult::Ok { data: (dep_graph, prev_work_products) } } } })) diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs index ba9d0a40732e6..6e0621ca7a9e2 100644 --- a/compiler/rustc_middle/src/dep_graph/dep_node.rs +++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs @@ -64,7 +64,7 @@ use rustc_hir::HirId; use rustc_span::symbol::Symbol; use std::hash::Hash; -pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams}; +pub use rustc_query_system::dep_graph::{DepContext, DepNodeColor, DepNodeParams}; /// This struct stores metadata about each DepKind. /// diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs index c688b23be1d02..842ac07b614fd 100644 --- a/compiler/rustc_middle/src/dep_graph/mod.rs +++ b/compiler/rustc_middle/src/dep_graph/mod.rs @@ -18,7 +18,6 @@ pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt}; pub type DepGraph = rustc_query_system::dep_graph::DepGraph; pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps; pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery; -pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph; pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph; impl rustc_query_system::dep_graph::DepKind for DepKind { diff --git a/compiler/rustc_middle/src/ty/query/mod.rs b/compiler/rustc_middle/src/ty/query/mod.rs index 51a214bc07bac..e1d9327068fee 100644 --- a/compiler/rustc_middle/src/ty/query/mod.rs +++ b/compiler/rustc_middle/src/ty/query/mod.rs @@ -43,6 +43,7 @@ use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId}; use rustc_hir::lang_items::{LangItem, LanguageItems}; use rustc_hir::{Crate, ItemLocalId, TraitCandidate}; use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec}; +use rustc_query_system::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; use rustc_serialize::opaque; use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; use rustc_session::utils::NativeLibKind; @@ -237,6 +238,7 @@ macro_rules! 
define_callbacks { tcx: TyCtxt<'tcx>, encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>, query_result_index: &mut on_disk_cache::EncodedQueryResultIndex, + remap: &IndexVec>, ) -> opaque::FileEncodeResult; fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>); diff --git a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs index d0cd8a48f99b3..3885eeac441ed 100644 --- a/compiler/rustc_middle/src/ty/query/on_disk_cache.rs +++ b/compiler/rustc_middle/src/ty/query/on_disk_cache.rs @@ -13,7 +13,7 @@ use rustc_errors::Diagnostic; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE}; use rustc_hir::definitions::DefPathHash; use rustc_hir::definitions::Definitions; -use rustc_index::vec::{Idx, IndexVec}; +use rustc_index::vec::IndexVec; use rustc_query_system::dep_graph::DepContext; use rustc_query_system::query::QueryContext; use rustc_serialize::{ @@ -303,13 +303,15 @@ impl<'sess> OnDiskCache<'sess> { latest_foreign_def_path_hashes, }; + let remap = tcx.dep_graph.compression_map(); + // Encode query results. let mut query_result_index = EncodedQueryResultIndex::new(); tcx.sess.time("encode_query_results", || -> FileEncodeResult { let enc = &mut encoder; let qri = &mut query_result_index; - tcx.queries.encode_query_results(tcx, enc, qri) + tcx.queries.encode_query_results(tcx, enc, qri, &remap) })?; // Encode diagnostics. @@ -318,11 +320,11 @@ impl<'sess> OnDiskCache<'sess> { .borrow() .iter() .map( - |(dep_node_index, diagnostics)| -> Result<_, ::Error> { + |(&dep_node_index, diagnostics)| -> Result<_, ::Error> { let pos = AbsoluteBytePos::new(encoder.position()); // Let's make sure we get the expected type here. let diagnostics: &EncodedDiagnostics = diagnostics; - let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index()); + let dep_node_index = remap[dep_node_index].unwrap(); encoder.encode_tagged(dep_node_index, diagnostics)?; Ok((dep_node_index, pos)) @@ -1220,6 +1222,7 @@ pub fn encode_query_results<'a, 'tcx, CTX, Q>( tcx: CTX, encoder: &mut CacheEncoder<'a, 'tcx, FileEncoder>, query_result_index: &mut EncodedQueryResultIndex, + remap: &IndexVec>, ) -> FileEncodeResult where CTX: QueryContext + 'tcx, @@ -1236,7 +1239,7 @@ where cache.iter_results(|results| { for (key, value, dep_node) in results { if Q::cache_on_disk(tcx, &key, Some(value)) { - let dep_node = SerializedDepNodeIndex::new(dep_node.index()); + let dep_node = remap[dep_node].unwrap(); // Record position of the cache entry. 
query_result_index diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index e9314797fbdc5..1570a760af8d1 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -26,6 +26,7 @@ use rustc_middle::ich::StableHashingContext; use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values}; use rustc_middle::ty::query::{Providers, QueryEngine}; use rustc_middle::ty::{self, TyCtxt}; +use rustc_query_system::dep_graph::{DepNodeColor, DepNodeIndex, SerializedDepNodeIndex}; use rustc_serialize::opaque; use rustc_span::{Span, DUMMY_SP}; diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 37a176de94196..e4faa1fcc76b6 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -13,6 +13,7 @@ use rustc_query_system::query::{QueryContext, QueryDescription, QueryJobId, Quer use rustc_data_structures::sync::Lock; use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; +use rustc_index::vec::IndexVec; use rustc_serialize::opaque; use rustc_span::def_id::{DefId, LocalDefId}; @@ -177,6 +178,7 @@ impl<'tcx> QueryCtxt<'tcx> { self, encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>, query_result_index: &mut on_disk_cache::EncodedQueryResultIndex, + remap: &IndexVec>, ) -> opaque::FileEncodeResult { macro_rules! encode_queries { ($($query:ident,)*) => { @@ -184,7 +186,8 @@ impl<'tcx> QueryCtxt<'tcx> { on_disk_cache::encode_query_results::<_, super::queries::$query<'_>>( self, encoder, - query_result_index + query_result_index, + remap, )?; )* } @@ -478,10 +481,10 @@ macro_rules! define_queries { return } - debug_assert!(tcx.dep_graph - .node_color(dep_node) - .map(|c| c.is_green()) - .unwrap_or(false)); + debug_assert_eq!( + tcx.dep_graph.node_color(dep_node), + Some(DepNodeColor::Green), + ); let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash)); if queries::$name::cache_on_disk(tcx, &key, None) { @@ -562,9 +565,10 @@ macro_rules! 
define_queries_struct { tcx: TyCtxt<'tcx>, encoder: &mut on_disk_cache::CacheEncoder<'a, 'tcx, opaque::FileEncoder>, query_result_index: &mut on_disk_cache::EncodedQueryResultIndex, + remap: &IndexVec>, ) -> opaque::FileEncodeResult { let tcx = QueryCtxt { tcx, queries: self }; - tcx.encode_query_results(encoder, query_result_index) + tcx.encode_query_results(encoder, query_result_index, remap) } fn exec_cache_promotions(&'tcx self, tcx: TyCtxt<'tcx>) { diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index f3d74829a8beb..5c266873ab1a8 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1,28 +1,22 @@ use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::profiling::QueryInvocationId; -use rustc_data_structures::sharded::{self, Sharded}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, LockGuard, Lrc, Ordering}; +use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, RwLock}; use rustc_data_structures::unlikely; use rustc_errors::Diagnostic; -use rustc_index::vec::{Idx, IndexVec}; +use rustc_index::vec::IndexVec; use rustc_serialize::{Encodable, Encoder}; -use parking_lot::{Condvar, Mutex}; use smallvec::{smallvec, SmallVec}; -use std::collections::hash_map::Entry; use std::env; use std::hash::Hash; use std::marker::PhantomData; -use std::mem; -use std::ops::Range; use std::sync::atomic::Ordering::Relaxed; use super::debug::EdgeFilter; -use super::prev::PreviousDepGraph; use super::query::DepGraphQuery; -use super::serialized::SerializedDepNodeIndex; +use super::serialized::{DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex}; use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use crate::query::QueryContext; @@ -37,10 +31,6 @@ pub struct DepGraph { virtual_dep_node_index: Lrc, } -rustc_index::newtype_index! { - pub struct DepNodeIndex { .. } -} - impl DepNodeIndex { pub const INVALID: DepNodeIndex = DepNodeIndex::MAX; } @@ -52,39 +42,19 @@ impl std::convert::From for QueryInvocationId { } } -#[derive(PartialEq)] -pub enum DepNodeColor { - Red, - Green(DepNodeIndex), -} - -impl DepNodeColor { - pub fn is_green(self) -> bool { - match self { - DepNodeColor::Red => false, - DepNodeColor::Green(_) => true, - } - } -} - struct DepGraphData { /// The new encoding of the dependency graph, optimized for red/green /// tracking. The `current` field is the dependency graph of only the /// current compilation session: We don't merge the previous dep-graph into /// current one anymore, but we do reference shared data to save space. - current: CurrentDepGraph, + current: CurrentDepGraph, /// The dep-graph from the previous compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. - previous: PreviousDepGraph, - - colors: DepNodeColorMap, + previous: RwLock>, /// A set of loaded diagnostics that is in the progress of being emitted. - emitting_diagnostics: Mutex>, - - /// Used to wait for diagnostics to be emitted. - emitting_diagnostics_cond_var: Condvar, + emitting_diagnostics: Lock>, /// When we load, there may be `.o` files, cached MIR, or other such /// things available to us. 
If we find that they are not dirty, we @@ -107,10 +77,10 @@ where impl DepGraph { pub fn new( - prev_graph: PreviousDepGraph, + prev_graph: SerializedDepGraph, prev_work_products: FxHashMap, ) -> DepGraph { - let prev_graph_node_count = prev_graph.node_count(); + let prev_graph_node_count = prev_graph.serialized_node_count(); DepGraph { data: Some(Lrc::new(DepGraphData { @@ -118,9 +88,7 @@ impl DepGraph { dep_node_debug: Default::default(), current: CurrentDepGraph::new(prev_graph_node_count), emitting_diagnostics: Default::default(), - emitting_diagnostics_cond_var: Condvar::new(), - previous: prev_graph, - colors: DepNodeColorMap::new(prev_graph_node_count), + previous: RwLock::new(prev_graph), })), virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), } @@ -138,60 +106,7 @@ impl DepGraph { pub fn query(&self) -> DepGraphQuery { let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - - // Note locking order: `prev_index_to_index`, then `data`. - let prev_index_to_index = data.current.prev_index_to_index.lock(); - let data = data.current.data.lock(); - let node_count = data.hybrid_indices.len(); - let edge_count = self.edge_count(&data); - - let mut nodes = Vec::with_capacity(node_count); - let mut edge_list_indices = Vec::with_capacity(node_count); - let mut edge_list_data = Vec::with_capacity(edge_count); - - // See `DepGraph`'s `Encodable` implementation for notes on the approach used here. - - edge_list_data.extend(data.unshared_edges.iter().map(|i| i.index())); - - for &hybrid_index in data.hybrid_indices.iter() { - match hybrid_index.into() { - HybridIndex::New(new_index) => { - nodes.push(data.new.nodes[new_index]); - let edges = &data.new.edges[new_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::Red(red_index) => { - nodes.push(previous.index_to_node(data.red.node_indices[red_index])); - let edges = &data.red.edges[red_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::LightGreen(lg_index) => { - nodes.push(previous.index_to_node(data.light_green.node_indices[lg_index])); - let edges = &data.light_green.edges[lg_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::DarkGreen(prev_index) => { - nodes.push(previous.index_to_node(prev_index)); - - let edges_iter = previous - .edge_targets_from(prev_index) - .iter() - .map(|&dst| prev_index_to_index[dst].unwrap().index()); - - let start = edge_list_data.len(); - edge_list_data.extend(edges_iter); - let end = edge_list_data.len(); - edge_list_indices.push((start, end)); - } - } - } - - debug_assert_eq!(nodes.len(), node_count); - debug_assert_eq!(edge_list_indices.len(), node_count); - debug_assert_eq!(edge_list_data.len(), edge_count); - - DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..]) + data.previous.read().query() } pub fn assert_ignored(&self) { @@ -280,81 +195,13 @@ impl DepGraph { let mut hcx = dcx.create_stable_hashing_context(); let current_fingerprint = hash_result(&mut hcx, &result); - let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks; - // Intern the new `DepNode`. - let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) { - // Determine the color and index of the new `DepNode`. 
- let (color, dep_node_index) = if let Some(current_fingerprint) = current_fingerprint - { - if current_fingerprint == data.previous.fingerprint_by_index(prev_index) { - if print_status { - eprintln!("[task::green] {:?}", key); - } - - // This is a light green node: it existed in the previous compilation, - // its query was re-executed, and it has the same result as before. - let dep_node_index = - data.current.intern_light_green_node(&data.previous, prev_index, edges); - - (DepNodeColor::Green(dep_node_index), dep_node_index) - } else { - if print_status { - eprintln!("[task::red] {:?}", key); - } - - // This is a red node: it existed in the previous compilation, its query - // was re-executed, but it has a different result from before. - let dep_node_index = data.current.intern_red_node( - &data.previous, - prev_index, - edges, - current_fingerprint, - ); - - (DepNodeColor::Red, dep_node_index) - } - } else { - if print_status { - eprintln!("[task::unknown] {:?}", key); - } - - // This is a red node, effectively: it existed in the previous compilation - // session, its query was re-executed, but it doesn't compute a result hash - // (i.e. it represents a `no_hash` query), so we have no way of determining - // whether or not the result was the same as before. - let dep_node_index = data.current.intern_red_node( - &data.previous, - prev_index, - edges, - Fingerprint::ZERO, - ); - - (DepNodeColor::Red, dep_node_index) - }; - - debug_assert!( - data.colors.get(prev_index).is_none(), - "DepGraph::with_task() - Duplicate DepNodeColor \ - insertion for {:?}", - key - ); - - data.colors.insert(prev_index, color); - dep_node_index - } else { - if print_status { - eprintln!("[task::new] {:?}", key); - } - - // This is a new node: it didn't exist in the previous compilation session. 
- data.current.intern_new_node( - &data.previous, - key, - edges, - current_fingerprint.unwrap_or(Fingerprint::ZERO), - ) - }; + let dep_node_index = data.previous.write().intern_task_node( + key, + &edges[..], + current_fingerprint, + dcx.sess().opts.debugging_opts.dep_tasks, + ); (result, dep_node_index) } else { @@ -395,12 +242,8 @@ impl DepGraph { hash: data.current.anon_id_seed.combine(hasher.finish()).into(), }; - let dep_node_index = data.current.intern_new_node( - &data.previous, - target_dep_node, - task_deps.reads, - Fingerprint::ZERO, - ); + let mut previous = data.previous.write(); + let dep_node_index = previous.intern_anon_node(target_dep_node, &task_deps.reads[..]); (result, dep_node_index) } else { @@ -474,13 +317,7 @@ impl DepGraph { #[inline] pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { let data = self.data.as_ref().unwrap(); - let current = &data.current; - - if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { - current.prev_index_to_index.lock()[prev_index] - } else { - current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied() - } + data.previous.read().dep_node_index_of_opt(dep_node) } #[inline] @@ -491,37 +328,13 @@ impl DepGraph { #[inline] pub fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - let data = data.current.data.lock(); - - match data.hybrid_indices[dep_node_index].into() { - HybridIndex::New(new_index) => data.new.nodes[new_index], - HybridIndex::Red(red_index) => previous.index_to_node(data.red.node_indices[red_index]), - HybridIndex::LightGreen(light_green_index) => { - previous.index_to_node(data.light_green.node_indices[light_green_index]) - } - HybridIndex::DarkGreen(prev_index) => previous.index_to_node(prev_index), - } + data.previous.read().dep_node_of(dep_node_index) } #[inline] pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - let data = data.current.data.lock(); - - match data.hybrid_indices[dep_node_index].into() { - HybridIndex::New(new_index) => data.new.fingerprints[new_index], - HybridIndex::Red(red_index) => data.red.fingerprints[red_index], - HybridIndex::LightGreen(light_green_index) => { - previous.fingerprint_by_index(data.light_green.node_indices[light_green_index]) - } - HybridIndex::DarkGreen(prev_index) => previous.fingerprint_by_index(prev_index), - } - } - - pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option { - self.data.as_ref().unwrap().previous.fingerprint_of(dep_node) + data.previous.read().fingerprint_of(dep_node_index) } /// Checks whether a previous work product exists for `v` and, if @@ -554,29 +367,15 @@ impl DepGraph { self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() } - fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData>) -> usize { - let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - - let mut edge_count = node_data.unshared_edges.len(); - - for &hybrid_index in node_data.hybrid_indices.iter() { - if let HybridIndex::DarkGreen(prev_index) = hybrid_index.into() { - edge_count += previous.edge_targets_from(prev_index).len() - } - } - - edge_count - } - pub fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(ref data) = self.data { - if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { - return data.colors.get(prev_index); + let previous = data.previous.read(); + if let 
Some(prev_index) = previous.node_to_index_opt(dep_node) { + return previous.color(prev_index); } else { // This is a node that did not exist in the previous compilation // session, so we consider it to be red. - return Some(DepNodeColor::Red); + return Some(DepNodeColor::New); } } @@ -593,7 +392,7 @@ impl DepGraph { dep_node: &DepNode, ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| { - debug_assert!(self.is_green(&dep_node)); + debug_assert!(self.node_color(&dep_node) == Some(DepNodeColor::Green)); self.read_index(dep_node_index); (prev_index, dep_node_index) }) @@ -610,11 +409,12 @@ impl DepGraph { let data = self.data.as_ref()?; // Return None if the dep node didn't exist in the previous session - let prev_index = data.previous.node_to_index_opt(dep_node)?; + let prev_index = data.previous.read().node_to_index_opt(dep_node)?; + let color = data.previous.read().color(prev_index); - match data.colors.get(prev_index) { - Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)), - Some(DepNodeColor::Red) => None, + match color { + Some(DepNodeColor::Green) => Some((prev_index, prev_index.rejuvenate())), + Some(DepNodeColor::Red) | Some(DepNodeColor::New) => None, None => { // This DepNode and the corresponding query invocation existed // in the previous compilation session too, so we can try to @@ -633,11 +433,11 @@ impl DepGraph { parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option<()> { - let dep_dep_node_color = data.colors.get(parent_dep_node_index); - let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index); + let dep_dep_node_color = data.previous.read().color(parent_dep_node_index); + let dep_dep_node = &data.previous.read().index_to_node(parent_dep_node_index); match dep_dep_node_color { - Some(DepNodeColor::Green(_)) => { + Some(DepNodeColor::Green) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. @@ -647,7 +447,7 @@ impl DepGraph { ); return Some(()); } - Some(DepNodeColor::Red) => { + Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { // We found a dependency the value of which has changed // compared to the previous compilation session. 
We cannot // mark the DepNode as green and also don't need to bother @@ -693,17 +493,17 @@ impl DepGraph { return None; } - let dep_dep_node_color = data.colors.get(parent_dep_node_index); + let dep_dep_node_color = data.previous.read().color(parent_dep_node_index); match dep_dep_node_color { - Some(DepNodeColor::Green(_)) => { + Some(DepNodeColor::Green) => { debug!( "try_mark_parent_green({:?}) --- managed to FORCE dependency {:?} to green", dep_node, dep_dep_node ); return Some(()); } - Some(DepNodeColor::Red) => { + Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { debug!( "try_mark_parent_green({:?}) - END - dependency {:?} was red after forcing", dep_node, dep_dep_node @@ -749,20 +549,37 @@ impl DepGraph { debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); - #[cfg(not(parallel_compiler))] - { - debug_assert!(!self.dep_node_exists(dep_node)); - debug_assert!(data.colors.get(prev_dep_node_index).is_none()); - } - - debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); + // We never try to mark eval_always nodes as green + debug_assert!(!dep_node.kind.is_eval_always()); + debug_assert_eq!(data.previous.read().index_to_node(prev_dep_node_index), *dep_node); - let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); + // Do not keep a reference to the borrowed `previous` graph, + // because the recursive calls. + let prev_deps: Vec<_> = + data.previous.read().edge_targets_from_serialized(prev_dep_node_index).collect(); + debug!( + "try_mark_previous_green({:?}) --- {:?} -- deps={:?}", + dep_node, + prev_dep_node_index, + prev_deps + .iter() + .map(|&d| (d, data.previous.read().index_to_node(d))) + .collect::>(), + ); - for &dep_dep_node_index in prev_deps { + for dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? } + #[cfg(not(parallel_compiler))] + debug_assert_eq!( + data.previous.read().color(prev_dep_node_index), + None, + "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ + insertion for {:?}", + dep_node + ); + // If we got here without hitting a `return` that means that all // dependencies of this DepNode could be marked as green. Therefore we // can also mark this DepNode as green. @@ -772,30 +589,19 @@ impl DepGraph { let dep_node_index = { // We allocating an entry for the node in the current dependency graph and // adding all the appropriate edges imported from the previous graph - data.current.intern_dark_green_node(&data.previous, prev_dep_node_index) + data.previous.write().intern_dark_green_node(prev_dep_node_index) }; - // ... emitting any stored diagnostic ... + // ... and emitting any stored diagnostic. // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere // Maybe store a list on disk and encode this fact in the DepNodeState let diagnostics = tcx.load_diagnostics(prev_dep_node_index); - #[cfg(not(parallel_compiler))] - debug_assert!( - data.colors.get(prev_dep_node_index).is_none(), - "try_mark_previous_green({:?}) - duplicate DepNodeColor insertion", - dep_node - ); - if unlikely!(!diagnostics.is_empty()) { - self.emit_diagnostics(tcx, data, dep_node_index, prev_dep_node_index, diagnostics); + self.emit_diagnostics(tcx, data, dep_node_index, diagnostics); } - // ... and finally storing a "Green" entry in the color map. 
- // Multiple threads can all write the same color here - data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); - debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node); Some(dep_node_index) } @@ -809,58 +615,23 @@ impl DepGraph { tcx: Ctxt, data: &DepGraphData, dep_node_index: DepNodeIndex, - prev_dep_node_index: SerializedDepNodeIndex, diagnostics: Vec, ) { - let mut emitting = data.emitting_diagnostics.lock(); - - if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) { - // The node is already green so diagnostics must have been emitted already + let should_emit = data.emitting_diagnostics.lock().insert(dep_node_index); + if !should_emit { return; } - if emitting.insert(dep_node_index) { - // We were the first to insert the node in the set so this thread - // must emit the diagnostics and signal other potentially waiting - // threads after. - mem::drop(emitting); - - // Promote the previous diagnostics to the current session. - tcx.store_diagnostics(dep_node_index, diagnostics.clone().into()); - - let handle = tcx.dep_context().sess().diagnostic(); - - for diagnostic in diagnostics { - handle.emit_diagnostic(&diagnostic); - } + // Promote the previous diagnostics to the current session. + tcx.store_diagnostics(dep_node_index, diagnostics.clone().into()); - // Mark the node as green now that diagnostics are emitted - data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); + let handle = tcx.dep_context().sess().diagnostic(); - // Remove the node from the set - data.emitting_diagnostics.lock().remove(&dep_node_index); - - // Wake up waiters - data.emitting_diagnostics_cond_var.notify_all(); - } else { - // We must wait for the other thread to finish emitting the diagnostic - - loop { - data.emitting_diagnostics_cond_var.wait(&mut emitting); - if data.colors.get(prev_dep_node_index) == Some(DepNodeColor::Green(dep_node_index)) - { - break; - } - } + for diagnostic in diagnostics { + handle.emit_diagnostic(&diagnostic); } } - // Returns true if the given node has been marked as green during the - // current compilation session. Used in various assertions - pub fn is_green(&self, dep_node: &DepNode) -> bool { - self.node_color(dep_node).map_or(false, |c| c.is_green()) - } - // This method loads all on-disk cacheable query results into memory, so // they can be written out to the new cache file again. Most query results // will already be in memory but in the case where we marked something as @@ -874,13 +645,15 @@ impl DepGraph { let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let data = self.data.as_ref().unwrap(); - for prev_index in data.colors.values.indices() { - match data.colors.get(prev_index) { - Some(DepNodeColor::Green(_)) => { - let dep_node = data.previous.index_to_node(prev_index); + let previous = data.previous.read(); + for prev_index in previous.serialized_indices() { + match previous.color(prev_index) { + Some(DepNodeColor::Green) => { + let dep_node = data.previous.read().index_to_node(prev_index); + debug!("PROMOTE {:?} {:?}", prev_index, dep_node); qcx.try_load_from_on_disk_cache(&dep_node); } - None | Some(DepNodeColor::Red) => { + None | Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { // We can skip red nodes because a node can only be marked // as red if the query result was recomputed and thus is // already in memory. @@ -892,10 +665,11 @@ impl DepGraph { // Register reused dep nodes (i.e. 
nodes we've marked red or green) with the context. pub fn register_reused_dep_nodes>(&self, tcx: Ctxt) { let data = self.data.as_ref().unwrap(); - for prev_index in data.colors.values.indices() { - match data.colors.get(prev_index) { - Some(DepNodeColor::Red) | Some(DepNodeColor::Green(_)) => { - let dep_node = data.previous.index_to_node(prev_index); + let previous = data.previous.read(); + for prev_index in previous.serialized_indices() { + match previous.color(prev_index) { + Some(_) => { + let dep_node = data.previous.read().index_to_node(prev_index); tcx.register_reused_dep_node(&dep_node); } None => {} @@ -912,46 +686,25 @@ impl DepGraph { } let data = self.data.as_ref().unwrap(); - let prev = &data.previous; + let prev = &data.previous.read(); let current = &data.current; - let data = current.data.lock(); let mut stats: FxHashMap<_, Stat> = FxHashMap::with_hasher(Default::default()); - for &hybrid_index in data.hybrid_indices.iter() { - let (kind, edge_count) = match hybrid_index.into() { - HybridIndex::New(new_index) => { - let kind = data.new.nodes[new_index].kind; - let edge_range = &data.new.edges[new_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::Red(red_index) => { - let kind = prev.index_to_node(data.red.node_indices[red_index]).kind; - let edge_range = &data.red.edges[red_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::LightGreen(lg_index) => { - let kind = prev.index_to_node(data.light_green.node_indices[lg_index]).kind; - let edge_range = &data.light_green.edges[lg_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::DarkGreen(prev_index) => { - let kind = prev.index_to_node(prev_index).kind; - let edge_count = prev.edge_targets_from(prev_index).len(); - (kind, edge_count) - } - }; + for index in prev.indices() { + let kind = prev.dep_node_of(index).kind; + let edge_count = prev.edge_targets_from(index).len(); let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 }); stat.node_counter += 1; stat.edge_counter += edge_count as u64; } - let total_node_count = data.hybrid_indices.len(); - let total_edge_count = self.edge_count(&data); + let total_node_count = prev.node_count(); + let total_edge_count = prev.edge_count(); // Drop the lock guard. - std::mem::drop(data); + std::mem::drop(prev); let mut stats: Vec<_> = stats.values().cloned().collect(); stats.sort_by_key(|s| -(s.node_counter as i64)); @@ -1010,141 +763,16 @@ impl DepGraph { let index = self.virtual_dep_node_index.fetch_add(1, Relaxed); DepNodeIndex::from_u32(index) } -} -impl> Encodable for DepGraph { - fn encode(&self, e: &mut E) -> Result<(), E::Error> { - // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph` - // using data copied from the `DepGraph`. But copying created a large memory spike, so we - // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we - // deserialize that data into a `SerializedDepGraph` in the next compilation session, we - // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to - // be in sync. If you update this encoding, be sure to update the decoding, and vice-versa. - - let data = self.data.as_ref().unwrap(); - let prev = &data.previous; - - // Note locking order: `prev_index_to_index`, then `data`. 
- let prev_index_to_index = data.current.prev_index_to_index.lock(); - let data = data.current.data.lock(); - let new = &data.new; - let red = &data.red; - let lg = &data.light_green; - - let node_count = data.hybrid_indices.len(); - let edge_count = self.edge_count(&data); - - // `rustc_middle::ty::query::OnDiskCache` expects nodes to be encoded in `DepNodeIndex` - // order. The edges in `edge_list_data` don't need to be in a particular order, as long as - // each node references its edges as a contiguous range within it. Therefore, we can encode - // `edge_list_data` directly from `unshared_edges`. It meets the above requirements, as - // each non-dark-green node already knows the range of edges to reference within it, which - // they'll encode in `edge_list_indices`. Dark green nodes, however, don't have their edges - // in `unshared_edges`, so need to add them to `edge_list_data`. - - use HybridIndex::*; - - // Encoded values (nodes, etc.) are explicitly typed below to avoid inadvertently - // serializing data in the wrong format (i.e. one incompatible with `SerializedDepGraph`). - e.emit_struct("SerializedDepGraph", 4, |e| { - e.emit_struct_field("nodes", 0, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `DepNode`s. - e.emit_seq(node_count, |e| { - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let node: DepNode = match hybrid_index.into() { - New(i) => new.nodes[i], - Red(i) => prev.index_to_node(red.node_indices[i]), - LightGreen(i) => prev.index_to_node(lg.node_indices[i]), - DarkGreen(prev_index) => prev.index_to_node(prev_index), - }; - - e.emit_seq_elt(seq_index, |e| node.encode(e))?; - } - - Ok(()) - }) - })?; - - e.emit_struct_field("fingerprints", 1, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `Fingerprints`s. - e.emit_seq(node_count, |e| { - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let fingerprint: Fingerprint = match hybrid_index.into() { - New(i) => new.fingerprints[i], - Red(i) => red.fingerprints[i], - LightGreen(i) => prev.fingerprint_by_index(lg.node_indices[i]), - DarkGreen(prev_index) => prev.fingerprint_by_index(prev_index), - }; - - e.emit_seq_elt(seq_index, |e| fingerprint.encode(e))?; - } - - Ok(()) - }) - })?; - - e.emit_struct_field("edge_list_indices", 2, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `(u32, u32)`s. - e.emit_seq(node_count, |e| { - // Dark green node edges start after the unshared (all other nodes') edges. - let mut dark_green_edge_index = data.unshared_edges.len(); - - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let edge_indices: (u32, u32) = match hybrid_index.into() { - New(i) => (new.edges[i].start.as_u32(), new.edges[i].end.as_u32()), - Red(i) => (red.edges[i].start.as_u32(), red.edges[i].end.as_u32()), - LightGreen(i) => (lg.edges[i].start.as_u32(), lg.edges[i].end.as_u32()), - DarkGreen(prev_index) => { - let edge_count = prev.edge_targets_from(prev_index).len(); - let start = dark_green_edge_index as u32; - dark_green_edge_index += edge_count; - let end = dark_green_edge_index as u32; - (start, end) - } - }; - - e.emit_seq_elt(seq_index, |e| edge_indices.encode(e))?; - } - - assert_eq!(dark_green_edge_index, edge_count); - - Ok(()) - }) - })?; - - e.emit_struct_field("edge_list_data", 3, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of - // `SerializedDepNodeIndex`. 
- e.emit_seq(edge_count, |e| { - for (seq_index, &edge) in data.unshared_edges.iter().enumerate() { - let serialized_edge = SerializedDepNodeIndex::new(edge.index()); - e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?; - } - - let mut seq_index = data.unshared_edges.len(); - - for &hybrid_index in data.hybrid_indices.iter() { - if let DarkGreen(prev_index) = hybrid_index.into() { - for &edge in prev.edge_targets_from(prev_index) { - // Dark green node edges are stored in the previous graph - // and must be converted to edges in the current graph, - // and then serialized as `SerializedDepNodeIndex`. - let serialized_edge = SerializedDepNodeIndex::new( - prev_index_to_index[edge].as_ref().unwrap().index(), - ); - - e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?; - seq_index += 1; - } - } - } - - assert_eq!(seq_index, edge_count); + pub fn compression_map(&self) -> IndexVec> { + self.data.as_ref().unwrap().previous.read().compression_map() + } - Ok(()) - }) - }) - }) + pub fn encode(&self, encoder: &mut E) -> Result<(), E::Error> + where + K: Encodable, + { + if let Some(data) = &self.data { data.previous.read().encode(encoder) } else { Ok(()) } } } @@ -1186,201 +814,11 @@ pub struct WorkProduct { pub saved_file: Option, } -// The maximum value of the follow index types leaves the upper two bits unused -// so that we can store multiple index types in `CompressedHybridIndex`, and use -// those bits to encode which index type it contains. - -// Index type for `NewDepNodeData`. -rustc_index::newtype_index! { - struct NewDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -// Index type for `RedDepNodeData`. -rustc_index::newtype_index! { - struct RedDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -// Index type for `LightGreenDepNodeData`. -rustc_index::newtype_index! { - struct LightGreenDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -/// Compressed representation of `HybridIndex` enum. Bits unused by the -/// contained index types are used to encode which index type it contains. -#[derive(Copy, Clone)] -struct CompressedHybridIndex(u32); - -impl CompressedHybridIndex { - const NEW_TAG: u32 = 0b0000_0000_0000_0000_0000_0000_0000_0000; - const RED_TAG: u32 = 0b0100_0000_0000_0000_0000_0000_0000_0000; - const LIGHT_GREEN_TAG: u32 = 0b1000_0000_0000_0000_0000_0000_0000_0000; - const DARK_GREEN_TAG: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000; - - const TAG_MASK: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000; - const INDEX_MASK: u32 = !Self::TAG_MASK; -} - -impl From for CompressedHybridIndex { - #[inline] - fn from(index: NewDepNodeIndex) -> Self { - CompressedHybridIndex(Self::NEW_TAG | index.as_u32()) - } -} - -impl From for CompressedHybridIndex { - #[inline] - fn from(index: RedDepNodeIndex) -> Self { - CompressedHybridIndex(Self::RED_TAG | index.as_u32()) - } -} - -impl From for CompressedHybridIndex { - #[inline] - fn from(index: LightGreenDepNodeIndex) -> Self { - CompressedHybridIndex(Self::LIGHT_GREEN_TAG | index.as_u32()) - } -} - -impl From for CompressedHybridIndex { - #[inline] - fn from(index: SerializedDepNodeIndex) -> Self { - CompressedHybridIndex(Self::DARK_GREEN_TAG | index.as_u32()) - } -} - -/// Contains an index into one of several node data collections. Elsewhere, we -/// store `CompressedHyridIndex` instead of this to save space, but convert to -/// this type during processing to take advantage of the enum match ergonomics. 
-enum HybridIndex { - New(NewDepNodeIndex), - Red(RedDepNodeIndex), - LightGreen(LightGreenDepNodeIndex), - DarkGreen(SerializedDepNodeIndex), -} - -impl From for HybridIndex { - #[inline] - fn from(hybrid_index: CompressedHybridIndex) -> Self { - let index = hybrid_index.0 & CompressedHybridIndex::INDEX_MASK; - - match hybrid_index.0 & CompressedHybridIndex::TAG_MASK { - CompressedHybridIndex::NEW_TAG => HybridIndex::New(NewDepNodeIndex::from_u32(index)), - CompressedHybridIndex::RED_TAG => HybridIndex::Red(RedDepNodeIndex::from_u32(index)), - CompressedHybridIndex::LIGHT_GREEN_TAG => { - HybridIndex::LightGreen(LightGreenDepNodeIndex::from_u32(index)) - } - CompressedHybridIndex::DARK_GREEN_TAG => { - HybridIndex::DarkGreen(SerializedDepNodeIndex::from_u32(index)) - } - _ => unreachable!(), - } - } -} - // Index type for `DepNodeData`'s edges. rustc_index::newtype_index! { struct EdgeIndex { .. } } -/// Data for nodes in the current graph, divided into different collections -/// based on their presence in the previous graph, and if present, their color. -/// We divide nodes this way because different types of nodes are able to share -/// more or less data with the previous graph. -/// -/// To enable more sharing, we distinguish between two kinds of green nodes. -/// Light green nodes are nodes in the previous graph that have been marked -/// green because we re-executed their queries and the results were the same as -/// in the previous session. Dark green nodes are nodes in the previous graph -/// that have been marked green because we were able to mark all of their -/// dependencies green. -/// -/// Both light and dark green nodes can share the dep node and fingerprint with -/// the previous graph, but for light green nodes, we can't be sure that the -/// edges may be shared without comparing them against the previous edges, so we -/// store them directly (an approach in which we compare edges with the previous -/// edges to see if they can be shared was evaluated, but was not found to be -/// very profitable). -/// -/// For dark green nodes, we can share everything with the previous graph, which -/// is why the `HybridIndex::DarkGreen` enum variant contains the index of the -/// node in the previous graph, and why we don't have a separate collection for -/// dark green node data--the collection is the `PreviousDepGraph` itself. -/// -/// (Note that for dark green nodes, the edges in the previous graph -/// (`SerializedDepNodeIndex`s) must be converted to edges in the current graph -/// (`DepNodeIndex`s). `CurrentDepGraph` contains `prev_index_to_index`, which -/// can perform this conversion. It should always be possible, as by definition, -/// a dark green node is one whose dependencies from the previous session have -/// all been marked green--which means `prev_index_to_index` contains them.) -/// -/// Node data is stored in parallel vectors to eliminate the padding between -/// elements that would be needed to satisfy alignment requirements of the -/// structure that would contain all of a node's data. We could group tightly -/// packing subsets of node data together and use fewer vectors, but for -/// consistency's sake, we use separate vectors for each piece of data. -struct DepNodeData { - /// Data for nodes not in previous graph. - new: NewDepNodeData, - - /// Data for nodes in previous graph that have been marked red. - red: RedDepNodeData, - - /// Data for nodes in previous graph that have been marked light green. 
- light_green: LightGreenDepNodeData, - - // Edges for all nodes other than dark-green ones. Edges for each node - // occupy a contiguous region of this collection, which a node can reference - // using two indices. Storing edges this way rather than using an `EdgesVec` - // for each node reduces memory consumption by a not insignificant amount - // when compiling large crates. The downside is that we have to copy into - // this collection the edges from the `EdgesVec`s that are built up during - // query execution. But this is mostly balanced out by the more efficient - // implementation of `DepGraph::serialize` enabled by this representation. - unshared_edges: IndexVec, - - /// Mapping from `DepNodeIndex` to an index into a collection above. - /// Indicates which of the above collections contains a node's data. - /// - /// This collection is wasteful in time and space during incr-full builds, - /// because for those, all nodes are new. However, the waste is relatively - /// small, and the maintenance cost of avoiding using this for incr-full - /// builds is somewhat high and prone to bugginess. It does not seem worth - /// it at the time of this writing, but we may want to revisit the idea. - hybrid_indices: IndexVec, -} - -/// Data for nodes not in previous graph. Since we cannot share any data with -/// the previous graph, so we must store all of such a node's data here. -struct NewDepNodeData { - nodes: IndexVec>, - edges: IndexVec>, - fingerprints: IndexVec, -} - -/// Data for nodes in previous graph that have been marked red. We can share the -/// dep node with the previous graph, but the edges may be different, and the -/// fingerprint is known to be different, so we store the latter two directly. -struct RedDepNodeData { - node_indices: IndexVec, - edges: IndexVec>, - fingerprints: IndexVec, -} - -/// Data for nodes in previous graph that have been marked green because we -/// re-executed their queries and the results were the same as in the previous -/// session. We can share the dep node and the fingerprint with the previous -/// graph, but the edges may be different, so we store them directly. -struct LightGreenDepNodeData { - node_indices: IndexVec, - edges: IndexVec>, -} - /// `CurrentDepGraph` stores the dependency graph for the current session. It /// will be populated as we run queries or tasks. We never remove nodes from the /// graph: they are only added. @@ -1388,7 +826,7 @@ struct LightGreenDepNodeData { /// The nodes in it are identified by a `DepNodeIndex`. Internally, this maps to /// a `HybridIndex`, which identifies which collection in the `data` field /// contains a node's data. Which collection is used for a node depends on -/// whether the node was present in the `PreviousDepGraph`, and if so, the color +/// whether the node was present in the `SerializedDepGraph`, and if so, the color /// of the node. Each type of node can share more or less data with the previous /// graph. When possible, we can store just the index of the node in the /// previous graph, rather than duplicating its data in our own collections. @@ -1398,7 +836,7 @@ struct LightGreenDepNodeData { /// For the same reason, we also avoid storing `DepNode`s more than once as map /// keys. The `new_node_to_index` map only contains nodes not in the previous /// graph, and we map nodes in the previous graph to indices via a two-step -/// mapping. `PreviousDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, +/// mapping. 
`SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, /// and the `prev_index_to_index` vector (which is more compact and faster than /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`. /// @@ -1410,11 +848,7 @@ struct LightGreenDepNodeData { /// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When /// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` /// first, and `data` second. -pub(super) struct CurrentDepGraph { - data: Lock>, - new_node_to_index: Sharded, DepNodeIndex>>, - prev_index_to_index: Lock>>, - +pub(super) struct CurrentDepGraph { /// Used to trap when a specific edge is added to the graph. /// This is used for debug purposes and is only active with `debug_assertions`. #[allow(dead_code)] @@ -1439,8 +873,8 @@ pub(super) struct CurrentDepGraph { total_duplicate_read_count: AtomicU64, } -impl CurrentDepGraph { - fn new(prev_graph_node_count: usize) -> CurrentDepGraph { +impl CurrentDepGraph { + fn new(_prev_graph_node_count: usize) -> CurrentDepGraph { use std::time::{SystemTime, UNIX_EPOCH}; let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -1460,6 +894,7 @@ impl CurrentDepGraph { None }; + /* // Pre-allocate the dep node structures. We over-allocate a little so // that we hopefully don't have to re-allocate during this compilation // session. The over-allocation for new nodes is 2% plus a small @@ -1487,6 +922,7 @@ impl CurrentDepGraph { let average_edges_per_node_estimate = 6; let unshared_edge_count_estimate = average_edges_per_node_estimate * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate); + */ // We store a large collection of these in `prev_index_to_index` during // non-full incremental builds, and want to ensure that the element size @@ -1494,157 +930,12 @@ impl CurrentDepGraph { static_assert_size!(Option, 4); CurrentDepGraph { - data: Lock::new(DepNodeData { - new: NewDepNodeData { - nodes: IndexVec::with_capacity(new_node_count_estimate), - edges: IndexVec::with_capacity(new_node_count_estimate), - fingerprints: IndexVec::with_capacity(new_node_count_estimate), - }, - red: RedDepNodeData { - node_indices: IndexVec::with_capacity(red_node_count_estimate), - edges: IndexVec::with_capacity(red_node_count_estimate), - fingerprints: IndexVec::with_capacity(red_node_count_estimate), - }, - light_green: LightGreenDepNodeData { - node_indices: IndexVec::with_capacity(light_green_node_count_estimate), - edges: IndexVec::with_capacity(light_green_node_count_estimate), - }, - unshared_edges: IndexVec::with_capacity(unshared_edge_count_estimate), - hybrid_indices: IndexVec::with_capacity(total_node_count_estimate), - }), - new_node_to_index: Sharded::new(|| { - FxHashMap::with_capacity_and_hasher( - new_node_count_estimate / sharded::SHARDS, - Default::default(), - ) - }), - prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)), anon_id_seed: stable_hasher.finish(), forbidden_edge, total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), } } - - fn intern_new_node( - &self, - prev_graph: &PreviousDepGraph, - dep_node: DepNode, - edges: EdgesVec, - fingerprint: Fingerprint, - ) -> DepNodeIndex { - debug_assert!( - prev_graph.node_to_index_opt(&dep_node).is_none(), - "node in previous graph should be interned using one \ - of `intern_red_node`, `intern_light_green_node`, etc." 
- ); - - match self.new_node_to_index.get_shard_by_value(&dep_node).lock().entry(dep_node) { - Entry::Occupied(entry) => *entry.get(), - Entry::Vacant(entry) => { - let data = &mut *self.data.lock(); - let new_index = data.new.nodes.push(dep_node); - add_edges(&mut data.unshared_edges, &mut data.new.edges, edges); - data.new.fingerprints.push(fingerprint); - let dep_node_index = data.hybrid_indices.push(new_index.into()); - entry.insert(dep_node_index); - dep_node_index - } - } - } - - fn intern_red_node( - &self, - prev_graph: &PreviousDepGraph, - prev_index: SerializedDepNodeIndex, - edges: EdgesVec, - fingerprint: Fingerprint, - ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); - - let mut prev_index_to_index = self.prev_index_to_index.lock(); - - match prev_index_to_index[prev_index] { - Some(dep_node_index) => dep_node_index, - None => { - let data = &mut *self.data.lock(); - let red_index = data.red.node_indices.push(prev_index); - add_edges(&mut data.unshared_edges, &mut data.red.edges, edges); - data.red.fingerprints.push(fingerprint); - let dep_node_index = data.hybrid_indices.push(red_index.into()); - prev_index_to_index[prev_index] = Some(dep_node_index); - dep_node_index - } - } - } - - fn intern_light_green_node( - &self, - prev_graph: &PreviousDepGraph, - prev_index: SerializedDepNodeIndex, - edges: EdgesVec, - ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); - - let mut prev_index_to_index = self.prev_index_to_index.lock(); - - match prev_index_to_index[prev_index] { - Some(dep_node_index) => dep_node_index, - None => { - let data = &mut *self.data.lock(); - let light_green_index = data.light_green.node_indices.push(prev_index); - add_edges(&mut data.unshared_edges, &mut data.light_green.edges, edges); - let dep_node_index = data.hybrid_indices.push(light_green_index.into()); - prev_index_to_index[prev_index] = Some(dep_node_index); - dep_node_index - } - } - } - - fn intern_dark_green_node( - &self, - prev_graph: &PreviousDepGraph, - prev_index: SerializedDepNodeIndex, - ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); - - let mut prev_index_to_index = self.prev_index_to_index.lock(); - - match prev_index_to_index[prev_index] { - Some(dep_node_index) => dep_node_index, - None => { - let mut data = self.data.lock(); - let dep_node_index = data.hybrid_indices.push(prev_index.into()); - prev_index_to_index[prev_index] = Some(dep_node_index); - dep_node_index - } - } - } - - #[inline] - fn debug_assert_not_in_new_nodes( - &self, - prev_graph: &PreviousDepGraph, - prev_index: SerializedDepNodeIndex, - ) { - let node = &prev_graph.index_to_node(prev_index); - debug_assert!( - !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node), - "node from previous graph present in new node collection" - ); - } -} - -#[inline] -fn add_edges( - edges: &mut IndexVec, - edge_indices: &mut IndexVec>, - new_edges: EdgesVec, -) { - let start = edges.next_index(); - edges.extend(new_edges); - let end = edges.next_index(); - edge_indices.push(start..end); } /// The capacity of the `reads` field `SmallVec` @@ -1670,40 +961,3 @@ impl Default for TaskDeps { } } } - -// A data structure that stores Option values as a contiguous -// array, using one u32 per entry. 
-struct DepNodeColorMap { - values: IndexVec, -} - -const COMPRESSED_NONE: u32 = 0; -const COMPRESSED_RED: u32 = 1; -const COMPRESSED_FIRST_GREEN: u32 = 2; - -impl DepNodeColorMap { - fn new(size: usize) -> DepNodeColorMap { - DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() } - } - - #[inline] - fn get(&self, index: SerializedDepNodeIndex) -> Option { - match self.values[index].load(Ordering::Acquire) { - COMPRESSED_NONE => None, - COMPRESSED_RED => Some(DepNodeColor::Red), - value => { - Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN))) - } - } - } - - fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) { - self.values[index].store( - match color { - DepNodeColor::Red => COMPRESSED_RED, - DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN, - }, - Ordering::Release, - ) - } -} diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs index e8fb71be3e08f..f9a3905e14e31 100644 --- a/compiler/rustc_query_system/src/dep_graph/mod.rs +++ b/compiler/rustc_query_system/src/dep_graph/mod.rs @@ -1,15 +1,13 @@ pub mod debug; mod dep_node; mod graph; -mod prev; mod query; mod serialized; pub use dep_node::{DepNode, DepNodeParams, WorkProductId}; -pub use graph::{hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, WorkProduct}; -pub use prev::PreviousDepGraph; +pub use graph::{hash_result, DepGraph, TaskDeps, WorkProduct}; pub use query::DepGraphQuery; -pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex}; +pub use serialized::{DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sync::Lock; diff --git a/compiler/rustc_query_system/src/dep_graph/prev.rs b/compiler/rustc_query_system/src/dep_graph/prev.rs deleted file mode 100644 index c3d0f79525572..0000000000000 --- a/compiler/rustc_query_system/src/dep_graph/prev.rs +++ /dev/null @@ -1,61 +0,0 @@ -use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; -use super::{DepKind, DepNode}; -use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::fx::FxHashMap; - -#[derive(Debug)] -pub struct PreviousDepGraph { - data: SerializedDepGraph, - index: FxHashMap, SerializedDepNodeIndex>, -} - -impl Default for PreviousDepGraph { - fn default() -> Self { - PreviousDepGraph { data: Default::default(), index: Default::default() } - } -} - -impl PreviousDepGraph { - pub fn new(data: SerializedDepGraph) -> PreviousDepGraph { - let index: FxHashMap<_, _> = - data.nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect(); - PreviousDepGraph { data, index } - } - - #[inline] - pub fn edge_targets_from( - &self, - dep_node_index: SerializedDepNodeIndex, - ) -> &[SerializedDepNodeIndex] { - self.data.edge_targets_from(dep_node_index) - } - - #[inline] - pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { - self.data.nodes[dep_node_index] - } - - #[inline] - pub fn node_to_index(&self, dep_node: &DepNode) -> SerializedDepNodeIndex { - self.index[dep_node] - } - - #[inline] - pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option { - self.index.get(dep_node).cloned() - } - - #[inline] - pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option { - self.index.get(dep_node).map(|&node_index| self.data.fingerprints[node_index]) - } - - #[inline] - pub fn fingerprint_by_index(&self, dep_node_index: 
SerializedDepNodeIndex) -> Fingerprint { - self.data.fingerprints[dep_node_index] - } - - pub fn node_count(&self) -> usize { - self.index.len() - } -} diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 9bb922b0a9008..9decdcc700228 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -1,127 +1,492 @@ //! The data that we will serialize and deserialize. +use super::query::DepGraphQuery; use super::{DepKind, DepNode}; use rustc_data_structures::fingerprint::Fingerprint; -use rustc_index::vec::IndexVec; -use rustc_serialize::{Decodable, Decoder}; +use rustc_data_structures::fx::FxHashMap; +use rustc_index::vec::{Idx, IndexVec}; +use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; +use std::convert::TryInto; + +#[derive(Debug, PartialEq, Eq)] +pub enum DepNodeColor { + Green, + Red, + New, +} + +const TAG_UNKNOWN: u32 = 0; +const TAG_GREEN: u32 = 1 << 30; +const TAG_RED: u32 = 2 << 30; +const TAG_NEW: u32 = 3 << 30; +const TAG_MASK: u32 = TAG_UNKNOWN | TAG_GREEN | TAG_RED | TAG_NEW; +const OFFSET_MASK: u32 = !TAG_MASK; + +impl DepNodeColor { + const fn tag(self) -> u32 { + match self { + Self::Green => TAG_GREEN, + Self::Red => TAG_RED, + Self::New => TAG_NEW, + } + } +} // The maximum value of `SerializedDepNodeIndex` leaves the upper two bits -// unused so that we can store multiple index types in `CompressedHybridIndex`, -// and use those bits to encode which index type it contains. +// unused so that we can store the node color along with it. rustc_index::newtype_index! { pub struct SerializedDepNodeIndex { MAX = 0x7FFF_FFFF } } +// This newtype exists to ensure the main algorithms do not forget interning nodes. +rustc_index::newtype_index! { + pub struct DepNodeIndex { + MAX = 0x7FFF_FFFF + } +} + +impl SerializedDepNodeIndex { + pub(super) fn rejuvenate(self) -> DepNodeIndex { + DepNodeIndex::new(self.index()) + } +} + +#[derive(Copy, Clone, Encodable, Decodable)] +struct ColorAndOffset(u32); + +impl std::fmt::Debug for ColorAndOffset { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + fmt.debug_struct("ColorAndOffset") + .field("color", &self.color()) + .field("offset", &self.offset()) + .finish() + } +} + +impl ColorAndOffset { + fn unknown(offset: u32) -> ColorAndOffset { + debug_assert_eq!(offset & TAG_MASK, 0); + ColorAndOffset(offset | TAG_UNKNOWN) + } + + fn new(color: DepNodeColor, offset: usize) -> ColorAndOffset { + let offset: u32 = offset.try_into().unwrap(); + debug_assert_eq!(offset & TAG_MASK, 0); + ColorAndOffset(offset | color.tag()) + } + + fn set_color(&mut self, color: DepNodeColor) { + let offset = self.0 & OFFSET_MASK; + self.0 = color.tag() | offset; + } + + fn offset(self) -> u32 { + self.0 & OFFSET_MASK + } + + fn color(self) -> Option { + let tag = self.0 & TAG_MASK; + match tag { + TAG_NEW => Some(DepNodeColor::New), + TAG_RED => Some(DepNodeColor::Red), + TAG_GREEN => Some(DepNodeColor::Green), + TAG_UNKNOWN => None, + _ => panic!(), + } + } +} + /// Data for use when recompiling the **current crate**. #[derive(Debug)] pub struct SerializedDepGraph { /// The set of all DepNodes in the graph - pub nodes: IndexVec>, + nodes: IndexVec>, /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to /// the DepNode at the same index in the nodes vector. 
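The `ColorAndOffset` packing introduced above keeps two tag bits in the top of a `u32` and a 30-bit offset below them. The following is a standalone round-trip of that encoding, reusing the same constant values but none of the surrounding types; it is an illustration, not code from the patch.

const TAG_GREEN: u32 = 1 << 30;
const TAG_RED: u32 = 2 << 30;
const TAG_MASK: u32 = 3 << 30; // TAG_UNKNOWN | TAG_GREEN | TAG_RED | TAG_NEW
const OFFSET_MASK: u32 = !TAG_MASK;

fn main() {
    let offset: u32 = 12_345;
    // Corresponds to ColorAndOffset::new(DepNodeColor::Green, offset).
    let packed = offset | TAG_GREEN;
    assert_eq!(packed & OFFSET_MASK, offset);   // what .offset() recovers
    assert_eq!(packed & TAG_MASK, TAG_GREEN);   // what .color() inspects
    assert_ne!(packed & TAG_MASK, TAG_RED);
    println!("packed = {:#010x}", packed);
}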
- pub fingerprints: IndexVec, + fingerprints: IndexVec, /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, /// which holds the actual DepNodeIndices of the target nodes. - pub edge_list_indices: IndexVec, + edge_list_indices: IndexVec, /// A flattened list of all edge targets in the graph. Edge sources are /// implicit in edge_list_indices. - pub edge_list_data: Vec, + edge_list_data: Vec, + /// Reverse map for `nodes`. It is computed on the fly at decoding time. + index: FxHashMap, SerializedDepNodeIndex>, + /// Index of the last serialized node. + serialized_node_count: SerializedDepNodeIndex, } impl Default for SerializedDepGraph { fn default() -> Self { - SerializedDepGraph { - nodes: Default::default(), - fingerprints: Default::default(), - edge_list_indices: Default::default(), - edge_list_data: Default::default(), + Self { + nodes: IndexVec::new(), + fingerprints: IndexVec::new(), + edge_list_indices: IndexVec::new(), + edge_list_data: Vec::new(), + index: FxHashMap::default(), + serialized_node_count: SerializedDepNodeIndex::new(0), } } } impl SerializedDepGraph { + fn intern_new_node( + &mut self, + node: DepNode, + deps: &[DepNodeIndex], + fingerprint: Fingerprint, + ) -> DepNodeIndex { + let index = self.nodes.push(node); + debug!("intern_new: {:?} {:?}", index, node); + let _index = self.fingerprints.push(fingerprint); + debug_assert_eq!(index, _index); + let (start, end) = self.insert_deps(deps); + let _index = self + .edge_list_indices + .push((ColorAndOffset::new(DepNodeColor::New, start), end.try_into().unwrap())); + debug_assert_eq!(index, _index); + let _o = self.index.insert(node, index); + debug_assert_eq!(_o, None); + index.rejuvenate() + } + + fn insert_deps(&mut self, deps: &[DepNodeIndex]) -> (usize, usize) { + let start = self.edge_list_data.len(); + self.edge_list_data.extend(deps.iter().copied()); + let end = self.edge_list_data.len(); + (start, end) + } + + fn update_deps( + &mut self, + index: SerializedDepNodeIndex, + color: DepNodeColor, + deps: &[DepNodeIndex], + ) { + let (start, _) = self.edge_list_indices[index]; + debug_assert_eq!(start.color(), None); + let (start, end) = self.insert_deps(deps); + debug!("intern_color: {:?} => {:?}", index, color); + let start = ColorAndOffset::new(color, start); + self.edge_list_indices[index] = (start, end.try_into().unwrap()); + } + + pub(crate) fn intern_dark_green_node(&mut self, index: SerializedDepNodeIndex) -> DepNodeIndex { + debug!("intern_drak_green: {:?}", index); + debug_assert_eq!(self.edge_list_indices[index].0.color(), None); + self.edge_list_indices[index].0.set_color(DepNodeColor::Green); + debug!("intern_color: {:?} => Green", index); + index.rejuvenate() + } + + pub(crate) fn intern_anon_node( + &mut self, + node: DepNode, + deps: &[DepNodeIndex], + ) -> DepNodeIndex { + self.dep_node_index_of_opt(&node) + .unwrap_or_else(|| self.intern_new_node(node, deps, Fingerprint::ZERO)) + } + + pub(crate) fn intern_task_node( + &mut self, + node: DepNode, + deps: &[DepNodeIndex], + fingerprint: Option, + print_status: bool, + ) -> DepNodeIndex { + let print_status = cfg!(debug_assertions) && print_status; + + if let Some(&prev_index) = self.index.get(&node) { + if let Some(_) = self.color(prev_index) { + return prev_index.rejuvenate(); + } + + // Determine the color and index of the new `DepNode`. 
+ let color = if let Some(fingerprint) = fingerprint { + if fingerprint == self.fingerprints[prev_index] { + if print_status { + eprintln!("[task::green] {:?}", node); + } + + // This is a light green node: it existed in the previous compilation, + // its query was re-executed, and it has the same result as before. + DepNodeColor::Green + } else { + if print_status { + eprintln!("[task::red] {:?}", node); + } + + // This is a red node: it existed in the previous compilation, its query + // was re-executed, but it has a different result from before. + self.fingerprints[prev_index] = fingerprint; + DepNodeColor::Red + } + } else { + if print_status { + eprintln!("[task::red] {:?}", node); + } + + // This is a red node, effectively: it existed in the previous compilation + // session, its query was re-executed, but it doesn't compute a result hash + // (i.e. it represents a `no_hash` query), so we have no way of determining + // whether or not the result was the same as before. + self.fingerprints[prev_index] = Fingerprint::ZERO; + DepNodeColor::Red + }; + + self.update_deps(prev_index, color, deps); + prev_index.rejuvenate() + } else { + if print_status { + eprintln!("[task::new] {:?}", node); + } + + // This is a new node: it didn't exist in the previous compilation session. + self.intern_new_node(node, deps, fingerprint.unwrap_or(Fingerprint::ZERO)) + } + } + + #[inline] + pub(crate) fn edge_targets_from_serialized( + &self, + source: SerializedDepNodeIndex, + ) -> impl Iterator + '_ { + let (start, end) = self.edge_list_indices[source]; + debug_assert_eq!(start.color(), None); + let start = start.offset() as usize; + let end = end as usize; + self.edge_list_data[start..end].iter().map(|i| SerializedDepNodeIndex::new(i.index())) + } + + #[inline] + pub(crate) fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] { + let (start, end) = self.edge_list_indices[SerializedDepNodeIndex::new(source.index())]; + let start = start.offset() as usize; + let end = end as usize; + &self.edge_list_data[start..end] + } + + #[inline] + pub(crate) fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { + self.nodes[dep_node_index] + } + + #[inline] + pub(crate) fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { + self.nodes[SerializedDepNodeIndex::new(dep_node_index.index())] + } + + #[inline] + pub(crate) fn node_to_index_opt( + &self, + dep_node: &DepNode, + ) -> Option { + let idx = *self.index.get(dep_node)?; + if idx >= self.serialized_node_count { None } else { Some(idx) } + } + + #[inline] + pub(crate) fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { + let index = *self.index.get(dep_node)?; + // Return none if the node has not been coloured yet. 
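The coloring rule applied above, same fingerprint as the previous session means green, a different or missing fingerprint means red, reduces to a few lines. In this sketch, `Color` and plain `u64` fingerprints are simplified stand-ins for `DepNodeColor` and `Fingerprint`.

#[derive(Debug, PartialEq)]
enum Color {
    Green,
    Red,
}

// `prev` is the fingerprint recorded in the previous session; `new` is the
// fingerprint of the re-executed query, or None for a `no_hash` query.
fn color_after_reexecution(prev: u64, new: Option<u64>) -> Color {
    match new {
        Some(f) if f == prev => Color::Green,
        _ => Color::Red,
    }
}

fn main() {
    assert_eq!(color_after_reexecution(42, Some(42)), Color::Green);
    assert_eq!(color_after_reexecution(42, Some(7)), Color::Red);
    assert_eq!(color_after_reexecution(42, None), Color::Red); // no_hash query
}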
+ let _ = self.edge_list_indices[index].0.color()?; + debug!( + "dep_node_index_of_opt: dep_node={:?} index={:?} indices={:?}", + dep_node, index, self.edge_list_indices[index] + ); + Some(index.rejuvenate()) + } + + #[inline] + pub(crate) fn color(&self, index: SerializedDepNodeIndex) -> Option { + self.edge_list_indices[index].0.color() + } + + #[inline] + pub(crate) fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { + self.fingerprints[SerializedDepNodeIndex::new(dep_node_index.index())] + } + + #[inline] + pub(crate) fn serialized_indices(&self) -> impl Iterator + '_ { + (0..self.serialized_node_count.index()).map(SerializedDepNodeIndex::new) + } + + #[inline] + pub(crate) fn indices(&self) -> impl Iterator + '_ { + self.edge_list_indices.iter_enumerated().filter_map(|(i, (s, _))| { + // Return none if the node has not been coloured yet. + let _ = s.color()?; + Some(i.rejuvenate()) + }) + } + + #[inline] + pub(crate) fn serialized_node_count(&self) -> usize { + self.serialized_node_count.index() + } + + #[inline] + pub(crate) fn node_count(&self) -> usize { + self.edge_list_indices.iter().filter_map(|(s, _)| s.color()).count() + } + #[inline] - pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] { - let targets = self.edge_list_indices[source]; - &self.edge_list_data[targets.0 as usize..targets.1 as usize] + pub(crate) fn edge_count(&self) -> usize { + self.edge_list_indices + .iter() + .filter_map(|(s, e)| { + s.color()?; + Some((e - s.offset()) as usize) + }) + .sum() + } + + pub(crate) fn query(&self) -> DepGraphQuery { + let nodes: Vec<_> = self + .nodes + .iter_enumerated() + .filter_map(|(i, n)| { + let _ = self.edge_list_indices[i].0.color()?; + Some(*n) + }) + .collect(); + let edge_list_indices: Vec<_> = self + .edge_list_indices + .iter() + .filter_map(|(s, e)| { + s.color()?; + Some((s.offset() as usize, *e as usize)) + }) + .collect(); + let edge_list_data: Vec<_> = self.edge_list_data.iter().map(|i| i.index()).collect(); + debug_assert_eq!(nodes.len(), edge_list_indices.len()); + + DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..]) + } + + pub(crate) fn compression_map(&self) -> IndexVec> { + let mut new_index = SerializedDepNodeIndex::new(0); + let mut remap = IndexVec::from_elem_n(None, self.nodes.len()); + for index in self.indices() { + debug_assert!(new_index.index() <= index.index()); + remap[index] = Some(new_index); + new_index.increment_by(1); + } + remap + } +} + +impl> Encodable for SerializedDepGraph { + fn encode(&self, e: &mut E) -> Result<(), E::Error> { + let remap = self.compression_map(); + + let (nodes, fingerprints) = { + let mut nodes = self.nodes.clone(); + let mut fingerprints = self.fingerprints.clone(); + let mut new_index = SerializedDepNodeIndex::new(0); + + for index in self.indices() { + debug_assert!(new_index.index() <= index.index()); + + // Back-copy the nodes and fingerprints. 
+ let index = SerializedDepNodeIndex::new(index.index()); + nodes[new_index] = self.nodes[index]; + fingerprints[new_index] = self.fingerprints[index]; + + new_index.increment_by(1); + } + nodes.truncate(new_index.index()); + fingerprints.truncate(new_index.index()); + + (nodes, fingerprints) + }; + + let (new_indices, new_edges) = { + let mut new_indices: IndexVec = + IndexVec::with_capacity(self.nodes.len()); + let mut new_edges: Vec = + Vec::with_capacity(self.edge_list_data.len()); + + for (index, (start, end)) in self.edge_list_indices.iter_enumerated() { + match start.color() { + // This node does not exist in this session. Skip it. + None => continue, + Some(_) => {} + } + + let new_index = new_indices.push(new_edges.len().try_into().unwrap()); + debug_assert_eq!(remap[index.rejuvenate()], Some(new_index)); + + // Reconstruct the edges vector since it may be out of order. + // We only store the start indices, since the end is the next's start. + let start = start.offset() as usize; + let end = *end as usize; + new_edges.extend(self.edge_list_data[start..end].iter().map(|i| { + remap[*i] + .unwrap_or_else(|| panic!("Unknown remap for {:?} while {:?}", *i, index)) + })); + } + + (new_indices, new_edges) + }; + + let mut index = FxHashMap::default(); + for (idx, &dep_node) in nodes.iter_enumerated() { + debug!("DECODE index={:?} node={:?}", idx, dep_node); + let _o = index.insert(dep_node, idx); + debug_assert_eq!(_o, None); + } + let _ = index; + + e.emit_struct("SerializedDepGraph", 4, |e| { + e.emit_struct_field("nodes", 0, |e| nodes.encode(e))?; + e.emit_struct_field("fingerprints", 1, |e| fingerprints.encode(e))?; + e.emit_struct_field("edge_list_indices", 2, |e| new_indices.encode(e))?; + e.emit_struct_field("edge_list_data", 3, |e| new_edges.encode(e))?; + Ok(()) + }) } } impl> Decodable for SerializedDepGraph { fn decode(d: &mut D) -> Result, D::Error> { - // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph` - // using data copied from the `DepGraph`. But copying created a large memory spike, so we - // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we - // deserialize that data into a `SerializedDepGraph` in the next compilation session, we - // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to - // be in sync. If you update this decoding, be sure to update the encoding, and vice-versa. - // - // We mimic the sequence of `Encode` and `Encodable` method calls used by the `DepGraph`'s - // `Encodable` implementation with the corresponding sequence of `Decode` and `Decodable` - // method calls. E.g. `Decode::read_struct` pairs with `Encode::emit_struct`, `DepNode`'s - // `decode` pairs with `DepNode`'s `encode`, and so on. Any decoding methods not associated - // with corresponding encoding methods called in `DepGraph`'s `Encodable` implementation - // are off limits, because we'd be relying on their implementation details. - // - // For example, because we know it happens to do the right thing, its tempting to just use - // `IndexVec`'s `Decodable` implementation to decode into some of the collections below, - // even though `DepGraph` doesn't use its `Encodable` implementation. But the `IndexVec` - // implementation could change, and we'd have a bug. 
- // - // Variables below are explicitly typed so that anyone who changes the `SerializedDepGraph` - // representation without updating this function will encounter a compilation error, and - // know to update this and possibly the `DepGraph` `Encodable` implementation accordingly - // (the latter should serialize data in a format compatible with our representation). - d.read_struct("SerializedDepGraph", 4, |d| { let nodes: IndexVec> = - d.read_struct_field("nodes", 0, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; - + d.read_struct_field("nodes", 0, Decodable::decode)?; let fingerprints: IndexVec = - d.read_struct_field("fingerprints", 1, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; - - let edge_list_indices: IndexVec = d - .read_struct_field("edge_list_indices", 2, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; - - let edge_list_data: Vec = - d.read_struct_field("edge_list_data", 3, |d| { - d.read_seq(|d, len| { - let mut v = Vec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; - - Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) + d.read_struct_field("fingerprints", 1, Decodable::decode)?; + let mut edge_list_indices: IndexVec = + d.read_struct_field("edge_list_indices", 2, Decodable::decode)?; + let edge_list_data: Vec = + d.read_struct_field("edge_list_data", 3, Decodable::decode)?; + + edge_list_indices.push(edge_list_data.len().try_into().unwrap()); + let edge_list_indices = IndexVec::from_fn_n( + |i| (ColorAndOffset::unknown(edge_list_indices[i]), edge_list_indices[i + 1]), + edge_list_indices.len() - 1, + ); + + let mut index = FxHashMap::default(); + for (idx, &dep_node) in nodes.iter_enumerated() { + debug!("DECODE index={:?} node={:?}", idx, dep_node); + let _o = index.insert(dep_node, idx); + debug_assert_eq!(_o, None); + } + let serialized_node_count = nodes.next_index(); + + Ok(SerializedDepGraph { + nodes, + index, + fingerprints, + edge_list_indices, + edge_list_data, + serialized_node_count, + }) }) } } diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 0050670b338ea..c194221fb442c 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -2,8 +2,9 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. -use crate::dep_graph::{DepContext, DepKind, DepNode}; -use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; +use crate::dep_graph::{ + DepContext, DepKind, DepNode, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex, +}; use crate::query::caches::QueryCache; use crate::query::config::{QueryDescription, QueryVtable, QueryVtableExt}; use crate::query::job::{ @@ -509,11 +510,11 @@ fn load_from_disk_and_cache_in_memory( where CTX: QueryContext, { + debug_assert!(tcx.dep_context().dep_graph().node_color(dep_node) == Some(DepNodeColor::Green)); + // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. 
- debug_assert!(tcx.dep_context().dep_graph().is_green(dep_node)); - // First we try to load the result from the on-disk cache. let result = if query.cache_on_disk(tcx, &key, None) { let prof_timer = tcx.dep_context().profiler().incr_cache_loading(); @@ -568,13 +569,6 @@ fn incremental_verify_ich( ) where CTX: QueryContext, { - assert!( - Some(tcx.dep_graph().fingerprint_of(dep_node_index)) - == tcx.dep_graph().prev_fingerprint_of(dep_node), - "fingerprint for green query instance not loaded from cache: {:?}", - dep_node, - ); - debug!("BEGIN verify_ich({:?})", dep_node); let mut hcx = tcx.create_stable_hashing_context(); From 8e33f213cf0c218262b90fe5bb796afdef8b8d42 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sat, 20 Feb 2021 17:46:47 +0100 Subject: [PATCH 03/14] Inline CurrentDepGraph. --- .../rustc_query_system/src/dep_graph/graph.rs | 222 +++++++----------- 1 file changed, 88 insertions(+), 134 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 5c266873ab1a8..9409f4c42c3ea 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -43,16 +43,33 @@ impl std::convert::From for QueryInvocationId { } struct DepGraphData { - /// The new encoding of the dependency graph, optimized for red/green - /// tracking. The `current` field is the dependency graph of only the - /// current compilation session: We don't merge the previous dep-graph into - /// current one anymore, but we do reference shared data to save space. - current: CurrentDepGraph, - /// The dep-graph from the previous compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. previous: RwLock>, + /// Used to trap when a specific edge is added to the graph. + /// This is used for debug purposes and is only active with `debug_assertions`. + #[allow(dead_code)] + forbidden_edge: Option, + + /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of + /// their edges. This has the beneficial side-effect that multiple anonymous + /// nodes can be coalesced into one without changing the semantics of the + /// dependency graph. However, the merging of nodes can lead to a subtle + /// problem during red-green marking: The color of an anonymous node from + /// the current session might "shadow" the color of the node with the same + /// ID from the previous session. In order to side-step this problem, we make + /// sure that anonymous `NodeId`s allocated in different sessions don't overlap. + /// This is implemented by mixing a session-key into the ID fingerprint of + /// each anon node. The session-key is just a random number generated when + /// the `DepGraph` is created. + anon_id_seed: Fingerprint, + + /// These are simple counters that are for profiling and + /// debugging and only active with `debug_assertions`. + total_read_count: AtomicU64, + total_duplicate_read_count: AtomicU64, + /// A set of loaded diagnostics that is in the progress of being emitted. 
emitting_diagnostics: Lock>, @@ -80,13 +97,70 @@ impl DepGraph { prev_graph: SerializedDepGraph, prev_work_products: FxHashMap, ) -> DepGraph { - let prev_graph_node_count = prev_graph.serialized_node_count(); + let _prev_graph_node_count = prev_graph.serialized_node_count(); + + use std::time::{SystemTime, UNIX_EPOCH}; + + let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); + let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64; + let mut stable_hasher = StableHasher::new(); + nanos.hash(&mut stable_hasher); + + let forbidden_edge = if cfg!(debug_assertions) { + match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + }, + Err(_) => None, + } + } else { + None + }; + + /* + // Pre-allocate the dep node structures. We over-allocate a little so + // that we hopefully don't have to re-allocate during this compilation + // session. The over-allocation for new nodes is 2% plus a small + // constant to account for the fact that in very small crates 2% might + // not be enough. The allocation for red and green node data doesn't + // include a constant, as we don't want to allocate anything for these + // structures during full incremental builds, where they aren't used. + // + // These estimates are based on the distribution of node and edge counts + // seen in rustc-perf benchmarks, adjusted somewhat to account for the + // fact that these benchmarks aren't perfectly representative. + // + // FIXME Use a collection type that doesn't copy node and edge data and + // grow multiplicatively on reallocation. Without such a collection or + // solution having the same effect, there is a performance hazard here + // in both time and space, as growing these collections means copying a + // large amount of data and doubling already large buffer capacities. A + // solution for this will also mean that it's less important to get + // these estimates right. + let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200; + let red_node_count_estimate = (prev_graph_node_count * 3) / 100; + let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100; + let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate; + + let average_edges_per_node_estimate = 6; + let unshared_edge_count_estimate = average_edges_per_node_estimate + * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate); + */ + + // We store a large collection of these in `prev_index_to_index` during + // non-full incremental builds, and want to ensure that the element size + // doesn't inadvertently increase. + static_assert_size!(Option, 4); DepGraph { data: Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, dep_node_debug: Default::default(), - current: CurrentDepGraph::new(prev_graph_node_count), + anon_id_seed: stable_hasher.finish(), + forbidden_edge, + total_read_count: AtomicU64::new(0), + total_duplicate_read_count: AtomicU64::new(0), emitting_diagnostics: Default::default(), previous: RwLock::new(prev_graph), })), @@ -239,7 +313,7 @@ impl DepGraph { // Fingerprint::combine() is faster than sending Fingerprint // through the StableHasher (at least as long as StableHasher // is so slow). 
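The anon-node id scheme described above (hash the edge list, then mix in a per-session seed so that ids from different sessions are kept from clashing) can be sketched with standard-library pieces. Here `DefaultHasher` stands in for `StableHasher`, and hashing the seed approximates `Fingerprint::combine`.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::time::{SystemTime, UNIX_EPOCH};

// The per-session seed: wall-clock nanoseconds fed through a hasher, mirroring
// the anon_id_seed computation above.
fn session_seed() -> u64 {
    let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
    let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
    let mut hasher = DefaultHasher::new();
    nanos.hash(&mut hasher);
    hasher.finish()
}

// An anonymous node id: hash of the edge list, mixed with the session seed so
// ids from different sessions are extremely unlikely to coincide.
fn anon_id(seed: u64, edges: &[u32]) -> u64 {
    let mut hasher = DefaultHasher::new();
    edges.hash(&mut hasher);
    seed.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let seed = session_seed();
    println!("anon id for edges [1, 2, 3]: {:#x}", anon_id(seed, &[1, 2, 3]));
}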
- hash: data.current.anon_id_seed.combine(hasher.finish()).into(), + hash: data.anon_id_seed.combine(hasher.finish()).into(), }; let mut previous = data.previous.write(); @@ -272,7 +346,7 @@ impl DepGraph { let mut task_deps = task_deps.lock(); let task_deps = &mut *task_deps; if cfg!(debug_assertions) { - data.current.total_read_count.fetch_add(1, Relaxed); + data.total_read_count.fetch_add(1, Relaxed); } // As long as we only have a low number of reads we can avoid doing a hash @@ -293,7 +367,7 @@ impl DepGraph { #[cfg(debug_assertions)] { if let Some(target) = task_deps.node { - if let Some(ref forbidden_edge) = data.current.forbidden_edge { + if let Some(ref forbidden_edge) = data.forbidden_edge { let src = self.dep_node_of(dep_node_index); if forbidden_edge.test(&src, &target) { panic!("forbidden edge {:?} -> {:?} created", src, target) @@ -302,7 +376,7 @@ impl DepGraph { } } } else if cfg!(debug_assertions) { - data.current.total_duplicate_read_count.fetch_add(1, Relaxed); + data.total_duplicate_read_count.fetch_add(1, Relaxed); } } }) @@ -687,7 +761,6 @@ impl DepGraph { let data = self.data.as_ref().unwrap(); let prev = &data.previous.read(); - let current = &data.current; let mut stats: FxHashMap<_, Stat> = FxHashMap::with_hasher(Default::default()); @@ -721,8 +794,8 @@ impl DepGraph { eprintln!("[incremental] Total Edge Count: {}", total_edge_count); if cfg!(debug_assertions) { - let total_edge_reads = current.total_read_count.load(Relaxed); - let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed); + let total_edge_reads = data.total_read_count.load(Relaxed); + let total_duplicate_edge_reads = data.total_duplicate_read_count.load(Relaxed); eprintln!("[incremental] Total Edge Reads: {}", total_edge_reads); eprintln!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads); @@ -819,125 +892,6 @@ rustc_index::newtype_index! { struct EdgeIndex { .. } } -/// `CurrentDepGraph` stores the dependency graph for the current session. It -/// will be populated as we run queries or tasks. We never remove nodes from the -/// graph: they are only added. -/// -/// The nodes in it are identified by a `DepNodeIndex`. Internally, this maps to -/// a `HybridIndex`, which identifies which collection in the `data` field -/// contains a node's data. Which collection is used for a node depends on -/// whether the node was present in the `SerializedDepGraph`, and if so, the color -/// of the node. Each type of node can share more or less data with the previous -/// graph. When possible, we can store just the index of the node in the -/// previous graph, rather than duplicating its data in our own collections. -/// This is important, because these graph structures are some of the largest in -/// the compiler. -/// -/// For the same reason, we also avoid storing `DepNode`s more than once as map -/// keys. The `new_node_to_index` map only contains nodes not in the previous -/// graph, and we map nodes in the previous graph to indices via a two-step -/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, -/// and the `prev_index_to_index` vector (which is more compact and faster than -/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`. -/// -/// This struct uses three locks internally. The `data`, `new_node_to_index`, -/// and `prev_index_to_index` fields are locked separately. Operations that take -/// a `DepNodeIndex` typically just access the `data` field. 
-/// -/// We only need to manipulate at most two locks simultaneously: -/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When -/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` -/// first, and `data` second. -pub(super) struct CurrentDepGraph { - /// Used to trap when a specific edge is added to the graph. - /// This is used for debug purposes and is only active with `debug_assertions`. - #[allow(dead_code)] - forbidden_edge: Option, - - /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of - /// their edges. This has the beneficial side-effect that multiple anonymous - /// nodes can be coalesced into one without changing the semantics of the - /// dependency graph. However, the merging of nodes can lead to a subtle - /// problem during red-green marking: The color of an anonymous node from - /// the current session might "shadow" the color of the node with the same - /// ID from the previous session. In order to side-step this problem, we make - /// sure that anonymous `NodeId`s allocated in different sessions don't overlap. - /// This is implemented by mixing a session-key into the ID fingerprint of - /// each anon node. The session-key is just a random number generated when - /// the `DepGraph` is created. - anon_id_seed: Fingerprint, - - /// These are simple counters that are for profiling and - /// debugging and only active with `debug_assertions`. - total_read_count: AtomicU64, - total_duplicate_read_count: AtomicU64, -} - -impl CurrentDepGraph { - fn new(_prev_graph_node_count: usize) -> CurrentDepGraph { - use std::time::{SystemTime, UNIX_EPOCH}; - - let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64; - let mut stable_hasher = StableHasher::new(); - nanos.hash(&mut stable_hasher); - - let forbidden_edge = if cfg!(debug_assertions) { - match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { - Ok(s) => match EdgeFilter::new(&s) { - Ok(f) => Some(f), - Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), - }, - Err(_) => None, - } - } else { - None - }; - - /* - // Pre-allocate the dep node structures. We over-allocate a little so - // that we hopefully don't have to re-allocate during this compilation - // session. The over-allocation for new nodes is 2% plus a small - // constant to account for the fact that in very small crates 2% might - // not be enough. The allocation for red and green node data doesn't - // include a constant, as we don't want to allocate anything for these - // structures during full incremental builds, where they aren't used. - // - // These estimates are based on the distribution of node and edge counts - // seen in rustc-perf benchmarks, adjusted somewhat to account for the - // fact that these benchmarks aren't perfectly representative. - // - // FIXME Use a collection type that doesn't copy node and edge data and - // grow multiplicatively on reallocation. Without such a collection or - // solution having the same effect, there is a performance hazard here - // in both time and space, as growing these collections means copying a - // large amount of data and doubling already large buffer capacities. A - // solution for this will also mean that it's less important to get - // these estimates right. 
- let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200; - let red_node_count_estimate = (prev_graph_node_count * 3) / 100; - let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100; - let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate; - - let average_edges_per_node_estimate = 6; - let unshared_edge_count_estimate = average_edges_per_node_estimate - * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate); - */ - - // We store a large collection of these in `prev_index_to_index` during - // non-full incremental builds, and want to ensure that the element size - // doesn't inadvertently increase. - static_assert_size!(Option, 4); - - CurrentDepGraph { - anon_id_seed: stable_hasher.finish(), - forbidden_edge, - total_read_count: AtomicU64::new(0), - total_duplicate_read_count: AtomicU64::new(0), - } - } -} - /// The capacity of the `reads` field `SmallVec` const TASK_DEPS_READS_CAP: usize = 8; type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>; From a9de504339f799ae353904fabfd439065606b749 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 21 Feb 2021 13:57:19 +0100 Subject: [PATCH 04/14] Keep the previous dep-graph separately allocated. --- .../rustc_query_system/src/dep_graph/graph.rs | 69 +-- .../src/dep_graph/serialized.rs | 396 ++++++++++++------ 2 files changed, 276 insertions(+), 189 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 9409f4c42c3ea..276049828766e 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -16,7 +16,9 @@ use std::sync::atomic::Ordering::Relaxed; use super::debug::EdgeFilter; use super::query::DepGraphQuery; -use super::serialized::{DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex}; +use super::serialized::{ + CurrentDepGraph, DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex, +}; use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use crate::query::QueryContext; @@ -45,7 +47,7 @@ impl std::convert::From for QueryInvocationId { struct DepGraphData { /// The dep-graph from the previous compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. - previous: RwLock>, + previous: RwLock>, /// Used to trap when a specific edge is added to the graph. /// This is used for debug purposes and is only active with `debug_assertions`. @@ -97,8 +99,6 @@ impl DepGraph { prev_graph: SerializedDepGraph, prev_work_products: FxHashMap, ) -> DepGraph { - let _prev_graph_node_count = prev_graph.serialized_node_count(); - use std::time::{SystemTime, UNIX_EPOCH}; let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -118,41 +118,6 @@ impl DepGraph { None }; - /* - // Pre-allocate the dep node structures. We over-allocate a little so - // that we hopefully don't have to re-allocate during this compilation - // session. The over-allocation for new nodes is 2% plus a small - // constant to account for the fact that in very small crates 2% might - // not be enough. The allocation for red and green node data doesn't - // include a constant, as we don't want to allocate anything for these - // structures during full incremental builds, where they aren't used. 
- // - // These estimates are based on the distribution of node and edge counts - // seen in rustc-perf benchmarks, adjusted somewhat to account for the - // fact that these benchmarks aren't perfectly representative. - // - // FIXME Use a collection type that doesn't copy node and edge data and - // grow multiplicatively on reallocation. Without such a collection or - // solution having the same effect, there is a performance hazard here - // in both time and space, as growing these collections means copying a - // large amount of data and doubling already large buffer capacities. A - // solution for this will also mean that it's less important to get - // these estimates right. - let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200; - let red_node_count_estimate = (prev_graph_node_count * 3) / 100; - let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100; - let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate; - - let average_edges_per_node_estimate = 6; - let unshared_edge_count_estimate = average_edges_per_node_estimate - * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate); - */ - - // We store a large collection of these in `prev_index_to_index` during - // non-full incremental builds, and want to ensure that the element size - // doesn't inadvertently increase. - static_assert_size!(Option, 4); - DepGraph { data: Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, @@ -162,7 +127,7 @@ impl DepGraph { total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), emitting_diagnostics: Default::default(), - previous: RwLock::new(prev_graph), + previous: RwLock::new(CurrentDepGraph::new(prev_graph)), })), virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), } @@ -627,21 +592,13 @@ impl DepGraph { debug_assert!(!dep_node.kind.is_eval_always()); debug_assert_eq!(data.previous.read().index_to_node(prev_dep_node_index), *dep_node); - // Do not keep a reference to the borrowed `previous` graph, - // because the recursive calls. - let prev_deps: Vec<_> = - data.previous.read().edge_targets_from_serialized(prev_dep_node_index).collect(); - debug!( - "try_mark_previous_green({:?}) --- {:?} -- deps={:?}", - dep_node, - prev_dep_node_index, - prev_deps - .iter() - .map(|&d| (d, data.previous.read().index_to_node(d))) - .collect::>(), - ); - - for dep_dep_node_index in prev_deps { + let prev_deps = data.previous.read().color_or_edges(prev_dep_node_index); + let prev_deps = match prev_deps { + Err(prev_deps) => prev_deps, + Ok(DepNodeColor::Green) => return Some(prev_dep_node_index.rejuvenate()), + Ok(DepNodeColor::Red) | Ok(DepNodeColor::New) => return None, + }; + for &dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? } @@ -764,7 +721,7 @@ impl DepGraph { let mut stats: FxHashMap<_, Stat> = FxHashMap::with_hasher(Default::default()); - for index in prev.indices() { + for index in prev.live_indices() { let kind = prev.dep_node_of(index).kind; let edge_count = prev.edge_targets_from(index).len(); diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 9decdcc700228..52ceef91fa5a3 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -47,12 +47,25 @@ rustc_index::newtype_index! { } } +// Index type for new nodes. +rustc_index::newtype_index! 
{ + struct SplitIndex { + MAX = 0x7FFF_FFFF + } +} + impl SerializedDepNodeIndex { pub(super) fn rejuvenate(self) -> DepNodeIndex { DepNodeIndex::new(self.index()) } } +// We store a large collection of these `edge_list_data`. +// Non-full incremental builds, and want to ensure that the +// element size doesn't inadvertently increase. +static_assert_size!(Option, 4); +static_assert_size!(Option, 4); + #[derive(Copy, Clone, Encodable, Decodable)] struct ColorAndOffset(u32); @@ -71,7 +84,7 @@ impl ColorAndOffset { ColorAndOffset(offset | TAG_UNKNOWN) } - fn new(color: DepNodeColor, offset: usize) -> ColorAndOffset { + fn new(color: DepNodeColor, offset: u32) -> ColorAndOffset { let offset: u32 = offset.try_into().unwrap(); debug_assert_eq!(offset & TAG_MASK, 0); ColorAndOffset(offset | color.tag()) @@ -98,7 +111,9 @@ impl ColorAndOffset { } } -/// Data for use when recompiling the **current crate**. +/// Data for use when recompiling the **previous crate**. +/// +/// Those IndexVec are never pushed to, so as to avoid large reallocations. #[derive(Debug)] pub struct SerializedDepGraph { /// The set of all DepNodes in the graph @@ -113,10 +128,27 @@ pub struct SerializedDepGraph { /// A flattened list of all edge targets in the graph. Edge sources are /// implicit in edge_list_indices. edge_list_data: Vec, +} + +/// Data for use when recompiling the **current crate**. +#[derive(Debug)] +pub struct CurrentDepGraph { + /// The previous graph. + serialized: SerializedDepGraph, + /// The set of all DepNodes in the graph + nodes: IndexVec>, + /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to + /// the DepNode at the same index in the nodes vector. + fingerprints: IndexVec, + /// For each DepNode, stores the list of edges originating from that + /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, + /// which holds the actual DepNodeIndices of the target nodes. + edge_list_indices: IndexVec, + /// A flattened list of all edge targets in the graph. Edge sources are + /// implicit in edge_list_indices. + edge_list_data: Vec, /// Reverse map for `nodes`. It is computed on the fly at decoding time. - index: FxHashMap, SerializedDepNodeIndex>, - /// Index of the last serialized node. 
- serialized_node_count: SerializedDepNodeIndex, + index: FxHashMap, DepNodeIndex>, } impl Default for SerializedDepGraph { @@ -126,13 +158,32 @@ impl Default for SerializedDepGraph { fingerprints: IndexVec::new(), edge_list_indices: IndexVec::new(), edge_list_data: Vec::new(), - index: FxHashMap::default(), - serialized_node_count: SerializedDepNodeIndex::new(0), } } } -impl SerializedDepGraph { +impl CurrentDepGraph { + pub(crate) fn new(serialized: SerializedDepGraph) -> Self { + let prev_graph_node_count = serialized.nodes.len(); + let nodes = node_count_estimate(prev_graph_node_count); + let edges = edge_count_estimate(prev_graph_node_count); + + let mut index = FxHashMap::default(); + for (idx, &dep_node) in serialized.nodes.iter_enumerated() { + debug!("DECODE index={:?} node={:?}", idx, dep_node); + let _o = index.insert(dep_node, idx.rejuvenate()); + debug_assert_eq!(_o, None); + } + Self { + serialized, + nodes: IndexVec::with_capacity(nodes), + fingerprints: IndexVec::with_capacity(nodes), + edge_list_indices: IndexVec::with_capacity(nodes), + edge_list_data: Vec::with_capacity(edges), + index, + } + } + fn intern_new_node( &mut self, node: DepNode, @@ -140,24 +191,23 @@ impl SerializedDepGraph { fingerprint: Fingerprint, ) -> DepNodeIndex { let index = self.nodes.push(node); - debug!("intern_new: {:?} {:?}", index, node); + debug!("intern_new: {:?} {:?}", self.from_split(index), node); let _index = self.fingerprints.push(fingerprint); debug_assert_eq!(index, _index); let (start, end) = self.insert_deps(deps); - let _index = self - .edge_list_indices - .push((ColorAndOffset::new(DepNodeColor::New, start), end.try_into().unwrap())); + let _index = self.edge_list_indices.push((start, end)); debug_assert_eq!(index, _index); + let index = self.from_split(index); let _o = self.index.insert(node, index); debug_assert_eq!(_o, None); - index.rejuvenate() + index } - fn insert_deps(&mut self, deps: &[DepNodeIndex]) -> (usize, usize) { + fn insert_deps(&mut self, deps: &[DepNodeIndex]) -> (u32, u32) { let start = self.edge_list_data.len(); self.edge_list_data.extend(deps.iter().copied()); let end = self.edge_list_data.len(); - (start, end) + (start.try_into().unwrap(), end.try_into().unwrap()) } fn update_deps( @@ -166,18 +216,19 @@ impl SerializedDepGraph { color: DepNodeColor, deps: &[DepNodeIndex], ) { - let (start, _) = self.edge_list_indices[index]; + let (start, _) = self.serialized.edge_list_indices[index]; debug_assert_eq!(start.color(), None); let (start, end) = self.insert_deps(deps); + let len = self.serialized.edge_list_data.len() as u32; debug!("intern_color: {:?} => {:?}", index, color); - let start = ColorAndOffset::new(color, start); - self.edge_list_indices[index] = (start, end.try_into().unwrap()); + let start = ColorAndOffset::new(color, start + len); + self.serialized.edge_list_indices[index] = (start, end + len); } pub(crate) fn intern_dark_green_node(&mut self, index: SerializedDepNodeIndex) -> DepNodeIndex { debug!("intern_drak_green: {:?}", index); - debug_assert_eq!(self.edge_list_indices[index].0.color(), None); - self.edge_list_indices[index].0.set_color(DepNodeColor::Green); + debug_assert_eq!(self.serialized.edge_list_indices[index].0.color(), None); + self.serialized.edge_list_indices[index].0.set_color(DepNodeColor::Green); debug!("intern_color: {:?} => Green", index); index.rejuvenate() } @@ -200,14 +251,24 @@ impl SerializedDepGraph { ) -> DepNodeIndex { let print_status = cfg!(debug_assertions) && print_status; - if let Some(&prev_index) = 
self.index.get(&node) { - if let Some(_) = self.color(prev_index) { - return prev_index.rejuvenate(); + if let Some(&existing) = self.index.get(&node) { + let prev_index = self + .as_serialized(existing) + .unwrap_or_else(|_| panic!("Node {:?} is being interned multiple times.", node)); + match self.color(prev_index) { + Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { + panic!("Node {:?} is being interned multiple times.", node) + } + + // This can happen when trying to compute the result of green queries. + Some(DepNodeColor::Green) => return existing, + + None => {} } // Determine the color and index of the new `DepNode`. let color = if let Some(fingerprint) = fingerprint { - if fingerprint == self.fingerprints[prev_index] { + if fingerprint == self.serialized.fingerprints[prev_index] { if print_status { eprintln!("[task::green] {:?}", node); } @@ -222,7 +283,7 @@ impl SerializedDepGraph { // This is a red node: it existed in the previous compilation, its query // was re-executed, but it has a different result from before. - self.fingerprints[prev_index] = fingerprint; + self.serialized.fingerprints[prev_index] = fingerprint; DepNodeColor::Red } } else { @@ -234,7 +295,7 @@ impl SerializedDepGraph { // session, its query was re-executed, but it doesn't compute a result hash // (i.e. it represents a `no_hash` query), so we have no way of determining // whether or not the result was the same as before. - self.fingerprints[prev_index] = Fingerprint::ZERO; + self.serialized.fingerprints[prev_index] = Fingerprint::ZERO; DepNodeColor::Red }; @@ -251,33 +312,85 @@ impl SerializedDepGraph { } #[inline] - pub(crate) fn edge_targets_from_serialized( + fn as_serialized(&self, index: DepNodeIndex) -> Result { + let index = index.index(); + let count = self.serialized.nodes.len(); + if index < count { + Ok(SerializedDepNodeIndex::new(index)) + } else { + Err(SplitIndex::new(index - count)) + } + } + + #[inline] + fn from_split(&self, index: SplitIndex) -> DepNodeIndex { + DepNodeIndex::new(self.serialized.nodes.len() + index.index()) + } + + #[inline] + fn serialized_edges(&self, source: SerializedDepNodeIndex) -> &[DepNodeIndex] { + let (start, end) = self.serialized.edge_list_indices[source]; + let start = start.offset() as usize; + let end = end as usize; + let len = self.serialized.edge_list_data.len(); + if start < len { + &self.serialized.edge_list_data[start..end] + } else { + &self.edge_list_data[start - len..end - len] + } + } + + #[inline] + fn new_edges(&self, source: SplitIndex) -> &[DepNodeIndex] { + let (start, end) = self.edge_list_indices[source]; + let start = start as usize; + let end = end as usize; + &self.edge_list_data[start..end] + } + + #[inline] + pub(crate) fn color_or_edges( &self, source: SerializedDepNodeIndex, - ) -> impl Iterator + '_ { - let (start, end) = self.edge_list_indices[source]; - debug_assert_eq!(start.color(), None); + ) -> Result { + let (start, end) = self.serialized.edge_list_indices[source]; + if let Some(color) = start.color() { + return Ok(color); + } let start = start.offset() as usize; let end = end as usize; - self.edge_list_data[start..end].iter().map(|i| SerializedDepNodeIndex::new(i.index())) + // The node has not been colored, so the dependencies have not been lifted to point to the + // new nodes vector. + let edges = &self.serialized.edge_list_data[start..end]; + debug_assert_eq!( + std::mem::size_of::(), + std::mem::size_of::() + ); + // SAFETY: 1. serialized.edge_list_data is never modified. + // 2. 
SerializedDepNodeIndex and DepNodeIndex have the same binary representation. + let edges = unsafe { std::mem::transmute::<&[_], &[_]>(edges) }; + Err(edges) } #[inline] pub(crate) fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] { - let (start, end) = self.edge_list_indices[SerializedDepNodeIndex::new(source.index())]; - let start = start.offset() as usize; - let end = end as usize; - &self.edge_list_data[start..end] + match self.as_serialized(source) { + Ok(source) => self.serialized_edges(source), + Err(source) => self.new_edges(source), + } } #[inline] pub(crate) fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { - self.nodes[dep_node_index] + self.serialized.nodes[dep_node_index] } #[inline] pub(crate) fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { - self.nodes[SerializedDepNodeIndex::new(dep_node_index.index())] + match self.as_serialized(dep_node_index) { + Ok(serialized) => self.serialized.nodes[serialized], + Err(new) => self.nodes[new], + } } #[inline] @@ -286,93 +399,105 @@ impl SerializedDepGraph { dep_node: &DepNode, ) -> Option { let idx = *self.index.get(dep_node)?; - if idx >= self.serialized_node_count { None } else { Some(idx) } + self.as_serialized(idx).ok() } #[inline] pub(crate) fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { let index = *self.index.get(dep_node)?; - // Return none if the node has not been coloured yet. - let _ = self.edge_list_indices[index].0.color()?; - debug!( - "dep_node_index_of_opt: dep_node={:?} index={:?} indices={:?}", - dep_node, index, self.edge_list_indices[index] - ); - Some(index.rejuvenate()) + if let Ok(prev) = self.as_serialized(index) { + // Return none if the node has not been coloured yet. + self.serialized.edge_list_indices[prev].0.color()?; + } + Some(index) } #[inline] pub(crate) fn color(&self, index: SerializedDepNodeIndex) -> Option { - self.edge_list_indices[index].0.color() + self.serialized.edge_list_indices[index].0.color() } #[inline] pub(crate) fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { - self.fingerprints[SerializedDepNodeIndex::new(dep_node_index.index())] + match self.as_serialized(dep_node_index) { + Ok(serialized) => self.serialized.fingerprints[serialized], + Err(split) => self.fingerprints[split], + } } #[inline] - pub(crate) fn serialized_indices(&self) -> impl Iterator + '_ { - (0..self.serialized_node_count.index()).map(SerializedDepNodeIndex::new) + pub(crate) fn serialized_indices(&self) -> impl Iterator { + self.serialized.nodes.indices() } #[inline] - pub(crate) fn indices(&self) -> impl Iterator + '_ { - self.edge_list_indices.iter_enumerated().filter_map(|(i, (s, _))| { + fn live_serialized_indices(&self) -> impl Iterator + '_ { + self.serialized.edge_list_indices.iter_enumerated().filter_map(|(i, (s, _))| { // Return none if the node has not been coloured yet. let _ = s.color()?; - Some(i.rejuvenate()) + Some(i) }) } #[inline] - pub(crate) fn serialized_node_count(&self) -> usize { - self.serialized_node_count.index() + fn new_indices(&self) -> impl Iterator + '_ { + self.nodes.indices().map(move |i| self.from_split(i)) + } + + #[inline] + pub(crate) fn live_indices(&self) -> impl Iterator + '_ { + // New indices are always live. 
+ self.live_serialized_indices() + .map(SerializedDepNodeIndex::rejuvenate) + .chain(self.new_indices()) } #[inline] pub(crate) fn node_count(&self) -> usize { - self.edge_list_indices.iter().filter_map(|(s, _)| s.color()).count() + self.live_indices().count() + } + + #[inline] + fn edge_map(&self) -> impl Iterator + '_ { + let serialized_edges = + self.live_serialized_indices().map(move |index| self.serialized_edges(index)); + let new_edges = self.edge_list_indices.iter().map(move |&(start, end)| { + let start = start as usize; + let end = end as usize; + &self.edge_list_data[start..end] + }); + serialized_edges.chain(new_edges) } #[inline] pub(crate) fn edge_count(&self) -> usize { - self.edge_list_indices - .iter() - .filter_map(|(s, e)| { - s.color()?; - Some((e - s.offset()) as usize) - }) - .sum() + self.edge_map().flatten().count() } pub(crate) fn query(&self) -> DepGraphQuery { - let nodes: Vec<_> = self - .nodes - .iter_enumerated() - .filter_map(|(i, n)| { - let _ = self.edge_list_indices[i].0.color()?; - Some(*n) - }) - .collect(); - let edge_list_indices: Vec<_> = self - .edge_list_indices - .iter() - .filter_map(|(s, e)| { - s.color()?; - Some((s.offset() as usize, *e as usize)) - }) - .collect(); - let edge_list_data: Vec<_> = self.edge_list_data.iter().map(|i| i.index()).collect(); - debug_assert_eq!(nodes.len(), edge_list_indices.len()); + let node_count = self.node_count(); + let edge_count = self.edge_count(); + + let mut nodes = Vec::with_capacity(node_count); + nodes.extend(self.live_indices().map(|i| self.dep_node_of(i))); + + let mut edge_list_indices = Vec::with_capacity(node_count); + let mut edge_list_data = Vec::with_capacity(edge_count); + for edges in self.edge_map() { + let start = edge_list_data.len(); + edge_list_data.extend(edges.iter().map(|i| i.index() as usize)); + let end = edge_list_data.len(); + edge_list_indices.push((start, end)) + } + debug_assert_eq!(nodes.len(), edge_list_indices.len()); DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..]) } pub(crate) fn compression_map(&self) -> IndexVec> { let mut new_index = SerializedDepNodeIndex::new(0); - let mut remap = IndexVec::from_elem_n(None, self.nodes.len()); - for index in self.indices() { + let mut remap = IndexVec::from_elem_n(None, self.serialized.nodes.len() + self.nodes.len()); + for index in self.live_indices() { debug_assert!(new_index.index() <= index.index()); remap[index] = Some(new_index); new_index.increment_by(1); @@ -381,52 +506,48 @@ impl SerializedDepGraph { } } -impl> Encodable for SerializedDepGraph { +impl> Encodable for CurrentDepGraph { fn encode(&self, e: &mut E) -> Result<(), E::Error> { let remap = self.compression_map(); + let node_count = remap.iter().flatten().count(); + // Back-copy the nodes and fingerprints. let (nodes, fingerprints) = { - let mut nodes = self.nodes.clone(); - let mut fingerprints = self.fingerprints.clone(); - let mut new_index = SerializedDepNodeIndex::new(0); - - for index in self.indices() { - debug_assert!(new_index.index() <= index.index()); - - // Back-copy the nodes and fingerprints. 
- let index = SerializedDepNodeIndex::new(index.index()); - nodes[new_index] = self.nodes[index]; - fingerprints[new_index] = self.fingerprints[index]; - - new_index.increment_by(1); + let mut nodes: IndexVec> = + IndexVec::with_capacity(node_count); + let mut fingerprints: IndexVec = + IndexVec::with_capacity(node_count); + + for index in self.live_serialized_indices() { + nodes.push(self.serialized.nodes[index]); + fingerprints.push(self.serialized.fingerprints[index]); } - nodes.truncate(new_index.index()); - fingerprints.truncate(new_index.index()); + nodes.extend(self.nodes.iter().copied()); + fingerprints.extend(self.fingerprints.iter().copied()); (nodes, fingerprints) }; + // Reconstruct the edges vector since it may be out of order. + // We only store the start indices, since the end is the next's start. let (new_indices, new_edges) = { let mut new_indices: IndexVec = - IndexVec::with_capacity(self.nodes.len()); - let mut new_edges: Vec = - Vec::with_capacity(self.edge_list_data.len()); - - for (index, (start, end)) in self.edge_list_indices.iter_enumerated() { - match start.color() { - // This node does not exist in this session. Skip it. - None => continue, - Some(_) => {} - } - - let new_index = new_indices.push(new_edges.len().try_into().unwrap()); - debug_assert_eq!(remap[index.rejuvenate()], Some(new_index)); + IndexVec::with_capacity(node_count); + let mut new_edges: Vec = Vec::with_capacity( + self.serialized.edge_list_data.len() + self.edge_list_data.len(), + ); - // Reconstruct the edges vector since it may be out of order. - // We only store the start indices, since the end is the next's start. - let start = start.offset() as usize; - let end = *end as usize; - new_edges.extend(self.edge_list_data[start..end].iter().map(|i| { + for index in self.live_serialized_indices() { + new_indices.push(new_edges.len().try_into().unwrap()); + let edges = self.serialized_edges(index); + new_edges.extend(edges.iter().map(|i| { + remap[*i] + .unwrap_or_else(|| panic!("Unknown remap for {:?} while {:?}", *i, index)) + })); + } + for index in self.nodes.indices() { + new_indices.push(new_edges.len().try_into().unwrap()); + new_edges.extend(self.new_edges(index).iter().map(|i| { remap[*i] .unwrap_or_else(|| panic!("Unknown remap for {:?} while {:?}", *i, index)) })); @@ -435,9 +556,13 @@ impl> Encodable for SerializedDepGraph< (new_indices, new_edges) }; + debug_assert_eq!(node_count, nodes.len()); + debug_assert_eq!(node_count, fingerprints.len()); + debug_assert_eq!(node_count, new_indices.len()); + let mut index = FxHashMap::default(); for (idx, &dep_node) in nodes.iter_enumerated() { - debug!("DECODE index={:?} node={:?}", idx, dep_node); + debug!("ENCODE index={:?} node={:?}", idx, dep_node); let _o = index.insert(dep_node, idx); debug_assert_eq!(_o, None); } @@ -453,6 +578,26 @@ impl> Encodable for SerializedDepGraph< } } +// Pre-allocate the dep node structures. We over-allocate a little so +// that we hopefully don't have to re-allocate during this compilation +// session. The over-allocation for new nodes is 2% plus a small +// constant to account for the fact that in very small crates 2% might +// not be enough. The allocation for red and green node data doesn't +// include a constant, as we don't want to allocate anything for these +// structures during full incremental builds, where they aren't used. 
+// +// These estimates are based on the distribution of node and edge counts +// seen in rustc-perf benchmarks, adjusted somewhat to account for the +// fact that these benchmarks aren't perfectly representative. +fn node_count_estimate(prev_graph_node_count: usize) -> usize { + (2 * prev_graph_node_count) / 100 + 200 +} + +fn edge_count_estimate(prev_graph_node_count: usize) -> usize { + let average_edges_per_node_estimate = 6; + average_edges_per_node_estimate * (200 + (prev_graph_node_count * 30) / 100) +} + impl> Decodable for SerializedDepGraph { fn decode(d: &mut D) -> Result, D::Error> { d.read_struct("SerializedDepGraph", 4, |d| { @@ -471,22 +616,7 @@ impl> Decodable for SerializedDepGraph< edge_list_indices.len() - 1, ); - let mut index = FxHashMap::default(); - for (idx, &dep_node) in nodes.iter_enumerated() { - debug!("DECODE index={:?} node={:?}", idx, dep_node); - let _o = index.insert(dep_node, idx); - debug_assert_eq!(_o, None); - } - let serialized_node_count = nodes.next_index(); - - Ok(SerializedDepGraph { - nodes, - index, - fingerprints, - edge_list_indices, - edge_list_data, - serialized_node_count, - }) + Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) }) } } From 08168b54124eda90b02518bfecc24b4235f5b528 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 22 Feb 2021 19:03:07 +0100 Subject: [PATCH 05/14] Only check color once. --- .../rustc_query_system/src/dep_graph/graph.rs | 50 ++++++++----------- 1 file changed, 22 insertions(+), 28 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 276049828766e..aa2151f7075f0 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -449,20 +449,20 @@ impl DepGraph { // Return None if the dep node didn't exist in the previous session let prev_index = data.previous.read().node_to_index_opt(dep_node)?; - let color = data.previous.read().color(prev_index); - - match color { - Some(DepNodeColor::Green) => Some((prev_index, prev_index.rejuvenate())), - Some(DepNodeColor::Red) | Some(DepNodeColor::New) => None, - None => { - // This DepNode and the corresponding query invocation existed - // in the previous compilation session too, so we can try to - // mark it as green by recursively marking all of its - // dependencies green. - self.try_mark_previous_green(tcx, data, prev_index, &dep_node) - .map(|dep_node_index| (prev_index, dep_node_index)) - } - } + let prev_deps = data.previous.read().color_or_edges(prev_index); + let prev_deps = match prev_deps { + Err(prev_deps) => prev_deps, + Ok(DepNodeColor::Green) => return Some((prev_index, prev_index.rejuvenate())), + Ok(DepNodeColor::Red) | Ok(DepNodeColor::New) => return None, + }; + + // This DepNode and the corresponding query invocation existed + // in the previous compilation session too, so we can try to + // mark it as green by recursively marking all of its + // dependencies green. 
+ let dep_node_index = + self.try_mark_previous_green(tcx, data, prev_index, prev_deps, &dep_node)?; + Some((prev_index, dep_node_index)) } fn try_mark_parent_green>( @@ -472,11 +472,10 @@ impl DepGraph { parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option<()> { - let dep_dep_node_color = data.previous.read().color(parent_dep_node_index); let dep_dep_node = &data.previous.read().index_to_node(parent_dep_node_index); - - match dep_dep_node_color { - Some(DepNodeColor::Green) => { + let dep_dep_node_color = data.previous.read().color_or_edges(parent_dep_node_index); + let prev_deps = match dep_dep_node_color { + Ok(DepNodeColor::Green) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. @@ -486,7 +485,7 @@ impl DepGraph { ); return Some(()); } - Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { + Ok(DepNodeColor::Red) | Ok(DepNodeColor::New) => { // We found a dependency the value of which has changed // compared to the previous compilation session. We cannot // mark the DepNode as green and also don't need to bother @@ -497,8 +496,8 @@ impl DepGraph { ); return None; } - None => {} - } + Err(prev_deps) => prev_deps, + }; // We don't know the state of this dependency. If it isn't // an eval_always node, let's try to mark it green recursively. @@ -509,7 +508,7 @@ impl DepGraph { ); let node_index = - self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node); + self.try_mark_previous_green(tcx, data, parent_dep_node_index, prev_deps, dep_dep_node); if node_index.is_some() { debug!( "try_mark_parent_green({:?}) --- managed to MARK dependency {:?} as green", @@ -579,6 +578,7 @@ impl DepGraph { tcx: Ctxt, data: &DepGraphData, prev_dep_node_index: SerializedDepNodeIndex, + prev_deps: &[SerializedDepNodeIndex], dep_node: &DepNode, ) -> Option { // We never try to mark eval_always nodes as green @@ -592,12 +592,6 @@ impl DepGraph { debug_assert!(!dep_node.kind.is_eval_always()); debug_assert_eq!(data.previous.read().index_to_node(prev_dep_node_index), *dep_node); - let prev_deps = data.previous.read().color_or_edges(prev_dep_node_index); - let prev_deps = match prev_deps { - Err(prev_deps) => prev_deps, - Ok(DepNodeColor::Green) => return Some(prev_dep_node_index.rejuvenate()), - Ok(DepNodeColor::Red) | Ok(DepNodeColor::New) => return None, - }; for &dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? } From 47ce741f33b05d0951d9320eee8c9df50edcfff2 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 21 Feb 2021 12:19:00 +0100 Subject: [PATCH 06/14] Do not access the dep_node only for debugging. --- .../rustc_query_system/src/dep_graph/graph.rs | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index aa2151f7075f0..d54b77dde93ea 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -472,7 +472,6 @@ impl DepGraph { parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option<()> { - let dep_dep_node = &data.previous.read().index_to_node(parent_dep_node_index); let dep_dep_node_color = data.previous.read().color_or_edges(parent_dep_node_index); let prev_deps = match dep_dep_node_color { Ok(DepNodeColor::Green) => { @@ -481,7 +480,8 @@ impl DepGraph { // dependencies. 
debug!( "try_mark_parent_green({:?}) --- found dependency {:?} to be immediately green", - dep_node, dep_dep_node, + dep_node, + data.previous.read().index_to_node(parent_dep_node_index) ); return Some(()); } @@ -492,7 +492,8 @@ impl DepGraph { // with checking any of the other dependencies. debug!( "try_mark_parent_green({:?}) - END - dependency {:?} was immediately red", - dep_node, dep_dep_node, + dep_node, + data.previous.read().index_to_node(parent_dep_node_index) ); return None; } @@ -502,11 +503,16 @@ impl DepGraph { // We don't know the state of this dependency. If it isn't // an eval_always node, let's try to mark it green recursively. debug!( - "try_mark_parent_green({:?}) --- state of dependency {:?} ({}) \ + "try_mark_parent_green({:?}) --- state of dependency {:?} \ is unknown, trying to mark it green", - dep_node, dep_dep_node, dep_dep_node.hash, + dep_node, + { + let dep_dep_node = data.previous.read().index_to_node(parent_dep_node_index); + (dep_dep_node, dep_dep_node.hash) + } ); + let dep_dep_node = &data.previous.read().index_to_node(parent_dep_node_index); let node_index = self.try_mark_previous_green(tcx, data, parent_dep_node_index, prev_deps, dep_dep_node); if node_index.is_some() { From ac0e10cabc569bfbf2d0e30c6193da8e9769258d Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sat, 20 Feb 2021 18:53:11 +0100 Subject: [PATCH 07/14] Encode and decode on the fly. --- .../src/dep_graph/serialized.rs | 159 ++++++++++-------- 1 file changed, 85 insertions(+), 74 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 52ceef91fa5a3..35a41c8f6ba03 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -509,70 +509,52 @@ impl CurrentDepGraph { impl> Encodable for CurrentDepGraph { fn encode(&self, e: &mut E) -> Result<(), E::Error> { let remap = self.compression_map(); - let node_count = remap.iter().flatten().count(); - - // Back-copy the nodes and fingerprints. - let (nodes, fingerprints) = { - let mut nodes: IndexVec> = - IndexVec::with_capacity(node_count); - let mut fingerprints: IndexVec = - IndexVec::with_capacity(node_count); - - for index in self.live_serialized_indices() { - nodes.push(self.serialized.nodes[index]); - fingerprints.push(self.serialized.fingerprints[index]); - } - nodes.extend(self.nodes.iter().copied()); - fingerprints.extend(self.fingerprints.iter().copied()); + let live_indices = || remap.iter_enumerated().filter_map(|(s, &n)| Some((s, n?))); + let node_count = live_indices().count(); + let edge_count = self.edge_count(); - (nodes, fingerprints) - }; + e.emit_struct("SerializedDepGraph", 4, |e| { + e.emit_struct_field("nodes", 0, |e| { + e.emit_seq(node_count, |e| { + for (index, new_index) in live_indices() { + let node = self.dep_node_of(index); + e.emit_seq_elt(new_index.index(), |e| node.encode(e))?; + } + Ok(()) + }) + })?; + e.emit_struct_field("fingerprints", 1, |e| { + e.emit_seq(node_count, |e| { + for (index, new_index) in live_indices() { + let node = self.fingerprint_of(index); + e.emit_seq_elt(new_index.index(), |e| node.encode(e))?; + } + Ok(()) + }) + })?; - // Reconstruct the edges vector since it may be out of order. - // We only store the start indices, since the end is the next's start. - let (new_indices, new_edges) = { + // Reconstruct the edges vector since it may be out of order. + // We only store the start indices, since the end is the next's start. 
let mut new_indices: IndexVec = - IndexVec::with_capacity(node_count); - let mut new_edges: Vec = Vec::with_capacity( - self.serialized.edge_list_data.len() + self.edge_list_data.len(), - ); - - for index in self.live_serialized_indices() { - new_indices.push(new_edges.len().try_into().unwrap()); - let edges = self.serialized_edges(index); - new_edges.extend(edges.iter().map(|i| { - remap[*i] - .unwrap_or_else(|| panic!("Unknown remap for {:?} while {:?}", *i, index)) - })); - } - for index in self.nodes.indices() { - new_indices.push(new_edges.len().try_into().unwrap()); - new_edges.extend(self.new_edges(index).iter().map(|i| { - remap[*i] - .unwrap_or_else(|| panic!("Unknown remap for {:?} while {:?}", *i, index)) - })); - } - - (new_indices, new_edges) - }; - - debug_assert_eq!(node_count, nodes.len()); - debug_assert_eq!(node_count, fingerprints.len()); - debug_assert_eq!(node_count, new_indices.len()); - - let mut index = FxHashMap::default(); - for (idx, &dep_node) in nodes.iter_enumerated() { - debug!("ENCODE index={:?} node={:?}", idx, dep_node); - let _o = index.insert(dep_node, idx); - debug_assert_eq!(_o, None); - } - let _ = index; - - e.emit_struct("SerializedDepGraph", 4, |e| { - e.emit_struct_field("nodes", 0, |e| nodes.encode(e))?; - e.emit_struct_field("fingerprints", 1, |e| fingerprints.encode(e))?; - e.emit_struct_field("edge_list_indices", 2, |e| new_indices.encode(e))?; - e.emit_struct_field("edge_list_data", 3, |e| new_edges.encode(e))?; + IndexVec::from_elem_n(0u32, node_count); + e.emit_struct_field("edge_list_data", 2, |e| { + e.emit_seq(edge_count, |e| { + let mut pos: u32 = 0; + for (new_index, edges) in self.edge_map().enumerate() { + // Reconstruct the edges vector since it may be out of order. + // We only store the end indices, since the start can be reconstructed. 
+ for &edge in edges { + let edge = remap[edge].unwrap(); + e.emit_seq_elt(pos as usize, |e| edge.encode(e))?; + pos += 1; + } + new_indices[SerializedDepNodeIndex::new(new_index)] = pos; + } + debug_assert_eq!(pos as usize, edge_count); + Ok(()) + }) + })?; + e.emit_struct_field("edge_list_ends", 3, |e| new_indices.encode(e))?; Ok(()) }) } @@ -602,19 +584,48 @@ impl> Decodable for SerializedDepGraph< fn decode(d: &mut D) -> Result, D::Error> { d.read_struct("SerializedDepGraph", 4, |d| { let nodes: IndexVec> = - d.read_struct_field("nodes", 0, Decodable::decode)?; - let fingerprints: IndexVec = - d.read_struct_field("fingerprints", 1, Decodable::decode)?; - let mut edge_list_indices: IndexVec = - d.read_struct_field("edge_list_indices", 2, Decodable::decode)?; - let edge_list_data: Vec = - d.read_struct_field("edge_list_data", 3, Decodable::decode)?; - - edge_list_indices.push(edge_list_data.len().try_into().unwrap()); - let edge_list_indices = IndexVec::from_fn_n( - |i| (ColorAndOffset::unknown(edge_list_indices[i]), edge_list_indices[i + 1]), - edge_list_indices.len() - 1, - ); + d.read_struct_field("nodes", 0, |d| { + d.read_seq(|d, len| { + let mut nodes = IndexVec::with_capacity(len); + for i in 0..len { + let node = d.read_seq_elt(i, Decodable::decode)?; + nodes.push(node); + } + Ok(nodes) + }) + })?; + let fingerprints = d.read_struct_field("fingerprints", 1, |d| { + d.read_seq(|d, len| { + let mut fingerprints = IndexVec::with_capacity(len); + for i in 0..len { + let fingerprint = d.read_seq_elt(i, Decodable::decode)?; + fingerprints.push(fingerprint); + } + Ok(fingerprints) + }) + })?; + let edge_list_data = d.read_struct_field("edge_list_data", 2, |d| { + d.read_seq(|d, len| { + let mut edges = Vec::with_capacity(len); + for i in 0..len { + let edge = d.read_seq_elt(i, Decodable::decode)?; + edges.push(edge); + } + Ok(edges) + }) + })?; + let edge_list_indices = d.read_struct_field("edge_list_ends", 3, |d| { + d.read_seq(|d, len| { + let mut indices = IndexVec::with_capacity(len); + let mut start: u32 = 0; + for i in 0..len { + let end: u32 = d.read_seq_elt(i, Decodable::decode)?; + indices.push((ColorAndOffset::unknown(start), end)); + start = end; + } + Ok(indices) + }) + })?; Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) }) From 689936f827efeb61ca74c9fcf0119a6210af0bd4 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Sun, 21 Feb 2021 18:35:17 +0100 Subject: [PATCH 08/14] Compress edge indices further. 
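This patch packs a two-bit state tag into the high bits of both words of an edge range: the first word carries the node's color, the second carries the "lifted" flag saying whether the range points into the new edge list rather than the serialized one. A minimal sketch of that bit-packing follows; the TAG_*/mask values are illustrative assumptions, since the real constants are defined elsewhere in serialized.rs and are not shown in this hunk:

    // Sketch only: two tag bits share a u32 with a 30-bit offset, in the
    // spirit of `ColorAndOffset`. The concrete constant values are assumed.
    const TAG_BITS: u32 = 2;
    const TAG_SHIFT: u32 = 32 - TAG_BITS;
    const TAG_MASK: u32 = 0b11 << TAG_SHIFT;
    const OFFSET_MASK: u32 = !TAG_MASK;

    const TAG_UNKNOWN: u32 = 0b00 << TAG_SHIFT;
    const TAG_RED: u32 = 0b01 << TAG_SHIFT;
    const TAG_GREEN: u32 = 0b10 << TAG_SHIFT;

    // Combine a tag with an offset; the offset must fit in the low 30 bits.
    fn pack(tag: u32, offset: u32) -> u32 {
        assert_eq!(offset & TAG_MASK, 0);
        tag | offset
    }

    fn tag(word: u32) -> u32 {
        word & TAG_MASK
    }

    fn offset(word: u32) -> u32 {
        word & OFFSET_MASK
    }

    fn main() {
        let start = pack(TAG_GREEN, 17);
        let end = pack(TAG_UNKNOWN, 42);
        assert_eq!(tag(start), TAG_GREEN);
        assert_ne!(tag(end), TAG_RED);
        assert_eq!((offset(start), offset(end)), (17, 42));
    }
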
--- .../src/dep_graph/serialized.rs | 125 +++++++++++------- 1 file changed, 74 insertions(+), 51 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 35a41c8f6ba03..22d78efb9de1c 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -7,6 +7,7 @@ use rustc_data_structures::fx::FxHashMap; use rustc_index::vec::{Idx, IndexVec}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use std::convert::TryInto; +use std::ops::Range; #[derive(Debug, PartialEq, Eq)] pub enum DepNodeColor { @@ -67,27 +68,40 @@ static_assert_size!(Option, 4); static_assert_size!(Option, 4); #[derive(Copy, Clone, Encodable, Decodable)] -struct ColorAndOffset(u32); +struct ColorAndOffset(u32, u32); impl std::fmt::Debug for ColorAndOffset { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fmt.debug_struct("ColorAndOffset") .field("color", &self.color()) - .field("offset", &self.offset()) + .field("lifted", &self.lifted()) + .field("range", &self.range()) .finish() } } impl ColorAndOffset { - fn unknown(offset: u32) -> ColorAndOffset { - debug_assert_eq!(offset & TAG_MASK, 0); - ColorAndOffset(offset | TAG_UNKNOWN) + fn unknown(start: u32, end: u32) -> ColorAndOffset { + debug_assert_eq!(start & TAG_MASK, 0); + debug_assert_eq!(end & TAG_MASK, 0); + ColorAndOffset(start | TAG_UNKNOWN, end | TAG_UNKNOWN) } - fn new(color: DepNodeColor, offset: u32) -> ColorAndOffset { - let offset: u32 = offset.try_into().unwrap(); - debug_assert_eq!(offset & TAG_MASK, 0); - ColorAndOffset(offset | color.tag()) + #[allow(dead_code)] + fn new(color: DepNodeColor, range: Range) -> ColorAndOffset { + let start: u32 = range.start.try_into().unwrap(); + let end: u32 = range.end.try_into().unwrap(); + debug_assert_eq!(start & TAG_MASK, 0); + debug_assert_eq!(end & TAG_MASK, 0); + ColorAndOffset(start | color.tag(), end | TAG_UNKNOWN) + } + + fn new_lifted(color: DepNodeColor, range: Range) -> ColorAndOffset { + let start: u32 = range.start.try_into().unwrap(); + let end: u32 = range.end.try_into().unwrap(); + debug_assert_eq!(start & TAG_MASK, 0); + debug_assert_eq!(end & TAG_MASK, 0); + ColorAndOffset(start | color.tag(), end | TAG_GREEN) } fn set_color(&mut self, color: DepNodeColor) { @@ -95,10 +109,6 @@ impl ColorAndOffset { self.0 = color.tag() | offset; } - fn offset(self) -> u32 { - self.0 & OFFSET_MASK - } - fn color(self) -> Option { let tag = self.0 & TAG_MASK; match tag { @@ -106,9 +116,27 @@ impl ColorAndOffset { TAG_RED => Some(DepNodeColor::Red), TAG_GREEN => Some(DepNodeColor::Green), TAG_UNKNOWN => None, - _ => panic!(), + _ => unsafe { std::hint::unreachable_unchecked() }, + } + } + + fn lifted(self) -> bool { + let tag = self.1 & TAG_MASK; + match tag { + TAG_UNKNOWN => false, + _ => true, } } + + fn range(self) -> Range { + let start = (self.0 & OFFSET_MASK) as usize; + let end = (self.1 & OFFSET_MASK) as usize; + start..end + } +} + +fn shrink_range(range: Range) -> Range { + range.start.try_into().unwrap()..range.end.try_into().unwrap() } /// Data for use when recompiling the **previous crate**. @@ -124,7 +152,7 @@ pub struct SerializedDepGraph { /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, /// which holds the actual DepNodeIndices of the target nodes. 
- edge_list_indices: IndexVec, + edge_list_indices: IndexVec, /// A flattened list of all edge targets in the graph. Edge sources are /// implicit in edge_list_indices. edge_list_data: Vec, @@ -143,7 +171,7 @@ pub struct CurrentDepGraph { /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, /// which holds the actual DepNodeIndices of the target nodes. - edge_list_indices: IndexVec, + edge_list_indices: IndexVec>, /// A flattened list of all edge targets in the graph. Edge sources are /// implicit in edge_list_indices. edge_list_data: Vec, @@ -194,8 +222,8 @@ impl CurrentDepGraph { debug!("intern_new: {:?} {:?}", self.from_split(index), node); let _index = self.fingerprints.push(fingerprint); debug_assert_eq!(index, _index); - let (start, end) = self.insert_deps(deps); - let _index = self.edge_list_indices.push((start, end)); + let range = self.insert_deps(deps); + let _index = self.edge_list_indices.push(shrink_range(range)); debug_assert_eq!(index, _index); let index = self.from_split(index); let _o = self.index.insert(node, index); @@ -203,11 +231,11 @@ impl CurrentDepGraph { index } - fn insert_deps(&mut self, deps: &[DepNodeIndex]) -> (u32, u32) { + fn insert_deps(&mut self, deps: &[DepNodeIndex]) -> Range { let start = self.edge_list_data.len(); self.edge_list_data.extend(deps.iter().copied()); let end = self.edge_list_data.len(); - (start.try_into().unwrap(), end.try_into().unwrap()) + start..end } fn update_deps( @@ -216,19 +244,18 @@ impl CurrentDepGraph { color: DepNodeColor, deps: &[DepNodeIndex], ) { - let (start, _) = self.serialized.edge_list_indices[index]; - debug_assert_eq!(start.color(), None); - let (start, end) = self.insert_deps(deps); - let len = self.serialized.edge_list_data.len() as u32; debug!("intern_color: {:?} => {:?}", index, color); - let start = ColorAndOffset::new(color, start + len); - self.serialized.edge_list_indices[index] = (start, end + len); + let range = self.serialized.edge_list_indices[index]; + debug_assert_eq!(range.color(), None); + let range = self.insert_deps(deps); + let range = ColorAndOffset::new_lifted(color, range); + self.serialized.edge_list_indices[index] = range; } pub(crate) fn intern_dark_green_node(&mut self, index: SerializedDepNodeIndex) -> DepNodeIndex { debug!("intern_drak_green: {:?}", index); - debug_assert_eq!(self.serialized.edge_list_indices[index].0.color(), None); - self.serialized.edge_list_indices[index].0.set_color(DepNodeColor::Green); + debug_assert_eq!(self.serialized.edge_list_indices[index].color(), None); + self.serialized.edge_list_indices[index].set_color(DepNodeColor::Green); debug!("intern_color: {:?} => Green", index); index.rejuvenate() } @@ -329,22 +356,19 @@ impl CurrentDepGraph { #[inline] fn serialized_edges(&self, source: SerializedDepNodeIndex) -> &[DepNodeIndex] { - let (start, end) = self.serialized.edge_list_indices[source]; - let start = start.offset() as usize; - let end = end as usize; - let len = self.serialized.edge_list_data.len(); - if start < len { - &self.serialized.edge_list_data[start..end] + let range = self.serialized.edge_list_indices[source]; + if range.lifted() { + &self.edge_list_data[range.range()] } else { - &self.edge_list_data[start - len..end - len] + &self.serialized.edge_list_data[range.range()] } } #[inline] fn new_edges(&self, source: SplitIndex) -> &[DepNodeIndex] { - let (start, end) = self.edge_list_indices[source]; - let start = start as usize; - let end = end as usize; + let 
range = &self.edge_list_indices[source]; + let start = range.start as usize; + let end = range.end as usize; &self.edge_list_data[start..end] } @@ -353,15 +377,14 @@ impl CurrentDepGraph { &self, source: SerializedDepNodeIndex, ) -> Result { - let (start, end) = self.serialized.edge_list_indices[source]; - if let Some(color) = start.color() { + let range = self.serialized.edge_list_indices[source]; + if let Some(color) = range.color() { return Ok(color); } - let start = start.offset() as usize; - let end = end as usize; // The node has not been colored, so the dependencies have not been lifted to point to the // new nodes vector. - let edges = &self.serialized.edge_list_data[start..end]; + debug_assert!(!range.lifted()); + let edges = &self.serialized.edge_list_data[range.range()]; debug_assert_eq!( std::mem::size_of::(), std::mem::size_of::() @@ -407,14 +430,14 @@ impl CurrentDepGraph { let index = *self.index.get(dep_node)?; if let Ok(prev) = self.as_serialized(index) { // Return none if the node has not been coloured yet. - self.serialized.edge_list_indices[prev].0.color()?; + self.serialized.edge_list_indices[prev].color()?; } Some(index) } #[inline] pub(crate) fn color(&self, index: SerializedDepNodeIndex) -> Option { - self.serialized.edge_list_indices[index].0.color() + self.serialized.edge_list_indices[index].color() } #[inline] @@ -432,9 +455,9 @@ impl CurrentDepGraph { #[inline] fn live_serialized_indices(&self) -> impl Iterator + '_ { - self.serialized.edge_list_indices.iter_enumerated().filter_map(|(i, (s, _))| { + self.serialized.edge_list_indices.iter_enumerated().filter_map(|(i, range)| { // Return none if the node has not been coloured yet. - let _ = s.color()?; + let _ = range.color()?; Some(i) }) } @@ -461,9 +484,9 @@ impl CurrentDepGraph { fn edge_map(&self) -> impl Iterator + '_ { let serialized_edges = self.live_serialized_indices().map(move |index| self.serialized_edges(index)); - let new_edges = self.edge_list_indices.iter().map(move |&(start, end)| { - let start = start as usize; - let end = end as usize; + let new_edges = self.edge_list_indices.iter().map(move |range| { + let start = range.start as usize; + let end = range.end as usize; &self.edge_list_data[start..end] }); serialized_edges.chain(new_edges) @@ -620,7 +643,7 @@ impl> Decodable for SerializedDepGraph< let mut start: u32 = 0; for i in 0..len { let end: u32 = d.read_seq_elt(i, Decodable::decode)?; - indices.push((ColorAndOffset::unknown(start), end)); + indices.push(ColorAndOffset::unknown(start, end)); start = end; } Ok(indices) From 5c8fdfee2f46943a5ff712b8731fffc87c3d809a Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Mon, 22 Feb 2021 21:35:44 +0100 Subject: [PATCH 09/14] Introduce IndexArray. --- compiler/rustc_index/src/vec.rs | 139 ++++++++++++++++++ .../src/dep_graph/serialized.rs | 48 +++--- 2 files changed, 164 insertions(+), 23 deletions(-) diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs index 2420f82c0418d..03600ddd97bb7 100644 --- a/compiler/rustc_index/src/vec.rs +++ b/compiler/rustc_index/src/vec.rs @@ -842,5 +842,144 @@ impl FnMut<(usize,)> for IntoIdx { } } +#[derive(Clone, PartialEq, Eq, Hash)] +pub struct IndexArray { + pub raw: Box<[T]>, + _marker: PhantomData, +} + +// Whether `IndexArray` is `Send` depends only on the data, +// not the phantom data. 
+unsafe impl Send for IndexArray where T: Send {} + +impl> Encodable for IndexArray { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + Encodable::encode(&self.raw, s) + } +} + +impl> Encodable for &IndexArray { + fn encode(&self, s: &mut S) -> Result<(), S::Error> { + Encodable::encode(&self.raw, s) + } +} + +impl> Decodable for IndexArray { + fn decode(d: &mut D) -> Result { + Decodable::decode(d).map(|v| IndexArray { raw: v, _marker: PhantomData }) + } +} + +impl fmt::Debug for IndexArray { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.raw, fmt) + } +} + +impl IndexArray { + #[inline] + pub fn new() -> Self { + IndexArray { raw: Box::default(), _marker: PhantomData } + } + + #[inline] + pub fn from_raw(raw: Box<[T]>) -> Self { + IndexArray { raw, _marker: PhantomData } + } + + #[inline] + pub fn from_vec(raw: Vec) -> Self { + IndexArray { raw: raw.into(), _marker: PhantomData } + } + + #[inline] + pub fn len(&self) -> usize { + self.raw.len() + } + + #[inline] + pub fn is_empty(&self) -> bool { + self.raw.is_empty() + } + + #[inline] + pub fn iter(&self) -> slice::Iter<'_, T> { + self.raw.iter() + } + + #[inline] + pub fn iter_enumerated(&self) -> Enumerated> { + self.raw.iter().enumerate().map(IntoIdx { _marker: PhantomData }) + } + + #[inline] + pub fn indices(&self) -> iter::Map, IntoIdx> { + (0..self.len()).map(IntoIdx { _marker: PhantomData }) + } + + #[inline] + pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { + self.raw.iter_mut() + } + + #[inline] + pub fn iter_enumerated_mut(&mut self) -> Enumerated> { + self.raw.iter_mut().enumerate().map(IntoIdx { _marker: PhantomData }) + } +} + +impl Index for IndexArray { + type Output = T; + + #[inline] + fn index(&self, index: I) -> &T { + &self.raw[index.index()] + } +} + +impl IndexMut for IndexArray { + #[inline] + fn index_mut(&mut self, index: I) -> &mut T { + &mut self.raw[index.index()] + } +} + +impl Default for IndexArray { + #[inline] + fn default() -> Self { + Self::new() + } +} + +impl FromIterator for IndexArray { + #[inline] + fn from_iter(iter: J) -> Self + where + J: IntoIterator, + { + IndexArray { raw: FromIterator::from_iter(iter), _marker: PhantomData } + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a IndexArray { + type Item = &'a T; + type IntoIter = slice::Iter<'a, T>; + + #[inline] + fn into_iter(self) -> slice::Iter<'a, T> { + self.raw.iter() + } +} + +impl<'a, I: Idx, T> IntoIterator for &'a mut IndexArray { + type Item = &'a mut T; + type IntoIter = slice::IterMut<'a, T>; + + #[inline] + fn into_iter(self) -> slice::IterMut<'a, T> { + self.raw.iter_mut() + } +} + #[cfg(test)] mod tests; diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 22d78efb9de1c..5b655cf744132 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -4,7 +4,7 @@ use super::query::DepGraphQuery; use super::{DepKind, DepNode}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; -use rustc_index::vec::{Idx, IndexVec}; +use rustc_index::vec::{Idx, IndexArray, IndexVec}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use std::convert::TryInto; use std::ops::Range; @@ -140,19 +140,17 @@ fn shrink_range(range: Range) -> Range { } /// Data for use when recompiling the **previous crate**. -/// -/// Those IndexVec are never pushed to, so as to avoid large reallocations. 
#[derive(Debug)] pub struct SerializedDepGraph { /// The set of all DepNodes in the graph - nodes: IndexVec>, + nodes: IndexArray>, /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to /// the DepNode at the same index in the nodes vector. - fingerprints: IndexVec, + fingerprints: IndexArray, /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, /// which holds the actual DepNodeIndices of the target nodes. - edge_list_indices: IndexVec, + edge_list_indices: IndexArray, /// A flattened list of all edge targets in the graph. Edge sources are /// implicit in edge_list_indices. edge_list_data: Vec, @@ -182,9 +180,9 @@ pub struct CurrentDepGraph { impl Default for SerializedDepGraph { fn default() -> Self { Self { - nodes: IndexVec::new(), - fingerprints: IndexVec::new(), - edge_list_indices: IndexVec::new(), + nodes: IndexArray::new(), + fingerprints: IndexArray::new(), + edge_list_indices: IndexArray::new(), edge_list_data: Vec::new(), } } @@ -606,20 +604,19 @@ fn edge_count_estimate(prev_graph_node_count: usize) -> usize { impl> Decodable for SerializedDepGraph { fn decode(d: &mut D) -> Result, D::Error> { d.read_struct("SerializedDepGraph", 4, |d| { - let nodes: IndexVec> = - d.read_struct_field("nodes", 0, |d| { - d.read_seq(|d, len| { - let mut nodes = IndexVec::with_capacity(len); - for i in 0..len { - let node = d.read_seq_elt(i, Decodable::decode)?; - nodes.push(node); - } - Ok(nodes) - }) - })?; + let nodes = d.read_struct_field("nodes", 0, |d| { + d.read_seq(|d, len| { + let mut nodes = Vec::with_capacity(len); + for i in 0..len { + let node = d.read_seq_elt(i, Decodable::decode)?; + nodes.push(node); + } + Ok(nodes) + }) + })?; let fingerprints = d.read_struct_field("fingerprints", 1, |d| { d.read_seq(|d, len| { - let mut fingerprints = IndexVec::with_capacity(len); + let mut fingerprints = Vec::with_capacity(len); for i in 0..len { let fingerprint = d.read_seq_elt(i, Decodable::decode)?; fingerprints.push(fingerprint); @@ -639,7 +636,7 @@ impl> Decodable for SerializedDepGraph< })?; let edge_list_indices = d.read_struct_field("edge_list_ends", 3, |d| { d.read_seq(|d, len| { - let mut indices = IndexVec::with_capacity(len); + let mut indices = Vec::with_capacity(len); let mut start: u32 = 0; for i in 0..len { let end: u32 = d.read_seq_elt(i, Decodable::decode)?; @@ -650,7 +647,12 @@ impl> Decodable for SerializedDepGraph< }) })?; - Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) + Ok(SerializedDepGraph { + nodes: IndexArray::from_vec(nodes), + fingerprints: IndexArray::from_vec(fingerprints), + edge_list_indices: IndexArray::from_vec(edge_list_indices), + edge_list_data, + }) }) } } From 33c9290b2a53b3d50c497ec5a2de5c3e790dc5a4 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Tue, 23 Feb 2021 20:29:26 +0100 Subject: [PATCH 10/14] Simplify debugging options. 
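This patch turns the runtime `cfg!(debug_assertions)` checks into item-level `#[cfg(debug_assertions)]` attributes, so the read counters and the forbidden-edge trap only exist at all in debug builds. A small sketch of the pattern, with stand-in names rather than the actual `DepGraphData` fields:

    #[cfg(debug_assertions)]
    use std::sync::atomic::{AtomicU64, Ordering};

    struct Counters {
        // The field itself disappears from release builds.
        #[cfg(debug_assertions)]
        reads: AtomicU64,
    }

    impl Counters {
        fn new() -> Self {
            Counters {
                #[cfg(debug_assertions)]
                reads: AtomicU64::new(0),
            }
        }

        fn record_read(&self) {
            // Attribute on the statement: release builds compile no code here,
            // replacing the old `if cfg!(debug_assertions)` runtime branch.
            #[cfg(debug_assertions)]
            self.reads.fetch_add(1, Ordering::Relaxed);
        }
    }

    fn main() {
        let counters = Counters::new();
        counters.record_read();
    }
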
--- .../rustc_query_system/src/dep_graph/graph.rs | 80 ++++++++++--------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index d54b77dde93ea..0a349558be3b3 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -2,19 +2,17 @@ use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::profiling::QueryInvocationId; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, RwLock}; +use rustc_data_structures::sync::{AtomicU32, Lock, Lrc, RwLock}; use rustc_data_structures::unlikely; use rustc_errors::Diagnostic; use rustc_index::vec::IndexVec; use rustc_serialize::{Encodable, Encoder}; use smallvec::{smallvec, SmallVec}; -use std::env; use std::hash::Hash; use std::marker::PhantomData; use std::sync::atomic::Ordering::Relaxed; -use super::debug::EdgeFilter; use super::query::DepGraphQuery; use super::serialized::{ CurrentDepGraph, DepNodeColor, DepNodeIndex, SerializedDepGraph, SerializedDepNodeIndex, @@ -22,6 +20,9 @@ use super::serialized::{ use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use crate::query::QueryContext; +#[cfg(debug_assertions)] +use {super::debug::EdgeFilter, rustc_data_structures::sync::AtomicU64, std::env}; + #[derive(Clone)] pub struct DepGraph { data: Option>>, @@ -49,11 +50,6 @@ struct DepGraphData { /// nodes and edges as well as all fingerprints of nodes that have them. previous: RwLock>, - /// Used to trap when a specific edge is added to the graph. - /// This is used for debug purposes and is only active with `debug_assertions`. - #[allow(dead_code)] - forbidden_edge: Option, - /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// their edges. This has the beneficial side-effect that multiple anonymous /// nodes can be coalesced into one without changing the semantics of the @@ -67,11 +63,6 @@ struct DepGraphData { /// the `DepGraph` is created. anon_id_seed: Fingerprint, - /// These are simple counters that are for profiling and - /// debugging and only active with `debug_assertions`. - total_read_count: AtomicU64, - total_duplicate_read_count: AtomicU64, - /// A set of loaded diagnostics that is in the progress of being emitted. emitting_diagnostics: Lock>, @@ -82,6 +73,18 @@ struct DepGraphData { previous_work_products: FxHashMap, dep_node_debug: Lock, String>>, + + /// Used to trap when a specific edge is added to the graph. + /// This is used for debug purposes and is only active with `debug_assertions`. + #[cfg(debug_assertions)] + forbidden_edge: Option, + + /// These are simple counters that are for profiling and + /// debugging and only active with `debug_assertions`. 
+ #[cfg(debug_assertions)] + total_read_count: AtomicU64, + #[cfg(debug_assertions)] + total_duplicate_read_count: AtomicU64, } pub fn hash_result(hcx: &mut HashCtxt, result: &R) -> Option @@ -106,28 +109,28 @@ impl DepGraph { let mut stable_hasher = StableHasher::new(); nanos.hash(&mut stable_hasher); - let forbidden_edge = if cfg!(debug_assertions) { - match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { - Ok(s) => match EdgeFilter::new(&s) { - Ok(f) => Some(f), - Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), - }, - Err(_) => None, - } - } else { - None + #[cfg(debug_assertions)] + let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + }, + Err(_) => None, }; DepGraph { data: Some(Lrc::new(DepGraphData { + previous: RwLock::new(CurrentDepGraph::new(prev_graph)), + emitting_diagnostics: Default::default(), previous_work_products: prev_work_products, dep_node_debug: Default::default(), anon_id_seed: stable_hasher.finish(), + #[cfg(debug_assertions)] forbidden_edge, + #[cfg(debug_assertions)] total_read_count: AtomicU64::new(0), + #[cfg(debug_assertions)] total_duplicate_read_count: AtomicU64::new(0), - emitting_diagnostics: Default::default(), - previous: RwLock::new(CurrentDepGraph::new(prev_graph)), })), virtual_dep_node_index: Lrc::new(AtomicU32::new(0)), } @@ -305,14 +308,13 @@ impl DepGraph { #[inline] pub fn read_index(&self, dep_node_index: DepNodeIndex) { - if let Some(ref data) = self.data { + if let Some(ref _data) = self.data { K::read_deps(|task_deps| { if let Some(task_deps) = task_deps { let mut task_deps = task_deps.lock(); let task_deps = &mut *task_deps; - if cfg!(debug_assertions) { - data.total_read_count.fetch_add(1, Relaxed); - } + #[cfg(debug_assertions)] + _data.total_read_count.fetch_add(1, Relaxed); // As long as we only have a low number of reads we can avoid doing a hash // insert and potentially allocating/reallocating the hashmap @@ -330,18 +332,17 @@ impl DepGraph { } #[cfg(debug_assertions)] - { - if let Some(target) = task_deps.node { - if let Some(ref forbidden_edge) = data.forbidden_edge { - let src = self.dep_node_of(dep_node_index); - if forbidden_edge.test(&src, &target) { - panic!("forbidden edge {:?} -> {:?} created", src, target) - } + if let Some(target) = task_deps.node { + if let Some(ref forbidden_edge) = _data.forbidden_edge { + let src = self.dep_node_of(dep_node_index); + if forbidden_edge.test(&src, &target) { + panic!("forbidden edge {:?} -> {:?} created", src, target) } } } - } else if cfg!(debug_assertions) { - data.total_duplicate_read_count.fetch_add(1, Relaxed); + } else { + #[cfg(debug_assertions)] + _data.total_duplicate_read_count.fetch_add(1, Relaxed); } } }) @@ -750,7 +751,8 @@ impl DepGraph { eprintln!("[incremental] Total Node Count: {}", total_node_count); eprintln!("[incremental] Total Edge Count: {}", total_edge_count); - if cfg!(debug_assertions) { + #[cfg(debug_assertions)] + { let total_edge_reads = data.total_read_count.load(Relaxed); let total_duplicate_edge_reads = data.total_duplicate_read_count.load(Relaxed); From ac230b7b9f4d0388ee9228d9a36652c3480f29a0 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 24 Feb 2021 19:20:57 +0100 Subject: [PATCH 11/14] Cleanup. 
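Besides renaming the `previous` field to `graph` and tightening several methods to `pub(crate)` or private, this patch folds the `self.data.is_some()` check into `dep_node_index_of_opt` by using `?` on `self.data.as_ref()`. A stand-alone sketch of that simplification, with stand-in types:

    struct Graph {
        data: Option<Vec<u32>>,
    }

    impl Graph {
        // The helper short-circuits with `?` when there is no data, so callers
        // such as `exists` no longer need their own `is_some()` guard.
        fn index_of_opt(&self, needle: u32) -> Option<usize> {
            let data = self.data.as_ref()?;
            data.iter().position(|&x| x == needle)
        }

        fn exists(&self, needle: u32) -> bool {
            self.index_of_opt(needle).is_some()
        }
    }

    fn main() {
        let empty = Graph { data: None };
        assert!(!empty.exists(7));
        let filled = Graph { data: Some(vec![3, 7]) };
        assert!(filled.exists(7));
    }
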
--- .../rustc_query_system/src/dep_graph/graph.rs | 91 +++++++++---------- 1 file changed, 43 insertions(+), 48 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 0a349558be3b3..3352691b8e833 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -46,9 +46,9 @@ impl std::convert::From for QueryInvocationId { } struct DepGraphData { - /// The dep-graph from the previous compilation session. It contains all + /// The dep-graph from the compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. - previous: RwLock>, + graph: RwLock>, /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// their edges. This has the beneficial side-effect that multiple anonymous @@ -120,7 +120,7 @@ impl DepGraph { DepGraph { data: Some(Lrc::new(DepGraphData { - previous: RwLock::new(CurrentDepGraph::new(prev_graph)), + graph: RwLock::new(CurrentDepGraph::new(prev_graph)), emitting_diagnostics: Default::default(), previous_work_products: prev_work_products, dep_node_debug: Default::default(), @@ -148,7 +148,7 @@ impl DepGraph { pub fn query(&self) -> DepGraphQuery { let data = self.data.as_ref().unwrap(); - data.previous.read().query() + data.graph.read().query() } pub fn assert_ignored(&self) { @@ -238,7 +238,7 @@ impl DepGraph { let current_fingerprint = hash_result(&mut hcx, &result); // Intern the new `DepNode`. - let dep_node_index = data.previous.write().intern_task_node( + let dep_node_index = data.graph.write().intern_task_node( key, &edges[..], current_fingerprint, @@ -284,8 +284,8 @@ impl DepGraph { hash: data.anon_id_seed.combine(hasher.finish()).into(), }; - let mut previous = data.previous.write(); - let dep_node_index = previous.intern_anon_node(target_dep_node, &task_deps.reads[..]); + let dep_node_index = + data.graph.write().intern_anon_node(target_dep_node, &task_deps.reads[..]); (result, dep_node_index) } else { @@ -295,7 +295,7 @@ impl DepGraph { /// Executes something within an "eval-always" task which is a task /// that runs whenever anything changes. 
- pub fn with_eval_always_task, A, R>( + pub(crate) fn with_eval_always_task, A, R>( &self, key: DepNode, cx: Ctxt, @@ -350,31 +350,26 @@ impl DepGraph { } #[inline] - pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex { - self.dep_node_index_of_opt(dep_node).unwrap() - } - - #[inline] - pub fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { - let data = self.data.as_ref().unwrap(); - data.previous.read().dep_node_index_of_opt(dep_node) + fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { + let data = self.data.as_ref()?; + data.graph.read().dep_node_index_of_opt(dep_node) } #[inline] pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool { - self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some() + self.dep_node_index_of_opt(dep_node).is_some() } #[inline] - pub fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { + fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { let data = self.data.as_ref().unwrap(); - data.previous.read().dep_node_of(dep_node_index) + data.graph.read().dep_node_of(dep_node_index) } #[inline] - pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { + pub(crate) fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { let data = self.data.as_ref().unwrap(); - data.previous.read().fingerprint_of(dep_node_index) + data.graph.read().fingerprint_of(dep_node_index) } /// Checks whether a previous work product exists for `v` and, if @@ -390,7 +385,7 @@ impl DepGraph { } #[inline(always)] - pub fn register_dep_node_debug_str(&self, dep_node: DepNode, debug_str_gen: F) + pub(crate) fn register_dep_node_debug_str(&self, dep_node: DepNode, debug_str_gen: F) where F: FnOnce() -> String, { @@ -409,9 +404,9 @@ impl DepGraph { pub fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(ref data) = self.data { - let previous = data.previous.read(); - if let Some(prev_index) = previous.node_to_index_opt(dep_node) { - return previous.color(prev_index); + let graph = data.graph.read(); + if let Some(prev_index) = graph.node_to_index_opt(dep_node) { + return graph.color(prev_index); } else { // This is a node that did not exist in the previous compilation // session, so we consider it to be red. @@ -426,7 +421,7 @@ impl DepGraph { /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when /// a node index can be found for that node. 
- pub fn try_mark_green_and_read>( + pub(crate) fn try_mark_green_and_read>( &self, tcx: Ctxt, dep_node: &DepNode, @@ -449,8 +444,8 @@ impl DepGraph { let data = self.data.as_ref()?; // Return None if the dep node didn't exist in the previous session - let prev_index = data.previous.read().node_to_index_opt(dep_node)?; - let prev_deps = data.previous.read().color_or_edges(prev_index); + let prev_index = data.graph.read().node_to_index_opt(dep_node)?; + let prev_deps = data.graph.read().color_or_edges(prev_index); let prev_deps = match prev_deps { Err(prev_deps) => prev_deps, Ok(DepNodeColor::Green) => return Some((prev_index, prev_index.rejuvenate())), @@ -473,7 +468,7 @@ impl DepGraph { parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option<()> { - let dep_dep_node_color = data.previous.read().color_or_edges(parent_dep_node_index); + let dep_dep_node_color = data.graph.read().color_or_edges(parent_dep_node_index); let prev_deps = match dep_dep_node_color { Ok(DepNodeColor::Green) => { // This dependency has been marked as green before, we are @@ -482,7 +477,7 @@ impl DepGraph { debug!( "try_mark_parent_green({:?}) --- found dependency {:?} to be immediately green", dep_node, - data.previous.read().index_to_node(parent_dep_node_index) + data.graph.read().index_to_node(parent_dep_node_index) ); return Some(()); } @@ -494,7 +489,7 @@ impl DepGraph { debug!( "try_mark_parent_green({:?}) - END - dependency {:?} was immediately red", dep_node, - data.previous.read().index_to_node(parent_dep_node_index) + data.graph.read().index_to_node(parent_dep_node_index) ); return None; } @@ -508,12 +503,12 @@ impl DepGraph { is unknown, trying to mark it green", dep_node, { - let dep_dep_node = data.previous.read().index_to_node(parent_dep_node_index); + let dep_dep_node = data.graph.read().index_to_node(parent_dep_node_index); (dep_dep_node, dep_dep_node.hash) } ); - let dep_dep_node = &data.previous.read().index_to_node(parent_dep_node_index); + let dep_dep_node = &data.graph.read().index_to_node(parent_dep_node_index); let node_index = self.try_mark_previous_green(tcx, data, parent_dep_node_index, prev_deps, dep_dep_node); if node_index.is_some() { @@ -538,7 +533,7 @@ impl DepGraph { return None; } - let dep_dep_node_color = data.previous.read().color(parent_dep_node_index); + let dep_dep_node_color = data.graph.read().color(parent_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green) => { @@ -597,7 +592,7 @@ impl DepGraph { // We never try to mark eval_always nodes as green debug_assert!(!dep_node.kind.is_eval_always()); - debug_assert_eq!(data.previous.read().index_to_node(prev_dep_node_index), *dep_node); + debug_assert_eq!(data.graph.read().index_to_node(prev_dep_node_index), *dep_node); for &dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? @@ -605,7 +600,7 @@ impl DepGraph { #[cfg(not(parallel_compiler))] debug_assert_eq!( - data.previous.read().color(prev_dep_node_index), + data.graph.read().color(prev_dep_node_index), None, "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ insertion for {:?}", @@ -621,7 +616,7 @@ impl DepGraph { let dep_node_index = { // We allocating an entry for the node in the current dependency graph and // adding all the appropriate edges imported from the previous graph - data.previous.write().intern_dark_green_node(prev_dep_node_index) + data.graph.write().intern_dark_green_node(prev_dep_node_index) }; // ... and emitting any stored diagnostic. 
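The marking path above leans on `color_or_edges`, which returns either the node's already-known color or, for a still-uncolored node, a borrow of its serialized dependency list. A reduced sketch of that `Result`-based idiom with stand-in types; the real code also forces queries and walks parents through `try_mark_parent_green`, which this sketch omits:

    #[derive(Clone, Copy)]
    enum Color {
        Red,
        Green,
    }

    struct Node {
        color: Option<Color>,
        deps: Vec<usize>,
    }

    // Either the known color (Ok) or the uncolored node's dependencies (Err).
    fn color_or_edges(nodes: &[Node], i: usize) -> Result<Color, &[usize]> {
        match nodes[i].color {
            Some(c) => Ok(c),
            None => Err(&nodes[i].deps),
        }
    }

    // Returns Some(()) if every transitive dependency is green.
    // Assumes an acyclic graph, as the dep-graph is.
    fn try_mark_green(nodes: &[Node], i: usize) -> Option<()> {
        let deps = match color_or_edges(nodes, i) {
            Ok(Color::Green) => return Some(()),
            Ok(Color::Red) => return None,
            Err(deps) => deps,
        };
        for &dep in deps {
            try_mark_green(nodes, dep)?;
        }
        Some(())
    }

    fn main() {
        let nodes = vec![
            Node { color: Some(Color::Green), deps: vec![] },
            Node { color: None, deps: vec![0] },
        ];
        assert_eq!(try_mark_green(&nodes, 1), Some(()));
    }
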
@@ -677,11 +672,11 @@ impl DepGraph { let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let data = self.data.as_ref().unwrap(); - let previous = data.previous.read(); - for prev_index in previous.serialized_indices() { - match previous.color(prev_index) { + let graph = data.graph.read(); + for prev_index in graph.serialized_indices() { + match graph.color(prev_index) { Some(DepNodeColor::Green) => { - let dep_node = data.previous.read().index_to_node(prev_index); + let dep_node = data.graph.read().index_to_node(prev_index); debug!("PROMOTE {:?} {:?}", prev_index, dep_node); qcx.try_load_from_on_disk_cache(&dep_node); } @@ -697,11 +692,11 @@ impl DepGraph { // Register reused dep nodes (i.e. nodes we've marked red or green) with the context. pub fn register_reused_dep_nodes>(&self, tcx: Ctxt) { let data = self.data.as_ref().unwrap(); - let previous = data.previous.read(); - for prev_index in previous.serialized_indices() { - match previous.color(prev_index) { + let graph = data.graph.read(); + for prev_index in graph.serialized_indices() { + match graph.color(prev_index) { Some(_) => { - let dep_node = data.previous.read().index_to_node(prev_index); + let dep_node = data.graph.read().index_to_node(prev_index); tcx.register_reused_dep_node(&dep_node); } None => {} @@ -718,7 +713,7 @@ impl DepGraph { } let data = self.data.as_ref().unwrap(); - let prev = &data.previous.read(); + let prev = &data.graph.read(); let mut stats: FxHashMap<_, Stat> = FxHashMap::with_hasher(Default::default()); @@ -797,14 +792,14 @@ impl DepGraph { } pub fn compression_map(&self) -> IndexVec> { - self.data.as_ref().unwrap().previous.read().compression_map() + self.data.as_ref().unwrap().graph.read().compression_map() } pub fn encode(&self, encoder: &mut E) -> Result<(), E::Error> where K: Encodable, { - if let Some(data) = &self.data { data.previous.read().encode(encoder) } else { Ok(()) } + if let Some(data) = &self.data { data.graph.read().encode(encoder) } else { Ok(()) } } } From 064bbb8db6e7bfab21c10431e27061883e2eb730 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 24 Feb 2021 20:42:55 +0100 Subject: [PATCH 12/14] Make try_mark_previous_green lock-free. --- compiler/rustc_data_structures/src/sync.rs | 11 +- .../rustc_query_system/src/dep_graph/graph.rs | 92 ++-- .../src/dep_graph/serialized.rs | 404 ++++++++++-------- 3 files changed, 288 insertions(+), 219 deletions(-) diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs index 26706cd2b1b77..4467e8e1fd852 100644 --- a/compiler/rustc_data_structures/src/sync.rs +++ b/compiler/rustc_data_structures/src/sync.rs @@ -40,7 +40,7 @@ cfg_if! { } } - use std::ops::Add; + use std::ops::{Add, BitOr}; use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe}; /// This is a single threaded variant of AtomicCell provided by crossbeam. @@ -147,6 +147,15 @@ cfg_if! 
{ } } + impl + Copy> Atomic { + #[inline] + pub fn fetch_or(&self, val: T, _: Ordering) -> T { + let old = self.0.get(); + self.0.set(old | val); + old + } + } + pub type AtomicUsize = Atomic; pub type AtomicBool = Atomic; pub type AtomicU32 = Atomic; diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 3352691b8e833..19deffdcf3b5a 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -46,6 +46,10 @@ impl std::convert::From for QueryInvocationId { } struct DepGraphData { + /// The dep-graph from the previous compilation session. It contains all + /// nodes and edges as well as all fingerprints of nodes that have them. + serialized: SerializedDepGraph, + /// The dep-graph from the compilation session. It contains all /// nodes and edges as well as all fingerprints of nodes that have them. graph: RwLock>, @@ -118,9 +122,11 @@ impl DepGraph { Err(_) => None, }; + let graph = RwLock::new(CurrentDepGraph::new(&prev_graph)); DepGraph { data: Some(Lrc::new(DepGraphData { - graph: RwLock::new(CurrentDepGraph::new(prev_graph)), + serialized: prev_graph, + graph, emitting_diagnostics: Default::default(), previous_work_products: prev_work_products, dep_node_debug: Default::default(), @@ -148,7 +154,7 @@ impl DepGraph { pub fn query(&self) -> DepGraphQuery { let data = self.data.as_ref().unwrap(); - data.graph.read().query() + data.graph.read().query(&data.serialized) } pub fn assert_ignored(&self) { @@ -239,6 +245,7 @@ impl DepGraph { // Intern the new `DepNode`. let dep_node_index = data.graph.write().intern_task_node( + &data.serialized, key, &edges[..], current_fingerprint, @@ -284,8 +291,11 @@ impl DepGraph { hash: data.anon_id_seed.combine(hasher.finish()).into(), }; - let dep_node_index = - data.graph.write().intern_anon_node(target_dep_node, &task_deps.reads[..]); + let dep_node_index = data.graph.write().intern_anon_node( + &data.serialized, + target_dep_node, + &task_deps.reads[..], + ); (result, dep_node_index) } else { @@ -334,7 +344,10 @@ impl DepGraph { #[cfg(debug_assertions)] if let Some(target) = task_deps.node { if let Some(ref forbidden_edge) = _data.forbidden_edge { - let src = self.dep_node_of(dep_node_index); + let src = _data + .graph + .read() + .index_to_node(&_data.serialized, dep_node_index); if forbidden_edge.test(&src, &target) { panic!("forbidden edge {:?} -> {:?} created", src, target) } @@ -352,7 +365,7 @@ impl DepGraph { #[inline] fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { let data = self.data.as_ref()?; - data.graph.read().dep_node_index_of_opt(dep_node) + data.graph.read().dep_node_index_of_opt(&data.serialized, dep_node) } #[inline] @@ -360,16 +373,10 @@ impl DepGraph { self.dep_node_index_of_opt(dep_node).is_some() } - #[inline] - fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { - let data = self.data.as_ref().unwrap(); - data.graph.read().dep_node_of(dep_node_index) - } - #[inline] pub(crate) fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { let data = self.data.as_ref().unwrap(); - data.graph.read().fingerprint_of(dep_node_index) + data.graph.read().fingerprint_of(&data.serialized, dep_node_index) } /// Checks whether a previous work product exists for `v` and, if @@ -405,8 +412,8 @@ impl DepGraph { pub fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(ref data) = self.data { let graph = data.graph.read(); - if let Some(prev_index) = 
graph.node_to_index_opt(dep_node) { - return graph.color(prev_index); + if let Some(prev_index) = graph.node_to_index_opt(&data.serialized, dep_node) { + return data.serialized.color(prev_index); } else { // This is a node that did not exist in the previous compilation // session, so we consider it to be red. @@ -444,8 +451,8 @@ impl DepGraph { let data = self.data.as_ref()?; // Return None if the dep node didn't exist in the previous session - let prev_index = data.graph.read().node_to_index_opt(dep_node)?; - let prev_deps = data.graph.read().color_or_edges(prev_index); + let prev_index = data.graph.read().node_to_index_opt(&data.serialized, dep_node)?; + let prev_deps = data.serialized.color_or_edges(prev_index); let prev_deps = match prev_deps { Err(prev_deps) => prev_deps, Ok(DepNodeColor::Green) => return Some((prev_index, prev_index.rejuvenate())), @@ -468,7 +475,7 @@ impl DepGraph { parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option<()> { - let dep_dep_node_color = data.graph.read().color_or_edges(parent_dep_node_index); + let dep_dep_node_color = data.serialized.color_or_edges(parent_dep_node_index); let prev_deps = match dep_dep_node_color { Ok(DepNodeColor::Green) => { // This dependency has been marked as green before, we are @@ -477,7 +484,7 @@ impl DepGraph { debug!( "try_mark_parent_green({:?}) --- found dependency {:?} to be immediately green", dep_node, - data.graph.read().index_to_node(parent_dep_node_index) + data.serialized.index_to_node(parent_dep_node_index) ); return Some(()); } @@ -489,7 +496,7 @@ impl DepGraph { debug!( "try_mark_parent_green({:?}) - END - dependency {:?} was immediately red", dep_node, - data.graph.read().index_to_node(parent_dep_node_index) + data.serialized.index_to_node(parent_dep_node_index) ); return None; } @@ -503,12 +510,12 @@ impl DepGraph { is unknown, trying to mark it green", dep_node, { - let dep_dep_node = data.graph.read().index_to_node(parent_dep_node_index); + let dep_dep_node = data.serialized.index_to_node(parent_dep_node_index); (dep_dep_node, dep_dep_node.hash) } ); - let dep_dep_node = &data.graph.read().index_to_node(parent_dep_node_index); + let dep_dep_node = &data.serialized.index_to_node(parent_dep_node_index); let node_index = self.try_mark_previous_green(tcx, data, parent_dep_node_index, prev_deps, dep_dep_node); if node_index.is_some() { @@ -533,7 +540,7 @@ impl DepGraph { return None; } - let dep_dep_node_color = data.graph.read().color(parent_dep_node_index); + let dep_dep_node_color = data.serialized.color(parent_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green) => { @@ -592,7 +599,7 @@ impl DepGraph { // We never try to mark eval_always nodes as green debug_assert!(!dep_node.kind.is_eval_always()); - debug_assert_eq!(data.graph.read().index_to_node(prev_dep_node_index), *dep_node); + debug_assert_eq!(data.serialized.index_to_node(prev_dep_node_index), *dep_node); for &dep_dep_node_index in prev_deps { self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? 
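What makes the lock-free reads above possible is shown further down in the serialized.rs hunks of this patch: each node's color is packed into the top bits of an `AtomicU64` (`ColorAndOffset`), so `color()` is a plain atomic load and `set_color()` a `fetch_or`. The sketch below is a stripped-down illustration of that bit-packing under simplified assumptions (one color tag, an opaque payload in the low bits); `TaggedCell`, `set_green`, and the string return value are invented for this note and do not exist in rustc.

use std::sync::atomic::{AtomicU64, Ordering};

// Top two bits hold the color tag; the low bits hold the payload
// (in the real ColorAndOffset this is a packed start/end edge range).
const TAG_GREEN: u64 = 1 << 62;
const TAG_MASK: u64 = 3 << 62;

struct TaggedCell(AtomicU64);

impl TaggedCell {
    fn new(payload: u64) -> Self {
        debug_assert_eq!(payload & TAG_MASK, 0);
        TaggedCell(AtomicU64::new(payload))
    }

    /// Lock-free read: callers can query the color without taking the graph's RwLock.
    fn color(&self) -> Option<&'static str> {
        match self.0.load(Ordering::Acquire) & TAG_MASK {
            TAG_GREEN => Some("green"),
            _ => None,
        }
    }

    /// A color is only ever set once, so OR-ing the tag in is enough and
    /// never clobbers the payload bits.
    fn set_green(&self) {
        self.0.fetch_or(TAG_GREEN, Ordering::SeqCst);
    }
}

fn main() {
    let cell = TaggedCell::new(0x1234);
    assert_eq!(cell.color(), None);
    cell.set_green();
    assert_eq!(cell.color(), Some("green"));
    // The payload survives the color update.
    assert_eq!(cell.0.load(Ordering::Relaxed) & !TAG_MASK, 0x1234);
}
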
@@ -600,7 +607,7 @@ impl DepGraph { #[cfg(not(parallel_compiler))] debug_assert_eq!( - data.graph.read().color(prev_dep_node_index), + data.serialized.color(prev_dep_node_index), None, "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ insertion for {:?}", @@ -616,7 +623,7 @@ impl DepGraph { let dep_node_index = { // We allocating an entry for the node in the current dependency graph and // adding all the appropriate edges imported from the previous graph - data.graph.write().intern_dark_green_node(prev_dep_node_index) + data.serialized.intern_dark_green_node(prev_dep_node_index) }; // ... and emitting any stored diagnostic. @@ -672,11 +679,10 @@ impl DepGraph { let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let data = self.data.as_ref().unwrap(); - let graph = data.graph.read(); - for prev_index in graph.serialized_indices() { - match graph.color(prev_index) { + for prev_index in data.serialized.serialized_indices() { + match data.serialized.color(prev_index) { Some(DepNodeColor::Green) => { - let dep_node = data.graph.read().index_to_node(prev_index); + let dep_node = data.serialized.index_to_node(prev_index); debug!("PROMOTE {:?} {:?}", prev_index, dep_node); qcx.try_load_from_on_disk_cache(&dep_node); } @@ -692,11 +698,10 @@ impl DepGraph { // Register reused dep nodes (i.e. nodes we've marked red or green) with the context. pub fn register_reused_dep_nodes>(&self, tcx: Ctxt) { let data = self.data.as_ref().unwrap(); - let graph = data.graph.read(); - for prev_index in graph.serialized_indices() { - match graph.color(prev_index) { + for prev_index in data.serialized.serialized_indices() { + match data.serialized.color(prev_index) { Some(_) => { - let dep_node = data.graph.read().index_to_node(prev_index); + let dep_node = data.serialized.index_to_node(prev_index); tcx.register_reused_dep_node(&dep_node); } None => {} @@ -717,17 +722,17 @@ impl DepGraph { let mut stats: FxHashMap<_, Stat> = FxHashMap::with_hasher(Default::default()); - for index in prev.live_indices() { - let kind = prev.dep_node_of(index).kind; - let edge_count = prev.edge_targets_from(index).len(); + for index in prev.live_indices(&data.serialized) { + let kind = prev.index_to_node(&data.serialized, index).kind; + let edge_count = prev.edge_targets_from(&data.serialized, index).len(); let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 }); stat.node_counter += 1; stat.edge_counter += edge_count as u64; } - let total_node_count = prev.node_count(); - let total_edge_count = prev.edge_count(); + let total_node_count = prev.node_count(&data.serialized); + let total_edge_count = prev.edge_count(&data.serialized); // Drop the lock guard. 
std::mem::drop(prev); @@ -792,14 +797,19 @@ impl DepGraph { } pub fn compression_map(&self) -> IndexVec> { - self.data.as_ref().unwrap().graph.read().compression_map() + let data = self.data.as_ref().unwrap(); + data.graph.read().compression_map(&data.serialized) } pub fn encode(&self, encoder: &mut E) -> Result<(), E::Error> where K: Encodable, { - if let Some(data) = &self.data { data.graph.read().encode(encoder) } else { Ok(()) } + if let Some(data) = &self.data { + data.graph.read().encode(&data.serialized, encoder) + } else { + Ok(()) + } } } diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 5b655cf744132..71ad8db1bbbb5 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -4,6 +4,7 @@ use super::query::DepGraphQuery; use super::{DepKind, DepNode}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{AtomicU64, Ordering}; use rustc_index::vec::{Idx, IndexArray, IndexVec}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use std::convert::TryInto; @@ -16,15 +17,16 @@ pub enum DepNodeColor { New, } -const TAG_UNKNOWN: u32 = 0; -const TAG_GREEN: u32 = 1 << 30; -const TAG_RED: u32 = 2 << 30; -const TAG_NEW: u32 = 3 << 30; -const TAG_MASK: u32 = TAG_UNKNOWN | TAG_GREEN | TAG_RED | TAG_NEW; -const OFFSET_MASK: u32 = !TAG_MASK; +const TAG_UNKNOWN: u64 = 0; +const TAG_GREEN: u64 = 1 << 62; +const TAG_RED: u64 = 2 << 62; +const TAG_NEW: u64 = 3 << 62; +const TAG_LIFTED: u64 = 1 << 31; +const TAG_MASK: u64 = TAG_UNKNOWN | TAG_GREEN | TAG_RED | TAG_NEW | TAG_LIFTED; +const OFFSET_MASK: u64 = !TAG_MASK; impl DepNodeColor { - const fn tag(self) -> u32 { + const fn tag(self) -> u64 { match self { Self::Green => TAG_GREEN, Self::Red => TAG_RED, @@ -67,50 +69,51 @@ impl SerializedDepNodeIndex { static_assert_size!(Option, 4); static_assert_size!(Option, 4); -#[derive(Copy, Clone, Encodable, Decodable)] -struct ColorAndOffset(u32, u32); +struct ColorAndOffset(AtomicU64); impl std::fmt::Debug for ColorAndOffset { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let (lifted, range) = self.range(); fmt.debug_struct("ColorAndOffset") .field("color", &self.color()) - .field("lifted", &self.lifted()) - .field("range", &self.range()) + .field("lifted", &lifted) + .field("range", &range) .finish() } } impl ColorAndOffset { fn unknown(start: u32, end: u32) -> ColorAndOffset { - debug_assert_eq!(start & TAG_MASK, 0); - debug_assert_eq!(end & TAG_MASK, 0); - ColorAndOffset(start | TAG_UNKNOWN, end | TAG_UNKNOWN) + let val = (start as u64) << 32 | (end as u64); + debug_assert_eq!(val & TAG_MASK, 0); + ColorAndOffset(AtomicU64::new(val)) } #[allow(dead_code)] fn new(color: DepNodeColor, range: Range) -> ColorAndOffset { let start: u32 = range.start.try_into().unwrap(); let end: u32 = range.end.try_into().unwrap(); - debug_assert_eq!(start & TAG_MASK, 0); - debug_assert_eq!(end & TAG_MASK, 0); - ColorAndOffset(start | color.tag(), end | TAG_UNKNOWN) + let val = (start as u64) << 32 | (end as u64); + debug_assert_eq!(val & TAG_MASK, 0); + let val = val | color.tag(); + ColorAndOffset(AtomicU64::new(val)) } - fn new_lifted(color: DepNodeColor, range: Range) -> ColorAndOffset { + fn set_lifted(&self, color: DepNodeColor, range: Range) { let start: u32 = range.start.try_into().unwrap(); let end: u32 = range.end.try_into().unwrap(); - 
debug_assert_eq!(start & TAG_MASK, 0); - debug_assert_eq!(end & TAG_MASK, 0); - ColorAndOffset(start | color.tag(), end | TAG_GREEN) + let val = (start as u64) << 32 | (end as u64); + debug_assert_eq!(val & TAG_MASK, 0); + let val = val | color.tag() | TAG_LIFTED; + self.0.store(val, Ordering::Release) } - fn set_color(&mut self, color: DepNodeColor) { - let offset = self.0 & OFFSET_MASK; - self.0 = color.tag() | offset; + fn set_color(&self, color: DepNodeColor) { + self.0.fetch_or(color.tag(), Ordering::SeqCst); } - fn color(self) -> Option { - let tag = self.0 & TAG_MASK; + fn color(&self) -> Option { + let tag = self.0.load(Ordering::Acquire) & TAG_MASK & !TAG_LIFTED; match tag { TAG_NEW => Some(DepNodeColor::New), TAG_RED => Some(DepNodeColor::Red), @@ -120,18 +123,13 @@ impl ColorAndOffset { } } - fn lifted(self) -> bool { - let tag = self.1 & TAG_MASK; - match tag { - TAG_UNKNOWN => false, - _ => true, - } - } - - fn range(self) -> Range { - let start = (self.0 & OFFSET_MASK) as usize; - let end = (self.1 & OFFSET_MASK) as usize; - start..end + fn range(&self) -> (bool, Range) { + let val = self.0.load(Ordering::Acquire); + let lifted = val & TAG_LIFTED != 0; + let val = val & OFFSET_MASK; + let start = (val >> 32) as usize; + let end = val as u32 as usize; + (lifted, start..end) } } @@ -146,6 +144,7 @@ pub struct SerializedDepGraph { nodes: IndexArray>, /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to /// the DepNode at the same index in the nodes vector. + // This field must only be read or modified when holding a lock to the CurrentDepGraph. fingerprints: IndexArray, /// For each DepNode, stores the list of edges originating from that /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data, @@ -159,8 +158,6 @@ pub struct SerializedDepGraph { /// Data for use when recompiling the **current crate**. #[derive(Debug)] pub struct CurrentDepGraph { - /// The previous graph. - serialized: SerializedDepGraph, /// The set of all DepNodes in the graph nodes: IndexVec>, /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to @@ -188,8 +185,82 @@ impl Default for SerializedDepGraph { } } +impl SerializedDepGraph { + pub(crate) fn intern_dark_green_node(&self, index: SerializedDepNodeIndex) -> DepNodeIndex { + debug!("intern_drak_green: {:?}", index); + debug_assert_eq!(self.edge_list_indices[index].color(), None); + self.edge_list_indices[index].set_color(DepNodeColor::Green); + debug!("intern_color: {:?} => Green", index); + index.rejuvenate() + } + + #[inline] + pub(crate) fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { + self.nodes[dep_node_index] + } + + #[inline] + pub(crate) fn color(&self, index: SerializedDepNodeIndex) -> Option { + self.edge_list_indices[index].color() + } + + #[inline] + pub(crate) fn color_or_edges( + &self, + source: SerializedDepNodeIndex, + ) -> Result { + let range = &self.edge_list_indices[source]; + if let Some(color) = range.color() { + return Ok(color); + } + // The node has not been colored, so the dependencies have not been lifted to point to the + // new nodes vector. + let (_lifted, range) = range.range(); + debug_assert!(!_lifted); + let edges = &self.edge_list_data[range]; + debug_assert_eq!( + std::mem::size_of::(), + std::mem::size_of::() + ); + // SAFETY: 1. self.edge_list_data is never modified. + // 2. SerializedDepNodeIndex and DepNodeIndex have the same binary representation. 
+ let edges = unsafe { std::mem::transmute::<&[_], &[_]>(edges) }; + Err(edges) + } + + #[inline] + fn as_serialized(&self, index: DepNodeIndex) -> Result { + let index = index.index(); + let count = self.nodes.len(); + if index < count { + Ok(SerializedDepNodeIndex::new(index)) + } else { + Err(SplitIndex::new(index - count)) + } + } + + #[inline] + fn from_split(&self, index: SplitIndex) -> DepNodeIndex { + DepNodeIndex::new(self.nodes.len() + index.index()) + } + + #[inline] + pub(crate) fn serialized_indices(&self) -> impl Iterator { + self.nodes.indices() + } + + #[inline] + fn live_serialized_indices(&self) -> impl Iterator + '_ { + self.edge_list_indices.iter_enumerated().filter_map(|(i, range)| { + // Return none if the node has not been coloured yet. + let _ = range.color()?; + Some(i) + }) + } +} + impl CurrentDepGraph { - pub(crate) fn new(serialized: SerializedDepGraph) -> Self { + pub(crate) fn new(serialized: &SerializedDepGraph) -> Self { let prev_graph_node_count = serialized.nodes.len(); let nodes = node_count_estimate(prev_graph_node_count); let edges = edge_count_estimate(prev_graph_node_count); @@ -201,7 +272,6 @@ impl CurrentDepGraph { debug_assert_eq!(_o, None); } Self { - serialized, nodes: IndexVec::with_capacity(nodes), fingerprints: IndexVec::with_capacity(nodes), edge_list_indices: IndexVec::with_capacity(nodes), @@ -212,18 +282,19 @@ impl CurrentDepGraph { fn intern_new_node( &mut self, + serialized: &SerializedDepGraph, node: DepNode, deps: &[DepNodeIndex], fingerprint: Fingerprint, ) -> DepNodeIndex { let index = self.nodes.push(node); - debug!("intern_new: {:?} {:?}", self.from_split(index), node); + debug!("intern_new: {:?} {:?}", serialized.from_split(index), node); let _index = self.fingerprints.push(fingerprint); debug_assert_eq!(index, _index); let range = self.insert_deps(deps); let _index = self.edge_list_indices.push(shrink_range(range)); debug_assert_eq!(index, _index); - let index = self.from_split(index); + let index = serialized.from_split(index); let _o = self.index.insert(node, index); debug_assert_eq!(_o, None); index @@ -238,37 +309,31 @@ impl CurrentDepGraph { fn update_deps( &mut self, + serialized: &SerializedDepGraph, index: SerializedDepNodeIndex, color: DepNodeColor, deps: &[DepNodeIndex], ) { debug!("intern_color: {:?} => {:?}", index, color); - let range = self.serialized.edge_list_indices[index]; + let range = &serialized.edge_list_indices[index]; debug_assert_eq!(range.color(), None); let range = self.insert_deps(deps); - let range = ColorAndOffset::new_lifted(color, range); - self.serialized.edge_list_indices[index] = range; - } - - pub(crate) fn intern_dark_green_node(&mut self, index: SerializedDepNodeIndex) -> DepNodeIndex { - debug!("intern_drak_green: {:?}", index); - debug_assert_eq!(self.serialized.edge_list_indices[index].color(), None); - self.serialized.edge_list_indices[index].set_color(DepNodeColor::Green); - debug!("intern_color: {:?} => Green", index); - index.rejuvenate() + serialized.edge_list_indices[index].set_lifted(color, range); } pub(crate) fn intern_anon_node( &mut self, + serialized: &SerializedDepGraph, node: DepNode, deps: &[DepNodeIndex], ) -> DepNodeIndex { - self.dep_node_index_of_opt(&node) - .unwrap_or_else(|| self.intern_new_node(node, deps, Fingerprint::ZERO)) + self.dep_node_index_of_opt(serialized, &node) + .unwrap_or_else(|| self.intern_new_node(serialized, node, deps, Fingerprint::ZERO)) } pub(crate) fn intern_task_node( &mut self, + serialized: &SerializedDepGraph, node: DepNode, deps: 
&[DepNodeIndex], fingerprint: Option, @@ -277,10 +342,10 @@ impl CurrentDepGraph { let print_status = cfg!(debug_assertions) && print_status; if let Some(&existing) = self.index.get(&node) { - let prev_index = self + let prev_index = serialized .as_serialized(existing) .unwrap_or_else(|_| panic!("Node {:?} is being interned multiple times.", node)); - match self.color(prev_index) { + match serialized.color(prev_index) { Some(DepNodeColor::Red) | Some(DepNodeColor::New) => { panic!("Node {:?} is being interned multiple times.", node) } @@ -293,7 +358,7 @@ impl CurrentDepGraph { // Determine the color and index of the new `DepNode`. let color = if let Some(fingerprint) = fingerprint { - if fingerprint == self.serialized.fingerprints[prev_index] { + if fingerprint == serialized.fingerprints[prev_index] { if print_status { eprintln!("[task::green] {:?}", node); } @@ -308,7 +373,11 @@ impl CurrentDepGraph { // This is a red node: it existed in the previous compilation, its query // was re-executed, but it has a different result from before. - self.serialized.fingerprints[prev_index] = fingerprint; + // SAFETY: `serialized.fingerprints` is only read or mutated when holding a + // lock to CurrentDepGraph. + unsafe { + *(&serialized.fingerprints[prev_index] as *const _ as *mut _) = fingerprint + }; DepNodeColor::Red } } else { @@ -320,11 +389,16 @@ impl CurrentDepGraph { // session, its query was re-executed, but it doesn't compute a result hash // (i.e. it represents a `no_hash` query), so we have no way of determining // whether or not the result was the same as before. - self.serialized.fingerprints[prev_index] = Fingerprint::ZERO; + // SAFETY: `serialized.fingerprints` is only read or mutated when holding a + // lock to CurrentDepGraph. + unsafe { + *(&serialized.fingerprints[prev_index] as *const _ as *mut _) = + Fingerprint::ZERO + }; DepNodeColor::Red }; - self.update_deps(prev_index, color, deps); + self.update_deps(serialized, prev_index, color, deps); prev_index.rejuvenate() } else { if print_status { @@ -332,34 +406,29 @@ impl CurrentDepGraph { } // This is a new node: it didn't exist in the previous compilation session. 
- self.intern_new_node(node, deps, fingerprint.unwrap_or(Fingerprint::ZERO)) + self.intern_new_node(serialized, node, deps, fingerprint.unwrap_or(Fingerprint::ZERO)) } } #[inline] - fn as_serialized(&self, index: DepNodeIndex) -> Result { - let index = index.index(); - let count = self.serialized.nodes.len(); - if index < count { - Ok(SerializedDepNodeIndex::new(index)) - } else { - Err(SplitIndex::new(index - count)) - } - } - - #[inline] - fn from_split(&self, index: SplitIndex) -> DepNodeIndex { - DepNodeIndex::new(self.serialized.nodes.len() + index.index()) + pub(crate) fn node_to_index_opt( + &self, + serialized: &SerializedDepGraph, + dep_node: &DepNode, + ) -> Option { + let idx = *self.index.get(dep_node)?; + serialized.as_serialized(idx).ok() } #[inline] - fn serialized_edges(&self, source: SerializedDepNodeIndex) -> &[DepNodeIndex] { - let range = self.serialized.edge_list_indices[source]; - if range.lifted() { - &self.edge_list_data[range.range()] - } else { - &self.serialized.edge_list_data[range.range()] - } + fn serialized_edges<'a>( + &'a self, + serialized: &'a SerializedDepGraph, + source: SerializedDepNodeIndex, + ) -> &[DepNodeIndex] { + let range = &serialized.edge_list_indices[source]; + let (lifted, range) = range.range(); + if lifted { &self.edge_list_data[range] } else { &serialized.edge_list_data[range] } } #[inline] @@ -371,117 +440,88 @@ impl CurrentDepGraph { } #[inline] - pub(crate) fn color_or_edges( - &self, - source: SerializedDepNodeIndex, - ) -> Result { - let range = self.serialized.edge_list_indices[source]; - if let Some(color) = range.color() { - return Ok(color); - } - // The node has not been colored, so the dependencies have not been lifted to point to the - // new nodes vector. - debug_assert!(!range.lifted()); - let edges = &self.serialized.edge_list_data[range.range()]; - debug_assert_eq!( - std::mem::size_of::(), - std::mem::size_of::() - ); - // SAFETY: 1. serialized.edge_list_data is never modified. - // 2. SerializedDepNodeIndex and DepNodeIndex have the same binary representation. 
- let edges = unsafe { std::mem::transmute::<&[_], &[_]>(edges) }; - Err(edges) - } - - #[inline] - pub(crate) fn edge_targets_from(&self, source: DepNodeIndex) -> &[DepNodeIndex] { - match self.as_serialized(source) { - Ok(source) => self.serialized_edges(source), + pub(crate) fn edge_targets_from<'a>( + &'a self, + serialized: &'a SerializedDepGraph, + source: DepNodeIndex, + ) -> &[DepNodeIndex] { + match serialized.as_serialized(source) { + Ok(source) => self.serialized_edges(serialized, source), Err(source) => self.new_edges(source), } } #[inline] - pub(crate) fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode { - self.serialized.nodes[dep_node_index] - } - - #[inline] - pub(crate) fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode { - match self.as_serialized(dep_node_index) { - Ok(serialized) => self.serialized.nodes[serialized], + pub(crate) fn index_to_node( + &self, + serialized: &SerializedDepGraph, + dep_node_index: DepNodeIndex, + ) -> DepNode { + match serialized.as_serialized(dep_node_index) { + Ok(sni) => serialized.nodes[sni], Err(new) => self.nodes[new], } } #[inline] - pub(crate) fn node_to_index_opt( + pub(crate) fn dep_node_index_of_opt( &self, + serialized: &SerializedDepGraph, dep_node: &DepNode, - ) -> Option { - let idx = *self.index.get(dep_node)?; - self.as_serialized(idx).ok() - } - - #[inline] - pub(crate) fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option { + ) -> Option { let index = *self.index.get(dep_node)?; - if let Ok(prev) = self.as_serialized(index) { + if let Ok(prev) = serialized.as_serialized(index) { // Return none if the node has not been coloured yet. - self.serialized.edge_list_indices[prev].color()?; + serialized.edge_list_indices[prev].color()?; } Some(index) } #[inline] - pub(crate) fn color(&self, index: SerializedDepNodeIndex) -> Option { - self.serialized.edge_list_indices[index].color() - } - - #[inline] - pub(crate) fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { - match self.as_serialized(dep_node_index) { - Ok(serialized) => self.serialized.fingerprints[serialized], + pub(crate) fn fingerprint_of( + &self, + serialized: &SerializedDepGraph, + dep_node_index: DepNodeIndex, + ) -> Fingerprint { + match serialized.as_serialized(dep_node_index) { + Ok(sni) => serialized.fingerprints[sni], Err(split) => self.fingerprints[split], } } #[inline] - pub(crate) fn serialized_indices(&self) -> impl Iterator { - self.serialized.nodes.indices() - } - - #[inline] - fn live_serialized_indices(&self) -> impl Iterator + '_ { - self.serialized.edge_list_indices.iter_enumerated().filter_map(|(i, range)| { - // Return none if the node has not been coloured yet. - let _ = range.color()?; - Some(i) - }) - } - - #[inline] - fn new_indices(&self) -> impl Iterator + '_ { - self.nodes.indices().map(move |i| self.from_split(i)) + fn new_indices<'a>( + &self, + serialized: &'a SerializedDepGraph, + ) -> impl Iterator + 'a { + self.nodes.indices().map(move |i| serialized.from_split(i)) } #[inline] - pub(crate) fn live_indices(&self) -> impl Iterator + '_ { + pub(crate) fn live_indices<'a>( + &self, + serialized: &'a SerializedDepGraph, + ) -> impl Iterator + 'a { // New indices are always live. 
- self.live_serialized_indices() + serialized + .live_serialized_indices() .map(SerializedDepNodeIndex::rejuvenate) - .chain(self.new_indices()) + .chain(self.new_indices(serialized)) } #[inline] - pub(crate) fn node_count(&self) -> usize { - self.live_indices().count() + pub(crate) fn node_count(&self, serialized: &SerializedDepGraph) -> usize { + self.live_indices(serialized).count() } #[inline] - fn edge_map(&self) -> impl Iterator + '_ { - let serialized_edges = - self.live_serialized_indices().map(move |index| self.serialized_edges(index)); + fn edge_map<'a>( + &'a self, + serialized: &'a SerializedDepGraph, + ) -> impl Iterator + 'a { + let serialized_edges = serialized + .live_serialized_indices() + .map(move |index| self.serialized_edges(serialized, index)); let new_edges = self.edge_list_indices.iter().map(move |range| { let start = range.start as usize; let end = range.end as usize; @@ -491,20 +531,20 @@ impl CurrentDepGraph { } #[inline] - pub(crate) fn edge_count(&self) -> usize { - self.edge_map().flatten().count() + pub(crate) fn edge_count(&self, serialized: &SerializedDepGraph) -> usize { + self.edge_map(serialized).flatten().count() } - pub(crate) fn query(&self) -> DepGraphQuery { - let node_count = self.node_count(); - let edge_count = self.edge_count(); + pub(crate) fn query(&self, serialized: &SerializedDepGraph) -> DepGraphQuery { + let node_count = self.node_count(serialized); + let edge_count = self.edge_count(serialized); let mut nodes = Vec::with_capacity(node_count); - nodes.extend(self.live_indices().map(|i| self.dep_node_of(i))); + nodes.extend(self.live_indices(serialized).map(|i| self.index_to_node(serialized, i))); let mut edge_list_indices = Vec::with_capacity(node_count); let mut edge_list_data = Vec::with_capacity(edge_count); - for edges in self.edge_map() { + for edges in self.edge_map(serialized) { let start = edge_list_data.len(); edge_list_data.extend(edges.iter().map(|i| i.index() as usize)); let end = edge_list_data.len(); @@ -515,10 +555,13 @@ impl CurrentDepGraph { DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..]) } - pub(crate) fn compression_map(&self) -> IndexVec> { + pub(crate) fn compression_map( + &self, + serialized: &SerializedDepGraph, + ) -> IndexVec> { let mut new_index = SerializedDepNodeIndex::new(0); - let mut remap = IndexVec::from_elem_n(None, self.serialized.nodes.len() + self.nodes.len()); - for index in self.live_indices() { + let mut remap = IndexVec::from_elem_n(None, serialized.nodes.len() + self.nodes.len()); + for index in self.live_indices(serialized) { debug_assert!(new_index.index() <= index.index()); remap[index] = Some(new_index); new_index.increment_by(1); @@ -527,18 +570,25 @@ impl CurrentDepGraph { } } -impl> Encodable for CurrentDepGraph { - fn encode(&self, e: &mut E) -> Result<(), E::Error> { - let remap = self.compression_map(); +impl CurrentDepGraph { + pub(crate) fn encode( + &self, + serialized: &SerializedDepGraph, + e: &mut E, + ) -> Result<(), E::Error> + where + K: Encodable, + { + let remap = self.compression_map(serialized); let live_indices = || remap.iter_enumerated().filter_map(|(s, &n)| Some((s, n?))); let node_count = live_indices().count(); - let edge_count = self.edge_count(); + let edge_count = self.edge_count(serialized); e.emit_struct("SerializedDepGraph", 4, |e| { e.emit_struct_field("nodes", 0, |e| { e.emit_seq(node_count, |e| { for (index, new_index) in live_indices() { - let node = self.dep_node_of(index); + let node = self.index_to_node(serialized, index); 
e.emit_seq_elt(new_index.index(), |e| node.encode(e))?; } Ok(()) @@ -547,7 +597,7 @@ impl> Encodable for CurrentDepGraph e.emit_struct_field("fingerprints", 1, |e| { e.emit_seq(node_count, |e| { for (index, new_index) in live_indices() { - let node = self.fingerprint_of(index); + let node = self.fingerprint_of(serialized, index); e.emit_seq_elt(new_index.index(), |e| node.encode(e))?; } Ok(()) @@ -561,7 +611,7 @@ impl> Encodable for CurrentDepGraph e.emit_struct_field("edge_list_data", 2, |e| { e.emit_seq(edge_count, |e| { let mut pos: u32 = 0; - for (new_index, edges) in self.edge_map().enumerate() { + for (new_index, edges) in self.edge_map(serialized).enumerate() { // Reconstruct the edges vector since it may be out of order. // We only store the end indices, since the start can be reconstructed. for &edge in edges { From 91734f198bc32e56e29263227e2f86dc54ca8a88 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Wed, 24 Feb 2021 21:56:28 +0100 Subject: [PATCH 13/14] More cleanup. --- .../rustc_query_system/src/dep_graph/serialized.rs | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 71ad8db1bbbb5..6e998aa0638a7 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -89,16 +89,6 @@ impl ColorAndOffset { ColorAndOffset(AtomicU64::new(val)) } - #[allow(dead_code)] - fn new(color: DepNodeColor, range: Range) -> ColorAndOffset { - let start: u32 = range.start.try_into().unwrap(); - let end: u32 = range.end.try_into().unwrap(); - let val = (start as u64) << 32 | (end as u64); - debug_assert_eq!(val & TAG_MASK, 0); - let val = val | color.tag(); - ColorAndOffset(AtomicU64::new(val)) - } - fn set_lifted(&self, color: DepNodeColor, range: Range) { let start: u32 = range.start.try_into().unwrap(); let end: u32 = range.end.try_into().unwrap(); From 0c7de436d7160001b778e8d77e773af789ac46f3 Mon Sep 17 00:00:00 2001 From: Camille GILLOT Date: Thu, 25 Feb 2021 17:51:36 +0100 Subject: [PATCH 14/14] Hide debug function. --- compiler/rustc_query_system/src/dep_graph/graph.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 19deffdcf3b5a..e9394ea476dc6 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -392,6 +392,7 @@ impl DepGraph { } #[inline(always)] + #[cfg(debug_assertions)] pub(crate) fn register_dep_node_debug_str(&self, dep_node: DepNode, debug_str_gen: F) where F: FnOnce() -> String,
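
A closing note on the series: the serialized.rs changes in the lock-free patch rely on a split index space, where a single `DepNodeIndex` range covers both previous-session nodes and nodes interned this session, and `as_serialized`/`from_split` translate between the two halves by comparing against the serialized node count. The toy below reproduces that bookkeeping with plain `usize` indices and string node names; `SplitGraph`, `Lookup`, and `intern` are invented names for this sketch, not rustc API.

#[derive(Debug, PartialEq)]
enum Lookup {
    Serialized(usize), // index into the previous graph's arrays
    New(usize),        // index into the current session's vectors
}

struct SplitGraph {
    serialized_nodes: Vec<&'static str>,
    new_nodes: Vec<&'static str>,
}

impl SplitGraph {
    /// Indices below the serialized node count refer to previous-session
    /// nodes; the rest refer to nodes interned in the current session.
    fn as_split(&self, index: usize) -> Lookup {
        let count = self.serialized_nodes.len();
        if index < count { Lookup::Serialized(index) } else { Lookup::New(index - count) }
    }

    fn node(&self, index: usize) -> &'static str {
        match self.as_split(index) {
            Lookup::Serialized(i) => self.serialized_nodes[i],
            Lookup::New(i) => self.new_nodes[i],
        }
    }

    /// Interning a new node appends to the current-session vector and returns
    /// an index in the shared space, past all serialized nodes.
    fn intern(&mut self, name: &'static str) -> usize {
        self.new_nodes.push(name);
        self.serialized_nodes.len() + self.new_nodes.len() - 1
    }
}

fn main() {
    let mut graph = SplitGraph { serialized_nodes: vec!["a", "b"], new_nodes: vec![] };
    let c = graph.intern("c");
    assert_eq!(c, 2);
    assert_eq!(graph.node(0), "a");
    assert_eq!(graph.node(c), "c");
    assert_eq!(graph.as_split(c), Lookup::New(0));
}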