This repository was archived by the owner on May 28, 2025. It is now read-only.

Commit e1c99e5

Remove the parallel version.
1 parent 8208872 commit e1c99e5

1 file changed, 13 insertions(+), 135 deletions(-)

compiler/rustc_query_system/src/dep_graph/serialized.rs

Lines changed: 13 additions & 135 deletions
@@ -4,18 +4,13 @@ use super::query::DepGraphQuery;
 use super::{DepKind, DepNode, DepNodeIndex};
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::{AtomicU32, Lock, Lrc, Ordering};
-use rustc_index::vec::IndexVec;
+use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_index::vec::{Idx, IndexVec};
 use rustc_serialize::opaque::{self, FileEncodeResult, FileEncoder, IntEncodedWithFixedSize};
 use rustc_serialize::{Decodable, Decoder, Encodable};
 use smallvec::SmallVec;
 use std::convert::TryInto;
 
-#[cfg(parallel_compiler)]
-use {
-    rustc_data_structures::sync::WorkerLocal, rustc_index::vec::Idx, std::sync::mpsc, std::thread,
-};
-
 // The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
 // unused so that we can store multiple index types in `CompressedHybridIndex`,
 // and use those bits to encode which index type it contains.
@@ -146,12 +141,8 @@ fn encode_node<K: DepKind>(
 ) -> FileEncodeResult {
     #[cfg(debug_assertions)]
     if let Some(record_graph) = &_record_graph {
-        if let Some(record_graph) = &mut if cfg!(parallel_compiler) {
-            Some(record_graph.lock())
-        } else {
-            // Do not ICE when a query is called from within `with_query`.
-            record_graph.try_lock()
-        } {
+        // Do not ICE when a query is called from within `with_query`.
+        if let Some(record_graph) = &mut record_graph.try_lock() {
            record_graph.push(_index, node.node, &node.edges);
        }
    }
@@ -190,19 +181,8 @@ fn encode_counts(
     encoder.flush()
 }
 
-#[cfg(not(parallel_compiler))]
 pub struct GraphEncoder<K: DepKind> {
-    status: Lock<(FileEncoder, usize, FileEncodeResult)>,
-    counter: AtomicU32,
-    record_graph: Option<Lrc<Lock<DepGraphQuery<K>>>>,
-    record_stats: Option<Lrc<Lock<Stats<K>>>>,
-}
-
-#[cfg(parallel_compiler)]
-pub struct GraphEncoder<K: DepKind> {
-    send: WorkerLocal<mpsc::Sender<(DepNodeIndex, NodeInfo<K>)>>,
-    thread: thread::JoinHandle<FileEncodeResult>,
-    counter: AtomicU32,
+    status: Lock<(FileEncoder, DepNodeIndex, usize, FileEncodeResult)>,
     record_graph: Option<Lrc<Lock<DepGraphQuery<K>>>>,
     record_stats: Option<Lrc<Lock<Stats<K>>>>,
 }
@@ -228,29 +208,8 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
         } else {
             None
         };
-        let counter = AtomicU32::new(0);
-
-        #[cfg(not(parallel_compiler))]
-        {
-            let status = Lock::new((encoder, 0, Ok(())));
-            GraphEncoder { status, counter, record_graph, record_stats }
-        }
-        #[cfg(parallel_compiler)]
-        {
-            let (send, recv) = mpsc::channel();
-            let thread = {
-                let record_graph = record_graph.clone();
-                let record_stats = record_stats.clone();
-                thread::spawn(move || {
-                    encode_graph(encoder, recv, |encoder, index, node| {
-                        encode_node(encoder, index, node, &record_graph, &record_stats)
-                    })
-                })
-            };
-            let send = WorkerLocal::new(move |_| send.clone());
-
-            GraphEncoder { send, thread, counter, record_graph, record_stats }
-        }
+        let status = Lock::new((encoder, DepNodeIndex::new(0), 0, Ok(())));
+        GraphEncoder { status, record_graph, record_stats }
     }
 
     pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
@@ -314,19 +273,17 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
             eprintln!("[incremental]");
         }
     }
-}
 
-#[cfg(not(parallel_compiler))]
-impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
     pub(crate) fn send(
         &self,
         node: DepNode<K>,
        fingerprint: Fingerprint,
        edges: SmallVec<[DepNodeIndex; 8]>,
    ) -> DepNodeIndex {
-        let index = self.counter.fetch_add(1, Ordering::SeqCst);
-        let index = DepNodeIndex::from_u32(index);
-        let &mut (ref mut encoder, ref mut edge_count, ref mut result) = &mut *self.status.lock();
+        let &mut (ref mut encoder, ref mut next_index, ref mut edge_count, ref mut result) =
+            &mut *self.status.lock();
+        let index = next_index.clone();
+        next_index.increment_by(1);
         *edge_count += edges.len();
         *result = std::mem::replace(result, Ok(())).and_then(|()| {
             let node = NodeInfo { node, fingerprint, edges };
@@ -336,89 +293,10 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
     }
 
     pub fn finish(self) -> FileEncodeResult {
-        let (encoder, edge_count, result) = self.status.into_inner();
+        let (encoder, node_count, edge_count, result) = self.status.into_inner();
         let () = result?;
-        let node_count = self.counter.into_inner() as usize;
+        let node_count = node_count.index();
 
         encode_counts(encoder, node_count, edge_count)
     }
 }
-
-#[cfg(parallel_compiler)]
-impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
-    pub(crate) fn send(
-        &self,
-        node: DepNode<K>,
-        fingerprint: Fingerprint,
-        edges: SmallVec<[DepNodeIndex; 8]>,
-    ) -> DepNodeIndex {
-        let node = NodeInfo { node, fingerprint, edges };
-        let index = self.counter.fetch_add(1, Ordering::SeqCst);
-        let index = DepNodeIndex::from_u32(index);
-        self.send.send((index, node)).unwrap();
-        index
-    }
-
-    pub fn finish(self) -> FileEncodeResult {
-        std::mem::drop(self.send);
-        self.thread.join().unwrap()
-    }
-}
-
-#[cfg(parallel_compiler)]
-#[instrument(skip(encoder, recv, process))]
-fn encode_graph<K: DepKind + Encodable<FileEncoder>>(
-    mut encoder: FileEncoder,
-    recv: mpsc::Receiver<(DepNodeIndex, NodeInfo<K>)>,
-    process: impl Fn(&mut FileEncoder, DepNodeIndex, &NodeInfo<K>) -> FileEncodeResult,
-) -> FileEncodeResult {
-    let mut edge_count: usize = 0;
-    let node_count: usize = ordered_recv(recv, |index, node| {
-        edge_count += node.edges.len();
-        process(&mut encoder, index, node)
-    })?;
-
-    encode_counts(encoder, node_count, edge_count)
-}
-
-/// Since there are multiple producers assigning the DepNodeIndex using an atomic,
-/// the messages may not arrive in order. This function sorts them as they come.
-#[cfg(parallel_compiler)]
-fn ordered_recv<K: DepKind + Encodable<opaque::FileEncoder>>(
-    recv: mpsc::Receiver<(DepNodeIndex, NodeInfo<K>)>,
-    mut f: impl FnMut(DepNodeIndex, &NodeInfo<K>) -> FileEncodeResult,
-) -> Result<usize, std::io::Error> {
-    let mut pending = Vec::<(DepNodeIndex, _)>::new();
-    let mut expected = DepNodeIndex::new(0);
-
-    // INVARIANT: No message can arrive with an index less than `expected`.
-    'outer: loop {
-        pending.sort_by_key(|n| n.0);
-        for (index, node) in pending.drain_filter(|(index, _)| {
-            if *index == expected {
-                expected.increment_by(1);
-                true
-            } else {
-                false
-            }
-        }) {
-            f(index, &node)?;
-        }
-
-        while let Ok((index, node)) = recv.recv() {
-            if index > expected {
-                pending.push((index, node));
-            } else if index == expected {
-                f(index, &node)?;
-                expected.increment_by(1);
-                continue 'outer;
-            } else {
-                panic!("Unexpected index {:?} while waiting for {:?}", index, expected);
-            }
-        }
-
-        break;
-    }
-
-    Ok(expected.as_u32() as usize)
-}
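Note: after this change, `send` allocates the next `DepNodeIndex` and writes the node under the same lock, so records are emitted in index order by construction and the atomic counter becomes unnecessary. A minimal sketch of that scheme, using `std::sync::Mutex` and a `Vec<String>` sink as illustrative stand-ins for rustc's `Lock` and `FileEncoder` (the `Encoder` type here is hypothetical, not the compiler's API):

use std::sync::Mutex;

struct Encoder {
    // One lock guards (sink, next_index, edge_count), mirroring the new
    // `status: Lock<(FileEncoder, DepNodeIndex, usize, FileEncodeResult)>`.
    status: Mutex<(Vec<String>, u32, usize)>,
}

impl Encoder {
    fn send(&self, node: String, edges: usize) -> u32 {
        let mut guard = self.status.lock().unwrap();
        let (sink, next_index, edge_count) = &mut *guard;
        // Index allocation and encoding happen under the same lock, so
        // nodes land in the sink in strictly increasing index order.
        let index = *next_index;
        *next_index += 1;
        *edge_count += edges;
        sink.push(node);
        index
    }

    fn finish(self) -> (usize, usize) {
        // The node count falls out of the final index value, as in the patch.
        let (_sink, next_index, edge_count) = self.status.into_inner().unwrap();
        (next_index as usize, edge_count)
    }
}

fn main() {
    let enc = Encoder { status: Mutex::new((Vec::new(), 0, 0)) };
    assert_eq!(enc.send("a".into(), 2), 0);
    assert_eq!(enc.send("b".into(), 1), 1);
    assert_eq!(enc.finish(), (2, 3));
}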

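For contrast, the deleted parallel path tagged each message with an index taken from an atomic counter, so the encoder thread's `ordered_recv` had to buffer early arrivals and replay them in index order. A standalone sketch of that pattern on stable Rust, buffering in a `BinaryHeap` instead of the unstable `Vec::drain_filter` the original used, with `u32` and `String` as illustrative stand-ins for `DepNodeIndex` and `NodeInfo<K>`:

use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::sync::mpsc;

/// Replay `(index, payload)` messages in index order, buffering early
/// arrivals. Returns the number of messages processed.
fn ordered_recv(recv: mpsc::Receiver<(u32, String)>, mut f: impl FnMut(u32, &str)) -> usize {
    let mut pending = BinaryHeap::new(); // min-heap via `Reverse`
    let mut expected = 0u32;
    while let Ok((index, node)) = recv.recv() {
        // Invariant from the original: nothing below `expected` can arrive.
        assert!(index >= expected, "stale index {index}, expected at least {expected}");
        pending.push(Reverse((index, node)));
        // Drain every buffered message whose turn has come.
        while pending.peek().map_or(false, |Reverse((i, _))| *i == expected) {
            let Reverse((index, node)) = pending.pop().unwrap();
            f(index, &node);
            expected += 1;
        }
    }
    expected as usize
}

fn main() {
    let (send, recv) = mpsc::channel();
    // Simulate producers finishing out of order.
    for i in [2u32, 0, 3, 1] {
        send.send((i, format!("node-{i}"))).unwrap();
    }
    drop(send); // close the channel so `recv()` eventually returns Err
    assert_eq!(ordered_recv(recv, |i, s| println!("{i}: {s}")), 4);
}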