[[package]]
name = "arena"
version = "0.0.0"
+dependencies = [
+ "rustc_data_structures 0.0.0",
+]
[[package]]
name = "arrayvec"
name = "arena"
path = "lib.rs"
crate-type = ["dylib"]
+
+[dependencies]
+rustc_data_structures = { path = "../librustc_data_structures" }
\ No newline at end of file
#![allow(deprecated)]
extern crate alloc;
+extern crate rustc_data_structures;
+
+use rustc_data_structures::sync::MTLock;
use std::cell::{Cell, RefCell};
use std::cmp;
chunks: RefCell<Vec<TypedArenaChunk<u8>>>,
}
+unsafe impl Send for DroplessArena {}
+
impl DroplessArena {
pub fn new() -> DroplessArena {
DroplessArena {
}
}
+/// A thread-safe wrapper around `TypedArena`: all operations go through an
+/// internal `MTLock`, so allocation can be called via `&self` from multiple
+/// threads (assuming `MTLock` provides mutual exclusion in parallel builds).
+pub struct SyncTypedArena<T> {
+ lock: MTLock<TypedArena<T>>,
+}
+
+impl<T> SyncTypedArena<T> {
+ /// Creates an empty arena guarded by a fresh lock.
+ #[inline(always)]
+ pub fn new() -> SyncTypedArena<T> {
+ SyncTypedArena {
+ lock: MTLock::new(TypedArena::new())
+ }
+ }
+
+ /// Allocates `object` in the arena and returns a mutable reference to it.
+ #[inline(always)]
+ pub fn alloc(&self, object: T) -> &mut T {
+ // Extend the lifetime of the result since it's limited to the lock guard
+ // SAFETY(review): presumably sound because the arena never moves or frees
+ // an allocation while it is alive, and `clear` takes `&mut self`, so no
+ // other caller can invalidate the returned reference — TODO confirm
+ // against TypedArena's guarantees (not visible in this chunk).
+ unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
+ }
+
+ /// Copies `slice` into the arena and returns a mutable slice over the copy.
+ #[inline(always)]
+ pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
+ where
+ T: Copy,
+ {
+ // Extend the lifetime of the result since it's limited to the lock guard
+ // SAFETY(review): same reasoning as `alloc` above.
+ unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
+ }
+
+ /// Clears the underlying arena. Takes `&mut self`, which statically
+ /// guarantees no outstanding references from `alloc`/`alloc_slice` exist.
+ #[inline(always)]
+ pub fn clear(&mut self) {
+ self.lock.get_mut().clear();
+ }
+}
+
+/// A thread-safe wrapper around `DroplessArena`, serializing every operation
+/// through an `MTLock`. Mirrors `SyncTypedArena` but for values that need no
+/// drop glue. Note there is no `clear`: allocations live as long as the arena.
+pub struct SyncDroplessArena {
+ lock: MTLock<DroplessArena>,
+}
+
+impl SyncDroplessArena {
+ /// Creates an empty dropless arena guarded by a fresh lock.
+ #[inline(always)]
+ pub fn new() -> SyncDroplessArena {
+ SyncDroplessArena {
+ lock: MTLock::new(DroplessArena::new())
+ }
+ }
+
+ /// Returns whether `ptr` points into memory owned by this arena.
+ #[inline(always)]
+ pub fn in_arena<T: ?Sized>(&self, ptr: *const T) -> bool {
+ self.lock.lock().in_arena(ptr)
+ }
+
+ /// Allocates `object` in the arena and returns a mutable reference to it.
+ #[inline(always)]
+ pub fn alloc<T>(&self, object: T) -> &mut T {
+ // Extend the lifetime of the result since it's limited to the lock guard
+ // SAFETY(review): presumably sound because a DroplessArena allocation is
+ // never moved or freed before the arena itself is dropped, and this type
+ // exposes no `clear` — TODO confirm against DroplessArena's guarantees.
+ unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) }
+ }
+
+ /// Copies `slice` into the arena and returns a mutable slice over the copy.
+ #[inline(always)]
+ pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
+ where
+ T: Copy,
+ {
+ // Extend the lifetime of the result since it's limited to the lock guard
+ // SAFETY(review): same reasoning as `alloc` above.
+ unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) }
+ }
+}
+
#[cfg(test)]
mod tests {
extern crate test;
use hir::svh::Svh;
use util::nodemap::{DefIdMap, FxHashMap};
-use arena::TypedArena;
+use arena::SyncTypedArena;
use std::io;
use ty::TyCtxt;
pub struct Forest {
krate: Crate,
pub dep_graph: DepGraph,
- inlined_bodies: TypedArena<Body>
+ inlined_bodies: SyncTypedArena<Body>
}
impl Forest {
Forest {
krate,
dep_graph: dep_graph.clone(),
- inlined_bodies: TypedArena::new()
+ inlined_bodies: SyncTypedArena::new()
}
}
use syntax_pos::{self, Span};
use syntax_pos::symbol::InternedString;
use util::nodemap::FxHashMap;
-use arena::DroplessArena;
+use arena::SyncDroplessArena;
use self::combine::CombineFields;
use self::higher_ranked::HrMatchResult;
/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(InferCtxt<'b, 'gcx, 'tcx>).
pub struct InferCtxtBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
global_tcx: TyCtxt<'a, 'gcx, 'gcx>,
- arena: DroplessArena,
+ arena: SyncDroplessArena,
fresh_tables: Option<RefCell<ty::TypeckTables<'tcx>>>,
}
pub fn infer_ctxt(self) -> InferCtxtBuilder<'a, 'gcx, 'tcx> {
InferCtxtBuilder {
global_tcx: self,
- arena: DroplessArena::new(),
+ arena: SyncDroplessArena::new(),
fresh_tables: None,
}
use self::TargetLint::*;
use std::slice;
+use rustc_data_structures::sync::{RwLock, ReadGuard};
use lint::{EarlyLintPassObject, LateLintPassObject};
use lint::{Level, Lint, LintId, LintPass, LintBuffer};
use lint::builtin::BuiltinLintDiagnostics;
use util::nodemap::FxHashMap;
use std::default::Default as StdDefault;
-use std::cell::{Ref, RefCell};
use syntax::ast;
use syntax::edition;
use syntax_pos::{MultiSpan, Span};
pub struct LintSession<'a, PassObject> {
/// Reference to the store of registered lints.
- lints: Ref<'a, LintStore>,
+ lints: ReadGuard<'a, LintStore>,
/// Trait objects for each lint pass.
passes: Option<Vec<PassObject>>,
/// Creates a new `LintSession`, by moving out the `LintStore`'s initial
/// lint levels and pass objects. These can be restored using the `restore`
/// method.
- fn new(store: &'a RefCell<LintStore>) -> LintSession<'a, PassObject> {
+ fn new(store: &'a RwLock<LintStore>) -> LintSession<'a, PassObject> {
let mut s = store.borrow_mut();
let passes = PassObject::take_passes(&mut *s);
drop(s);
}
/// Restores the levels back to the original lint store.
- fn restore(self, store: &RefCell<LintStore>) {
+ fn restore(self, store: &RwLock<LintStore>) {
drop(self.lints);
let mut s = store.borrow_mut();
PassObject::restore_passes(&mut *s, self.passes);
use util::common::{duration_to_secs_str, ErrorReported};
use util::common::ProfileQueriesMsg;
-use rustc_data_structures::sync::{Lrc, Lock, LockCell, OneThread, Once};
+use rustc_data_structures::sync::{Lrc, Lock, LockCell, OneThread, Once, RwLock};
use syntax::ast::NodeId;
use errors::{self, DiagnosticBuilder, DiagnosticId};
// FIXME: lint_store and buffered_lints are not thread-safe,
// but are only used in a single thread
- pub lint_store: OneThread<RefCell<lint::LintStore>>,
- pub buffered_lints: OneThread<RefCell<Option<lint::LintBuffer>>>,
+ pub lint_store: RwLock<lint::LintStore>,
+ pub buffered_lints: Lock<Option<lint::LintBuffer>>,
/// Set of (DiagnosticId, Option<Span>, message) tuples tracking
/// (sub)diagnostics that have been set once, but should not be set again,
default_sysroot,
local_crate_source_file,
working_dir,
- lint_store: OneThread::new(RefCell::new(lint::LintStore::new())),
- buffered_lints: OneThread::new(RefCell::new(Some(lint::LintBuffer::new()))),
+ lint_store: RwLock::new(lint::LintStore::new()),
+ buffered_lints: Lock::new(Some(lint::LintBuffer::new())),
one_time_diagnostics: RefCell::new(FxHashSet()),
plugin_llvm_passes: OneThread::new(RefCell::new(Vec::new())),
plugin_attributes: OneThread::new(RefCell::new(Vec::new())),
use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap,
StableHasher, StableHasherResult,
StableVec};
-use arena::{TypedArena, DroplessArena};
+use arena::{TypedArena, SyncDroplessArena};
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::sync::{Lrc, Lock};
use std::any::Any;
use std::borrow::Borrow;
-use std::cell::Cell;
use std::cmp::Ordering;
use std::collections::hash_map::{self, Entry};
use std::hash::{Hash, Hasher};
pub struct AllArenas<'tcx> {
pub global: GlobalArenas<'tcx>,
- pub interner: DroplessArena,
+ pub interner: SyncDroplessArena,
}
impl<'tcx> AllArenas<'tcx> {
pub fn new() -> Self {
AllArenas {
global: GlobalArenas::new(),
- interner: DroplessArena::new(),
+ interner: SyncDroplessArena::new(),
}
}
}
pub struct CtxtInterners<'tcx> {
/// The arena that types, regions, etc are allocated from
- arena: &'tcx DroplessArena,
+ arena: &'tcx SyncDroplessArena,
/// Specifically use a speedy hash algorithm for these hash sets,
/// they're accessed quite often.
}
impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> {
- fn new(arena: &'tcx DroplessArena) -> CtxtInterners<'tcx> {
+ fn new(arena: &'tcx SyncDroplessArena) -> CtxtInterners<'tcx> {
CtxtInterners {
arena,
type_: Default::default(),
return ty;
}
let global_interner = global_interners.map(|interners| {
- interners.type_.borrow_mut()
+ (interners.type_.borrow_mut(), &interners.arena)
});
- if let Some(ref interner) = global_interner {
- if let Some(&Interned(ty)) = interner.get(&st) {
+ if let Some((ref type_, _)) = global_interner {
+ if let Some(&Interned(ty)) = type_.get(&st) {
return ty;
}
}
// determine that all contents are in the global tcx.
// See comments on Lift for why we can't use that.
if !flags.flags.intersects(ty::TypeFlags::KEEP_IN_LOCAL_TCX) {
- if let Some(interner) = global_interners {
+ if let Some((mut type_, arena)) = global_interner {
let ty_struct: TyS<'gcx> = unsafe {
mem::transmute(ty_struct)
};
- let ty: Ty<'gcx> = interner.arena.alloc(ty_struct);
- global_interner.unwrap().insert(Interned(ty));
+ let ty: Ty<'gcx> = arena.alloc(ty_struct);
+ type_.insert(Interned(ty));
return ty;
}
} else {
// Make sure we don't end up with inference
// types/regions in the global tcx.
- if global_interners.is_none() {
+ if global_interner.is_none() {
drop(interner);
bug!("Attempted to intern `{:?}` which contains \
inference types/regions in the global type context",
/// Data layout specification for the current target.
pub data_layout: TargetDataLayout,
- /// Used to prevent layout from recursing too deeply.
- pub layout_depth: Cell<usize>,
-
stability_interner: Lock<FxHashSet<&'tcx attr::Stability>>,
pub interpret_interner: InterpretInterner<'tcx>,
crate_name: Symbol::intern(crate_name),
data_layout,
layout_interner: Lock::new(FxHashSet()),
- layout_depth: Cell::new(0),
stability_interner: Lock::new(FxHashSet()),
interpret_interner: Default::default(),
tx_to_llvm_workers: Lock::new(tx),
/// Call the closure with a local `TyCtxt` using the given arena.
pub fn enter_local<F, R>(
&self,
- arena: &'tcx DroplessArena,
+ arena: &'tcx SyncDroplessArena,
f: F
) -> R
where
let new_icx = ty::tls::ImplicitCtxt {
tcx,
query: icx.query.clone(),
+ layout_depth: icx.layout_depth,
};
ty::tls::enter_context(&new_icx, |new_icx| {
f(new_icx.tcx)
/// The current query job, if any. This is updated by start_job in
/// ty::maps::plumbing when executing a query
pub query: Option<Lrc<maps::QueryJob<'gcx>>>,
+
+ /// Used to prevent layout from recursing too deeply.
+ pub layout_depth: usize,
}
// A thread local value which stores a pointer to the current ImplicitCtxt
let icx = ImplicitCtxt {
tcx,
query: None,
+ layout_depth: 0,
};
enter_context(&icx, |_| {
f(tcx)
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
- let (param_env, ty) = query.into_parts();
+ ty::tls::with_related_context(tcx, move |icx| {
+ let rec_limit = *tcx.sess.recursion_limit.get();
+ let (param_env, ty) = query.into_parts();
- let rec_limit = *tcx.sess.recursion_limit.get();
- let depth = tcx.layout_depth.get();
- if depth > rec_limit {
- tcx.sess.fatal(
- &format!("overflow representing the type `{}`", ty));
- }
+ if icx.layout_depth > rec_limit {
+ tcx.sess.fatal(
+ &format!("overflow representing the type `{}`", ty));
+ }
- tcx.layout_depth.set(depth+1);
- let cx = LayoutCx { tcx, param_env };
- let layout = cx.layout_raw_uncached(ty);
- tcx.layout_depth.set(depth);
+ // Update the ImplicitCtxt to increase the layout_depth
+ let icx = ty::tls::ImplicitCtxt {
+ layout_depth: icx.layout_depth + 1,
+ ..icx.clone()
+ };
- layout
+ ty::tls::enter_context(&icx, |_| {
+ let cx = LayoutCx { tcx, param_env };
+ cx.layout_raw_uncached(ty)
+ })
+ })
}
pub fn provide(providers: &mut ty::maps::Providers) {
use ty::maps::job::QueryResult;
use ty::codec::{self as ty_codec, TyDecoder, TyEncoder};
use ty::context::TyCtxt;
+use util::common::time;
const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
// Encode query results
let mut query_result_index = EncodedQueryResultIndex::new();
- {
+ time(tcx.sess, "encode query results", || {
use ty::maps::queries::*;
let enc = &mut encoder;
let qri = &mut query_result_index;
}
}
}
- }
+
+ Ok(())
+ })?;
// Encode diagnostics
let diagnostics_index = {
E: 'enc + TyEncoder,
Q::Value: Encodable,
{
+ let desc = &format!("encode_query_results for {}",
+ unsafe { ::std::intrinsics::type_name::<Q>() });
+
+ time(tcx.sess, desc, || {
+
for (key, entry) in Q::get_cache_internal(tcx).map.iter() {
if Q::cache_on_disk(key.clone()) {
let entry = match *entry {
}
Ok(())
+ })
}
let icx = ty::tls::ImplicitCtxt {
tcx,
query: Some(job.clone()),
+ layout_depth: icx.layout_depth,
};
// Use the ImplicitCtxt while we execute the query
let mut ecx = ExtCtxt::new(&sess.parse_sess, cfg, &mut resolver);
let err_count = ecx.parse_sess.span_diagnostic.err_count();
- let krate = ecx.monotonic_expander().expand_crate(krate);
+ let krate = time(sess, "expand crate", || {
+ ecx.monotonic_expander().expand_crate(krate)
+ });
- ecx.check_unused_macros();
+ time(sess, "check unused macros", || {
+ ecx.check_unused_macros();
+ });
let mut missing_fragment_specifiers: Vec<_> =
ecx.parse_sess.missing_fragment_specifiers.borrow().iter().cloned().collect();
time(sess, "persist dep-graph", || {
save_in(sess,
dep_graph_path(sess),
- |e| encode_dep_graph(tcx, e));
+ |e| {
+ time(sess, "encode dep-graph", || {
+ encode_dep_graph(tcx, e)
+ })
+ });
});
}
tcx.sess.opts.dep_tracking_hash().encode(encoder)?;
// Encode the graph data.
- let serialized_graph = tcx.dep_graph.serialize();
+ let serialized_graph = time(tcx.sess, "getting serialized graph", || {
+ tcx.dep_graph.serialize()
+ });
if tcx.sess.opts.debugging_opts.incremental_info {
#[derive(Clone)]
println!("[incremental]");
}
- serialized_graph.encode(encoder)?;
+ time(tcx.sess, "encoding serialized graph", || {
+ serialized_graph.encode(encoder)
+ })?;
Ok(())
}
fn encode_query_cache(tcx: TyCtxt,
encoder: &mut Encoder)
-> io::Result<()> {
- tcx.serialize_query_result_cache(encoder)
+ time(tcx.sess, "serialize query result cache", || {
+ tcx.serialize_query_result_cache(encoder)
+ })
}