[[package]]
name = "backtrace"
-version = "0.3.44"
+version = "0.3.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4036b9bf40f3cf16aba72a3d65e8a520fc4bafcdc7079aea8f848c58c5b5536"
+checksum = "ad235dabf00f36301792cfe82499880ba54c6486be094d1047b02bacb67c14e8"
dependencies = [
"backtrace-sys",
"cfg-if",
[[package]]
name = "backtrace-sys"
-version = "0.1.32"
+version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491"
+checksum = "ca797db0057bae1a7aa2eef3283a874695455cecf08a43bfb8507ee0ebc1ed69"
dependencies = [
"cc",
"compiler_builtins",
if self.clean and os.path.exists(build_dir):
shutil.rmtree(build_dir)
env = os.environ.copy()
+ # `CARGO_BUILD_TARGET` breaks bootstrap build.
+ # See also: <https://github.com/rust-lang/rust/issues/70208>.
+ if "CARGO_BUILD_TARGET" in env:
+ del env["CARGO_BUILD_TARGET"]
env["RUSTC_BOOTSTRAP"] = '1'
env["CARGO_TARGET_DIR"] = build_dir
env["RUSTC"] = self.rustc()
def children_of_node(boxed_node, height, want_values):
node_ptr = boxed_node['ptr']['pointer']
if height > 0:
- type_name = str(node_ptr.type.target()).replace('LeafNode', 'InternalNode')
+ type_name = str(node_ptr.type.target()).replace('LeafNode', 'InternalNode', 1)
node_type = gdb.lookup_type(type_name)
node_ptr = node_ptr.cast(node_type.pointer())
leaf = node_ptr['data']
#[cfg(test)]
mod tests;
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
struct RcBox<T: ?Sized> {
strong: Cell<usize>,
weak: Cell<usize>,
}
}
- /// Constructs an `Rc` from a raw pointer.
+ /// Constructs an `Rc<T>` from a raw pointer.
///
- /// The raw pointer must have been previously returned by a call to a
- /// [`Rc::into_raw`][into_raw].
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+ /// and alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
///
- /// This function is unsafe because improper use may lead to memory problems. For example, a
- /// double-free may occur if the function is called twice on the same raw pointer.
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Rc<T>` is never accessed.
///
/// [into_raw]: struct.Rc.html#method.into_raw
+ /// [transmute]: ../../std/mem/fn.transmute.html
///
/// # Examples
///
}
}
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
struct ArcInner<T: ?Sized> {
strong: atomic::AtomicUsize,
}
}
- /// Constructs an `Arc` from a raw pointer.
+ /// Constructs an `Arc<T>` from a raw pointer.
///
- /// The raw pointer must have been previously returned by a call to a
- /// [`Arc::into_raw`][into_raw].
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+ /// alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
///
- /// This function is unsafe because improper use may lead to memory problems. For example, a
- /// double-free may occur if the function is called twice on the same raw pointer.
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Arc<T>` is never accessed.
///
/// [into_raw]: struct.Arc.html#method.into_raw
+ /// [transmute]: ../../std/mem/fn.transmute.html
///
/// # Examples
///
}
fn find_entry(&self, id: HirId) -> Option<Entry<'hir>> {
- Some(self.get_entry(id))
- }
-
- fn get_entry(&self, id: HirId) -> Entry<'hir> {
if id.local_id == ItemLocalId::from_u32(0) {
let owner = self.tcx.hir_owner(id.owner);
- Entry { parent: owner.parent, node: owner.node }
+ owner.map(|owner| Entry { parent: owner.parent, node: owner.node })
} else {
let owner = self.tcx.hir_owner_nodes(id.owner);
- let node = owner.nodes[id.local_id].as_ref().unwrap();
- // FIXME(eddyb) use a single generic type insted of having both
- // `Entry` and `ParentedNode`, which are effectively the same.
- // Alternatively, rewrite code using `Entry` to use `ParentedNode`.
- Entry { parent: HirId { owner: id.owner, local_id: node.parent }, node: node.node }
+ owner.and_then(|owner| {
+ let node = owner.nodes[id.local_id].as_ref();
+            // FIXME(eddyb) use a single generic type instead of having both
+ // `Entry` and `ParentedNode`, which are effectively the same.
+ // Alternatively, rewrite code using `Entry` to use `ParentedNode`.
+ node.map(|node| Entry {
+ parent: HirId { owner: id.owner, local_id: node.parent },
+ node: node.node,
+ })
+ })
}
}
+ fn get_entry(&self, id: HirId) -> Entry<'hir> {
+ self.find_entry(id).unwrap()
+ }
+
pub fn item(&self, id: HirId) -> &'hir Item<'hir> {
match self.find(id).unwrap() {
Node::Item(item) => item,
}
pub fn body(&self, id: BodyId) -> &'hir Body<'hir> {
- self.tcx.hir_owner_nodes(id.hir_id.owner).bodies.get(&id.hir_id.local_id).unwrap()
+ self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies.get(&id.hir_id.local_id).unwrap()
}
pub fn fn_decl_by_hir_id(&self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
/// Retrieves the `Node` corresponding to `id`, returning `None` if cannot be found.
pub fn find(&self, hir_id: HirId) -> Option<Node<'hir>> {
- let node = self.get_entry(hir_id).node;
- if let Node::Crate(..) = node { None } else { Some(node) }
+ self.find_entry(hir_id).and_then(|entry| {
+ if let Node::Crate(..) = entry.node { None } else { Some(entry.node) }
+ })
}
/// Similar to `get_parent`; returns the parent HIR Id, or just `hir_id` if there
let module = hir.as_local_hir_id(id.to_def_id()).unwrap();
&tcx.untracked_crate.modules[&module]
};
- providers.hir_owner = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].signature.unwrap();
- providers.hir_owner_nodes = |tcx, id| {
- tcx.index_hir(LOCAL_CRATE).map[id].with_bodies.as_ref().map(|nodes| &**nodes).unwrap()
- };
+ providers.hir_owner = |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].signature;
+ providers.hir_owner_nodes =
+ |tcx, id| tcx.index_hir(LOCAL_CRATE).map[id].with_bodies.as_ref().map(|nodes| &**nodes);
map::provide(providers);
}
//
// This can be conveniently accessed by methods on `tcx.hir()`.
// Avoid calling this query directly.
- query hir_owner(key: LocalDefId) -> &'tcx crate::hir::Owner<'tcx> {
+ query hir_owner(key: LocalDefId) -> Option<&'tcx crate::hir::Owner<'tcx>> {
eval_always
desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
}
//
// This can be conveniently accessed by methods on `tcx.hir()`.
// Avoid calling this query directly.
- query hir_owner_nodes(key: LocalDefId) -> &'tcx crate::hir::OwnerNodes<'tcx> {
+ query hir_owner_nodes(key: LocalDefId) -> Option<&'tcx crate::hir::OwnerNodes<'tcx>> {
eval_always
desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
}
}
pub struct LocalTableInContext<'a, V> {
- local_id_root: Option<DefId>,
+ hir_owner: Option<LocalDefId>,
data: &'a ItemLocalMap<V>,
}
/// Validate that the given HirId (respectively its `local_id` part) can be
/// safely used as a key in the tables of a TypeckTable. For that to be
/// the case, the HirId must have the same `owner` as all the other IDs in
-/// this table (signified by `local_id_root`). Otherwise the HirId
+/// this table (signified by `hir_owner`). Otherwise the HirId
/// would be in a different frame of reference and using its `local_id`
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
fn validate_hir_id_for_typeck_tables(
- local_id_root: Option<DefId>,
+ hir_owner: Option<LocalDefId>,
hir_id: hir::HirId,
mut_access: bool,
) {
- if let Some(local_id_root) = local_id_root {
- if hir_id.owner.to_def_id() != local_id_root {
+ if let Some(hir_owner) = hir_owner {
+ if hir_id.owner != hir_owner {
ty::tls::with(|tcx| {
bug!(
"node {} with HirId::owner {:?} cannot be placed in \
- TypeckTables with local_id_root {:?}",
+ TypeckTables with hir_owner {:?}",
tcx.hir().node_to_string(hir_id),
hir_id.owner,
- local_id_root
+ hir_owner
)
});
}
} else {
// We use "Null Object" TypeckTables in some of the analysis passes.
- // These are just expected to be empty and their `local_id_root` is
+ // These are just expected to be empty and their `hir_owner` is
// `None`. Therefore we cannot verify whether a given `HirId` would
// be a valid key for the given table. Instead we make sure that
// nobody tries to write to such a Null Object table.
impl<'a, V> LocalTableInContext<'a, V> {
pub fn contains_key(&self, id: hir::HirId) -> bool {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.data.contains_key(&id.local_id)
}
pub fn get(&self, id: hir::HirId) -> Option<&V> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.data.get(&id.local_id)
}
}
pub struct LocalTableInContextMut<'a, V> {
- local_id_root: Option<DefId>,
+ hir_owner: Option<LocalDefId>,
data: &'a mut ItemLocalMap<V>,
}
impl<'a, V> LocalTableInContextMut<'a, V> {
pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, true);
self.data.get_mut(&id.local_id)
}
pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, true);
self.data.entry(id.local_id)
}
pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, true);
self.data.insert(id.local_id, val)
}
pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, true);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, true);
self.data.remove(&id.local_id)
}
}
#[derive(RustcEncodable, RustcDecodable, Debug)]
pub struct TypeckTables<'tcx> {
- /// The HirId::owner all ItemLocalIds in this table are relative to.
- pub local_id_root: Option<DefId>,
+ /// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
+ pub hir_owner: Option<LocalDefId>,
/// Resolved definitions for `<T>::X` associated paths and
/// method calls, including those of overloaded operators.
}
impl<'tcx> TypeckTables<'tcx> {
- pub fn empty(local_id_root: Option<DefId>) -> TypeckTables<'tcx> {
+ pub fn empty(hir_owner: Option<LocalDefId>) -> TypeckTables<'tcx> {
TypeckTables {
- local_id_root,
+ hir_owner,
type_dependent_defs: Default::default(),
field_indices: Default::default(),
user_provided_types: Default::default(),
pub fn type_dependent_defs(
&self,
) -> LocalTableInContext<'_, Result<(DefKind, DefId), ErrorReported>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.type_dependent_defs }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.type_dependent_defs }
}
pub fn type_dependent_def(&self, id: HirId) -> Option<(DefKind, DefId)> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.type_dependent_defs.get(&id.local_id).cloned().and_then(|r| r.ok())
}
pub fn type_dependent_defs_mut(
&mut self,
) -> LocalTableInContextMut<'_, Result<(DefKind, DefId), ErrorReported>> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.type_dependent_defs,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.type_dependent_defs }
}
pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.field_indices }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.field_indices }
}
pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
- LocalTableInContextMut { local_id_root: self.local_id_root, data: &mut self.field_indices }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.field_indices }
}
pub fn user_provided_types(&self) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.user_provided_types }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.user_provided_types }
}
pub fn user_provided_types_mut(
&mut self,
) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.user_provided_types,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.user_provided_types }
}
pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.node_types }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.node_types }
}
pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
- LocalTableInContextMut { local_id_root: self.local_id_root, data: &mut self.node_types }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
}
pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
}
pub fn node_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.node_types.get(&id.local_id).cloned()
}
pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
- LocalTableInContextMut { local_id_root: self.local_id_root, data: &mut self.node_substs }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
}
pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
}
pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
- validate_hir_id_for_typeck_tables(self.local_id_root, id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, id, false);
self.node_substs.get(&id.local_id).cloned()
}
}
pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.adjustments }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.adjustments }
}
pub fn adjustments_mut(
&mut self,
) -> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
- LocalTableInContextMut { local_id_root: self.local_id_root, data: &mut self.adjustments }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.adjustments }
}
pub fn expr_adjustments(&self, expr: &hir::Expr<'_>) -> &[ty::adjustment::Adjustment<'tcx>] {
- validate_hir_id_for_typeck_tables(self.local_id_root, expr.hir_id, false);
+ validate_hir_id_for_typeck_tables(self.hir_owner, expr.hir_id, false);
self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
}
}
pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.pat_binding_modes }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_binding_modes }
}
pub fn pat_binding_modes_mut(&mut self) -> LocalTableInContextMut<'_, BindingMode> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.pat_binding_modes,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_binding_modes }
}
pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.pat_adjustments }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_adjustments }
}
pub fn pat_adjustments_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.pat_adjustments,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
}
pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
}
pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, ast::Name)> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.closure_kind_origins }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
}
pub fn closure_kind_origins_mut(&mut self) -> LocalTableInContextMut<'_, (Span, ast::Name)> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.closure_kind_origins,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.closure_kind_origins }
}
pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.liberated_fn_sigs }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.liberated_fn_sigs }
}
pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.liberated_fn_sigs,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.liberated_fn_sigs }
}
pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
- LocalTableInContext { local_id_root: self.local_id_root, data: &self.fru_field_types }
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.fru_field_types }
}
pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
- LocalTableInContextMut {
- local_id_root: self.local_id_root,
- data: &mut self.fru_field_types,
- }
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.fru_field_types }
}
pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool {
- validate_hir_id_for_typeck_tables(self.local_id_root, hir_id, true);
+ validate_hir_id_for_typeck_tables(self.hir_owner, hir_id, true);
self.coercion_casts.contains(&hir_id.local_id)
}
impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for TypeckTables<'tcx> {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
let ty::TypeckTables {
- local_id_root,
+ hir_owner,
ref type_dependent_defs,
ref field_indices,
ref user_provided_types,
hash_stable_hashmap(hcx, hasher, upvar_capture_map, |up_var_id, hcx| {
let ty::UpvarId { var_path, closure_expr_id } = *up_var_id;
- let local_id_root = local_id_root.expect("trying to hash invalid TypeckTables");
+ assert_eq!(Some(var_path.hir_id.owner), hir_owner);
- let var_owner_def_id = DefId {
- krate: local_id_root.krate,
- index: var_path.hir_id.owner.local_def_index,
- };
- let closure_def_id =
- DefId { krate: local_id_root.krate, index: closure_expr_id.local_def_index };
(
- hcx.def_path_hash(var_owner_def_id),
+ hcx.local_def_path_hash(var_path.hir_id.owner),
var_path.hir_id.local_id,
- hcx.def_path_hash(closure_def_id),
+ hcx.local_def_path_hash(closure_expr_id),
)
});
| InstanceDef::VtableShim(..) => Some(self.substs),
}
}
-
- pub fn is_vtable_shim(&self) -> bool {
- if let InstanceDef::VtableShim(..) = self.def { true } else { false }
- }
}
fn needs_fn_once_adapter_shim(
use crate::dep_graph::DepNodeIndex;
-use crate::ty::query::config::QueryAccessors;
use crate::ty::query::plumbing::{QueryLookup, QueryState, QueryStateShard};
use crate::ty::TyCtxt;
use rustc_data_structures::sharded::Sharded;
use std::default::Default;
use std::hash::Hash;
+use std::marker::PhantomData;
pub(crate) trait CacheSelector<K, V> {
- type Cache: QueryCache<K, V>;
+ type Cache: QueryCache<Key = K, Value = V>;
}
-pub(crate) trait QueryCache<K, V>: Default {
+pub(crate) trait QueryCache: Default {
+ type Key;
+ type Value;
type Sharded: Default;
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
- fn lookup<'tcx, R, GetCache, OnHit, OnMiss, Q>(
+ fn lookup<'tcx, R, GetCache, OnHit, OnMiss>(
&self,
- state: &'tcx QueryState<'tcx, Q>,
+ state: &'tcx QueryState<'tcx, Self>,
get_cache: GetCache,
- key: K,
+ key: Self::Key,
// `on_hit` can be called while holding a lock to the query state shard.
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
- Q: QueryAccessors<'tcx>,
- GetCache: for<'a> Fn(&'a mut QueryStateShard<'tcx, Q>) -> &'a mut Self::Sharded,
- OnHit: FnOnce(&V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'tcx, Q>) -> R;
+ GetCache: for<'a> Fn(
+ &'a mut QueryStateShard<'tcx, Self::Key, Self::Sharded>,
+ ) -> &'a mut Self::Sharded,
+ OnHit: FnOnce(&Self::Value, DepNodeIndex) -> R,
+ OnMiss: FnOnce(Self::Key, QueryLookup<'tcx, Self::Key, Self::Sharded>) -> R;
fn complete(
&self,
tcx: TyCtxt<'tcx>,
lock_sharded_storage: &mut Self::Sharded,
- key: K,
- value: V,
+ key: Self::Key,
+ value: Self::Value,
index: DepNodeIndex,
);
&self,
shards: &Sharded<L>,
get_shard: impl Fn(&mut L) -> &mut Self::Sharded,
- f: impl for<'a> FnOnce(Box<dyn Iterator<Item = (&'a K, &'a V, DepNodeIndex)> + 'a>) -> R,
+ f: impl for<'a> FnOnce(
+ Box<dyn Iterator<Item = (&'a Self::Key, &'a Self::Value, DepNodeIndex)> + 'a>,
+ ) -> R,
) -> R;
}
pub struct DefaultCacheSelector;
impl<K: Eq + Hash, V: Clone> CacheSelector<K, V> for DefaultCacheSelector {
- type Cache = DefaultCache;
+ type Cache = DefaultCache<K, V>;
}
-#[derive(Default)]
-pub struct DefaultCache;
+pub struct DefaultCache<K, V>(PhantomData<(K, V)>);
+
+impl<K, V> Default for DefaultCache<K, V> {
+ fn default() -> Self {
+ DefaultCache(PhantomData)
+ }
+}
-impl<K: Eq + Hash, V: Clone> QueryCache<K, V> for DefaultCache {
+impl<K: Eq + Hash, V: Clone> QueryCache for DefaultCache<K, V> {
+ type Key = K;
+ type Value = V;
type Sharded = FxHashMap<K, (V, DepNodeIndex)>;
#[inline(always)]
- fn lookup<'tcx, R, GetCache, OnHit, OnMiss, Q>(
+ fn lookup<'tcx, R, GetCache, OnHit, OnMiss>(
&self,
- state: &'tcx QueryState<'tcx, Q>,
+ state: &'tcx QueryState<'tcx, Self>,
get_cache: GetCache,
key: K,
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
- Q: QueryAccessors<'tcx>,
- GetCache: for<'a> Fn(&'a mut QueryStateShard<'tcx, Q>) -> &'a mut Self::Sharded,
+ GetCache:
+ for<'a> Fn(&'a mut QueryStateShard<'tcx, K, Self::Sharded>) -> &'a mut Self::Sharded,
OnHit: FnOnce(&V, DepNodeIndex) -> R,
- OnMiss: FnOnce(K, QueryLookup<'tcx, Q>) -> R,
+ OnMiss: FnOnce(K, QueryLookup<'tcx, K, Self::Sharded>) -> R,
{
let mut lookup = state.get_lookup(&key);
let lock = &mut *lookup.lock;
use crate::dep_graph::{DepKind, DepNode};
use crate::ty::query::caches::QueryCache;
use crate::ty::query::plumbing::CycleError;
-use crate::ty::query::{Query, QueryState};
+use crate::ty::query::QueryState;
use crate::ty::TyCtxt;
use rustc_data_structures::profiling::ProfileCategory;
use rustc_hir::def_id::DefId;
pub(crate) trait QueryAccessors<'tcx>: QueryConfig<'tcx> {
const ANON: bool;
const EVAL_ALWAYS: bool;
+ const DEP_KIND: DepKind;
- type Cache: QueryCache<Self::Key, Self::Value>;
-
- fn query(key: Self::Key) -> Query<'tcx>;
+ type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(tcx: TyCtxt<'tcx>) -> &'a QueryState<'tcx, Self>;
+ fn query_state<'a>(tcx: TyCtxt<'tcx>) -> &'a QueryState<'tcx, Self::Cache>;
fn to_dep_node(tcx: TyCtxt<'tcx>, key: &Self::Key) -> DepNode;
- fn dep_kind() -> DepKind;
-
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value;
}
}
-impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M
-where
- <M as QueryAccessors<'tcx>>::Cache: QueryCache<DefId, <M as QueryConfig<'tcx>>::Value>,
-{
+impl<'tcx, M: QueryAccessors<'tcx, Key = DefId>> QueryDescription<'tcx> for M {
default fn describe(tcx: TyCtxt<'_>, def_id: DefId) -> Cow<'static, str> {
if !tcx.sess.verbose() {
format!("processing `{}`", tcx.def_path_str(def_id)).into()
use rustc_span::{Span, DUMMY_SP};
use std::borrow::Cow;
use std::collections::BTreeMap;
-use std::convert::TryFrom;
use std::ops::Deref;
use std::sync::Arc;
#[macro_use]
mod plumbing;
-pub use self::plumbing::CycleError;
+pub(crate) use self::plumbing::CycleError;
use self::plumbing::*;
mod stats;
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
-use crate::dep_graph::{DepNode, DepNodeIndex, SerializedDepNodeIndex};
+use crate::dep_graph::{DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
use crate::ty::query::caches::QueryCache;
-use crate::ty::query::config::{QueryAccessors, QueryDescription};
-use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryShardJobId};
+use crate::ty::query::config::QueryDescription;
+use crate::ty::query::job::{QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryShardJobId};
use crate::ty::query::Query;
use crate::ty::tls;
use crate::ty::{self, TyCtxt};
use rustc_span::source_map::DUMMY_SP;
use rustc_span::Span;
use std::collections::hash_map::Entry;
+use std::convert::TryFrom;
+use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::mem;
use std::num::NonZeroU32;
#[cfg(debug_assertions)]
use std::sync::atomic::{AtomicUsize, Ordering};
-pub(crate) struct QueryStateShard<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
- pub(super) cache: <<D as QueryAccessors<'tcx>>::Cache as QueryCache<D::Key, D::Value>>::Sharded,
- pub(super) active: FxHashMap<D::Key, QueryResult<'tcx>>,
+pub(crate) struct QueryStateShard<'tcx, K, C> {
+ cache: C,
+ active: FxHashMap<K, QueryResult<'tcx>>,
/// Used to generate unique ids for active jobs.
- pub(super) jobs: u32,
+ jobs: u32,
}
-impl<'tcx, Q: QueryAccessors<'tcx>> QueryStateShard<'tcx, Q> {
- fn get_cache(
- &mut self,
- ) -> &mut <<Q as QueryAccessors<'tcx>>::Cache as QueryCache<Q::Key, Q::Value>>::Sharded {
+impl<'tcx, K, C> QueryStateShard<'tcx, K, C> {
+ fn get_cache(&mut self) -> &mut C {
&mut self.cache
}
}
-impl<'tcx, Q: QueryAccessors<'tcx>> Default for QueryStateShard<'tcx, Q> {
- fn default() -> QueryStateShard<'tcx, Q> {
+impl<'tcx, K, C: Default> Default for QueryStateShard<'tcx, K, C> {
+ fn default() -> QueryStateShard<'tcx, K, C> {
QueryStateShard { cache: Default::default(), active: Default::default(), jobs: 0 }
}
}
-pub(crate) struct QueryState<'tcx, D: QueryAccessors<'tcx> + ?Sized> {
- pub(super) cache: D::Cache,
- pub(super) shards: Sharded<QueryStateShard<'tcx, D>>,
+pub(crate) struct QueryState<'tcx, C: QueryCache> {
+ cache: C,
+ shards: Sharded<QueryStateShard<'tcx, C::Key, C::Sharded>>,
#[cfg(debug_assertions)]
pub(super) cache_hits: AtomicUsize,
}
-impl<'tcx, Q: QueryAccessors<'tcx>> QueryState<'tcx, Q> {
- pub(super) fn get_lookup<K: Hash>(&'tcx self, key: &K) -> QueryLookup<'tcx, Q> {
+impl<'tcx, C: QueryCache> QueryState<'tcx, C> {
+ pub(super) fn get_lookup<K2: Hash>(
+ &'tcx self,
+ key: &K2,
+ ) -> QueryLookup<'tcx, C::Key, C::Sharded> {
// We compute the key's hash once and then use it for both the
// shard lookup and the hashmap lookup. This relies on the fact
// that both of them use `FxHasher`.
}
/// Indicates the state of a query for a given key in a query map.
-pub(super) enum QueryResult<'tcx> {
+enum QueryResult<'tcx> {
/// An already executing query. The query job can be used to await for its completion.
Started(QueryJob<'tcx>),
Poisoned,
}
-impl<'tcx, M: QueryAccessors<'tcx>> QueryState<'tcx, M> {
- pub fn iter_results<R>(
+impl<'tcx, C: QueryCache> QueryState<'tcx, C> {
+ pub(super) fn iter_results<R>(
&self,
f: impl for<'a> FnOnce(
- Box<dyn Iterator<Item = (&'a M::Key, &'a M::Value, DepNodeIndex)> + 'a>,
+ Box<dyn Iterator<Item = (&'a C::Key, &'a C::Value, DepNodeIndex)> + 'a>,
) -> R,
) -> R {
self.cache.iter(&self.shards, |shard| &mut shard.cache, f)
}
- pub fn all_inactive(&self) -> bool {
+ pub(super) fn all_inactive(&self) -> bool {
let shards = self.shards.lock_shards();
shards.iter().all(|shard| shard.active.is_empty())
}
+
+ pub(super) fn try_collect_active_jobs(
+ &self,
+ kind: DepKind,
+ make_query: fn(C::Key) -> Query<'tcx>,
+ jobs: &mut FxHashMap<QueryJobId, QueryJobInfo<'tcx>>,
+ ) -> Option<()>
+ where
+ C::Key: Clone,
+ {
+ // We use try_lock_shards here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ let shards = self.shards.try_lock_shards()?;
+ let shards = shards.iter().enumerate();
+ jobs.extend(shards.flat_map(|(shard_id, shard)| {
+ shard.active.iter().filter_map(move |(k, v)| {
+ if let QueryResult::Started(ref job) = *v {
+ let id =
+ QueryJobId { job: job.id, shard: u16::try_from(shard_id).unwrap(), kind };
+ let info = QueryInfo { span: job.span, query: make_query(k.clone()) };
+ Some((id, QueryJobInfo { info, job: job.clone() }))
+ } else {
+ None
+ }
+ })
+ }));
+
+ Some(())
+ }
}
-impl<'tcx, M: QueryAccessors<'tcx>> Default for QueryState<'tcx, M> {
- fn default() -> QueryState<'tcx, M> {
+impl<'tcx, C: QueryCache> Default for QueryState<'tcx, C> {
+ fn default() -> QueryState<'tcx, C> {
QueryState {
- cache: M::Cache::default(),
+ cache: C::default(),
shards: Default::default(),
#[cfg(debug_assertions)]
cache_hits: AtomicUsize::new(0),
}
/// Values used when checking a query cache which can be reused on a cache-miss to execute the query.
-pub(crate) struct QueryLookup<'tcx, Q: QueryAccessors<'tcx>> {
+pub(crate) struct QueryLookup<'tcx, K, C> {
pub(super) key_hash: u64,
- pub(super) shard: usize,
- pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, Q>>,
+ shard: usize,
+ pub(super) lock: LockGuard<'tcx, QueryStateShard<'tcx, K, C>>,
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-pub(super) struct JobOwner<'tcx, Q: QueryDescription<'tcx>> {
- tcx: TyCtxt<'tcx>,
- key: Q::Key,
+struct JobOwner<'tcx, C>
+where
+ C: QueryCache,
+ C::Key: Eq + Hash + Clone + Debug,
+ C::Value: Clone,
+{
+ state: &'tcx QueryState<'tcx, C>,
+ key: C::Key,
id: QueryJobId,
}
-impl<'tcx, Q: QueryDescription<'tcx>> JobOwner<'tcx, Q> {
+impl<'tcx, C: QueryCache> JobOwner<'tcx, C>
+where
+ C: QueryCache,
+ C::Key: Eq + Hash + Clone + Debug,
+ C::Value: Clone,
+{
/// Either gets a `JobOwner` corresponding the query, allowing us to
/// start executing the query, or returns with the result of the query.
/// This function assumes that `try_get_cached` is already called and returned `lookup`.
/// This function is inlined because that results in a noticeable speed-up
/// for some compile-time benchmarks.
#[inline(always)]
- pub(super) fn try_start(
+ fn try_start<Q>(
tcx: TyCtxt<'tcx>,
span: Span,
- key: &Q::Key,
- mut lookup: QueryLookup<'tcx, Q>,
- ) -> TryGetJob<'tcx, Q> {
+ key: &C::Key,
+ mut lookup: QueryLookup<'tcx, C::Key, C::Sharded>,
+ ) -> TryGetJob<'tcx, C>
+ where
+ Q: QueryDescription<'tcx, Key = C::Key, Value = C::Value, Cache = C>,
+ {
let lock = &mut *lookup.lock;
let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
};
// Create the id of the job we're waiting for
- let id = QueryJobId::new(job.id, lookup.shard, Q::dep_kind());
+ let id = QueryJobId::new(job.id, lookup.shard, Q::DEP_KIND);
(job.latch(id), _query_blocked_prof_timer)
}
lock.jobs = id;
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
- let global_id = QueryJobId::new(id, lookup.shard, Q::dep_kind());
+ let global_id = QueryJobId::new(id, lookup.shard, Q::DEP_KIND);
let job = tls::with_related_context(tcx, |icx| QueryJob::new(id, span, icx.query));
entry.insert(QueryResult::Started(job));
- let owner = JobOwner { tcx, id: global_id, key: (*key).clone() };
+ let owner =
+ JobOwner { state: Q::query_state(tcx), id: global_id, key: (*key).clone() };
return TryGetJob::NotYetStarted(owner);
}
};
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
}
- let cached = tcx.try_get_cached::<Q, _, _, _>(
+ let cached = tcx.try_get_cached(
+ Q::query_state(tcx),
(*key).clone(),
|value, index| (value.clone(), index),
|_, _| panic!("value must be in cache after waiting"),
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter and forgets the JobOwner, so it won't poison the query
#[inline(always)]
- pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) {
+ fn complete(self, tcx: TyCtxt<'tcx>, result: &C::Value, dep_node_index: DepNodeIndex) {
// We can move out of `self` here because we `mem::forget` it below
let key = unsafe { ptr::read(&self.key) };
- let tcx = self.tcx;
+ let state = self.state;
// Forget ourself so our destructor won't poison the query
mem::forget(self);
let job = {
- let state = Q::query_state(tcx);
let result = result.clone();
let mut lock = state.shards.get_shard_by_value(&key).lock();
let job = match lock.active.remove(&key).unwrap() {
(result, diagnostics.into_inner())
}
-impl<'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'tcx, Q> {
+impl<'tcx, C: QueryCache> Drop for JobOwner<'tcx, C>
+where
+ C::Key: Eq + Hash + Clone + Debug,
+ C::Value: Clone,
+{
#[inline(never)]
#[cold]
fn drop(&mut self) {
// Poison the query so jobs waiting on it panic.
- let state = Q::query_state(self.tcx);
+ let state = self.state;
let shard = state.shards.get_shard_by_value(&self.key);
let job = {
let mut shard = shard.lock();
}
#[derive(Clone)]
-pub struct CycleError<'tcx> {
+pub(crate) struct CycleError<'tcx> {
/// The query and related span that uses the cycle.
pub(super) usage: Option<(Span, Query<'tcx>)>,
pub(super) cycle: Vec<QueryInfo<'tcx>>,
}
/// The result of `try_start`.
-pub(super) enum TryGetJob<'tcx, D: QueryDescription<'tcx>> {
+enum TryGetJob<'tcx, C: QueryCache>
+where
+ C::Key: Eq + Hash + Clone + Debug,
+ C::Value: Clone,
+{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, D>),
+ NotYetStarted(JobOwner<'tcx, C>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
/// if it succeeded or a cycle error if it failed.
#[cfg(parallel_compiler)]
- JobCompleted((D::Value, DepNodeIndex)),
+ JobCompleted((C::Value, DepNodeIndex)),
/// Trying to execute the query resulted in a cycle.
- Cycle(D::Value),
+ Cycle(C::Value),
}
impl<'tcx> TyCtxt<'tcx> {
/// new query job while it executes. It returns the diagnostics
/// captured during execution and the actual result.
#[inline(always)]
- pub(super) fn start_query<F, R>(
+ fn start_query<F, R>(
self,
token: QueryJobId,
diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline(always)]
- fn try_get_cached<Q, R, OnHit, OnMiss>(
+ fn try_get_cached<C, R, OnHit, OnMiss>(
self,
- key: Q::Key,
+ state: &'tcx QueryState<'tcx, C>,
+ key: C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
on_miss: OnMiss,
) -> R
where
- Q: QueryDescription<'tcx> + 'tcx,
- OnHit: FnOnce(&Q::Value, DepNodeIndex) -> R,
- OnMiss: FnOnce(Q::Key, QueryLookup<'tcx, Q>) -> R,
+ C: QueryCache,
+ OnHit: FnOnce(&C::Value, DepNodeIndex) -> R,
+ OnMiss: FnOnce(C::Key, QueryLookup<'tcx, C::Key, C::Sharded>) -> R,
{
- let state = Q::query_state(self);
-
state.cache.lookup(
state,
- QueryStateShard::<Q>::get_cache,
+ QueryStateShard::<C::Key, C::Sharded>::get_cache,
key,
|value, index| {
if unlikely!(self.prof.enabled()) {
) -> Q::Value {
debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span);
- self.try_get_cached::<Q, _, _, _>(
+ self.try_get_cached(
+ Q::query_state(self),
key,
|value, index| {
self.dep_graph.read_index(index);
}
#[inline(always)]
- pub(super) fn try_execute_query<Q: QueryDescription<'tcx>>(
+ fn try_execute_query<Q: QueryDescription<'tcx> + 'tcx>(
self,
span: Span,
key: Q::Key,
- lookup: QueryLookup<'tcx, Q>,
+ lookup: QueryLookup<'tcx, Q::Key, <Q::Cache as QueryCache>::Sharded>,
) -> Q::Value {
- let job = match JobOwner::try_start(self, span, &key, lookup) {
+ let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
#[cfg(parallel_compiler)]
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.id, diagnostics, |tcx| {
- tcx.dep_graph.with_anon_task(Q::dep_kind(), || Q::compute(tcx, key))
+ tcx.dep_graph.with_anon_task(Q::DEP_KIND, || Q::compute(tcx, key))
})
});
.store_diagnostics_for_anon_node(dep_node_index, diagnostics);
}
- job.complete(&result, dep_node_index);
+ job.complete(self, &result, dep_node_index);
return result;
}
})
});
if let Some((result, dep_node_index)) = loaded {
- job.complete(&result, dep_node_index);
+ job.complete(self, &result, dep_node_index);
return result;
}
}
}
#[inline(always)]
- fn force_query_with_job<Q: QueryDescription<'tcx>>(
+ fn force_query_with_job<Q: QueryDescription<'tcx> + 'tcx>(
self,
key: Q::Key,
- job: JobOwner<'tcx, Q>,
+ job: JobOwner<'tcx, Q::Cache>,
dep_node: DepNode,
) -> (Q::Value, DepNodeIndex) {
// If the following assertion triggers, it can have two reasons:
}
}
- job.complete(&result, dep_node_index);
+ job.complete(self, &result, dep_node_index);
(result, dep_node_index)
}
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
- self.try_get_cached::<Q, _, _, _>(
+ self.try_get_cached(
+ Q::query_state(self),
key,
|_, _| {
// Cache hit, do nothing
},
|key, lookup| {
- let job = match JobOwner::try_start(self, span, &key, lookup) {
+ let job = match JobOwner::try_start::<Q>(self, span, &key, lookup) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return,
#[cfg(parallel_compiler)]
input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
}
- impl<$tcx> Queries<$tcx> {
- pub fn new(
- providers: IndexVec<CrateNum, Providers<$tcx>>,
- fallback_extern_providers: Providers<$tcx>,
- on_disk_cache: OnDiskCache<'tcx>,
- ) -> Self {
- Queries {
- providers,
- fallback_extern_providers: Box::new(fallback_extern_providers),
- on_disk_cache,
- $($name: Default::default()),*
- }
- }
-
- pub fn try_collect_active_jobs(
- &self
- ) -> Option<FxHashMap<QueryJobId, QueryJobInfo<'tcx>>> {
- let mut jobs = FxHashMap::default();
-
- $(
- // We use try_lock_shards here since we are called from the
- // deadlock handler, and this shouldn't be locked.
- let shards = self.$name.shards.try_lock_shards()?;
- let shards = shards.iter().enumerate();
- jobs.extend(shards.flat_map(|(shard_id, shard)| {
- shard.active.iter().filter_map(move |(k, v)| {
- if let QueryResult::Started(ref job) = *v {
- let id = QueryJobId {
- job: job.id,
- shard: u16::try_from(shard_id).unwrap(),
- kind:
- <queries::$name<'tcx> as QueryAccessors<'tcx>>::dep_kind(),
- };
- let info = QueryInfo {
- span: job.span,
- query: queries::$name::query(k.clone())
- };
- Some((id, QueryJobInfo { info, job: job.clone() }))
- } else {
- None
- }
- })
- }));
- )*
-
- Some(jobs)
- }
- }
-
#[allow(nonstandard_style)]
#[derive(Clone, Debug)]
pub enum Query<$tcx> {
})*
}
- // This module and the functions in it exist only to provide a
- // predictable symbol name prefix for query providers. This is helpful
- // for analyzing queries in profilers.
- pub(super) mod __query_compute {
- $(#[inline(never)]
- pub fn $name<F: FnOnce() -> R, R>(f: F) -> R {
- f()
- })*
- }
-
$(impl<$tcx> QueryConfig<$tcx> for queries::$name<$tcx> {
type Key = $K;
type Value = $V;
impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> {
const ANON: bool = is_anon!([$($modifiers)*]);
const EVAL_ALWAYS: bool = is_eval_always!([$($modifiers)*]);
+ const DEP_KIND: dep_graph::DepKind = dep_graph::DepKind::$node;
type Cache = query_storage!([$($modifiers)*][$K, $V]);
#[inline(always)]
- fn query(key: Self::Key) -> Query<'tcx> {
- Query::$name(key)
- }
-
- #[inline(always)]
- fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self> {
+ fn query_state<'a>(tcx: TyCtxt<$tcx>) -> &'a QueryState<$tcx, Self::Cache> {
&tcx.queries.$name
}
DepConstructor::$node(tcx, *key)
}
- #[inline(always)]
- fn dep_kind() -> dep_graph::DepKind {
- dep_graph::DepKind::$node
- }
-
#[inline]
fn compute(tcx: TyCtxt<'tcx>, key: Self::Key) -> Self::Value {
- __query_compute::$name(move || {
- let provider = tcx.queries.providers.get(key.query_crate())
- // HACK(eddyb) it's possible crates may be loaded after
- // the query engine is created, and because crate loading
- // is not yet integrated with the query engine, such crates
- // would be missing appropriate entries in `providers`.
- .unwrap_or(&tcx.queries.fallback_extern_providers)
- .$name;
- provider(tcx, key)
- })
+ let provider = tcx.queries.providers.get(key.query_crate())
+ // HACK(eddyb) it's possible crates may be loaded after
+ // the query engine is created, and because crate loading
+ // is not yet integrated with the query engine, such crates
+ // would be missing appropriate entries in `providers`.
+ .unwrap_or(&tcx.queries.fallback_extern_providers)
+ .$name;
+ provider(tcx, key)
}
fn hash_result(
providers: IndexVec<CrateNum, Providers<$tcx>>,
fallback_extern_providers: Box<Providers<$tcx>>,
- $($(#[$attr])* $name: QueryState<$tcx, queries::$name<$tcx>>,)*
+ $($(#[$attr])* $name: QueryState<
+ $tcx,
+ <queries::$name<$tcx> as QueryAccessors<'tcx>>::Cache,
+ >,)*
+ }
+
+ impl<$tcx> Queries<$tcx> {
+ pub(crate) fn new(
+ providers: IndexVec<CrateNum, Providers<$tcx>>,
+ fallback_extern_providers: Providers<$tcx>,
+ on_disk_cache: OnDiskCache<'tcx>,
+ ) -> Self {
+ Queries {
+ providers,
+ fallback_extern_providers: Box::new(fallback_extern_providers),
+ on_disk_cache,
+ $($name: Default::default()),*
+ }
+ }
+
+ pub(crate) fn try_collect_active_jobs(
+ &self
+ ) -> Option<FxHashMap<QueryJobId, QueryJobInfo<'tcx>>> {
+ let mut jobs = FxHashMap::default();
+
+ $(
+ self.$name.try_collect_active_jobs(
+ <queries::$name<'tcx> as QueryAccessors<'tcx>>::DEP_KIND,
+ Query::$name,
+ &mut jobs,
+ )?;
+ )*
+
+ Some(jobs)
+ }
}
};
}
use crate::hir::map::definitions::DefPathData;
use crate::ty::context::TyCtxt;
-use crate::ty::query::config::QueryAccessors;
+use crate::ty::query::caches::QueryCache;
use crate::ty::query::plumbing::QueryState;
use measureme::{StringComponent, StringId};
use rustc_data_structures::fx::FxHashMap;
/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
-pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, Q>(
+pub(super) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
- query_state: &QueryState<'tcx, Q>,
+ query_state: &QueryState<'tcx, C>,
string_cache: &mut QueryKeyStringCache,
) where
- Q: QueryAccessors<'tcx>,
+ C: QueryCache,
+ C::Key: Debug + Clone,
{
tcx.prof.with_profiler(|profiler| {
let event_id_builder = profiler.event_id_builder();
+use crate::ty::query::caches::QueryCache;
use crate::ty::query::config::QueryAccessors;
use crate::ty::query::plumbing::QueryState;
use crate::ty::query::queries;
local_def_id_keys: Option<usize>,
}
-fn stats<'tcx, Q: QueryAccessors<'tcx>>(
- name: &'static str,
- map: &QueryState<'tcx, Q>,
-) -> QueryStats {
+fn stats<'tcx, C: QueryCache>(name: &'static str, map: &QueryState<'tcx, C>) -> QueryStats {
let mut stats = QueryStats {
name,
#[cfg(debug_assertions)]
cache_hits: map.cache_hits.load(Ordering::Relaxed),
#[cfg(not(debug_assertions))]
cache_hits: 0,
- key_size: mem::size_of::<Q::Key>(),
- key_type: type_name::<Q::Key>(),
- value_size: mem::size_of::<Q::Value>(),
- value_type: type_name::<Q::Value>(),
+ key_size: mem::size_of::<C::Key>(),
+ key_type: type_name::<C::Key>(),
+ value_size: mem::size_of::<C::Value>(),
+ value_type: type_name::<C::Value>(),
entry_count: map.iter_results(|results| results.count()),
local_def_id_keys: None,
};
let mut queries = Vec::new();
$($(
- queries.push(stats::<queries::$name<'_>>(
+ queries.push(stats::<
+ <queries::$name<'_> as QueryAccessors<'_>>::Cache,
+ >(
stringify!($name),
&tcx.queries.$name,
));
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::thin_vec::ThinVec;
-use rustc_index::vec::Idx;
use rustc_macros::HashStable_Generic;
use rustc_serialize::{self, Decoder, Encoder};
use rustc_span::source_map::{respan, Spanned};
Inner,
}
-#[derive(Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, Copy)]
-pub struct AttrId(pub usize);
-
-impl Idx for AttrId {
- fn new(idx: usize) -> Self {
- AttrId(idx)
- }
- fn index(self) -> usize {
- self.0
+rustc_index::newtype_index! {
+ pub struct AttrId {
+ ENCODABLE = custom
+ DEBUG_FORMAT = "AttrId({})"
}
}
impl rustc_serialize::Encodable for AttrId {
- fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
- s.emit_unit()
+ fn encode<S: Encoder>(&self, _: &mut S) -> Result<(), S::Error> {
+ Ok(())
}
}
impl rustc_serialize::Decodable for AttrId {
- fn decode<D: Decoder>(d: &mut D) -> Result<AttrId, D::Error> {
- d.read_nil().map(|_| crate::attr::mk_attr_id())
+ fn decode<D: Decoder>(_: &mut D) -> Result<AttrId, D::Error> {
+ Ok(crate::attr::mk_attr_id())
}
}
}
crate fn mk_attr_id() -> AttrId {
- use std::sync::atomic::AtomicUsize;
+ use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
- static NEXT_ATTR_ID: AtomicUsize = AtomicUsize::new(0);
+ static NEXT_ATTR_ID: AtomicU32 = AtomicU32::new(0);
let id = NEXT_ATTR_ID.fetch_add(1, Ordering::SeqCst);
- assert!(id != ::std::usize::MAX);
- AttrId(id)
+ assert!(id != u32::MAX);
+ AttrId::from_u32(id)
}
pub fn mk_attr(style: AttrStyle, path: Path, args: MacArgs, span: Span) -> Attribute {
}
}
+/// Checks that generic parameters are in the correct order,
+/// which is lifetimes, then types and then consts. (`<'a, T, const N: usize>`)
fn validate_generic_param_order<'a>(
sess: &Session,
handler: &rustc_errors::Handler,
if let Some(discr) =
operand.layout.ty.discriminant_for_variant(bx.tcx(), index)
{
- let discr_val = bx.cx().const_uint_big(ll_t_out, discr.val);
+ let discr_layout = bx.cx().layout_of(discr.ty);
+ let discr_t = bx.cx().immediate_backend_type(discr_layout);
+ let discr_val = bx.cx().const_uint_big(discr_t, discr.val);
+ let discr_val =
+ bx.intcast(discr_val, ll_t_out, discr.ty.is_signed());
+
return (
bx,
OperandRef {
E0221: include_str!("./error_codes/E0221.md"),
E0222: include_str!("./error_codes/E0222.md"),
E0223: include_str!("./error_codes/E0223.md"),
+E0224: include_str!("./error_codes/E0224.md"),
E0225: include_str!("./error_codes/E0225.md"),
E0229: include_str!("./error_codes/E0229.md"),
E0230: include_str!("./error_codes/E0230.md"),
// E0217, // ambiguous associated type, defined in multiple supertraits
// E0218, // no associated type defined
// E0219, // associated type defined in higher-ranked supertrait
- E0224, // at least one non-builtin train is required for an object type
E0226, // only a single explicit lifetime bound is permitted
E0227, // ambiguous lifetime bound, explicit lifetime bound required
E0228, // explicit lifetime bound required
--- /dev/null
+A trait object was declared with no traits.
+
+Erroneous code example:
+
+```compile_fail,E0224
+type Foo = dyn 'static +;
+```
+
+Rust does not currently support this.
+
+To solve, ensure that the trait object has at least one trait:
+
+```
+type Foo = dyn 'static + Copy;
+```
pub struct ExtCtxt<'a> {
pub parse_sess: &'a ParseSess,
pub ecfg: expand::ExpansionConfig<'a>,
+ pub reduced_recursion_limit: Option<usize>,
pub root_path: PathBuf,
pub resolver: &'a mut dyn Resolver,
pub current_expansion: ExpansionData,
ExtCtxt {
parse_sess,
ecfg,
+ reduced_recursion_limit: None,
resolver,
extern_mod_loaded,
root_path: PathBuf::new(),
use rustc_ast::visit::{self, AssocCtxt, Visitor};
use rustc_ast_pretty::pprust;
use rustc_attr::{self as attr, is_builtin_attr, HasAttrs};
-use rustc_errors::{Applicability, FatalError, PResult};
+use rustc_errors::{Applicability, PResult};
use rustc_feature::Features;
use rustc_parse::parser::Parser;
use rustc_parse::validate_attr;
))
.emit();
self.cx.trace_macros_diag();
- FatalError.raise();
}
/// A macro's expansion does not fit in this fragment kind.
invoc: Invocation,
ext: &SyntaxExtensionKind,
) -> ExpandResult<AstFragment, Invocation> {
- if self.cx.current_expansion.depth > self.cx.ecfg.recursion_limit {
- self.error_recursion_limit_reached();
+ let recursion_limit =
+ self.cx.reduced_recursion_limit.unwrap_or(self.cx.ecfg.recursion_limit);
+ if self.cx.current_expansion.depth > recursion_limit {
+ if self.cx.reduced_recursion_limit.is_none() {
+ self.error_recursion_limit_reached();
+ }
+
+ // Reduce the recursion limit by half each time it triggers.
+ self.cx.reduced_recursion_limit = Some(recursion_limit / 2);
+
+ return ExpandResult::Ready(invoc.fragment_kind.dummy(invoc.span()));
}
let (fragment_kind, span) = (invoc.fragment_kind, invoc.span());
// suggest adding an explicit lifetime bound to it.
let type_param_span = match (self.in_progress_tables, bound_kind) {
(Some(ref table), GenericKind::Param(ref param)) => {
- let table = table.borrow();
- table.local_id_root.and_then(|did| {
- let generics = self.tcx.generics_of(did);
- // Account for the case where `did` corresponds to `Self`, which doesn't have
- // the expected type argument.
+ let table_owner = table.borrow().hir_owner;
+ table_owner.and_then(|table_owner| {
+ let generics = self.tcx.generics_of(table_owner.to_def_id());
+ // Account for the case where `param` corresponds to `Self`,
+ // which doesn't have the expected type argument.
if !(generics.has_self && param.index == 0) {
let type_param = generics.type_param(param, self.tcx);
let hir = &self.tcx.hir();
use rustc_data_structures::unify as ut;
use rustc_errors::DiagnosticBuilder;
use rustc_hir as hir;
-use rustc_hir::def_id::DefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_session::config::BorrowckMode;
use rustc_span::symbol::Symbol;
use rustc_span::Span;
impl<'tcx> InferCtxtBuilder<'tcx> {
/// Used only by `rustc_typeck` during body type-checking/inference,
/// will initialize `in_progress_tables` with fresh `TypeckTables`.
- pub fn with_fresh_in_progress_tables(mut self, table_owner: DefId) -> Self {
+ pub fn with_fresh_in_progress_tables(mut self, table_owner: LocalDefId) -> Self {
self.fresh_tables = Some(RefCell::new(ty::TypeckTables::empty(Some(table_owner))));
self
}
ecx.parse_sess.missing_fragment_specifiers.borrow().iter().cloned().collect();
missing_fragment_specifiers.sort();
+ let recursion_limit_hit = ecx.reduced_recursion_limit.is_some();
+
for span in missing_fragment_specifiers {
let lint = lint::builtin::MISSING_FRAGMENT_SPECIFIER;
let msg = "missing fragment specifier";
if cfg!(windows) {
env::set_var("PATH", &old_path);
}
- krate
- });
+
+ if recursion_limit_hit {
+ // If we hit a recursion limit, exit early to avoid later passes getting overwhelmed
+ // with a large AST
+ Err(ErrorReported)
+ } else {
+ Ok(krate)
+ }
+ })?;
sess.time("maybe_building_test_harness", || {
rustc_builtin_macros::test_harness::inject(
use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable};
use rustc_ast::ast::FloatTy;
use rustc_span::symbol::sym;
+use rustc_target::abi::LayoutOf;
use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc::mir::CastKind;
layout::Variants::Single { index } => {
if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
assert!(src.layout.is_zst());
- return Ok(Scalar::from_uint(discr.val, dest_layout.size).into());
+ let discr_layout = self.layout_of(discr.ty)?;
+ return Ok(self
+ .cast_from_int_like(discr.val, discr_layout, dest_layout)?
+ .into());
}
}
layout::Variants::Multiple { .. } => {}
// (b) cast from an integer-like (including bool, char, enums).
// In both cases we want the bits.
let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
- Ok(self.cast_from_int(bits, src.layout, dest_layout)?.into())
+ Ok(self.cast_from_int_like(bits, src.layout, dest_layout)?.into())
}
- fn cast_from_int(
+ fn cast_from_int_like(
&self,
v: u128, // raw bits
src_layout: TyLayout<'tcx>,
source_file: Lrc<rustc_span::SourceFile>,
override_span: Option<Span>,
) -> Self {
- if source_file.src.is_none() {
+ // Make sure external source is loaded first, before accessing it.
+ // While this can't show up during normal parsing, `retokenize` may
+ // be called with a source file from an external crate.
+ sess.source_map().ensure_source_file_source_present(source_file.clone());
+
+ // FIXME(eddyb) use `Lrc<str>` or similar to avoid cloning the `String`.
+ let src = if let Some(src) = &source_file.src {
+ src.clone()
+ } else if let Some(src) = source_file.external_src.borrow().get_source() {
+ src.clone()
+ } else {
sess.span_diagnostic
.bug(&format!("cannot lex `source_file` without source: {}", source_file.name));
- }
-
- let src = (*source_file.src.as_ref().unwrap()).clone();
+ };
StringReader {
sess,
self.parse_closure_expr(attrs)
} else if self.eat_keyword(kw::If) {
self.parse_if_expr(attrs)
- } else if self.eat_keyword(kw::For) {
- self.parse_for_expr(None, self.prev_token.span, attrs)
+ } else if self.check_keyword(kw::For) {
+ if self.choose_generics_over_qpath(1) {
+ // NOTE(Centril, eddyb): DO NOT REMOVE! Beyond providing parser recovery,
+ // this is an insurance policy in case we allow qpaths in (tuple-)struct patterns.
+ // When `for <Foo as Bar>::Proj in $expr $block` is wanted,
+ // you can disambiguate in favor of a pattern with `(...)`.
+ self.recover_quantified_closure_expr(attrs)
+ } else {
+ assert!(self.eat_keyword(kw::For));
+ self.parse_for_expr(None, self.prev_token.span, attrs)
+ }
} else if self.eat_keyword(kw::While) {
self.parse_while_expr(None, self.prev_token.span, attrs)
} else if let Some(label) = self.eat_label() {
Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs))
}
+ /// Recover on an explicitly quantified closure expression, e.g., `for<'a> |x: &'a u8| *x + 1`.
+ fn recover_quantified_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+ let _ = self.parse_late_bound_lifetime_defs()?;
+ let span_for = lo.to(self.prev_token.span);
+ let closure = self.parse_closure_expr(attrs)?;
+
+ self.struct_span_err(span_for, "cannot introduce explicit parameters for a closure")
+ .span_label(closure.span, "the parameters are attached to this closure")
+ .span_suggestion(
+ span_for,
+ "remove the parameters",
+ String::new(),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ Ok(self.mk_expr_err(lo.to(closure.span)))
+ }
+
/// Parses a closure expression (e.g., `move |args| expr`).
fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
// We are considering adding generics to the `where` keyword as an alternative higher-rank
// parameter syntax (as in `where<'a>` or `where<T>`. To avoid that being a breaking
// change we parse those generics now, but report an error.
- if self.choose_generics_over_qpath() {
+ if self.choose_generics_over_qpath(0) {
let generics = self.parse_generics()?;
self.struct_span_err(
generics.span,
}
}
- pub(super) fn choose_generics_over_qpath(&self) -> bool {
+ pub(super) fn choose_generics_over_qpath(&self, start: usize) -> bool {
// There's an ambiguity between generic parameters and qualified paths in impls.
// If we see `<` it may start both, so we have to inspect some following tokens.
// The following combinations can only start generics,
// we disambiguate it in favor of generics (`impl<T> ::absolute::Path<T> { ... }`)
// because this is what almost always expected in practice, qualified paths in impls
// (`impl <Type>::AssocTy { ... }`) aren't even allowed by type checker at the moment.
- self.token == token::Lt
- && (self.look_ahead(1, |t| t == &token::Pound || t == &token::Gt)
- || self.look_ahead(1, |t| t.is_lifetime() || t.is_ident())
- && self.look_ahead(2, |t| {
- t == &token::Gt
- || t == &token::Comma
- || t == &token::Colon
- || t == &token::Eq
+ self.look_ahead(start, |t| t == &token::Lt)
+ && (self.look_ahead(start + 1, |t| t == &token::Pound || t == &token::Gt)
+ || self.look_ahead(start + 1, |t| t.is_lifetime() || t.is_ident())
+ && self.look_ahead(start + 2, |t| {
+ matches!(t.kind, token::Gt | token::Comma | token::Colon | token::Eq)
})
- || self.is_keyword_ahead(1, &[kw::Const]))
+ || self.is_keyword_ahead(start + 1, &[kw::Const]))
}
}
self.expect_keyword(kw::Impl)?;
// First, parse generic parameters if necessary.
- let mut generics = if self.choose_generics_over_qpath() {
+ let mut generics = if self.choose_generics_over_qpath(0) {
self.parse_generics()?
} else {
let mut generics = Generics::default();
pub fn test_layout(tcx: TyCtxt<'_>) {
if tcx.features().rustc_attrs {
// if the `rustc_attrs` feature is not enabled, don't bother testing layout
- tcx.hir().krate().visit_all_item_likes(&mut VarianceTest { tcx });
+ tcx.hir().krate().visit_all_item_likes(&mut LayoutTest { tcx });
}
}
-struct VarianceTest<'tcx> {
+struct LayoutTest<'tcx> {
tcx: TyCtxt<'tcx>,
}
-impl ItemLikeVisitor<'tcx> for VarianceTest<'tcx> {
+impl ItemLikeVisitor<'tcx> for LayoutTest<'tcx> {
fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
let item_def_id = self.tcx.hir().local_def_id(item.hir_id);
- if let ItemKind::TyAlias(..) = item.kind {
- for attr in self.tcx.get_attrs(item_def_id).iter() {
- if attr.check_name(sym::rustc_layout) {
- self.dump_layout_of(item_def_id, item, attr);
+ match item.kind {
+ ItemKind::TyAlias(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::Union(..) => {
+ for attr in self.tcx.get_attrs(item_def_id).iter() {
+ if attr.check_name(sym::rustc_layout) {
+ self.dump_layout_of(item_def_id, item, attr);
+ }
}
}
+ _ => {}
}
}
fn visit_impl_item(&mut self, _: &'tcx hir::ImplItem<'tcx>) {}
}
-impl VarianceTest<'tcx> {
+impl LayoutTest<'tcx> {
fn dump_layout_of(&self, item_def_id: DefId, item: &hir::Item<'tcx>, attr: &Attribute) {
let tcx = self.tcx;
let param_env = self.tcx.param_env(item_def_id);
);
}
+ sym::debug => {
+ self.tcx.sess.span_err(
+ item.span,
+ &format!("layout debugging: {:#?}", *ty_layout),
+ );
+ }
+
name => {
self.tcx.sess.span_err(
meta_item.span(),
#[derive(PartialEq, Eq, Clone, Debug)]
pub enum ExternalSourceKind {
/// The external source has been loaded already.
- Present(String),
+ Present(Lrc<String>),
/// No attempt has been made to load the external source.
AbsentOk,
/// A failed attempt has been made to load the external source.
}
}
- pub fn get_source(&self) -> Option<&str> {
+ pub fn get_source(&self) -> Option<&Lrc<String>> {
match self {
ExternalSource::Foreign { kind: ExternalSourceKind::Present(ref src), .. } => Some(src),
_ => None,
hasher.write(src.as_bytes());
if hasher.finish::<u128>() == self.src_hash {
- *src_kind = ExternalSourceKind::Present(src);
+ *src_kind = ExternalSourceKind::Present(Lrc::new(src));
return true;
}
} else {
debug_trait,
declare_lint_pass,
decl_macro,
+ debug,
Debug,
Decodable,
Default,
.print_def_path(def_id, &[])
.unwrap();
- if instance.is_vtable_shim() {
+ if let ty::InstanceDef::VtableShim(..) = instance.def {
let _ = printer.write_str("{{vtable-shim}}");
}
+ if let ty::InstanceDef::ReifyShim(..) = instance.def {
+ let _ = printer.write_str("{{reify-shim}}");
+ }
+
printer.path.finish(hash)
}
}
// We want to avoid accidental collision between different types of instances.
- // Especially, VtableShim may overlap with its original instance without this.
+ // Especially, `VtableShim`s and `ReifyShim`s may overlap with their original
+ // instances without this.
discriminant(&instance.def).hash_stable(&mut hcx, &mut hasher);
});
binders: vec![],
out: String::from(prefix),
};
- cx = if instance.is_vtable_shim() {
- cx.path_append_ns(|cx| cx.print_def_path(def_id, substs), 'S', 0, "").unwrap()
+
+ // Append `::{shim:...#0}` to shims that can coexist with a non-shim instance.
+ let shim_kind = match instance.def {
+ ty::InstanceDef::VtableShim(_) => Some("vtable"),
+ ty::InstanceDef::ReifyShim(_) => Some("reify"),
+
+ _ => None,
+ };
+
+ cx = if let Some(shim_kind) = shim_kind {
+ cx.path_append_ns(|cx| cx.print_def_path(def_id, substs), 'S', 0, shim_kind).unwrap()
} else {
cx.print_def_path(def_id, substs).unwrap()
};
let generator_did_root = self.tcx.closure_base_def_id(generator_did);
debug!(
"maybe_note_obligation_cause_for_async_await: generator_did={:?} \
- generator_did_root={:?} in_progress_tables.local_id_root={:?} span={:?}",
+ generator_did_root={:?} in_progress_tables.hir_owner={:?} span={:?}",
generator_did,
generator_did_root,
- in_progress_tables.as_ref().map(|t| t.local_id_root),
+ in_progress_tables.as_ref().map(|t| t.hir_owner),
span
);
let query_tables;
let tables: &TypeckTables<'tcx> = match &in_progress_tables {
- Some(t) if t.local_id_root == Some(generator_did_root) => t,
+ Some(t) if t.hir_owner.map(|owner| owner.to_def_id()) == Some(generator_did_root) => t,
_ => {
query_tables = self.tcx.typeck_tables_of(generator_did);
&query_tables
);
tcx.infer_ctxt().enter(|infcx| {
- let inh = Inherited::new(infcx, impl_m.def_id);
+ let inh = Inherited::new(infcx, impl_m.def_id.expect_local());
let infcx = &inh.infcx;
debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds);
tcx.infer_ctxt().enter(|infcx| {
let param_env = tcx.param_env(impl_c.def_id);
- let inh = Inherited::new(infcx, impl_c.def_id);
+ let inh = Inherited::new(infcx, impl_c.def_id.expect_local());
let infcx = &inh.infcx;
// The below is for the most part highly similar to the procedure
normalize_cause.clone(),
);
tcx.infer_ctxt().enter(|infcx| {
- let inh = Inherited::new(infcx, impl_ty.def_id);
+ let inh = Inherited::new(infcx, impl_ty.def_id.expect_local());
let infcx = &inh.infcx;
debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds);
// Obtain the span for `param` and use it for a structured suggestion.
let mut suggested = false;
if let (Some(ref param), Some(ref table)) = (param_type, self.in_progress_tables) {
- let table = table.borrow();
- if let Some(did) = table.local_id_root {
- let generics = self.tcx.generics_of(did);
+ let table_owner = table.borrow().hir_owner;
+ if let Some(table_owner) = table_owner {
+ let generics = self.tcx.generics_of(table_owner.to_def_id());
let type_param = generics.type_param(param, self.tcx);
let hir = &self.tcx.hir();
if let Some(id) = hir.as_local_hir_id(type_param.def_id) {
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, DiagnosticId};
use rustc_hir as hir;
use rustc_hir::def::{CtorOf, DefKind, Res};
-use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId, LOCAL_CRATE};
use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
use rustc_hir::{ExprKind, GenericArg, HirIdMap, Item, ItemKind, Node, PatKind, QPath};
/// `F: for<'b, 'tcx> where 'tcx FnOnce(Inherited<'b, 'tcx>)`.
pub struct InheritedBuilder<'tcx> {
infcx: infer::InferCtxtBuilder<'tcx>,
- def_id: DefId,
+ def_id: LocalDefId,
}
impl Inherited<'_, 'tcx> {
- pub fn build(tcx: TyCtxt<'tcx>, def_id: DefId) -> InheritedBuilder<'tcx> {
- let hir_id_root = if let Some(def_id) = def_id.as_local() {
- tcx.hir().local_def_id_to_hir_id(def_id).owner.to_def_id()
- } else {
- def_id
- };
+ pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> {
+ let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner;
InheritedBuilder {
- infcx: tcx.infer_ctxt().with_fresh_in_progress_tables(hir_id_root),
+ infcx: tcx.infer_ctxt().with_fresh_in_progress_tables(hir_owner),
def_id,
}
}
}
impl Inherited<'a, 'tcx> {
- fn new(infcx: InferCtxt<'a, 'tcx>, def_id: DefId) -> Self {
+ fn new(infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId) -> Self {
let tcx = infcx.tcx;
- let item_id = tcx.hir().as_local_hir_id(def_id);
- let body_id = item_id.and_then(|id| tcx.hir().maybe_body_owned_by(id));
+ let item_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let body_id = tcx.hir().maybe_body_owned_by(item_id);
let implicit_region_bound = body_id.map(|body_id| {
let body = tcx.hir().body(body_id);
tcx.mk_region(ty::ReScope(region::Scope {
});
let body = tcx.hir().body(body_id);
- let tables = Inherited::build(tcx, def_id).enter(|inh| {
+ let tables = Inherited::build(tcx, def_id.expect_local()).enter(|inh| {
let param_env = tcx.param_env(def_id);
let fcx = if let (Some(header), Some(decl)) = (fn_header, fn_decl) {
let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() {
// Consistency check our TypeckTables instance can hold all ItemLocalIds
// it will need to hold.
- assert_eq!(tables.local_id_root, Some(id.owner.to_def_id()));
+ assert_eq!(tables.hir_owner, Some(id.owner));
tables
}
}
fn for_id(tcx: TyCtxt<'_>, id: hir::HirId, span: Span) -> CheckWfFcxBuilder<'_> {
- let def_id = tcx.hir().local_def_id(id);
+ let def_id = tcx.hir().local_def_id(id).expect_local();
CheckWfFcxBuilder {
inherited: Inherited::build(tcx, def_id),
id,
span,
- param_env: tcx.param_env(def_id),
+ param_env: tcx.param_env(def_id.to_def_id()),
}
}
WritebackCx {
fcx,
- tables: ty::TypeckTables::empty(Some(owner.to_def_id())),
+ tables: ty::TypeckTables::empty(Some(owner)),
body,
rustc_dump_user_substs,
}
fn visit_closures(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
- let common_local_id_root = fcx_tables.local_id_root.unwrap();
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
+ let common_hir_owner = fcx_tables.hir_owner.unwrap();
for (&id, &origin) in fcx_tables.closure_kind_origins().iter() {
- let hir_id = hir::HirId { owner: common_local_id_root.expect_local(), local_id: id };
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id: id };
self.tables.closure_kind_origins_mut().insert(hir_id, origin);
}
}
fn visit_coercion_casts(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
let fcx_coercion_casts = fcx_tables.coercion_casts();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
for local_id in fcx_coercion_casts {
self.tables.set_coercion_cast(*local_id);
fn visit_user_provided_tys(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
- let common_local_id_root = fcx_tables.local_id_root.unwrap();
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
+ let common_hir_owner = fcx_tables.hir_owner.unwrap();
let mut errors_buffer = Vec::new();
for (&local_id, c_ty) in fcx_tables.user_provided_types().iter() {
- let hir_id = hir::HirId { owner: common_local_id_root.expect_local(), local_id };
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
if cfg!(debug_assertions) && c_ty.has_local_value() {
span_bug!(hir_id.to_span(self.fcx.tcx), "writeback: `{:?}` is a local value", c_ty);
fn visit_user_provided_sigs(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
for (&def_id, c_sig) in fcx_tables.user_provided_sigs.iter() {
if cfg!(debug_assertions) && c_sig.has_local_value() {
fn visit_generator_interior_types(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
self.tables.generator_interior_types = fcx_tables.generator_interior_types.clone();
}
fn visit_liberated_fn_sigs(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
- let common_local_id_root = fcx_tables.local_id_root.unwrap();
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
+ let common_hir_owner = fcx_tables.hir_owner.unwrap();
for (&local_id, fn_sig) in fcx_tables.liberated_fn_sigs().iter() {
- let hir_id = hir::HirId { owner: common_local_id_root.expect_local(), local_id };
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
let fn_sig = self.resolve(fn_sig, &hir_id);
self.tables.liberated_fn_sigs_mut().insert(hir_id, fn_sig.clone());
}
fn visit_fru_field_types(&mut self) {
let fcx_tables = self.fcx.tables.borrow();
- debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root);
- let common_local_id_root = fcx_tables.local_id_root.unwrap();
+ assert_eq!(fcx_tables.hir_owner, self.tables.hir_owner);
+ let common_hir_owner = fcx_tables.hir_owner.unwrap();
for (&local_id, ftys) in fcx_tables.fru_field_types().iter() {
- let hir_id = hir::HirId { owner: common_local_id_root.expect_local(), local_id };
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
let ftys = self.resolve(ftys, &hir_id);
self.tables.fru_field_types_mut().insert(hir_id, ftys);
}
// Now create the real type and const parameters.
let type_start = own_start - has_self as u32 + params.len() as u32;
let mut i = 0;
- params.extend(ast_generics.params.iter().filter_map(|param| {
- let kind = match param.kind {
- GenericParamKind::Type { ref default, synthetic, .. } => {
- if !allow_defaults && default.is_some() {
- if !tcx.features().default_type_parameter_fallback {
- tcx.struct_span_lint_hir(
- lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
- param.hir_id,
- param.span,
- |lint| {
- lint.build(
- "defaults for type parameters are only allowed in \
- `struct`, `enum`, `type`, or `trait` definitions.",
- )
- .emit();
- },
- );
- }
- }
- ty::GenericParamDefKind::Type {
- has_default: default.is_some(),
- object_lifetime_default: object_lifetime_defaults
- .as_ref()
- .map_or(rl::Set1::Empty, |o| o[i]),
- synthetic,
+ // FIXME(const_generics): a few places in the compiler expect generic params
+ // to be in the order lifetimes, then type params, then const params.
+ //
+ // To prevent internal errors in case const parameters are supplied before
+ // type parameters we first add all type params, then all const params.
+ params.extend(ast_generics.params.iter().filter_map(|param| {
+ if let GenericParamKind::Type { ref default, synthetic, .. } = param.kind {
+ if !allow_defaults && default.is_some() {
+ if !tcx.features().default_type_parameter_fallback {
+ tcx.struct_span_lint_hir(
+ lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
+ param.hir_id,
+ param.span,
+ |lint| {
+ lint.build(
+ "defaults for type parameters are only allowed in \
+ `struct`, `enum`, `type`, or `trait` definitions.",
+ )
+ .emit();
+ },
+ );
}
}
- GenericParamKind::Const { .. } => ty::GenericParamDefKind::Const,
- _ => return None,
- };
- let param_def = ty::GenericParamDef {
- index: type_start + i as u32,
- name: param.name.ident().name,
- def_id: tcx.hir().local_def_id(param.hir_id),
- pure_wrt_drop: param.pure_wrt_drop,
- kind,
- };
- i += 1;
- Some(param_def)
+ let kind = ty::GenericParamDefKind::Type {
+ has_default: default.is_some(),
+ object_lifetime_default: object_lifetime_defaults
+ .as_ref()
+ .map_or(rl::Set1::Empty, |o| o[i]),
+ synthetic,
+ };
+
+ let param_def = ty::GenericParamDef {
+ index: type_start + i as u32,
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind,
+ };
+ i += 1;
+ Some(param_def)
+ } else {
+ None
+ }
+ }));
+
+ params.extend(ast_generics.params.iter().filter_map(|param| {
+ if let GenericParamKind::Const { .. } = param.kind {
+ let param_def = ty::GenericParamDef {
+ index: type_start + i as u32,
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind: ty::GenericParamDefKind::Const,
+ };
+ i += 1;
+ Some(param_def)
+ } else {
+ None
+ }
}));
// provide junk type parameter defs - the only place that
// figure out which generic parameter it corresponds to and return
// the relevant type.
let generics = match path.res {
- Res::Def(DefKind::Ctor(..), def_id) => {
+ Res::Def(DefKind::Ctor(..), def_id)
+ | Res::Def(DefKind::AssocTy, def_id) => {
tcx.generics_of(tcx.parent(def_id).unwrap())
}
Res::Def(_, def_id) => tcx.generics_of(def_id),
- Res::Err => return tcx.types.err,
res => {
tcx.sess.delay_span_bug(
DUMMY_SP,
- &format!("unexpected const parent path def {:?}", res,),
+ &format!(
+ "unexpected const parent path def, parent: {:?}, def: {:?}",
+ parent_node, res
+ ),
);
return tcx.types.err;
}
.map(|param| tcx.type_of(param.def_id))
// This is no generic parameter associated with the arg. This is
// probably from an extra arg where one is not needed.
- .unwrap_or(tcx.types.err)
+ .unwrap_or_else(|| {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "missing generic parameter for `AnonConst`, parent {:?}",
+ parent_node
+ ),
+ );
+ tcx.types.err
+ })
} else {
tcx.sess.delay_span_bug(
DUMMY_SP,
/// fn main() {
/// match get_super_error() {
/// Err(e) => {
- /// println!("Error: {}", e.description());
+ /// println!("Error: {}", e);
/// println!("Caused by: {}", e.source().unwrap());
/// }
/// _ => println!("No error"),
/// how many bytes were read.
///
/// This function does not provide any guarantees about whether it blocks
- /// waiting for data, but if an object needs to block for a read but cannot
+ /// waiting for data, but if an object needs to block for a read and cannot,
/// it will typically signal this via an [`Err`] return value.
///
/// If the return value of this method is [`Ok(n)`], then it must be
use crate::fmt;
use crate::io::lazy::Lazy;
use crate::io::{self, BufReader, Initializer, IoSlice, IoSliceMut, LineWriter};
-use crate::sync::{Arc, Mutex, MutexGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, Once};
use crate::sys::stdio;
use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use crate::thread::LocalKey;
Ok(stdout) => Maybe::Real(stdout),
_ => Maybe::Fake,
};
- Arc::new(ReentrantMutex::new(RefCell::new(LineWriter::new(stdout))))
+ unsafe {
+ let ret = Arc::new(ReentrantMutex::new(RefCell::new(LineWriter::new(stdout))));
+ ret.init();
+ return ret;
+ }
}
}
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> StdoutLock<'_> {
- StdoutLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+ StdoutLock { inner: self.inner.lock() }
}
}
/// an error.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
- inner: Arc<ReentrantMutex<RefCell<Maybe<StderrRaw>>>>,
+ inner: &'static ReentrantMutex<RefCell<Maybe<StderrRaw>>>,
}
/// A locked reference to the `Stderr` handle.
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
- static INSTANCE: Lazy<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> = Lazy::new();
- return Stderr {
- inner: unsafe { INSTANCE.get(stderr_init).expect("cannot access stderr during shutdown") },
- };
-
- fn stderr_init() -> Arc<ReentrantMutex<RefCell<Maybe<StderrRaw>>>> {
- // This must not reentrantly access `INSTANCE`
- let stderr = match stderr_raw() {
- Ok(stderr) => Maybe::Real(stderr),
- _ => Maybe::Fake,
- };
- Arc::new(ReentrantMutex::new(RefCell::new(stderr)))
- }
+ // Note that unlike `stdout()` we don't use `Lazy` here which registers a
+ // destructor. Stderr is not buffered nor does the `stderr_raw` type consume
+ // any owned resources, so there's no need to run any destructors at some
+ // point in the future.
+ //
+ // This has the added benefit of allowing `stderr` to be usable during
+ // process shutdown as well!
+ static INSTANCE: ReentrantMutex<RefCell<Maybe<StderrRaw>>> =
+ unsafe { ReentrantMutex::new(RefCell::new(Maybe::Fake)) };
+
+ // When accessing stderr we need one-time initialization of the reentrant
+ // mutex, followed by one-time detection of whether we actually have a
+ // stderr handle or not. Afterwards we can just always use the now-filled-in
+ // `INSTANCE` value.
+ static INIT: Once = Once::new();
+ INIT.call_once(|| unsafe {
+ INSTANCE.init();
+ if let Ok(stderr) = stderr_raw() {
+ *INSTANCE.lock().borrow_mut() = Maybe::Real(stderr);
+ }
+ });
+ return Stderr { inner: &INSTANCE };
}
impl Stderr {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn lock(&self) -> StderrLock<'_> {
- StderrLock { inner: self.inner.lock().unwrap_or_else(|e| e.into_inner()) }
+ StderrLock { inner: self.inner.lock() }
}
}
// s has been moved into the tsa call
}
- // FIXME: figure out why this fails on openbsd and fix it
#[test]
- #[cfg(not(any(windows, target_os = "openbsd")))]
- fn to_socket_addr_str_bad() {
- assert!(tsa("1200::AB00:1234::2552:7777:1313:34300").is_err());
+ fn bind_udp_socket_bad() {
+ // rust-lang/rust#53957: This is a regression test for a parsing problem
+ // discovered as part of issue rust-lang/rust#23076, where we were
+ // incorrectly parsing invalid input and then that would result in a
+ // successful `UdpSocket` binding when we would expect failure.
+ //
+ // At one time, this test was written as a call to `tsa` with
+ // INPUT_23076. However, that structure yields an unreliable test,
+ // because it ends up passing junk input to the DNS server, and some DNS
+ // servers will respond with `Ok` to such input, with the ip address of
+ // the DNS server itself.
+ //
+ // This form of the test is more robust: even when the DNS server
+ // returns its own address, it is still an error to bind a UDP socket to
+ // a non-local address, and so we still get an error here in that case.
+
+ const INPUT_23076: &'static str = "1200::AB00:1234::2552:7777:1313:34300";
+
+ assert!(crate::net::UdpSocket::bind(INPUT_23076).is_err())
}
#[test]
}
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex {
lock: UnsafeCell::new(MaybeUninit::uninit()),
recursion: UnsafeCell::new(MaybeUninit::uninit()),
}
}
- pub unsafe fn init(&mut self) {
- self.lock = UnsafeCell::new(MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0)));
- self.recursion = UnsafeCell::new(MaybeUninit::new(0));
+ pub unsafe fn init(&self) {
+ *self.lock.get() = MaybeUninit::new(AtomicU32::new(abi::LOCK_UNLOCKED.0));
+ *self.recursion.get() = MaybeUninit::new(0);
}
pub unsafe fn try_lock(&self) -> bool {
}
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: ptr::null() }
}
#[inline]
- pub unsafe fn init(&mut self) {
- let _ = abi::recmutex_init(&mut self.inner as *mut *const c_void);
+ pub unsafe fn init(&self) {
+ let _ = abi::recmutex_init(&self.inner as *const *const c_void as *mut _);
}
#[inline]
}
#[inline]
- pub unsafe fn init(&mut self) {}
+ pub unsafe fn init(&self) {}
#[inline]
pub unsafe fn lock(&self) {
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
- pub unsafe fn init(&mut self) {
+ pub unsafe fn init(&self) {
let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
let result = libc::pthread_mutexattr_init(attr.as_mut_ptr());
debug_assert_eq!(result, 0);
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
}
- pub unsafe fn init(&mut self) {
+ pub unsafe fn init(&self) {
let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
let result = libc::pthread_mutexattr_init(attr.as_mut_ptr());
debug_assert_eq!(result, 0);
pub struct ReentrantMutex {}
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex {}
}
- pub unsafe fn init(&mut self) {}
+ pub unsafe fn init(&self) {}
pub unsafe fn lock(&self) {}
// released when this recursion counter reaches 0.
impl ReentrantMutex {
- pub unsafe fn uninitialized() -> ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
}
- pub unsafe fn init(&mut self) {
+ pub unsafe fn init(&self) {
// nothing to do...
}
0 => {}
n => return n as *mut _,
}
- let mut re = box ReentrantMutex::uninitialized();
+ let re = box ReentrantMutex::uninitialized();
re.init();
let re = Box::into_raw(re);
match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
unsafe impl Sync for ReentrantMutex {}
impl ReentrantMutex {
- pub fn uninitialized() -> ReentrantMutex {
+ pub const fn uninitialized() -> ReentrantMutex {
ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
}
- pub unsafe fn init(&mut self) {
+ pub unsafe fn init(&self) {
c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr());
}
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sys::mutex as sys;
-use crate::sys_common::poison::{self, LockResult, TryLockError, TryLockResult};
/// A re-entrant mutual exclusion
///
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
pub struct ReentrantMutex<T> {
- inner: Box<sys::ReentrantMutex>,
- poison: poison::Flag,
+ inner: sys::ReentrantMutex,
data: T,
}
// funny underscores due to how Deref currently works (it disregards field
// privacy).
__lock: &'a ReentrantMutex<T>,
- __poison: poison::Guard,
}
impl<T> !marker::Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
- pub fn new(t: T) -> ReentrantMutex<T> {
- unsafe {
- let mut mutex = ReentrantMutex {
- inner: box sys::ReentrantMutex::uninitialized(),
- poison: poison::Flag::new(),
- data: t,
- };
- mutex.inner.init();
- mutex
- }
+ ///
+    /// # Safety
+ ///
+ /// This function is unsafe because it is required that `init` is called
+ /// once this mutex is in its final resting place, and only then are the
+ /// lock/unlock methods safe.
+ pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
+ ReentrantMutex { inner: sys::ReentrantMutex::uninitialized(), data: t }
+ }
+
+ /// Initializes this mutex so it's ready for use.
+ ///
+    /// # Safety
+ ///
+    /// Unsafe to call more than once; must be called only after this mutex
+    /// will no longer move in memory.
+ pub unsafe fn init(&self) {
+ self.inner.init();
}
/// Acquires a mutex, blocking the current thread until it is able to do so.
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn lock(&self) -> LockResult<ReentrantMutexGuard<'_, T>> {
+ pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
unsafe { self.inner.lock() }
ReentrantMutexGuard::new(&self)
}
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(&self) -> TryLockResult<ReentrantMutexGuard<'_, T>> {
- if unsafe { self.inner.try_lock() } {
- Ok(ReentrantMutexGuard::new(&self)?)
- } else {
- Err(TryLockError::WouldBlock)
- }
+ pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
+ if unsafe { self.inner.try_lock() } { Some(ReentrantMutexGuard::new(&self)) } else { None }
}
}
impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.try_lock() {
- Ok(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
- Err(TryLockError::Poisoned(err)) => {
- f.debug_struct("ReentrantMutex").field("data", &**err.get_ref()).finish()
- }
- Err(TryLockError::WouldBlock) => {
+ Some(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
+ None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
}
impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
- fn new(lock: &'mutex ReentrantMutex<T>) -> LockResult<ReentrantMutexGuard<'mutex, T>> {
- poison::map_result(lock.poison.borrow(), |guard| ReentrantMutexGuard {
- __lock: lock,
- __poison: guard,
- })
+ fn new(lock: &'mutex ReentrantMutex<T>) -> ReentrantMutexGuard<'mutex, T> {
+ ReentrantMutexGuard { __lock: lock }
}
}
#[inline]
fn drop(&mut self) {
unsafe {
- self.__lock.poison.done(&self.__poison);
self.__lock.inner.unlock();
}
}
#[test]
fn smoke() {
- let m = ReentrantMutex::new(());
+ let m = unsafe {
+ let m = ReentrantMutex::new(());
+ m.init();
+ m
+ };
{
- let a = m.lock().unwrap();
+ let a = m.lock();
{
- let b = m.lock().unwrap();
+ let b = m.lock();
{
- let c = m.lock().unwrap();
+ let c = m.lock();
assert_eq!(*c, ());
}
assert_eq!(*b, ());
#[test]
fn is_mutex() {
- let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+ let m = unsafe {
+ let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+ m.init();
+ m
+ };
let m2 = m.clone();
- let lock = m.lock().unwrap();
+ let lock = m.lock();
let child = thread::spawn(move || {
- let lock = m2.lock().unwrap();
+ let lock = m2.lock();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
- let lock = m.lock().unwrap();
+ let lock = m.lock();
*lock.borrow_mut() += i;
}
drop(lock);
#[test]
fn trylock_works() {
- let m = Arc::new(ReentrantMutex::new(()));
+ let m = unsafe {
+ let m = Arc::new(ReentrantMutex::new(()));
+ m.init();
+ m
+ };
let m2 = m.clone();
- let _lock = m.try_lock().unwrap();
- let _lock2 = m.try_lock().unwrap();
+ let _lock = m.try_lock();
+ let _lock2 = m.try_lock();
thread::spawn(move || {
let lock = m2.try_lock();
- assert!(lock.is_err());
+ assert!(lock.is_none());
})
.join()
.unwrap();
- let _lock3 = m.try_lock().unwrap();
+ let _lock3 = m.try_lock();
}
pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
*self.0.borrow_mut() = 42;
}
}
-
- #[test]
- fn poison_works() {
- let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
- let mc = m.clone();
- let result = thread::spawn(move || {
- let lock = mc.lock().unwrap();
- *lock.borrow_mut() = 1;
- let lock2 = mc.lock().unwrap();
- *lock.borrow_mut() = 2;
- let _answer = Answer(lock2);
- panic!("What the answer to my lifetimes dilemma is?");
- })
- .join();
- assert!(result.is_err());
- let r = m.lock().err().unwrap().into_inner();
- assert_eq!(*r.borrow(), 42);
- }
}
// gdb-command: print empty_btree_map
// gdb-check:$4 = BTreeMap<i32, u32>(len: 0)
+// gdb-command: print nasty_btree_map
+// gdb-check:$5 = BTreeMap<i32, pretty_std_collections::MyLeafNode>(len: 1) = {[1] = pretty_std_collections::MyLeafNode (11)}
+
// gdb-command: print vec_deque
-// gdb-check:$5 = VecDeque<i32>(len: 3, cap: 8) = {5, 3, 7}
+// gdb-check:$6 = VecDeque<i32>(len: 3, cap: 8) = {5, 3, 7}
// gdb-command: print vec_deque2
-// gdb-check:$6 = VecDeque<i32>(len: 7, cap: 8) = {2, 3, 4, 5, 6, 7, 8}
+// gdb-check:$7 = VecDeque<i32>(len: 7, cap: 8) = {2, 3, 4, 5, 6, 7, 8}
#![allow(unused_variables)]
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::VecDeque;
+struct MyLeafNode(i32); // helps to ensure we don't blindly replace substring "LeafNode"
+
fn main() {
// BTreeSet
let mut btree_set = BTreeSet::new();
let mut empty_btree_map: BTreeMap<i32, u32> = BTreeMap::new();
+ let mut nasty_btree_map: BTreeMap<i32, MyLeafNode> = BTreeMap::new();
+ nasty_btree_map.insert(1, MyLeafNode(11));
+
// VecDeque
let mut vec_deque = VecDeque::new();
vec_deque.push_back(5);
--- /dev/null
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+struct Bad<const N: usize, T> { //~ ERROR type parameters must be declared prior
+ arr: [u8; { N }],
+ another: T,
+}
+
+fn main() { }
--- /dev/null
+error: type parameters must be declared prior to const parameters
+ --> $DIR/argument_order.rs:4:28
+ |
+LL | struct Bad<const N: usize, T> {
+ | -----------------^- help: reorder the parameters: lifetimes, then types, then consts: `<T, const N: usize>`
+
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/argument_order.rs:1:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+
+error: aborting due to previous error
+
--- /dev/null
+// check-pass
+
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+pub struct Tuple;
+
+pub trait Trait<const I: usize> {
+ type Input: From<<Self as Trait<I>>::Input>;
+}
+
+fn main() {}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/issue-66906.rs:3:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+
--- /dev/null
+// check-pass
+
+#![feature(const_generics)]
+//~^ WARN the feature `const_generics` is incomplete and may cause the compiler to crash
+
+pub trait Trait<const N: usize>: From<<Self as Trait<N>>::Item> {
+ type Item;
+}
+
+fn main() {}
--- /dev/null
+warning: the feature `const_generics` is incomplete and may cause the compiler to crash
+ --> $DIR/issue-70167.rs:3:12
+ |
+LL | #![feature(const_generics)]
+ | ^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+
--- /dev/null
+// run-pass
+// Test a ZST enum whose discriminant is ~0i128. This caused an ICE when casting to an i32.
+
+#[derive(Copy, Clone)]
+enum Nums {
+ NegOne = -1,
+}
+
+const NEG_ONE_I8: i8 = Nums::NegOne as i8;
+const NEG_ONE_I16: i16 = Nums::NegOne as i16;
+const NEG_ONE_I32: i32 = Nums::NegOne as i32;
+const NEG_ONE_I64: i64 = Nums::NegOne as i64;
+const NEG_ONE_I128: i128 = Nums::NegOne as i128;
+
+#[inline(never)]
+fn identity<T>(t: T) -> T { t }
+
+fn test_as_arg(n: Nums) {
+ assert_eq!(-1i8, n as i8);
+ assert_eq!(-1i16, n as i16);
+ assert_eq!(-1i32, n as i32);
+ assert_eq!(-1i64, n as i64);
+ assert_eq!(-1i128, n as i128);
+}
+
+fn main() {
+ let kind = Nums::NegOne;
+ assert_eq!(-1i8, kind as i8);
+ assert_eq!(-1i16, kind as i16);
+ assert_eq!(-1i32, kind as i32);
+ assert_eq!(-1i64, kind as i64);
+ assert_eq!(-1i128, kind as i128);
+
+ assert_eq!(-1i8, identity(kind) as i8);
+ assert_eq!(-1i16, identity(kind) as i16);
+ assert_eq!(-1i32, identity(kind) as i32);
+ assert_eq!(-1i64, identity(kind) as i64);
+ assert_eq!(-1i128, identity(kind) as i128);
+
+ test_as_arg(Nums::NegOne);
+
+ assert_eq!(-1i8, NEG_ONE_I8);
+ assert_eq!(-1i16, NEG_ONE_I16);
+ assert_eq!(-1i32, NEG_ONE_I32);
+ assert_eq!(-1i64, NEG_ONE_I64);
+ assert_eq!(-1i128, NEG_ONE_I128);
+}
--- /dev/null
+// run-pass
+// ignore-emscripten no processes
+
+use std::cell::RefCell;
+use std::env;
+use std::process::Command;
+
+fn main() {
+ let name = "YOU_ARE_THE_TEST";
+ if env::var(name).is_ok() {
+ std::thread::spawn(|| {
+ TLS.with(|f| f.borrow().ensure());
+ })
+ .join()
+ .unwrap();
+ } else {
+ let me = env::current_exe().unwrap();
+ let output = Command::new(&me).env(name, "1").output().unwrap();
+ println!("{:?}", output);
+ assert!(output.status.success());
+ let stderr = String::from_utf8(output.stderr).unwrap();
+ assert!(stderr.contains("hello new\n"));
+ assert!(stderr.contains("hello drop\n"));
+ }
+}
+
+struct Stuff {
+ _x: usize,
+}
+
+impl Stuff {
+ fn new() -> Self {
+ eprintln!("hello new");
+ Self { _x: 0 }
+ }
+
+ fn ensure(&self) {}
+}
+
+impl Drop for Stuff {
+ fn drop(&mut self) {
+ eprintln!("hello drop");
+ }
+}
+
+thread_local! {
+ static TLS: RefCell<Stuff> = RefCell::new(Stuff::new());
+}
--- /dev/null
+// compile-flags: --edition=2018
+// run-pass
+
+macro_rules! regex {
+ //~^ WARN unused macro definition
+ () => {};
+}
+
+#[allow(dead_code)]
+use regex;
+//~^ WARN unused import
+
+fn main() {}
--- /dev/null
+warning: unused macro definition
+ --> $DIR/issue-70041.rs:4:1
+ |
+LL | / macro_rules! regex {
+LL | |
+LL | | () => {};
+LL | | }
+ | |_^
+ |
+ = note: `#[warn(unused_macros)]` on by default
+
+warning: unused import: `regex`
+ --> $DIR/issue-70041.rs:10:5
+ |
+LL | use regex;
+ | ^^^^^
+ |
+ = note: `#[warn(unused_imports)]` on by default
+
--- /dev/null
+// normalize-stderr-test "pref: Align \{\n *pow2: [1-3],\n *\}" -> "pref: $$PREF_ALIGN"
+#![feature(never_type, rustc_attrs)]
+#![crate_type = "lib"]
+
+#[rustc_layout(debug)]
+enum E { Foo, Bar(!, i32, i32) } //~ ERROR: layout debugging
+
+#[rustc_layout(debug)]
+struct S { f1: i32, f2: (), f3: i32 } //~ ERROR: layout debugging
+
+#[rustc_layout(debug)]
+union U { f1: (i32, i32), f3: i32 } //~ ERROR: layout debugging
+
+#[rustc_layout(debug)]
+type Test = Result<i32, i32>; //~ ERROR: layout debugging
--- /dev/null
+error: layout debugging: LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 0,
+ },
+ ],
+ memory_index: [
+ 0,
+ ],
+ },
+ variants: Multiple {
+ discr: Scalar {
+ value: Int(
+ I32,
+ false,
+ ),
+ valid_range: 0..=0,
+ },
+ discr_kind: Tag,
+ discr_index: 0,
+ variants: [
+ LayoutDetails {
+ fields: Arbitrary {
+ offsets: [],
+ memory_index: [],
+ },
+ variants: Single {
+ index: 0,
+ },
+ abi: Aggregate {
+ sized: true,
+ },
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 0,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 4,
+ },
+ },
+ LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 4,
+ },
+ Size {
+ raw: 4,
+ },
+ Size {
+ raw: 8,
+ },
+ ],
+ memory_index: [
+ 0,
+ 1,
+ 2,
+ ],
+ },
+ variants: Single {
+ index: 1,
+ },
+ abi: Uninhabited,
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 12,
+ },
+ },
+ ],
+ },
+ abi: Aggregate {
+ sized: true,
+ },
+ largest_niche: Some(
+ Niche {
+ offset: Size {
+ raw: 0,
+ },
+ scalar: Scalar {
+ value: Int(
+ I32,
+ false,
+ ),
+ valid_range: 0..=0,
+ },
+ },
+ ),
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 12,
+ },
+}
+ --> $DIR/debug.rs:6:1
+ |
+LL | enum E { Foo, Bar(!, i32, i32) }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: layout debugging: LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 0,
+ },
+ Size {
+ raw: 0,
+ },
+ Size {
+ raw: 4,
+ },
+ ],
+ memory_index: [
+ 1,
+ 0,
+ 2,
+ ],
+ },
+ variants: Single {
+ index: 0,
+ },
+ abi: ScalarPair(
+ Scalar {
+ value: Int(
+ I32,
+ true,
+ ),
+ valid_range: 0..=4294967295,
+ },
+ Scalar {
+ value: Int(
+ I32,
+ true,
+ ),
+ valid_range: 0..=4294967295,
+ },
+ ),
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 8,
+ },
+}
+ --> $DIR/debug.rs:9:1
+ |
+LL | struct S { f1: i32, f2: (), f3: i32 }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: layout debugging: LayoutDetails {
+ fields: Union(
+ 2,
+ ),
+ variants: Single {
+ index: 0,
+ },
+ abi: Aggregate {
+ sized: true,
+ },
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 8,
+ },
+}
+ --> $DIR/debug.rs:12:1
+ |
+LL | union U { f1: (i32, i32), f3: i32 }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: layout debugging: LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 0,
+ },
+ ],
+ memory_index: [
+ 0,
+ ],
+ },
+ variants: Multiple {
+ discr: Scalar {
+ value: Int(
+ I32,
+ false,
+ ),
+ valid_range: 0..=1,
+ },
+ discr_kind: Tag,
+ discr_index: 0,
+ variants: [
+ LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 4,
+ },
+ ],
+ memory_index: [
+ 0,
+ ],
+ },
+ variants: Single {
+ index: 0,
+ },
+ abi: Aggregate {
+ sized: true,
+ },
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 8,
+ },
+ },
+ LayoutDetails {
+ fields: Arbitrary {
+ offsets: [
+ Size {
+ raw: 4,
+ },
+ ],
+ memory_index: [
+ 0,
+ ],
+ },
+ variants: Single {
+ index: 1,
+ },
+ abi: Aggregate {
+ sized: true,
+ },
+ largest_niche: None,
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 8,
+ },
+ },
+ ],
+ },
+ abi: ScalarPair(
+ Scalar {
+ value: Int(
+ I32,
+ false,
+ ),
+ valid_range: 0..=1,
+ },
+ Scalar {
+ value: Int(
+ I32,
+ true,
+ ),
+ valid_range: 0..=4294967295,
+ },
+ ),
+ largest_niche: Some(
+ Niche {
+ offset: Size {
+ raw: 0,
+ },
+ scalar: Scalar {
+ value: Int(
+ I32,
+ false,
+ ),
+ valid_range: 0..=1,
+ },
+ },
+ ),
+ align: AbiAndPrefAlign {
+ abi: Align {
+ pow2: 2,
+ },
+ pref: $PREF_ALIGN,
+ },
+ size: Size {
+ raw: 8,
+ },
+}
+ --> $DIR/debug.rs:15:1
+ |
+LL | type Test = Result<i32, i32>;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 4 previous errors
+
pat_macro!(A{a:a, b:0, c:_, ..});
};
($a:pat) => {
- $a
+ $a //~ ERROR expected expression
};
}
= note: expanding `my_recursive_macro! { }`
= note: to `my_recursive_macro ! () ;`
-error: aborting due to 2 previous errors
+error: expected expression, found `A { a: a, b: 0, c: _, .. }`
+ --> $DIR/trace_faulty_macros.rs:16:9
+ |
+LL | $a
+ | ^^ expected expression
+...
+LL | let a = pat_macro!();
+ | ------------ in this macro invocation
+ |
+ = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error: aborting due to 3 previous errors
error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0224`.
error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0224`.
--- /dev/null
+fn main() {
+ for<'a> |x: &'a u8| *x + 1;
+ //~^ ERROR cannot introduce explicit parameters for a closure
+}
+
+enum Foo { Bar }
+fn foo(x: impl Iterator<Item = Foo>) {
+ for <Foo>::Bar in x {}
+ //~^ ERROR expected one of `move`, `static`, `|`
+}
--- /dev/null
+error: cannot introduce explicit parameters for a closure
+ --> $DIR/recover-quantified-closure.rs:2:5
+ |
+LL | for<'a> |x: &'a u8| *x + 1;
+ | ^^^^^^^ ------------------ the parameters are attached to this closure
+ | |
+ | help: remove the parameters
+
+error: expected one of `move`, `static`, `|`, or `||`, found `::`
+ --> $DIR/recover-quantified-closure.rs:8:14
+ |
+LL | for <Foo>::Bar in x {}
+ | ^^ expected one of `move`, `static`, `|`, or `||`
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+// Out-of-line module is found on the filesystem if passed through a proc macro (issue #58818).
+
+// check-pass
+// aux-build:test-macros.rs
+
+#[macro_use]
+extern crate test_macros;
+
+mod outer {
+ identity! { mod inner; }
+}
+
+fn main() {}
error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0224`.
error: aborting due to 2 previous errors
-For more information about this error, try `rustc --explain E0038`.
+Some errors have detailed explanations: E0038, E0224.
+For more information about an error, try `rustc --explain E0038`.
error: aborting due to previous error
+For more information about this error, try `rustc --explain E0224`.
error: aborting due to 5 previous errors
-Some errors have detailed explanations: E0107, E0747.
+Some errors have detailed explanations: E0107, E0224, E0747.
For more information about an error, try `rustc --explain E0107`.
error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0224`.
-Subproject commit 23549a8c362a403026432f65a6cb398cb10d44b7
+Subproject commit d8e6e4cfcd83d555bd7717ea24224b777ed75773