[[package]]
name = "odht"
-version = "0.3.0"
+version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2504d29fda40b3f2f9ef525392435ab660e407c188196cb664b116ebcca0142"
+checksum = "5a518809ac14b25b569624d0268eba1e88498f71615893dca57982bed7621abb"
dependencies = [
"cfg-if 1.0.0",
]
"itertools 0.9.0",
"minifier",
"pulldown-cmark 0.8.0",
+ "rayon",
"regex",
- "rustc-rayon",
"rustdoc-json-types",
"serde",
"serde_json",
for attr in &data.attrs {
match attr.style {
crate::AttrStyle::Outer => {
- assert!(
- inner_attrs.len() == 0,
- "Found outer attribute {:?} after inner attrs {:?}",
- attr,
- inner_attrs
- );
outer_attrs.push(attr);
}
crate::AttrStyle::Inner => {
impl<'a, 'hir> LoweringContext<'a, 'hir> {
crate fn lower_inline_asm(&mut self, sp: Span, asm: &InlineAsm) -> &'hir hir::InlineAsm<'hir> {
- // Rustdoc needs to support asm! from foriegn architectures: don't try
- // lowering the register contraints in this case.
+ // Rustdoc needs to support asm! from foreign architectures: don't try
+ // lowering the register constraints in this case.
let asm_arch = if self.sess.opts.actually_rustdoc { None } else { self.sess.asm_arch };
if asm_arch.is_none() && !self.sess.opts.actually_rustdoc {
struct_span_err!(self.sess, sp, E0472, "inline assembly is unsupported on this target")
// means that we disallow passing a value in/out of the asm and
// require that the operand name an explicit register, not a
// register class.
- if reg_class.is_clobber_only(asm_arch.unwrap())
- && !(op.is_clobber() && matches!(reg, asm::InlineAsmRegOrRegClass::Reg(_)))
- {
+ if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() {
let msg = format!(
"register class `{}` can only be used as a clobber, \
not as an input or output",
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
use rustc_hir::definitions;
use rustc_span::source_map::SourceMap;
use rustc_span::{Span, DUMMY_SP};
-use std::iter::repeat;
use tracing::debug;
/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
pub(super) struct NodeCollector<'a, 'hir> {
/// Source map
source_map: &'a SourceMap,
- bodies: &'a IndexVec<ItemLocalId, Option<&'hir Body<'hir>>>,
+ bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>,
/// Outputs
nodes: IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>,
definitions: &'a definitions::Definitions,
}
-fn insert_vec_map<K: Idx, V: Clone>(map: &mut IndexVec<K, Option<V>>, k: K, v: V) {
- let i = k.index();
- let len = map.len();
- if i >= len {
- map.extend(repeat(None).take(i - len + 1));
- }
- debug_assert!(map[k].is_none());
- map[k] = Some(v);
-}
-
pub(super) fn index_hir<'hir>(
sess: &Session,
definitions: &definitions::Definitions,
item: hir::OwnerNode<'hir>,
- bodies: &IndexVec<ItemLocalId, Option<&'hir Body<'hir>>>,
+ bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) {
let mut nodes = IndexVec::new();
// This node's parent should never be accessed: the owner's parent is computed by the
}
}
- insert_vec_map(
- &mut self.nodes,
- hir_id.local_id,
- ParentedNode { parent: self.parent_node, node: node },
- );
+ self.nodes.insert(hir_id.local_id, ParentedNode { parent: self.parent_node, node: node });
}
fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_node_id: HirId, f: F) {
fn visit_nested_body(&mut self, id: BodyId) {
debug_assert_eq!(id.hir_id.owner, self.owner);
- let body = self.bodies[id.hir_id.local_id].unwrap();
+ let body = self.bodies[&id.hir_id.local_id];
self.visit_body(body);
}
let body = hir::Body { generator_kind: self.generator_kind, params, value };
let id = body.id();
debug_assert_eq!(id.hir_id.owner, self.current_hir_id_owner);
- self.bodies.ensure_contains_elem(id.hir_id.local_id, || None);
- self.bodies[id.hir_id.local_id] = Some(self.arena.alloc(body));
+ self.bodies.push((id.hir_id.local_id, self.arena.alloc(body)));
id
}
#![feature(iter_zip)]
#![feature(never_type)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
use rustc_ast::token::{self, Token};
use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream, TokenTree};
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_errors::{struct_span_err, Applicability};
use rustc_span::{Span, DUMMY_SP};
use smallvec::SmallVec;
-use std::collections::BTreeMap;
use tracing::{debug, trace};
macro_rules! arena_vec {
/// The items being lowered are collected here.
owners: IndexVec<LocalDefId, Option<hir::OwnerInfo<'hir>>>,
/// Bodies inside the owner being lowered.
- bodies: IndexVec<hir::ItemLocalId, Option<&'hir hir::Body<'hir>>>,
+ bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>,
/// Attributes inside the owner being lowered.
- attrs: BTreeMap<hir::ItemLocalId, &'hir [Attribute]>,
+ attrs: SortedMap<hir::ItemLocalId, &'hir [Attribute]>,
generator_kind: Option<hir::GeneratorKind>,
nt_to_tokenstream,
arena,
owners,
- bodies: IndexVec::new(),
- attrs: BTreeMap::default(),
+ bodies: Vec::new(),
+ attrs: SortedMap::new(),
catch_scope: None,
loop_scope: None,
is_in_loop_condition: false,
fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> hir::OwnerInfo<'hir> {
let attrs = std::mem::take(&mut self.attrs);
- let bodies = std::mem::take(&mut self.bodies);
+ let mut bodies = std::mem::take(&mut self.bodies);
let local_node_ids = std::mem::take(&mut self.local_node_ids);
let trait_map = local_node_ids
.into_iter()
.collect();
#[cfg(debug_assertions)]
- for (&id, attrs) in attrs.iter() {
+ for (id, attrs) in attrs.iter() {
// Verify that we do not store empty slices in the map.
if attrs.is_empty() {
panic!("Stored empty attributes for {:?}", id);
}
}
+ bodies.sort_by_key(|(k, _)| *k);
+ let bodies = SortedMap::from_presorted_elements(bodies);
let (hash_including_bodies, hash_without_bodies) = self.hash_owner(node, &bodies);
let (nodes, parenting) =
index::index_hir(self.sess, self.resolver.definitions(), node, &bodies);
fn hash_owner(
&mut self,
node: hir::OwnerNode<'hir>,
- bodies: &IndexVec<hir::ItemLocalId, Option<&'hir hir::Body<'hir>>>,
+ bodies: &SortedMap<hir::ItemLocalId, &'hir hir::Body<'hir>>,
) -> (Fingerprint, Fingerprint) {
let mut hcx = self.resolver.create_stable_hashing_context();
let mut stable_hasher = StableHasher::new();
#![feature(iter_is_partitioned)]
#![feature(box_patterns)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
pub mod ast_validation;
pub mod feature_gate;
use std::fmt;
use std::ops::Index;
-crate struct BorrowSet<'tcx> {
+pub struct BorrowSet<'tcx> {
/// The fundamental map relating bitvector indexes to the borrows
/// in the MIR. Each borrow is also uniquely identified in the MIR
/// by the `Location` of the assignment statement in which it
/// appears on the right hand side. Thus the location is the map
/// key, and its position in the map corresponds to `BorrowIndex`.
- crate location_map: FxIndexMap<Location, BorrowData<'tcx>>,
+ pub location_map: FxIndexMap<Location, BorrowData<'tcx>>,
/// Locations which activate borrows.
/// NOTE: a given location may activate more than one borrow in the future
/// when more general two-phase borrow support is introduced, but for now we
/// only need to store one borrow index.
- crate activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
+ pub activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
/// Map from local to all the borrows on that local.
- crate local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
+ pub local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
crate locals_state_at_exit: LocalsStateAtExit,
}
/// Location where a two-phase borrow is activated, if a borrow
/// is in fact a two-phase borrow.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-crate enum TwoPhaseActivation {
+pub enum TwoPhaseActivation {
NotTwoPhase,
NotActivated,
ActivatedAt(Location),
}
#[derive(Debug, Clone)]
-crate struct BorrowData<'tcx> {
+pub struct BorrowData<'tcx> {
/// Location where the borrow reservation starts.
/// In many cases, this will be equal to the activation location but not always.
- crate reserve_location: Location,
+ pub reserve_location: Location,
/// Location where the borrow is activated.
- crate activation_location: TwoPhaseActivation,
+ pub activation_location: TwoPhaseActivation,
/// What kind of borrow this is
- crate kind: mir::BorrowKind,
+ pub kind: mir::BorrowKind,
/// The region for which this borrow is live
- crate region: RegionVid,
+ pub region: RegionVid,
/// Place from which we are borrowing
- crate borrowed_place: mir::Place<'tcx>,
+ pub borrowed_place: mir::Place<'tcx>,
/// Place to which the borrow was stored
- crate assigned_place: mir::Place<'tcx>,
+ pub assigned_place: mir::Place<'tcx>,
}
impl<'tcx> fmt::Display for BorrowData<'tcx> {
}
}
-crate enum LocalsStateAtExit {
+pub enum LocalsStateAtExit {
AllAreInvalidated,
SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> },
}
#![feature(trusted_step)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_middle;
use self::path_utils::*;
-mod borrow_set;
+pub mod borrow_set;
mod borrowck_errors;
mod constraint_generation;
mod constraints;
use rustc_span::{Span, DUMMY_SP};
use rustc_target::abi::VariantIdx;
use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::opaque_types::InferCtxtExt;
use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
use rustc_trait_selection::traits::query::type_op;
use rustc_trait_selection::traits::query::type_op::custom::CustomTypeOp;
#![feature(proc_macro_internals)]
#![feature(proc_macro_quote)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
extern crate proc_macro;
true
}
- fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
+ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span], _instance: Instance<'_>) {
let asm_arch = self.tcx.sess.asm_arch.unwrap();
let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
// TODO(antoyo)
}
+ fn type_metadata(&mut self, _function: RValue<'gcc>, _typeid: String) {
+ // Unsupported.
+ }
+
+ fn typeid_metadata(&mut self, _typeid: String) -> RValue<'gcc> {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
+
fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
// TODO(antoyo)
}
+ fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!();
}
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::ty::layout::TyAndLayout;
-use rustc_middle::{bug, span_bug};
+use rustc_middle::{bug, span_bug, ty::Instance};
use rustc_span::{Pos, Span, Symbol};
use rustc_target::abi::*;
use rustc_target::asm::*;
operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
line_spans: &[Span],
+ instance: Instance<'_>,
) {
let asm_arch = self.tcx.sess.asm_arch.unwrap();
let is_target_supported = |reg_class: InlineAsmRegClass| {
for &(_, feature) in reg_class.supported_types(asm_arch) {
if let Some(feature) = feature {
- if self.tcx.sess.target_features.contains(&Symbol::intern(feature))
+ let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ let feature_name = Symbol::intern(feature);
+ if self.tcx.sess.target_features.contains(&feature_name)
+ || codegen_fn_attrs.target_features.contains(&feature_name)
{
return true;
}
}
}
+ fn type_metadata(&mut self, function: &'ll Value, typeid: String) {
+ let typeid_metadata = self.typeid_metadata(typeid);
+ let v = [self.const_usize(0), typeid_metadata];
+ unsafe {
+ llvm::LLVMGlobalSetMetadata(
+ function,
+ llvm::MD_type as c_uint,
+ llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+ self.cx.llcx,
+ v.as_ptr(),
+ v.len() as c_uint,
+ )),
+ )
+ }
+ }
+
+ fn typeid_metadata(&mut self, typeid: String) -> Self::Value {
+ unsafe {
+ llvm::LLVMMDStringInContext(
+ self.cx.llcx,
+ typeid.as_ptr() as *const c_char,
+ typeid.as_bytes().len() as c_uint,
+ )
+ }
+ }
+
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1);
}
+ if sess.is_sanitizer_cfi_enabled() {
+        // FIXME(rcvalle): Add support for non-canonical jump tables.
+ let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
+ // FIXME(rcvalle): Add it with Override behavior flag--LLVMRustAddModuleFlag adds it with
+ // Warning behavior flag. Add support for specifying the behavior flag to
+ // LLVMRustAddModuleFlag.
+ llvm::LLVMRustAddModuleFlag(llmod, canonical_jump_tables, 1);
+ }
+
// Control Flow Guard is currently only supported by the MSVC linker on Windows.
if sess.target.is_like_msvc {
match sess.opts.cg.control_flow_guard {
ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
}
+ ifn!("llvm.type.test", fn(i8p, self.type_metadata()) -> i1);
+
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
}
}
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
+ // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
+ // optimization pass replaces calls to this intrinsic with code to test type membership.
+ let i8p_ty = self.type_i8p();
+ let bitcast = self.bitcast(pointer, i8p_ty);
+ self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+ }
+
fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
self.call_intrinsic("llvm.va_start", &[va_list])
}
#![feature(iter_zip)]
#![feature(nll)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
use back::write::{create_informational_target_machine, create_target_machine};
MD_nontemporal = 9,
MD_mem_parallel_loop_access = 10,
MD_nonnull = 11,
+ MD_type = 19,
}
/// LLVMRustAsmDialect
pub fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
pub fn LLVMReplaceAllUsesWith(OldVal: &'a Value, NewVal: &'a Value);
pub fn LLVMSetMetadata(Val: &'a Value, KindID: c_uint, Node: &'a Value);
+ pub fn LLVMGlobalSetMetadata(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+ pub fn LLVMValueAsMetadata(Node: &'a Value) -> &Metadata;
// Operations on constants of any type
pub fn LLVMConstNull(Ty: &Type) -> &Value;
#![feature(nll)]
#![feature(associated_type_bounds)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
//! This crate contains codegen code that is used by all codegen backends (LLVM and others).
//! The backend-agnostic functions of this crate use functions defined in various traits that
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
+use rustc_symbol_mangling::typeid_for_fnabi;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;
self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
}
- let fn_ptr = match (llfn, instance) {
- (Some(llfn), _) => llfn,
- (None, Some(instance)) => bx.get_fn_addr(instance),
+ let (is_indirect_call, fn_ptr) = match (llfn, instance) {
+ (Some(llfn), _) => (true, llfn),
+ (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
_ => span_bug!(span, "no llfn for call"),
};
+ // For backends that support CFI using type membership (i.e., testing whether a given
+ // pointer is associated with a type identifier).
+ if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
+ // Emit type metadata and checks.
+ // FIXME(rcvalle): Add support for generalized identifiers.
+ // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
+ let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
+ let typeid_metadata = bx.typeid_metadata(typeid.clone());
+
+ // Test whether the function pointer is associated with the type identifier.
+ let cond = bx.type_test(fn_ptr, typeid_metadata);
+ let mut bx_pass = bx.build_sibling_block("type_test.pass");
+ let mut bx_fail = bx.build_sibling_block("type_test.fail");
+ bx.cond_br(cond, bx_pass.llbb(), bx_fail.llbb());
+
+ helper.do_call(
+ self,
+ &mut bx_pass,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ destination.as_ref().map(|&(_, target)| (ret_dest, target)),
+ cleanup,
+ );
+
+ bx_fail.abort();
+ bx_fail.unreachable();
+
+ return;
+ }
+
helper.do_call(
self,
&mut bx,
options: ast::InlineAsmOptions,
line_spans: &[Span],
destination: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
) {
let span = terminator.source_info.span;
})
.collect();
- bx.codegen_inline_asm(template, &operands, options, line_spans);
+ bx.codegen_inline_asm(template, &operands, options, line_spans, instance);
if let Some(target) = destination {
helper.funclet_br(self, &mut bx, target);
options,
line_spans,
destination,
+ self.instance,
);
}
}
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
+use rustc_symbol_mangling::typeid_for_fnabi;
use rustc_target::abi::call::{FnAbi, PassMode};
use std::iter;
for (bb, _) in traversal::reverse_postorder(&mir) {
fx.codegen_block(bb);
}
+
+ // For backends that support CFI using type membership (i.e., testing whether a given pointer
+ // is associated with a type identifier).
+ if cx.tcx().sess.is_sanitizer_cfi_enabled() {
+ let typeid = typeid_for_fnabi(cx.tcx(), fn_abi);
+ bx.type_metadata(llfn, typeid.clone());
+ }
}
/// Produces, for each argument, a `Value` pointing at the
operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
line_spans: &[Span],
+ instance: Instance<'_>,
);
}
fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
fn nonnull_metadata(&mut self, load: Self::Value);
+ fn type_metadata(&mut self, function: Self::Function, typeid: String);
+ fn typeid_metadata(&mut self, typeid: String) -> Self::Value;
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
fn store_with_flags(
///
/// Currently has any effect only when LLVM versions prior to 12.0 are used as the backend.
fn sideeffect(&mut self);
+ /// Trait method used to test whether a given pointer is associated with a type identifier.
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value;
/// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
/// Rust defined C-variadic functions.
fn va_start(&mut self, val: Self::Value) -> Self::Value;
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
- is_const_fn: bool,
) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
- // The list of functions we handle here must be in sync with
- // `is_lang_special_const_fn` in `transform/check_consts/mod.rs`.
+ // All `#[rustc_do_not_const_check]` functions should be hooked here.
let def_id = instance.def_id();
- if is_const_fn {
- if Some(def_id) == self.tcx.lang_items().const_eval_select() {
- // redirect to const_eval_select_ct
- if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
- return Ok(Some(
- ty::Instance::resolve(
- *self.tcx,
- ty::ParamEnv::reveal_all(),
- const_eval_select,
- instance.substs,
- )
- .unwrap()
- .unwrap(),
- ));
- }
+ if Some(def_id) == self.tcx.lang_items().const_eval_select() {
+ // redirect to const_eval_select_ct
+ if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
+ return Ok(Some(
+ ty::Instance::resolve(
+ *self.tcx,
+ ty::ParamEnv::reveal_all(),
+ const_eval_select,
+ instance.substs,
+ )
+ .unwrap()
+ .unwrap(),
+ ));
}
- return Ok(None);
- }
-
- if Some(def_id) == self.tcx.lang_items().panic_fn()
- || Some(def_id) == self.tcx.lang_items().panic_str()
- || Some(def_id) == self.tcx.lang_items().panic_display()
+ } else if Some(def_id) == self.tcx.lang_items().panic_display()
|| Some(def_id) == self.tcx.lang_items().begin_panic_fn()
{
// &str or &&str
// Only check non-glue functions
if let ty::InstanceDef::Item(def) = instance.def {
- let mut is_const_fn = true;
-
// Execution might have wandered off into other crates, so we cannot do a stability-
// sensitive check here. But we can at least rule out functions that are not const
// at all.
if !ecx.tcx.is_const_fn_raw(def.did) {
// allow calling functions marked with #[default_method_body_is_const].
if !ecx.tcx.has_attr(def.did, sym::default_method_body_is_const) {
- is_const_fn = false;
+ // We certainly do *not* want to actually call the fn
+ // though, so be sure we return here.
+ throw_unsup_format!("calling non-const function `{}`", instance)
}
}
- // Some functions we support even if they are non-const -- but avoid testing
- // that for const fn!
- // `const_eval_select` is a const fn because it must use const trait bounds.
- if let Some(new_instance) = ecx.hook_special_const_fn(instance, args, is_const_fn)? {
+ if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? {
// We call another const fn instead.
return Self::find_mir_or_eval_fn(ecx, new_instance, _abi, args, _ret, _unwind);
}
-
- if !is_const_fn {
- // We certainly do *not* want to actually call the fn
- // though, so be sure we return here.
- throw_unsup_format!("calling non-const function `{}`", instance)
- }
}
// This is a const fn. Call it.
Ok(Some(ecx.load_mir(instance.def, None)?))
#![feature(trusted_step)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
use rustc_middle::ty::{Binder, TraitPredicate, TraitRef};
-use rustc_mir_dataflow::impls::MaybeMutBorrowedLocals;
use rustc_mir_dataflow::{self, Analysis};
use rustc_span::{sym, Span, Symbol};
use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
use super::ops::{self, NonConstOp, Status};
use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
use super::resolver::FlowSensitiveAnalysis;
-use super::{is_lang_panic_fn, is_lang_special_const_fn, ConstCx, Qualif};
+use super::{ConstCx, Qualif};
use crate::const_eval::is_unstable_const_fn;
-// We are using `MaybeMutBorrowedLocals` as a proxy for whether an item may have been mutated
-// through a pointer prior to the given point. This is okay even though `MaybeMutBorrowedLocals`
-// kills locals upon `StorageDead` because a local will never be used after a `StorageDead`.
-type IndirectlyMutableResults<'mir, 'tcx> =
- rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, MaybeMutBorrowedLocals<'mir, 'tcx>>;
-
type QualifResults<'mir, 'tcx, Q> =
rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
- indirectly_mutable: Option<IndirectlyMutableResults<'mir, 'tcx>>,
}
impl Qualifs<'mir, 'tcx> {
- pub fn indirectly_mutable(
- &mut self,
- ccx: &'mir ConstCx<'mir, 'tcx>,
- local: Local,
- location: Location,
- ) -> bool {
- let indirectly_mutable = self.indirectly_mutable.get_or_insert_with(|| {
- let ConstCx { tcx, body, param_env, .. } = *ccx;
-
- // We can use `unsound_ignore_borrow_on_drop` here because custom drop impls are not
- // allowed in a const.
- //
- // FIXME(ecstaticmorse): Someday we want to allow custom drop impls. How do we do this
- // without breaking stable code?
- MaybeMutBorrowedLocals::mut_borrows_only(tcx, &body, param_env)
- .unsound_ignore_borrow_on_drop()
- .into_engine(tcx, &body)
- .pass_name("const_qualification")
- .iterate_to_fixpoint()
- .into_results_cursor(&body)
- });
-
- indirectly_mutable.seek_before_primary_effect(location);
- indirectly_mutable.get().contains(local)
- }
-
/// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
///
/// Only updates the cursor if absolutely necessary
});
needs_drop.seek_before_primary_effect(location);
- needs_drop.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+ needs_drop.get().contains(local)
}
/// Returns `true` if `local` is `NeedsNonConstDrop` at the given `Location`.
});
needs_non_const_drop.seek_before_primary_effect(location);
- needs_non_const_drop.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+ needs_non_const_drop.get().contains(local)
}
/// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
});
has_mut_interior.seek_before_primary_effect(location);
- has_mut_interior.get().contains(local) || self.indirectly_mutable(ccx, local, location)
+ has_mut_interior.get().contains(local)
}
fn in_return_place(
.into_results_cursor(&ccx.body);
cursor.seek_after_primary_effect(return_loc);
- cursor.contains(RETURN_PLACE)
+ cursor.get().contains(RETURN_PLACE)
}
};
}
// At this point, we are calling a function, `callee`, whose `DefId` is known...
- if is_lang_special_const_fn(tcx, callee) {
- // `begin_panic` and `panic_display` are generic functions that accept
- // types other than str. Check to enforce that only str can be used in
- // const-eval.
-
- // const-eval of the `begin_panic` fn assumes the argument is `&str`
- if Some(callee) == tcx.lang_items().begin_panic_fn() {
- match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
- ty::Ref(_, ty, _) if ty.is_str() => (),
- _ => self.check_op(ops::PanicNonStr),
- }
- }
- // const-eval of the `panic_display` fn assumes the argument is `&&str`
- if Some(callee) == tcx.lang_items().panic_display() {
- match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
- ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
- {}
- _ => self.check_op(ops::PanicNonStr),
- }
+ // `begin_panic` and `panic_display` are generic functions that accept
+ // types other than str. Check to enforce that only str can be used in
+ // const-eval.
+
+ // const-eval of the `begin_panic` fn assumes the argument is `&str`
+ if Some(callee) == tcx.lang_items().begin_panic_fn() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if ty.is_str() => return,
+ _ => self.check_op(ops::PanicNonStr),
}
+ }
- if is_lang_panic_fn(tcx, callee) {
- // run stability check on non-panic special const fns.
- return;
+ // const-eval of the `panic_display` fn assumes the argument is `&&str`
+ if Some(callee) == tcx.lang_items().panic_display() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
+ {
+ return;
+ }
+ _ => self.check_op(ops::PanicNonStr),
}
}
}
}
-/// Returns `true` if this `DefId` points to one of the official `panic` lang items.
-pub fn is_lang_panic_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
- Some(def_id) == tcx.lang_items().panic_fn()
- || Some(def_id) == tcx.lang_items().panic_str()
- || Some(def_id) == tcx.lang_items().panic_display()
- || Some(def_id) == tcx.lang_items().begin_panic_fn()
- || Some(def_id) == tcx.lang_items().panic_fmt()
-}
-
-/// Returns `true` if this `DefId` points to one of the lang items that will be handled differently
-/// in const_eval.
-pub fn is_lang_special_const_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
- // We can allow calls to these functions because `hook_special_const_fn` in
- // `const_eval/machine.rs` ensures the calls are handled specially.
- // Keep in sync with what that function handles!
- is_lang_panic_fn(tcx, def_id) || Some(def_id) == tcx.lang_items().const_eval_select()
-}
-
pub fn rustc_allow_const_fn_unstable(
tcx: TyCtxt<'tcx>,
def_id: DefId,
if Q::in_adt_inherently(cx, def, substs) {
return true;
}
+ if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
+ return true;
+ }
}
// Otherwise, proceed structurally...
use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::JoinSemiLattice;
+use rustc_span::DUMMY_SP;
+use std::fmt;
use std::marker::PhantomData;
use super::{qualifs, ConstCx, Qualif};
/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
/// `FlowSensitiveAnalysis`.
///
-/// This transfer does nothing when encountering an indirect assignment. Consumers should rely on
-/// the `MaybeMutBorrowedLocals` dataflow pass to see if a `Local` may have become qualified via
-/// an indirect assignment or function call.
+/// To account for indirect assignments, data flow conservatively assumes that a local becomes
+/// qualified immediately after it is borrowed or its address escapes. The borrow must allow for
+/// mutation, which includes shared borrows of places with interior mutability. The type of the
+/// borrowed place must contain the qualif.
struct TransferFunction<'a, 'mir, 'tcx, Q> {
ccx: &'a ConstCx<'mir, 'tcx>,
- qualifs_per_local: &'a mut BitSet<Local>,
-
+ state: &'a mut State,
_qualif: PhantomData<Q>,
}
where
Q: Qualif,
{
- fn new(ccx: &'a ConstCx<'mir, 'tcx>, qualifs_per_local: &'a mut BitSet<Local>) -> Self {
- TransferFunction { ccx, qualifs_per_local, _qualif: PhantomData }
+ fn new(ccx: &'a ConstCx<'mir, 'tcx>, state: &'a mut State) -> Self {
+ TransferFunction { ccx, state, _qualif: PhantomData }
}
fn initialize_state(&mut self) {
- self.qualifs_per_local.clear();
+ self.state.qualif.clear();
+ self.state.borrow.clear();
for arg in self.ccx.body.args_iter() {
let arg_ty = self.ccx.body.local_decls[arg].ty;
if Q::in_any_value_of_ty(self.ccx, arg_ty) {
- self.qualifs_per_local.insert(arg);
+ self.state.qualif.insert(arg);
}
}
}
- fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, value: bool) {
+ fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, mut value: bool) {
debug_assert!(!place.is_indirect());
+ if !value {
+ for (base, _elem) in place.iter_projections() {
+ let base_ty = base.ty(self.ccx.body, self.ccx.tcx);
+ if base_ty.ty.is_union() && Q::in_any_value_of_ty(self.ccx, base_ty.ty) {
+ value = true;
+ break;
+ }
+ }
+ }
+
match (value, place.as_ref()) {
(true, mir::PlaceRef { local, .. }) => {
- self.qualifs_per_local.insert(local);
+ self.state.qualif.insert(local);
}
// For now, we do not clear the qualif if a local is overwritten in full by
// with aggregates where we overwrite all fields with assignments, which would not
// get this feature.
(false, mir::PlaceRef { local: _, projection: &[] }) => {
- // self.qualifs_per_local.remove(*local);
+ // self.state.qualif.remove(*local);
}
_ => {}
self.assign_qualif_direct(&return_place, qualif);
}
}
+
+ fn address_of_allows_mutation(&self, mt: mir::Mutability, place: mir::Place<'tcx>) -> bool {
+ match mt {
+ mir::Mutability::Mut => true,
+ mir::Mutability::Not => self.shared_borrow_allows_mutation(place),
+ }
+ }
+
+ fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
+ match kind {
+ mir::BorrowKind::Mut { .. } => true,
+ mir::BorrowKind::Shared | mir::BorrowKind::Shallow | mir::BorrowKind::Unique => {
+ self.shared_borrow_allows_mutation(place)
+ }
+ }
+ }
+
+ fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
+ !place
+ .ty(self.ccx.body, self.ccx.tcx)
+ .ty
+ .is_freeze(self.ccx.tcx.at(DUMMY_SP), self.ccx.param_env)
+ }
}
impl<Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
// it no longer needs to be dropped.
if let mir::Operand::Move(place) = operand {
if let Some(local) = place.as_local() {
- self.qualifs_per_local.remove(local);
+ // For backward compatibility with the MaybeMutBorrowedLocals used in an earlier
+ // implementation we retain qualif if a local had been borrowed before. This might
+ // not be strictly necessary since the local is no longer initialized.
+ if !self.state.borrow.contains(local) {
+ self.state.qualif.remove(local);
+ }
}
}
}
rvalue: &mir::Rvalue<'tcx>,
location: Location,
) {
- let qualif = qualifs::in_rvalue::<Q, _>(
- self.ccx,
- &mut |l| self.qualifs_per_local.contains(l),
- rvalue,
- );
+ let qualif =
+ qualifs::in_rvalue::<Q, _>(self.ccx, &mut |l| self.state.qualif.contains(l), rvalue);
if !place.is_indirect() {
self.assign_qualif_direct(place, qualif);
}
self.super_assign(place, rvalue, location);
}
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match rvalue {
+ mir::Rvalue::AddressOf(mt, borrowed_place) => {
+ if !borrowed_place.is_indirect()
+ && self.address_of_allows_mutation(*mt, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Ref(_, kind, borrowed_place) => {
+ if !borrowed_place.is_indirect() && self.ref_allows_mutation(*kind, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Cast(..)
+ | mir::Rvalue::ShallowInitBox(..)
+ | mir::Rvalue::Use(..)
+ | mir::Rvalue::ThreadLocalRef(..)
+ | mir::Rvalue::Repeat(..)
+ | mir::Rvalue::Len(..)
+ | mir::Rvalue::BinaryOp(..)
+ | mir::Rvalue::CheckedBinaryOp(..)
+ | mir::Rvalue::NullaryOp(..)
+ | mir::Rvalue::UnaryOp(..)
+ | mir::Rvalue::Discriminant(..)
+ | mir::Rvalue::Aggregate(..) => {}
+ }
+ }
+
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
match statement.kind {
StatementKind::StorageDead(local) => {
- self.qualifs_per_local.remove(local);
+ self.state.qualif.remove(local);
+ self.state.borrow.remove(local);
}
_ => self.super_statement(statement, location),
}
if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
let qualif = qualifs::in_operand::<Q, _>(
self.ccx,
- &mut |l| self.qualifs_per_local.contains(l),
+ &mut |l| self.state.qualif.contains(l),
value,
);
}
}
+ // We ignore borrow on drop because custom drop impls are not allowed in consts.
+ // FIXME: Reconsider if accounting for borrows in drops is necessary for const drop.
+
// We need to assign qualifs to the dropped location before visiting the operand that
// replaces it since qualifs can be cleared on move.
self.super_terminator(terminator, location);
FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
}
- fn transfer_function(
- &self,
- state: &'a mut BitSet<Local>,
- ) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+ fn transfer_function(&self, state: &'a mut State) -> TransferFunction<'a, 'mir, 'tcx, Q> {
TransferFunction::<Q>::new(self.ccx, state)
}
}
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub(super) struct State {
+ /// Describes whether a local contains qualif.
+ pub qualif: BitSet<Local>,
+    /// Describes whether a local's address escaped and it might become qualified as a result of
+    /// an indirect mutation.
+ pub borrow: BitSet<Local>,
+}
+
+impl State {
+ #[inline]
+ pub(super) fn contains(&self, local: Local) -> bool {
+ self.qualif.contains(local)
+ }
+}
+
+impl<C> DebugWithContext<C> for State {
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_with(ctxt, f)?;
+ f.write_str(" borrow: ")?;
+ self.borrow.fmt_with(ctxt, f)?;
+ Ok(())
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self == old {
+ return Ok(());
+ }
+
+ if self.qualif != old.qualif {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_diff_with(&old.qualif, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ if self.borrow != old.borrow {
+ f.write_str("borrow: ")?;
+            self.borrow.fmt_diff_with(&old.borrow, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ Ok(())
+ }
+}
+
+impl JoinSemiLattice for State {
+ fn join(&mut self, other: &Self) -> bool {
+        self.qualif.join(&other.qualif) | self.borrow.join(&other.borrow)
+ }
+}
+
impl<Q> rustc_mir_dataflow::AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
where
Q: Qualif,
{
- type Domain = BitSet<Local>;
+ type Domain = State;
const NAME: &'static str = Q::ANALYSIS_NAME;
fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
- BitSet::new_empty(body.local_decls.len())
+ State {
+ qualif: BitSet::new_empty(body.local_decls.len()),
+ borrow: BitSet::new_empty(body.local_decls.len()),
+ }
}
fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
use std::cell::Cell;
use std::{cmp, iter, mem};
-use crate::transform::check_consts::{is_lang_special_const_fn, qualifs, ConstCx};
+use crate::transform::check_consts::{qualifs, ConstCx};
use crate::transform::MirPass;
/// A `MirPass` for promotion.
}
let is_const_fn = match *fn_ty.kind() {
- ty::FnDef(def_id, _) => {
- self.tcx.is_const_fn_raw(def_id) || is_lang_special_const_fn(self.tcx, def_id)
- }
+ ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id),
_ => false,
};
if !is_const_fn {
#![feature(thread_id_value)]
#![allow(rustc::default_hash_types)]
#![deny(unaligned_references)]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
+use crate::stable_hasher::{HashStable, StableHasher};
use std::borrow::Borrow;
use std::cmp::Ordering;
use std::iter::FromIterator;
/// stores data in a more compact way. It also supports accessing contiguous
/// ranges of elements as a slice, and slices of already sorted elements can be
/// inserted efficiently.
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Debug, Encodable, Decodable)]
-pub struct SortedMap<K: Ord, V> {
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
+pub struct SortedMap<K, V> {
data: Vec<(K, V)>,
}
-impl<K: Ord, V> SortedMap<K, V> {
+impl<K, V> Default for SortedMap<K, V> {
+ #[inline]
+ fn default() -> SortedMap<K, V> {
+ SortedMap { data: Vec::new() }
+ }
+}
+
+impl<K, V> SortedMap<K, V> {
#[inline]
- pub fn new() -> SortedMap<K, V> {
- SortedMap { data: vec![] }
+ pub const fn new() -> SortedMap<K, V> {
+ SortedMap { data: Vec::new() }
}
+}
+impl<K: Ord, V> SortedMap<K, V> {
/// Construct a `SortedMap` from a presorted set of elements. This is faster
/// than creating an empty map and then inserting the elements individually.
///
}
}
+impl<K: HashStable<CTX>, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.data.hash_stable(ctx, hasher);
+ }
+}
+
#[cfg(test)]
mod tests;
#![feature(nll)]
#![feature(once_cell)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
let builtin = sort_lints(sess, builtin);
let (plugin_groups, builtin_groups): (Vec<_>, _) =
- lint_store.get_lint_groups().partition(|&(.., p)| p);
+ lint_store.get_lint_groups().iter().cloned().partition(|&(.., p)| p);
let plugin_groups = sort_lint_groups(plugin_groups);
let builtin_groups = sort_lint_groups(builtin_groups);
#![feature(iter_zip)]
#![feature(let_else)]
#![feature(nll)]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
#![feature(proc_macro_span)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
// Prevents field reads in the marked trait or method to be considered
// during dead code analysis.
rustc_attr!(rustc_trivial_field_reads, Normal, template!(Word), INTERNAL_UNSTABLE),
- // Used by the `rustc::potential_query_instability` lint to warn methods which
- // might not be stable during incremental compilation.
- rustc_attr!(rustc_lint_query_instability, Normal, template!(Word), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Const related:
rustc_ast = { path = "../rustc_ast" }
tracing = "0.1"
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
-odht = { version = "0.3.0", features = ["nightly"] }
+odht = { version = "0.3.1", features = ["nightly"] }
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable_Generic;
use rustc_span::source_map::Spanned;
use rustc_target::spec::abi::Abi;
use smallvec::SmallVec;
-use std::collections::BTreeMap;
use std::fmt;
#[derive(Copy, Clone, Encodable, HashStable_Generic)]
/// Attributes owned by a HIR owner.
#[derive(Debug)]
pub struct AttributeMap<'tcx> {
- pub map: BTreeMap<ItemLocalId, &'tcx [Attribute]>,
+ pub map: SortedMap<ItemLocalId, &'tcx [Attribute]>,
pub hash: Fingerprint,
}
impl<'tcx> AttributeMap<'tcx> {
pub const EMPTY: &'static AttributeMap<'static> =
- &AttributeMap { map: BTreeMap::new(), hash: Fingerprint::ZERO };
+ &AttributeMap { map: SortedMap::new(), hash: Fingerprint::ZERO };
#[inline]
pub fn get(&self, id: ItemLocalId) -> &'tcx [Attribute] {
// used.
pub nodes: IndexVec<ItemLocalId, Option<ParentedNode<'tcx>>>,
/// Content of local bodies.
- pub bodies: IndexVec<ItemLocalId, Option<&'tcx Body<'tcx>>>,
+ pub bodies: SortedMap<ItemLocalId, &'tcx Body<'tcx>>,
}
/// Full information resulting from lowering an AST node.
#![feature(let_else)]
#![feature(nll)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_middle;
Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
}
+ #[tracing::instrument(level = "debug", skip(self))]
fn tys(&mut self, t: Ty<'tcx>, _t: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
debug_assert_eq!(t, _t);
debug!("ConstInferUnifier: t={:?}", t);
}
}
+ #[tracing::instrument(level = "debug", skip(self))]
fn consts(
&mut self,
c: &'tcx ty::Const<'tcx>,
match c.val {
ty::ConstKind::Infer(InferConst::Var(vid)) => {
- let mut inner = self.infcx.inner.borrow_mut();
- let variable_table = &mut inner.const_unification_table();
-
// Check if the current unification would end up
// unifying `target_vid` with a const which contains
// an inference variable which is unioned with `target_vid`.
//
// Not doing so can easily result in stack overflows.
- if variable_table.unioned(self.target_vid, vid) {
+ if self
+ .infcx
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .unioned(self.target_vid, vid)
+ {
return Err(TypeError::CyclicConst(c));
}
- let var_value = variable_table.probe_value(vid);
+ let var_value =
+ self.infcx.inner.borrow_mut().const_unification_table().probe_value(vid);
match var_value.val {
ConstVariableValue::Known { value: u } => self.consts(u, u),
ConstVariableValue::Unknown { universe } => {
if self.for_universe.can_name(universe) {
Ok(c)
} else {
- let new_var_id = variable_table.new_key(ConstVarValue {
- origin: var_value.origin,
- val: ConstVariableValue::Unknown { universe: self.for_universe },
- });
+ let new_var_id =
+ self.infcx.inner.borrow_mut().const_unification_table().new_key(
+ ConstVarValue {
+ origin: var_value.origin,
+ val: ConstVariableValue::Unknown {
+ universe: self.for_universe,
+ },
+ },
+ );
Ok(self.tcx().mk_const_var(new_var_id, c.ty))
}
}
+use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::infer::{InferCtxt, InferOk};
+use crate::traits;
+use rustc_data_structures::sync::Lrc;
use rustc_data_structures::vec_map::VecMap;
use rustc_hir as hir;
-use rustc_middle::ty::{OpaqueTypeKey, Ty};
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::subst::{GenericArgKind, Subst};
+use rustc_middle::ty::{self, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable, TypeVisitor};
use rustc_span::Span;
+use std::ops::ControlFlow;
+
pub type OpaqueTypeMap<'tcx> = VecMap<OpaqueTypeKey<'tcx>, OpaqueTypeDecl<'tcx>>;
/// Information about the opaque types whose values we
/// The origin of the opaque type.
pub origin: hir::OpaqueTyOrigin,
}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ /// Replaces all opaque types in `value` with fresh inference variables
+ /// and creates appropriate obligations. For example, given the input:
+ ///
+ /// impl Iterator<Item = impl Debug>
+ ///
+ /// this method would create two type variables, `?0` and `?1`. It would
+ /// return the type `?0` but also the obligations:
+ ///
+ /// ?0: Iterator<Item = ?1>
+ /// ?1: Debug
+ ///
+ /// Moreover, it returns an `OpaqueTypeMap` that would map `?0` to
+ /// info about the `impl Iterator<..>` type and `?1` to info about
+ /// the `impl Debug` type.
+ ///
+ /// # Parameters
+ ///
+ /// - `parent_def_id` -- the `DefId` of the function in which the opaque type
+ /// is defined
+ /// - `body_id` -- the body-id with which the resulting obligations should
+ /// be associated
+ /// - `param_env` -- the in-scope parameter environment to be used for
+ /// obligations
+ /// - `value` -- the value within which we are instantiating opaque types
+ /// - `value_span` -- the span where the value came from, used in error reporting
+ pub fn instantiate_opaque_types<T: TypeFoldable<'tcx>>(
+ &self,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ value_span: Span,
+ ) -> InferOk<'tcx, T> {
+ debug!(
+ "instantiate_opaque_types(value={:?}, body_id={:?}, \
+ param_env={:?}, value_span={:?})",
+ value, body_id, param_env, value_span,
+ );
+ let mut instantiator =
+ Instantiator { infcx: self, body_id, param_env, value_span, obligations: vec![] };
+ let value = instantiator.instantiate_opaque_types_in_map(value);
+ InferOk { value, obligations: instantiator.obligations }
+ }
+
+ /// Given the map `opaque_types` containing the opaque
+ /// `impl Trait` types whose underlying, hidden types are being
+ /// inferred, this method adds constraints to the regions
+ /// appearing in those underlying hidden types to ensure that they
+ /// at least do not refer to random scopes within the current
+ /// function. These constraints are not (quite) sufficient to
+ /// guarantee that the regions are actually legal values; that
+ /// final condition is imposed after region inference is done.
+ ///
+ /// # The Problem
+ ///
+ /// Let's work through an example to explain how it works. Assume
+ /// the current function is as follows:
+ ///
+ /// ```text
+ /// fn foo<'a, 'b>(..) -> (impl Bar<'a>, impl Bar<'b>)
+ /// ```
+ ///
+ /// Here, we have two `impl Trait` types whose values are being
+ /// inferred (the `impl Bar<'a>` and the `impl
+ /// Bar<'b>`). Conceptually, this is sugar for a setup where we
+ /// define underlying opaque types (`Foo1`, `Foo2`) and then, in
+ /// the return type of `foo`, we *reference* those definitions:
+ ///
+ /// ```text
+ /// type Foo1<'x> = impl Bar<'x>;
+ /// type Foo2<'x> = impl Bar<'x>;
+ /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. }
+ /// // ^^^^ ^^
+ /// // | |
+ /// // | substs
+ /// // def_id
+ /// ```
+ ///
+    /// As indicated in the comments above, each of those references
+ /// is (in the compiler) basically a substitution (`substs`)
+ /// applied to the type of a suitable `def_id` (which identifies
+ /// `Foo1` or `Foo2`).
+ ///
+ /// Now, at this point in compilation, what we have done is to
+ /// replace each of the references (`Foo1<'a>`, `Foo2<'b>`) with
+ /// fresh inference variables C1 and C2. We wish to use the values
+ /// of these variables to infer the underlying types of `Foo1` and
+ /// `Foo2`. That is, this gives rise to higher-order (pattern) unification
+ /// constraints like:
+ ///
+ /// ```text
+ /// for<'a> (Foo1<'a> = C1)
+    /// for<'b> (Foo2<'b> = C2)
+ /// ```
+ ///
+    /// For these equations to be satisfiable, the types `C1` and `C2`
+ /// can only refer to a limited set of regions. For example, `C1`
+ /// can only refer to `'static` and `'a`, and `C2` can only refer
+ /// to `'static` and `'b`. The job of this function is to impose that
+ /// constraint.
+ ///
+ /// Up to this point, C1 and C2 are basically just random type
+ /// inference variables, and hence they may contain arbitrary
+ /// regions. In fact, it is fairly likely that they do! Consider
+ /// this possible definition of `foo`:
+ ///
+ /// ```text
+ /// fn foo<'a, 'b>(x: &'a i32, y: &'b i32) -> (impl Bar<'a>, impl Bar<'b>) {
+ /// (&*x, &*y)
+ /// }
+ /// ```
+ ///
+ /// Here, the values for the concrete types of the two impl
+ /// traits will include inference variables:
+ ///
+ /// ```text
+ /// &'0 i32
+ /// &'1 i32
+ /// ```
+ ///
+ /// Ordinarily, the subtyping rules would ensure that these are
+ /// sufficiently large. But since `impl Bar<'a>` isn't a specific
+ /// type per se, we don't get such constraints by default. This
+ /// is where this function comes into play. It adds extra
+ /// constraints to ensure that all the regions which appear in the
+ /// inferred type are regions that could validly appear.
+ ///
+ /// This is actually a bit of a tricky constraint in general. We
+ /// want to say that each variable (e.g., `'0`) can only take on
+ /// values that were supplied as arguments to the opaque type
+ /// (e.g., `'a` for `Foo1<'a>`) or `'static`, which is always in
+ /// scope. We don't have a constraint quite of this kind in the current
+ /// region checker.
+ ///
+ /// # The Solution
+ ///
+ /// We generally prefer to make `<=` constraints, since they
+ /// integrate best into the region solver. To do that, we find the
+ /// "minimum" of all the arguments that appear in the substs: that
+ /// is, some region which is less than all the others. In the case
+ /// of `Foo1<'a>`, that would be `'a` (it's the only choice, after
+ /// all). Then we apply that as a least bound to the variables
+ /// (e.g., `'a <= '0`).
+ ///
+ /// In some cases, there is no minimum. Consider this example:
+ ///
+ /// ```text
+ /// fn baz<'a, 'b>() -> impl Trait<'a, 'b> { ... }
+ /// ```
+ ///
+ /// Here we would report a more complex "in constraint", like `'r
+ /// in ['a, 'b, 'static]` (where `'r` is some region appearing in
+ /// the hidden type).
+ ///
+ /// # Constrain regions, not the hidden concrete type
+ ///
+ /// Note that generating constraints on each region `Rc` is *not*
+    /// the same as generating an outlives constraint on `Tc` itself.
+ /// For example, if we had a function like this:
+ ///
+ /// ```rust
+ /// fn foo<'a, T>(x: &'a u32, y: T) -> impl Foo<'a> {
+ /// (x, y)
+ /// }
+ ///
+ /// // Equivalent to:
+ /// type FooReturn<'a, T> = impl Foo<'a>;
+ /// fn foo<'a, T>(..) -> FooReturn<'a, T> { .. }
+ /// ```
+ ///
+ /// then the hidden type `Tc` would be `(&'0 u32, T)` (where `'0`
+ /// is an inference variable). If we generated a constraint that
+ /// `Tc: 'a`, then this would incorrectly require that `T: 'a` --
+ /// but this is not necessary, because the opaque type we
+ /// create will be allowed to reference `T`. So we only generate a
+ /// constraint that `'0: 'a`.
+ ///
+ /// # The `free_region_relations` parameter
+ ///
+ /// The `free_region_relations` argument is used to find the
+ /// "minimum" of the regions supplied to a given opaque type.
+ /// It must be a relation that can answer whether `'a <= 'b`,
+ /// where `'a` and `'b` are regions that appear in the "substs"
+ /// for the opaque type references (the `<'a>` in `Foo1<'a>`).
+ ///
+ /// Note that we do not impose the constraints based on the
+ /// generic regions from the `Foo1` definition (e.g., `'x`). This
+    /// is because the constraints we are imposing here are basically
+ /// the concern of the one generating the constraining type C1,
+ /// which is the current function. It also means that we can
+ /// take "implied bounds" into account in some cases:
+ ///
+ /// ```text
+ /// trait SomeTrait<'a, 'b> { }
+ /// fn foo<'a, 'b>(_: &'a &'b u32) -> impl SomeTrait<'a, 'b> { .. }
+ /// ```
+ ///
+ /// Here, the fact that `'b: 'a` is known only because of the
+ /// implied bounds from the `&'a &'b u32` parameter, and is not
+ /// "inherent" to the opaque type definition.
+ ///
+ /// # Parameters
+ ///
+ /// - `opaque_types` -- the map produced by `instantiate_opaque_types`
+ /// - `free_region_relations` -- something that can be used to relate
+ /// the free regions (`'a`) that appear in the impl trait.
+ #[instrument(level = "debug", skip(self))]
+ pub fn constrain_opaque_type(
+ &self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ opaque_defn: &OpaqueTypeDecl<'tcx>,
+ ) {
+ let def_id = opaque_type_key.def_id;
+
+ let tcx = self.tcx;
+
+ let concrete_ty = self.resolve_vars_if_possible(opaque_defn.concrete_ty);
+
+ debug!(?concrete_ty);
+
+ let first_own_region = match opaque_defn.origin {
+ hir::OpaqueTyOrigin::FnReturn | hir::OpaqueTyOrigin::AsyncFn => {
+ // We lower
+ //
+ // fn foo<'l0..'ln>() -> impl Trait<'l0..'lm>
+ //
+ // into
+ //
+ // type foo::<'p0..'pn>::Foo<'q0..'qm>
+ // fn foo<l0..'ln>() -> foo::<'static..'static>::Foo<'l0..'lm>.
+ //
+ // For these types we only iterate over `'l0..lm` below.
+ tcx.generics_of(def_id).parent_count
+ }
+            // These opaque types inherit all lifetime parameters from their
+ // parent, so we have to check them all.
+ hir::OpaqueTyOrigin::TyAlias => 0,
+ };
+
+ // For a case like `impl Foo<'a, 'b>`, we would generate a constraint
+ // `'r in ['a, 'b, 'static]` for each region `'r` that appears in the
+ // hidden type (i.e., it must be equal to `'a`, `'b`, or `'static`).
+ //
+ // `conflict1` and `conflict2` are the two region bounds that we
+ // detected which were unrelated. They are used for diagnostics.
+
+ // Create the set of choice regions: each region in the hidden
+ // type can be equal to any of the region parameters of the
+ // opaque type definition.
+ let choice_regions: Lrc<Vec<ty::Region<'tcx>>> = Lrc::new(
+ opaque_type_key.substs[first_own_region..]
+ .iter()
+ .filter_map(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => Some(r),
+ GenericArgKind::Type(_) | GenericArgKind::Const(_) => None,
+ })
+ .chain(std::iter::once(self.tcx.lifetimes.re_static))
+ .collect(),
+ );
+
+ concrete_ty.visit_with(&mut ConstrainOpaqueTypeRegionVisitor {
+ tcx: self.tcx,
+ op: |r| {
+ self.member_constraint(
+ opaque_type_key.def_id,
+ opaque_defn.definition_span,
+ concrete_ty,
+ r,
+ &choice_regions,
+ )
+ },
+ });
+ }
+}
+
+// Visitor that requires that (almost) all regions in the type visited outlive
+// `least_region`. We cannot use `push_outlives_components` because regions in
+// closure signatures are not included in their outlives components. We need to
+// ensure all regions outlive the given bound so that we don't end up with,
+// say, `ReVar` appearing in a return type and causing ICEs when other
+// functions end up with region constraints involving regions from other
+// functions.
+//
+// We also cannot use `for_each_free_region` because for closures it includes
+// the regions parameters from the enclosing item.
+//
+// We ignore any type parameters because impl trait values are assumed to
+// capture all the in-scope type parameters.
+struct ConstrainOpaqueTypeRegionVisitor<'tcx, OP> {
+ tcx: TyCtxt<'tcx>,
+ op: OP,
+}
+
+impl<'tcx, OP> TypeVisitor<'tcx> for ConstrainOpaqueTypeRegionVisitor<'tcx, OP>
+where
+ OP: FnMut(ty::Region<'tcx>),
+{
+ fn tcx_for_anon_const_substs(&self) -> Option<TyCtxt<'tcx>> {
+ Some(self.tcx)
+ }
+
+ fn visit_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: &ty::Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ t.as_ref().skip_binder().visit_with(self);
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ // ignore bound regions, keep visiting
+ ty::ReLateBound(_, _) => ControlFlow::CONTINUE,
+ _ => {
+ (self.op)(r);
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // We're only interested in types involving regions
+ if !ty.flags().intersects(ty::TypeFlags::HAS_POTENTIAL_FREE_REGIONS) {
+ return ControlFlow::CONTINUE;
+ }
+
+ match ty.kind() {
+ ty::Closure(_, ref substs) => {
+ // Skip lifetime parameters of the enclosing item(s)
+
+ substs.as_closure().tupled_upvars_ty().visit_with(self);
+ substs.as_closure().sig_as_fn_ptr_ty().visit_with(self);
+ }
+
+ ty::Generator(_, ref substs, _) => {
+ // Skip lifetime parameters of the enclosing item(s)
+ // Also skip the witness type, because that has no free regions.
+
+ substs.as_generator().tupled_upvars_ty().visit_with(self);
+ substs.as_generator().return_ty().visit_with(self);
+ substs.as_generator().yield_ty().visit_with(self);
+ substs.as_generator().resume_ty().visit_with(self);
+ }
+ _ => {
+ ty.super_visit_with(self);
+ }
+ }
+
+ ControlFlow::CONTINUE
+ }
+}
+
+struct Instantiator<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ value_span: Span,
+ obligations: Vec<traits::PredicateObligation<'tcx>>,
+}
+
+impl<'a, 'tcx> Instantiator<'a, 'tcx> {
+ fn instantiate_opaque_types_in_map<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T {
+ let tcx = self.infcx.tcx;
+ value.fold_with(&mut BottomUpFolder {
+ tcx,
+ ty_op: |ty| {
+ if ty.references_error() {
+ return tcx.ty_error();
+ } else if let ty::Opaque(def_id, substs) = ty.kind() {
+                    // Check that this `impl Trait` type is
+ // declared by `parent_def_id` -- i.e., one whose
+ // value we are inferring. At present, this is
+ // always true during the first phase of
+ // type-check, but not always true later on during
+ // NLL. Once we support named opaque types more fully,
+ // this same scenario will be able to arise during all phases.
+ //
+ // Here is an example using type alias `impl Trait`
+ // that indicates the distinction we are checking for:
+ //
+ // ```rust
+ // mod a {
+ // pub type Foo = impl Iterator;
+ // pub fn make_foo() -> Foo { .. }
+ // }
+ //
+ // mod b {
+ // fn foo() -> a::Foo { a::make_foo() }
+ // }
+ // ```
+ //
+ // Here, the return type of `foo` references an
+ // `Opaque` indeed, but not one whose value is
+ // presently being inferred. You can get into a
+ // similar situation with closure return types
+ // today:
+ //
+ // ```rust
+ // fn foo() -> impl Iterator { .. }
+ // fn bar() {
+ // let x = || foo(); // returns the Opaque assoc with `foo`
+ // }
+ // ```
+ if let Some(def_id) = def_id.as_local() {
+ let opaque_hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let parent_def_id = self.infcx.defining_use_anchor;
+ let def_scope_default = || {
+ let opaque_parent_hir_id = tcx.hir().get_parent_item(opaque_hir_id);
+ parent_def_id == tcx.hir().local_def_id(opaque_parent_hir_id)
+ };
+ let (in_definition_scope, origin) =
+ match tcx.hir().expect_item(opaque_hir_id).kind {
+ // Anonymous `impl Trait`
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ impl_trait_fn: Some(parent),
+ origin,
+ ..
+ }) => (parent == parent_def_id.to_def_id(), origin),
+ // Named `type Foo = impl Bar;`
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ impl_trait_fn: None,
+ origin,
+ ..
+ }) => (
+ may_define_opaque_type(tcx, parent_def_id, opaque_hir_id),
+ origin,
+ ),
+ _ => (def_scope_default(), hir::OpaqueTyOrigin::TyAlias),
+ };
+ if in_definition_scope {
+ let opaque_type_key =
+ OpaqueTypeKey { def_id: def_id.to_def_id(), substs };
+ return self.fold_opaque_ty(ty, opaque_type_key, origin);
+ }
+
+ debug!(
+ "instantiate_opaque_types_in_map: \
+ encountered opaque outside its definition scope \
+ def_id={:?}",
+ def_id,
+ );
+ }
+ }
+
+ ty
+ },
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_opaque_ty(
+ &mut self,
+ ty: Ty<'tcx>,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ origin: hir::OpaqueTyOrigin,
+ ) -> Ty<'tcx> {
+ let infcx = self.infcx;
+ let tcx = infcx.tcx;
+ let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+
+ // Use the same type variable if the exact same opaque type appears more
+ // than once in the return type (e.g., if it's passed to a type alias).
+ if let Some(opaque_defn) = infcx.inner.borrow().opaque_types.get(&opaque_type_key) {
+ debug!("re-using cached concrete type {:?}", opaque_defn.concrete_ty.kind());
+ return opaque_defn.concrete_ty;
+ }
+
+ let ty_var = infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: self.value_span,
+ });
+
+ // Ideally, we'd get the span where *this specific `ty` came
+ // from*, but right now we just use the span from the overall
+ // value being folded. In simple cases like `-> impl Foo`,
+ // these are the same span, but not in cases like `-> (impl
+ // Foo, impl Bar)`.
+ let definition_span = self.value_span;
+
+ {
+ let mut infcx = self.infcx.inner.borrow_mut();
+ infcx.opaque_types.insert(
+ OpaqueTypeKey { def_id, substs },
+ OpaqueTypeDecl { opaque_type: ty, definition_span, concrete_ty: ty_var, origin },
+ );
+ infcx.opaque_types_vars.insert(ty_var, ty);
+ }
+
+ debug!("generated new type inference var {:?}", ty_var.kind());
+
+ let item_bounds = tcx.explicit_item_bounds(def_id);
+
+ self.obligations.reserve(item_bounds.len());
+ for (predicate, _) in item_bounds {
+ debug!(?predicate);
+ let predicate = predicate.subst(tcx, substs);
+ debug!(?predicate);
+
+ // We can't normalize associated types from `rustc_infer`, but we can eagerly register inference variables for them.
+ let predicate = predicate.fold_with(&mut BottomUpFolder {
+ tcx,
+ ty_op: |ty| match ty.kind() {
+ ty::Projection(projection_ty) => infcx.infer_projection(
+ self.param_env,
+ *projection_ty,
+ traits::ObligationCause::misc(self.value_span, self.body_id),
+ 0,
+ &mut self.obligations,
+ ),
+ _ => ty,
+ },
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ });
+ debug!(?predicate);
+
+ if let ty::PredicateKind::Projection(projection) = predicate.kind().skip_binder() {
+ if projection.ty.references_error() {
+ // No point on adding these obligations since there's a type error involved.
+ return tcx.ty_error();
+ }
+ }
+ // Change the predicate to refer to the type variable,
+ // which will be the concrete type instead of the opaque type.
+ // This also instantiates nested instances of `impl Trait`.
+ let predicate = self.instantiate_opaque_types_in_map(predicate);
+
+ let cause =
+ traits::ObligationCause::new(self.value_span, self.body_id, traits::OpaqueType);
+
+ // Require that the predicate holds for the concrete type.
+ debug!(?predicate);
+ self.obligations.push(traits::Obligation::new(cause, self.param_env, predicate));
+ }
+
+ ty_var
+ }
+}
+
+/// Returns `true` if `opaque_hir_id` is a sibling or a child of a sibling of `def_id`.
+///
+/// Example:
+/// ```rust
+/// pub mod foo {
+/// pub mod bar {
+/// pub trait Bar { .. }
+///
+/// pub type Baz = impl Bar;
+///
+/// fn f1() -> Baz { .. }
+/// }
+///
+/// fn f2() -> bar::Baz { .. }
+/// }
+/// ```
+///
+/// Here, `def_id` is the `LocalDefId` of the defining use of the opaque type (e.g., `f1` or `f2`),
+/// and `opaque_hir_id` is the `HirId` of the definition of the opaque type `Baz`.
+/// For the above example, this function returns `true` for `f1` and `false` for `f2`.
+fn may_define_opaque_type(tcx: TyCtxt<'_>, def_id: LocalDefId, opaque_hir_id: hir::HirId) -> bool {
+ let mut hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ // Named opaque types can be defined by any siblings or children of siblings.
+ let scope = tcx.hir().get_defining_scope(opaque_hir_id);
+ // We walk up the node tree until we hit the root or the scope of the opaque type.
+ while hir_id != scope && hir_id != hir::CRATE_HIR_ID {
+ hir_id = tcx.hir().get_parent_item(hir_id);
+ }
+ // Syntactically, we are allowed to define the concrete type if:
+ let res = hir_id == scope;
+ trace!(
+ "may_define_opaque_type(def={:?}, opaque_node={:?}) = {}",
+ tcx.hir().find(hir_id),
+ tcx.hir().get(opaque_hir_id),
+ res
+ );
+ res
+}
#![feature(min_specialization)]
#![feature(label_break_value)]
#![recursion_limit = "512"] // For rustdoc
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
#![feature(nll)]
#![feature(once_cell)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
mod callbacks;
pub mod interface;
&self.lints
}
- pub fn get_lint_groups<'t>(
- &'t self,
- ) -> impl Iterator<Item = (&'static str, Vec<LintId>, bool)> + 't {
- // This function is not used in a way which observes the order of lints.
- #[cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
+ pub fn get_lint_groups<'t>(&'t self) -> Vec<(&'static str, Vec<LintId>, bool)> {
self.lint_groups
.iter()
.filter(|(_, LintGroup { depr, .. })| {
.map(|(k, LintGroup { lint_ids, from_plugin, .. })| {
(*k, lint_ids.clone(), *from_plugin)
})
+ .collect()
}
pub fn register_early_pass(
use rustc_ast as ast;
use rustc_errors::Applicability;
use rustc_hir::def::Res;
-use rustc_hir::*;
+use rustc_hir::{
+ GenericArg, HirId, Item, ItemKind, MutTy, Mutability, Node, Path, PathSegment, QPath, Ty,
+ TyKind,
+};
use rustc_middle::ty;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::hygiene::{ExpnKind, MacroKind};
}
}
-declare_tool_lint! {
- pub rustc::POTENTIAL_QUERY_INSTABILITY,
- Allow,
- "require explicit opt-in when using potentially unstable methods or functions",
- report_in_external_macro: true
-}
-
-declare_lint_pass!(QueryStability => [POTENTIAL_QUERY_INSTABILITY]);
-
-impl LateLintPass<'_> for QueryStability {
- fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
- // FIXME(rustdoc): This lint uses typecheck results, causing rustdoc to
- // error if there are resolution failures.
- //
- // As internal lints are currently always run if there are `unstable_options`,
- // they are added to the lint store of rustdoc. Internal lints are also
- // not used via the `lint_mod` query. Crate lints run outside of a query
- // so rustdoc currently doesn't disable them.
- //
- // Instead of relying on this, either change crate lints to a query disabled by
- // rustdoc, only run internal lints if the user is explicitly opting in
- // or figure out a different way to avoid running lints for rustdoc.
- if cx.tcx.sess.opts.actually_rustdoc {
- return;
- }
-
- let (def_id, span) = match expr.kind {
- ExprKind::Path(ref path) if let Some(def_id) = cx.qpath_res(path, expr.hir_id).opt_def_id() => {
- (def_id, expr.span)
- }
- ExprKind::MethodCall(_, span, _, _) if let Some(def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) => {
- (def_id, span)
- },
- _ => return,
- };
-
- let substs = cx.typeck_results().node_substs(expr.hir_id);
- if let Ok(Some(instance)) = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs) {
- let def_id = instance.def_id();
- if cx.tcx.has_attr(def_id, sym::rustc_lint_query_instability) {
- cx.struct_span_lint(POTENTIAL_QUERY_INSTABILITY, span, |lint| {
- let msg = format!(
- "using `{}` can result in unstable query results",
- cx.tcx.item_name(def_id)
- );
- lint.build(&msg)
- .note("if you believe this case to be fine, allow this lint and add a comment explaining your rationale")
- .emit();
- })
- }
- }
- }
-}
-
declare_tool_lint! {
pub rustc::USAGE_OF_TY_TYKIND,
Allow,
#![feature(box_patterns)]
#![feature(crate_visibility_modifier)]
#![feature(format_args_capture)]
-#![feature(if_let_guard)]
#![feature(iter_order_by)]
#![feature(iter_zip)]
#![feature(never_type)]
#![feature(nll)]
#![feature(control_flow_enum)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_middle;
store.register_early_pass(|| Box::new(LintPassImpl));
store.register_lints(&DefaultHashTypes::get_lints());
store.register_late_pass(|| Box::new(DefaultHashTypes));
- store.register_lints(&QueryStability::get_lints());
- store.register_late_pass(|| Box::new(QueryStability));
store.register_lints(&ExistingDocKeyword::get_lints());
store.register_late_pass(|| Box::new(ExistingDocKeyword));
store.register_lints(&TyTyKind::get_lints());
None,
vec![
LintId::of(DEFAULT_HASH_TYPES),
- LintId::of(POTENTIAL_QUERY_INSTABILITY),
LintId::of(USAGE_OF_TY_TYKIND),
LintId::of(LINT_PASS_IMPL_WITHOUT_MACRO),
LintId::of(TY_PASS_BY_REFERENCE),
let path = PathBuf::from(s);
println!("cargo:rustc-link-search=native={}", path.parent().unwrap().display());
if target.contains("windows") {
- println!("cargo:rustc-link-lib=static-nobundle={}", stdcppname);
+ println!("cargo:rustc-link-lib=static:-bundle={}", stdcppname);
} else {
println!("cargo:rustc-link-lib=static={}", stdcppname);
}
// Libstdc++ depends on pthread which Rust doesn't link on MinGW
// since nothing else requires it.
if target.contains("windows-gnu") {
- println!("cargo:rustc-link-lib=static-nobundle=pthread");
+ println!("cargo:rustc-link-lib=static:-bundle=pthread");
}
}
#![feature(nll)]
-#![feature(static_nobundle)]
+#![feature(native_link_modifiers)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
// NOTE: This crate only exists to allow linking on mingw targets.
[dependencies]
libc = "0.2"
-odht = { version = "0.3.0", features = ["nightly"] }
+odht = { version = "0.3.1", features = ["nightly"] }
snap = "1"
tracing = "0.1"
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
#![feature(try_blocks)]
#![feature(never_type)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
extern crate proc_macro;
}
}
- if let EntryKind::Mod(data) = kind {
- for exp in data.decode((self, sess)).reexports.decode((self, sess)) {
+ if let EntryKind::Mod(exports) = kind {
+ for exp in exports.decode((self, sess)) {
match exp.res {
Res::Def(DefKind::Macro(..), _) => {}
_ if macros_only => continue,
}
fn module_expansion(&self, id: DefIndex, sess: &Session) -> ExpnId {
- if let EntryKind::Mod(m) = self.kind(id) {
- m.decode((self, sess)).expansion
- } else {
- panic!("Expected module, found {:?}", self.local_def_id(id))
+ match self.kind(id) {
+ EntryKind::Mod(_) | EntryKind::Enum(_) | EntryKind::Trait(_) => {
+ self.get_expn_that_defined(id, sess)
+ }
+ _ => panic!("Expected module, found {:?}", self.local_def_id(id)),
}
}
Lazy::empty()
};
- let data = ModData { reexports, expansion: tcx.expn_that_defined(local_def_id) };
-
- record!(self.tables.kind[def_id] <- EntryKind::Mod(self.lazy(data)));
+ record!(self.tables.kind[def_id] <- EntryKind::Mod(reexports));
if self.is_proc_macro {
record!(self.tables.children[def_id] <- &[]);
+ // Encode this here because we don't do it in encode_def_ids.
+ record!(self.tables.expn_that_defined[def_id] <- tcx.expn_that_defined(local_def_id));
} else {
record!(self.tables.children[def_id] <- md.item_ids.iter().map(|item_id| {
item_id.def_id.local_def_index
Union(Lazy<VariantData>, ReprOptions),
Fn(Lazy<FnData>),
ForeignFn(Lazy<FnData>),
- Mod(Lazy<ModData>),
+ Mod(Lazy<[Export]>),
MacroDef(Lazy<MacroDef>),
ProcMacro(MacroKind),
Closure,
#[derive(Encodable, Decodable)]
struct RenderedConst(String);
-#[derive(MetadataEncodable, MetadataDecodable)]
-struct ModData {
- reexports: Lazy<[Export]>,
- expansion: ExpnId,
-}
-
#[derive(MetadataEncodable, MetadataDecodable)]
struct FnData {
asyncness: hir::IsAsync,
}
pub fn body(&self, id: BodyId) -> &'hir Body<'hir> {
- self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies[id.hir_id.local_id].unwrap()
+ self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies[&id.hir_id.local_id]
}
pub fn fn_decl_by_hir_id(&self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
.iter_enumerated()
.flat_map(move |(owner, owner_info)| {
let bodies = &owner_info.as_ref()?.nodes.bodies;
- Some(bodies.iter_enumerated().filter_map(move |(local_id, body)| {
- if body.is_none() {
- return None;
- }
+ Some(bodies.iter().map(move |&(local_id, _)| {
let hir_id = HirId { owner, local_id };
let body_id = BodyId { hir_id };
- Some(self.body_owner_def_id(body_id))
+ self.body_owner_def_id(body_id)
}))
})
.flatten()
par_iter(&self.krate().owners.raw).enumerate().for_each(|(owner, owner_info)| {
let owner = LocalDefId::new(owner);
if let Some(owner_info) = owner_info {
- par_iter(&owner_info.nodes.bodies.raw).enumerate().for_each(|(local_id, body)| {
- if body.is_some() {
- let local_id = ItemLocalId::new(local_id);
- let hir_id = HirId { owner, local_id };
- let body_id = BodyId { hir_id };
- f(self.body_owner_def_id(body_id))
- }
+ par_iter(owner_info.nodes.bodies.range(..)).for_each(|(local_id, _)| {
+ let hir_id = HirId { owner, local_id: *local_id };
+ let body_id = BodyId { hir_id };
+ f(self.body_owner_def_id(body_id))
})
}
});
let krate = self.krate();
for (owner, info) in krate.owners.iter_enumerated() {
if let Some(info) = info {
- for (&local_id, attrs) in info.attrs.map.iter() {
- let id = HirId { owner, local_id };
+ for (local_id, attrs) in info.attrs.map.iter() {
+ let id = HirId { owner, local_id: *local_id };
for a in *attrs {
visitor.visit_attribute(id, a)
}
#![feature(try_reserve_kind)]
#![feature(nonzero_ops)]
#![recursion_limit = "512"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate bitflags;
desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) }
}
- /// The signature of functions.
+ /// Computes the signature of the function.
query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> {
desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
+ /// Performs lint checking for the module.
query lint_mod(key: LocalDefId) -> () {
desc { |tcx| "linting {}", describe_as_module(key, tcx) }
}
desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
}
+ /// Checks for uses of unstable APIs in the module.
query check_mod_unstable_api_usage(key: LocalDefId) -> () {
desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
}
desc { |tcx| "computing drop scopes for `{}`", tcx.def_path_str(def_id) }
}
+ /// Generates a MIR body for the shim.
query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> {
storage(ArenaCacheSelector<'tcx>)
desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
separate_provide_extern
}
+ /// Gets the span for the definition.
query def_span(def_id: DefId) -> Span {
desc { |tcx| "looking up span for `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
}
+ /// Gets the span for the identifier of the definition.
query def_ident_span(def_id: DefId) -> Option<Span> {
desc { |tcx| "looking up span for `{}`'s identifier", tcx.def_path_str(def_id) }
separate_provide_extern
desc { "fetching what a dependency looks like" }
separate_provide_extern
}
+
+ /// Gets the name of the crate.
query crate_name(_: CrateNum) -> Symbol {
eval_always
desc { "fetching what a crate is named" }
#![feature(once_cell)]
#![feature(min_specialization)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
#![feature(trusted_step)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
#![feature(let_else)]
#![feature(in_band_lifetimes)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
fn check_attributes(
&self,
hir_id: HirId,
- span: Span,
+ span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
) {
sym::marker => self.check_marker(hir_id, attr, span, target),
sym::target_feature => self.check_target_feature(hir_id, attr, span, target),
sym::track_caller => {
- self.check_track_caller(hir_id, attr.span, attrs, span, target)
+ self.check_track_caller(hir_id, &attr.span, attrs, span, target)
}
sym::doc => self.check_doc_attrs(
attr,
sym::rustc_legacy_const_generics => {
self.check_rustc_legacy_const_generics(&attr, span, target, item)
}
- sym::rustc_lint_query_instability => {
- self.check_rustc_lint_query_instability(&attr, span, target)
- }
sym::rustc_clean
| sym::rustc_dirty
| sym::rustc_if_this_changed
}
/// Checks if an `#[inline]` is applied to a function or a closure. Returns `true` if valid.
- fn check_inline(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_inline(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Fn
| Target::Closure
E0518,
"attribute should be applied to function or closure",
)
- .span_label(span, "not a function or closure")
+ .span_label(*span, "not a function or closure")
.emit();
false
}
}
/// Checks if `#[naked]` is applied to a function definition.
- fn check_naked(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_naked(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Fn
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
attr.span,
"attribute should be applied to a function definition",
)
- .span_label(span, "not a function definition")
+ .span_label(*span, "not a function definition")
.emit();
false
}
}
/// Checks if `#[cmse_nonsecure_entry]` is applied to a function definition.
- fn check_cmse_nonsecure_entry(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_cmse_nonsecure_entry(&self, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Fn
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
attr.span,
"attribute should be applied to a function definition",
)
- .span_label(span, "not a function definition")
+ .span_label(*span, "not a function definition")
.emit();
false
}
fn check_track_caller(
&self,
hir_id: HirId,
- attr_span: Span,
+ attr_span: &Span,
attrs: &'hir [Attribute],
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
_ if attrs.iter().any(|attr| attr.has_name(sym::naked)) => {
struct_span_err!(
self.tcx.sess,
- attr_span,
+ *attr_span,
E0736,
"cannot use `#[track_caller]` with `#[naked]`",
)
_ => {
struct_span_err!(
self.tcx.sess,
- attr_span,
+ *attr_span,
E0739,
"attribute should be applied to function"
)
- .span_label(span, "not a function")
+ .span_label(*span, "not a function")
.emit();
false
}
&self,
hir_id: HirId,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
E0701,
"attribute can only be applied to a struct or enum"
)
- .span_label(span, "not a struct or enum")
+ .span_label(*span, "not a struct or enum")
.emit();
false
}
}
/// Checks if the `#[marker]` attribute on an `item` is valid. Returns `true` if valid.
- fn check_marker(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_marker(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Trait => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
self.tcx
.sess
.struct_span_err(attr.span, "attribute can only be applied to a trait")
- .span_label(span, "not a trait")
+ .span_label(*span, "not a trait")
.emit();
false
}
&self,
hir_id: HirId,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, "not a function")
+ .span_label(*span, "not a function")
.emit();
});
true
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a function")
- .span_label(span, "not a function")
+ .span_label(*span, "not a function")
.emit();
false
}
"not a `use` item",
);
}
- err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information")
+ err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information")
.emit();
},
);
}
/// Checks if `#[must_not_suspend]` is applied to a function. Returns `true` if valid.
- fn check_must_not_suspend(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_must_not_suspend(&self, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::Struct | Target::Enum | Target::Union | Target::Trait => true,
_ => {
self.tcx
.sess
.struct_span_err(attr.span, "`must_not_suspend` attribute should be applied to a struct, enum, or trait")
- .span_label(span, "is not a struct, enum, or trait")
+ .span_label(*span, "is not a struct, enum, or trait")
.emit();
false
}
}
/// Checks if `#[cold]` is applied to a non-function. Returns `true` if valid.
- fn check_cold(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ fn check_cold(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, "not a function")
+ .span_label(*span, "not a function")
.emit();
});
}
}
/// Checks if `#[link_name]` is applied to an item other than a foreign function or static.
- fn check_link_name(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ fn check_link_name(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::ForeignFn | Target::ForeignStatic => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
}
}
- diag.span_label(span, "not a foreign function or static");
+ diag.span_label(*span, "not a foreign function or static");
diag.emit();
});
}
}
/// Checks if `#[no_link]` is applied to an `extern crate`. Returns `true` if valid.
- fn check_no_link(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ fn check_no_link(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) -> bool {
match target {
Target::ExternCrate => true,
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
attr.span,
"attribute should be applied to an `extern crate` item",
)
- .span_label(span, "not an `extern crate` item")
+ .span_label(*span, "not an `extern crate` item")
.emit();
false
}
&self,
hir_id: HirId,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
attr.span,
"attribute should be applied to a free function, impl method or static",
)
- .span_label(span, "not a free function, impl method or static")
+ .span_label(*span, "not a free function, impl method or static")
.emit();
false
}
fn check_rustc_layout_scalar_valid_range(
&self,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
if target != Target::Struct {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a struct")
- .span_label(span, "not a struct")
+ .span_label(*span, "not a struct")
.emit();
return false;
}
fn check_rustc_legacy_const_generics(
&self,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
) -> bool {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a function")
- .span_label(span, "not a function")
+ .span_label(*span, "not a function")
.emit();
return false;
}
}
}
- fn check_rustc_lint_query_instability(
- &self,
- attr: &Attribute,
- span: Span,
- target: Target,
- ) -> bool {
- let is_function = matches!(target, Target::Fn | Target::Method(..));
- if !is_function {
- self.tcx
- .sess
- .struct_span_err(attr.span, "attribute should be applied to a function")
- .span_label(span, "not a function")
- .emit();
- false
- } else {
- true
- }
- }
-
/// Checks that the dep-graph debugging attributes are only present when the query-dep-graph
/// option is passed to the compiler.
fn check_rustc_dirty_clean(&self, attr: &Attribute) -> bool {
}
/// Checks if `#[link_section]` is applied to a function or static.
- fn check_link_section(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ fn check_link_section(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::Static | Target::Fn | Target::Method(..) => {}
// FIXME(#80564): We permit struct fields, match arms and macro defs to have an
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, "not a function or static")
+ .span_label(*span, "not a function or static")
.emit();
});
}
}
/// Checks if `#[no_mangle]` is applied to a function or static.
- fn check_no_mangle(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ fn check_no_mangle(&self, hir_id: HirId, attr: &Attribute, span: &Span, target: Target) {
match target {
Target::Static | Target::Fn => {}
Target::Method(..) if self.is_impl_item(hir_id) => {}
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, format!("foreign {}", foreign_item_kind))
+ .span_label(*span, format!("foreign {}", foreign_item_kind))
.note("symbol names in extern blocks are not mangled")
.span_suggestion(
attr.span,
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, "not a free function, impl method or static")
+ .span_label(*span, "not a free function, impl method or static")
.emit();
});
}
fn check_repr(
&self,
attrs: &'hir [Attribute],
- span: Span,
+ span: &Span,
target: Target,
item: Option<ItemLike<'_>>,
hir_id: HirId,
"{}",
&format!("attribute should be applied to {} {}", article, allowed_targets)
)
- .span_label(span, &format!("not {} {}", article, allowed_targets))
+ .span_label(*span, &format!("not {} {}", article, allowed_targets))
.emit();
}
&self,
hir_id: HirId,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
attrs: &[Attribute],
) -> bool {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to a macro")
- .span_label(span, "not a macro")
+ .span_label(*span, "not a macro")
.emit();
false
}
&self,
hir_id: HirId,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
self.tcx
.sess
.struct_span_err(attr.span, "attribute should be applied to `const fn`")
- .span_label(span, "not a `const fn`")
+ .span_label(*span, "not a `const fn`")
.emit();
false
}
fn check_default_method_body_is_const(
&self,
attr: &Attribute,
- span: Span,
+ span: &Span,
target: Target,
) -> bool {
match target {
attr.span,
"attribute should be applied to a trait method with body",
)
- .span_label(span, "not a trait method or missing a body")
+ .span_label(*span, "not a trait method or missing a body")
.emit();
false
}
}
}
- fn check_stability_promotable(&self, attr: &Attribute, _span: Span, target: Target) -> bool {
+ fn check_stability_promotable(&self, attr: &Attribute, _span: &Span, target: Target) -> bool {
match target {
Target::Expression => {
self.tcx
}
}
- fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: Span, target: Target) {
+ fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: &Span, target: Target) {
match target {
Target::Closure | Target::Expression | Target::Statement | Target::Arm => {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
}
let target = Target::from_item(item);
- self.check_attributes(item.hir_id(), item.span, target, Some(ItemLike::Item(item)));
+ self.check_attributes(item.hir_id(), &item.span, target, Some(ItemLike::Item(item)));
intravisit::walk_item(self, item)
}
fn visit_generic_param(&mut self, generic_param: &'tcx hir::GenericParam<'tcx>) {
let target = Target::from_generic_param(generic_param);
- self.check_attributes(generic_param.hir_id, generic_param.span, target, None);
+ self.check_attributes(generic_param.hir_id, &generic_param.span, target, None);
intravisit::walk_generic_param(self, generic_param)
}
fn visit_trait_item(&mut self, trait_item: &'tcx TraitItem<'tcx>) {
let target = Target::from_trait_item(trait_item);
- self.check_attributes(trait_item.hir_id(), trait_item.span, target, None);
+ self.check_attributes(trait_item.hir_id(), &trait_item.span, target, None);
intravisit::walk_trait_item(self, trait_item)
}
fn visit_field_def(&mut self, struct_field: &'tcx hir::FieldDef<'tcx>) {
- self.check_attributes(struct_field.hir_id, struct_field.span, Target::Field, None);
+ self.check_attributes(struct_field.hir_id, &struct_field.span, Target::Field, None);
intravisit::walk_field_def(self, struct_field);
}
fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
- self.check_attributes(arm.hir_id, arm.span, Target::Arm, None);
+ self.check_attributes(arm.hir_id, &arm.span, Target::Arm, None);
intravisit::walk_arm(self, arm);
}
let target = Target::from_foreign_item(f_item);
self.check_attributes(
f_item.hir_id(),
- f_item.span,
+ &f_item.span,
target,
Some(ItemLike::ForeignItem(f_item)),
);
fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
let target = target_from_impl_item(self.tcx, impl_item);
- self.check_attributes(impl_item.hir_id(), impl_item.span, target, None);
+ self.check_attributes(impl_item.hir_id(), &impl_item.span, target, None);
intravisit::walk_impl_item(self, impl_item)
}
fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
// When checking statements ignore expressions, they will be checked later.
if let hir::StmtKind::Local(ref l) = stmt.kind {
- self.check_attributes(l.hir_id, stmt.span, Target::Statement, None);
+ self.check_attributes(l.hir_id, &stmt.span, Target::Statement, None);
}
intravisit::walk_stmt(self, stmt)
}
_ => Target::Expression,
};
- self.check_attributes(expr.hir_id, expr.span, target, None);
+ self.check_attributes(expr.hir_id, &expr.span, target, None);
intravisit::walk_expr(self, expr)
}
generics: &'tcx hir::Generics<'tcx>,
item_id: HirId,
) {
- self.check_attributes(variant.id, variant.span, Target::Variant, None);
+ self.check_attributes(variant.id, &variant.span, Target::Variant, None);
intravisit::walk_variant(self, variant, generics, item_id)
}
fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
- self.check_attributes(param.hir_id, param.span, Target::Param, None);
        self.check_attributes(param.hir_id, &param.span, Target::Param, None);
intravisit::walk_param(self, param);
}
let check_attr_visitor = &mut CheckAttrVisitor { tcx };
tcx.hir().visit_item_likes_in_module(module_def_id, &mut check_attr_visitor.as_deep_visitor());
if module_def_id.is_top_level_module() {
- check_attr_visitor.check_attributes(CRATE_HIR_ID, DUMMY_SP, Target::Mod, None);
+ check_attr_visitor.check_attributes(CRATE_HIR_ID, &DUMMY_SP, Target::Mod, None);
check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
}
}
#![feature(nll)]
#![feature(try_blocks)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_middle;
#![feature(try_blocks)]
#![feature(associated_type_defaults)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
use rustc_ast::MacroDef;
use rustc_attr as attr;
#![feature(once_cell)]
#![feature(rustc_attrs)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
use crate::ich;
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sorted_map::SortedMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::definitions::{DefPathHash, Definitions};
-use rustc_index::vec::IndexVec;
use rustc_session::cstore::CrateStore;
use rustc_session::Session;
use rustc_span::source_map::SourceMap;
Traverse {
hash_bodies: bool,
owner: LocalDefId,
- bodies: &'tcx IndexVec<hir::ItemLocalId, Option<&'tcx hir::Body<'tcx>>>,
+ bodies: &'tcx SortedMap<hir::ItemLocalId, &'tcx hir::Body<'tcx>>,
},
}
&mut self,
hash_bodies: bool,
owner: LocalDefId,
- bodies: &'a IndexVec<hir::ItemLocalId, Option<&'a hir::Body<'a>>>,
+ bodies: &'a SortedMap<hir::ItemLocalId, &'a hir::Body<'a>>,
f: impl FnOnce(&mut Self),
) {
let prev = self.body_resolver;
BodyResolver::Traverse { hash_bodies: false, .. } => {}
BodyResolver::Traverse { hash_bodies: true, owner, bodies } => {
assert_eq!(id.hir_id.owner, owner);
- bodies[id.hir_id.local_id].unwrap().hash_stable(hcx, hasher);
+ bodies[&id.hir_id.local_id].hash_stable(hcx, hasher);
}
}
}
#![feature(let_else)]
#![feature(min_specialization)]
#![feature(thread_local_const_init)]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
} else {
def_key.disambiguated_data.data.get_opt_name().expect("module without name")
};
- let expn_id = if def_kind == DefKind::Mod {
- self.cstore().module_expansion_untracked(def_id, &self.session)
- } else {
- // FIXME: Parent expansions for enums and traits are not kept in metadata.
- ExpnId::root()
- };
Some(self.new_module(
parent,
ModuleKind::Def(def_kind, def_id, name),
- expn_id,
+ self.cstore().module_expansion_untracked(def_id, &self.session),
self.cstore().get_span_untracked(def_id, &self.session),
// FIXME: Account for `#[no_implicit_prelude]` attributes.
parent.map_or(false, |module| module.no_implicit_prelude),
pub descr: &'static str,
pub path: Path,
pub accessible: bool,
+ /// An extra note that should be issued if this item is suggested
+ pub note: Option<String>,
}
/// Adjust the impl span so that just the `impl` keyword is taken by removing
// collect results based on the filter function
// avoid suggesting anything from the same module in which we are resolving
+ // avoid suggesting anything with a hygienic name
if ident.name == lookup_ident.name
&& ns == namespace
&& !ptr::eq(in_module, parent_scope.module)
+ && !ident.span.normalize_to_macros_2_0().from_expansion()
{
let res = name_binding.res();
if filter_fn(res) {
}
if candidates.iter().all(|v: &ImportSuggestion| v.did != did) {
+ // See if we're recommending TryFrom, TryInto, or FromIterator and add
+ // a note about editions
+ let note = if let Some(did) = did {
+ let requires_note = !did.is_local()
+ && this.cstore().item_attrs(did, this.session).iter().any(
+ |attr| {
+ if attr.has_name(sym::rustc_diagnostic_item) {
+ [sym::TryInto, sym::TryFrom, sym::FromIterator]
+ .map(|x| Some(x))
+ .contains(&attr.value_str())
+ } else {
+ false
+ }
+ },
+ );
+
+ requires_note.then(|| {
+ format!(
+ "'{}' is included in the prelude starting in Edition 2021",
+ path_names_to_string(&path)
+ )
+ })
+ } else {
+ None
+ };
+
candidates.push(ImportSuggestion {
did,
descr: res.descr(),
path,
accessible: child_accessible,
+ note,
});
}
}
return;
}
- let mut accessible_path_strings: Vec<(String, &str, Option<DefId>)> = Vec::new();
- let mut inaccessible_path_strings: Vec<(String, &str, Option<DefId>)> = Vec::new();
+ let mut accessible_path_strings: Vec<(String, &str, Option<DefId>, &Option<String>)> =
+ Vec::new();
+ let mut inaccessible_path_strings: Vec<(String, &str, Option<DefId>, &Option<String>)> =
+ Vec::new();
candidates.iter().for_each(|c| {
(if c.accessible { &mut accessible_path_strings } else { &mut inaccessible_path_strings })
- .push((path_names_to_string(&c.path), c.descr, c.did))
+ .push((path_names_to_string(&c.path), c.descr, c.did, &c.note))
});
// we want consistent results across executions, but candidates are produced
let instead = if instead { " instead" } else { "" };
let mut msg = format!("consider importing {} {}{}", determiner, kind, instead);
+ for note in accessible_path_strings.iter().map(|cand| cand.3.as_ref()).flatten() {
+ err.note(note);
+ }
+
if let Some(span) = use_placement_span {
for candidate in &mut accessible_path_strings {
// produce an additional newline to separate the new use statement
assert!(!inaccessible_path_strings.is_empty());
if inaccessible_path_strings.len() == 1 {
- let (name, descr, def_id) = &inaccessible_path_strings[0];
+ let (name, descr, def_id, note) = &inaccessible_path_strings[0];
let msg = format!("{} `{}` exists but is inaccessible", descr, name);
if let Some(local_def_id) = def_id.and_then(|did| did.as_local()) {
} else {
err.note(&msg);
}
+ if let Some(note) = (*note).as_deref() {
+ err.note(note);
+ }
} else {
- let (_, descr_first, _) = &inaccessible_path_strings[0];
+ let (_, descr_first, _, _) = &inaccessible_path_strings[0];
let descr = if inaccessible_path_strings
.iter()
.skip(1)
- .all(|(_, descr, _)| descr == descr_first)
+ .all(|(_, descr, _, _)| descr == descr_first)
{
descr_first.to_string()
} else {
let mut has_colon = false;
let mut spans = Vec::new();
- for (name, _, def_id) in &inaccessible_path_strings {
+ for (name, _, def_id, _) in &inaccessible_path_strings {
if let Some(local_def_id) = def_id.and_then(|did| did.as_local()) {
let span = definitions.def_span(local_def_id);
let span = session.source_map().guess_head_span(span);
multi_span.push_span_label(span, format!("`{}`: not accessible", name));
}
+ for note in inaccessible_path_strings.iter().map(|cand| cand.3.as_ref()).flatten() {
+ err.note(note);
+ }
+
err.span_note(multi_span, &msg);
}
}
descr: "module",
path,
accessible: true,
+ note: None,
},
));
} else {
#![feature(nll)]
#![recursion_limit = "256"]
#![allow(rustdoc::private_intra_doc_links)]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
#![feature(if_let_guard)]
#![feature(nll)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
mod dump_visitor;
mod dumper;
#![feature(min_specialization)]
#![feature(once_cell)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
pub const parse_panic_strategy: &str = "either `unwind` or `abort`";
pub const parse_opt_panic_strategy: &str = parse_panic_strategy;
pub const parse_relro_level: &str = "one of: `full`, `partial`, or `off`";
- pub const parse_sanitizers: &str =
- "comma separated list of sanitizers: `address`, `hwaddress`, `leak`, `memory` or `thread`";
+ pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `cfi`, `hwaddress`, `leak`, `memory` or `thread`";
pub const parse_sanitizer_memory_track_origins: &str = "0, 1, or 2";
pub const parse_cfguard: &str =
"either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`";
for s in v.split(',') {
*slot |= match s {
"address" => SanitizerSet::ADDRESS,
+ "cfi" => SanitizerSet::CFI,
"leak" => SanitizerSet::LEAK,
"memory" => SanitizerSet::MEMORY,
"thread" => SanitizerSet::THREAD,
pub fn is_nightly_build(&self) -> bool {
self.opts.unstable_features.is_nightly_build()
}
+ pub fn is_sanitizer_cfi_enabled(&self) -> bool {
+ self.opts.debugging_opts.sanitizer.contains(SanitizerSet::CFI)
+ }
pub fn overflow_checks(&self) -> bool {
self.opts
.cg
disable it using `-C target-feature=-crt-static`",
);
}
+
+ // LLVM CFI requires LTO.
+ if sess.is_sanitizer_cfi_enabled() {
+ if sess.opts.cg.lto == config::LtoCli::Unspecified
+ || sess.opts.cg.lto == config::LtoCli::No
+ || sess.opts.cg.lto == config::LtoCli::Thin
+ {
+ sess.err("`-Zsanitizer=cfi` requires `-Clto`");
+ }
+ }
}
/// Holds data on the current incremental compilation session, if there is one.
/// pub fn f() {} // `f`'s `SyntaxContext` has a single `ExpnId` from `m`.
/// pub fn $i() {} // `$i`'s `SyntaxContext` is empty.
/// }
- /// n(f);
+ /// n!(f);
/// macro n($j:ident) {
/// use foo::*;
/// f(); // `f`'s `SyntaxContext` has a mark from `m` and a mark from `n`
#![feature(nll)]
#![feature(min_specialization)]
#![feature(thread_local_const_init)]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
cfg_target_thread_local,
cfg_target_vendor,
cfg_version,
+ cfi,
char,
client,
clippy,
rustc_layout_scalar_valid_range_end,
rustc_layout_scalar_valid_range_start,
rustc_legacy_const_generics,
- rustc_lint_query_instability,
rustc_macro_transparency,
rustc_main,
rustc_mir,
#![feature(in_band_lifetimes)]
#![feature(iter_zip)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_middle;
use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::subst::SubstsRef;
-use rustc_middle::ty::{self, Instance, TyCtxt};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::SymbolManglingVersion;
+use rustc_target::abi::call::FnAbi;
use tracing::debug;
ty::SymbolName::new(tcx, &symbol_name)
}
+/// This function computes the typeid for the given function ABI.
+pub fn typeid_for_fnabi(tcx: TyCtxt<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> String {
+ v0::mangle_typeid_for_fnabi(tcx, fn_abi)
+}
+
/// Computes the symbol name for the given instance. This function will call
/// `compute_instantiating_crate` if it needs to factor the instantiating crate
/// into the symbol name.
use rustc_middle::ty::print::{Print, Printer};
use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
use rustc_middle::ty::{self, FloatTy, Instance, IntTy, Ty, TyCtxt, TypeFoldable, UintTy};
+use rustc_target::abi::call::FnAbi;
use rustc_target::abi::Integer;
use rustc_target::spec::abi::Abi;
std::mem::take(&mut cx.out)
}
+pub(super) fn mangle_typeid_for_fnabi(
+ _tcx: TyCtxt<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> String {
+ // LLVM uses type metadata to allow IR modules to aggregate pointers by their types.[1] This
+ // type metadata is used by LLVM Control Flow Integrity to test whether a given pointer is
+ // associated with a type identifier (i.e., test type membership).
+ //
+ // Clang uses the Itanium C++ ABI's[2] virtual tables and RTTI typeinfo structure name[3] as
+ // type metadata identifiers for function pointers. The typeinfo name encoding is a
+ // two-character code (i.e., “TS”) prefixed to the type encoding for the function.
+ //
+ // For cross-language LLVM CFI support, a compatible encoding must be used by either
+ //
+ // a. Using a superset of types that encompasses types used by Clang (i.e., Itanium C++ ABI's
+ // type encodings[4]), or at least types used at the FFI boundary.
+ // b. Reducing the types to the least common denominator between types used by Clang (or at
+ // least types used at the FFI boundary) and Rust compilers (if even possible).
+ // c. Creating a new ABI for cross-language CFI and using it for Clang and Rust compilers (and
+ // possibly other compilers).
+ //
+ // Option (b) may weaken the protection for Rust-compiled only code, so it should be provided
+ // as an alternative to a Rust-specific encoding for when mixing Rust and C and C++ -compiled
+ // code. Option (c) would require changes to Clang to use the new ABI.
+ //
+ // [1] https://llvm.org/docs/TypeMetadata.html
+ // [2] https://itanium-cxx-abi.github.io/cxx-abi/abi.html
+ // [3] https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-special-vtables
+ // [4] https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-type
+ //
+ // FIXME(rcvalle): See comment above.
+ let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+ format!("typeid{}", arg_count)
+}
+
struct BinderLevel {
/// The range of distances from the root of what's
/// being printed, to the lifetimes in a binder.
base.max_atomic_width = Some(128);
// FIXME: The leak sanitizer currently fails the tests, see #88132.
- base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::THREAD;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD;
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-arch".to_string(), "arm64".to_string()]);
base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
arch: "aarch64".to_string(),
options: TargetOptions {
max_atomic_width: Some(128),
- supported_sanitizers: SanitizerSet::ADDRESS,
+ supported_sanitizers: SanitizerSet::ADDRESS | SanitizerSet::CFI,
..super::fuchsia_base::opts()
},
}
// As documented in https://developer.android.com/ndk/guides/cpu-features.html
// the neon (ASIMD) and FP must exist on all android aarch64 targets.
features: "+neon,+fp-armv8".to_string(),
- supported_sanitizers: SanitizerSet::HWADDRESS,
+ supported_sanitizers: SanitizerSet::CFI | SanitizerSet::HWADDRESS,
..super::android_base::opts()
},
}
options: TargetOptions {
max_atomic_width: Some(128),
supported_sanitizers: SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
| SanitizerSet::MEMORY
| SanitizerSet::THREAD,
..super::freebsd_base::opts()
mcount: "\u{1}_mcount".to_string(),
max_atomic_width: Some(128),
supported_sanitizers: SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
| SanitizerSet::LEAK
| SanitizerSet::MEMORY
| SanitizerSet::THREAD
);
TargetOptions {
- os: "hermit".to_string(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
disable_redzone: true,
linker: Some("rust-lld".to_owned()),
const MEMORY = 1 << 2;
const THREAD = 1 << 3;
const HWADDRESS = 1 << 4;
+ const CFI = 1 << 5;
}
}
fn as_str(self) -> Option<&'static str> {
Some(match self {
SanitizerSet::ADDRESS => "address",
+ SanitizerSet::CFI => "cfi",
SanitizerSet::LEAK => "leak",
SanitizerSet::MEMORY => "memory",
SanitizerSet::THREAD => "thread",
fn into_iter(self) -> Self::IntoIter {
[
SanitizerSet::ADDRESS,
+ SanitizerSet::CFI,
SanitizerSet::LEAK,
SanitizerSet::MEMORY,
SanitizerSet::THREAD,
for s in a {
base.$key_name |= match s.as_string() {
Some("address") => SanitizerSet::ADDRESS,
+ Some("cfi") => SanitizerSet::CFI,
Some("leak") => SanitizerSet::LEAK,
Some("memory") => SanitizerSet::MEMORY,
Some("thread") => SanitizerSet::THREAD,
base.link_env_remove.extend(super::apple_base::macos_link_env_remove());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::THREAD;
+ base.supported_sanitizers =
+ SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::THREAD;
// Clang automatically chooses a more specific target based on
// MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
base.max_atomic_width = Some(64);
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers = SanitizerSet::ADDRESS;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
Target {
llvm_target: "x86_64-fuchsia".to_string(),
base.max_atomic_width = Some(64);
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers = SanitizerSet::ADDRESS;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
Target {
llvm_target: "x86_64-pc-solaris".to_string(),
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::MEMORY | SanitizerSet::THREAD;
+ base.supported_sanitizers =
+ SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::MEMORY | SanitizerSet::THREAD;
Target {
llvm_target: "x86_64-unknown-freebsd".to_string(),
base.pre_link_args.insert(LinkerFlavor::Gcc, vec!["-m64".to_string(), "-std=c99".to_string()]);
base.cpu = "x86-64".to_string();
base.max_atomic_width = Some(64);
- base.supported_sanitizers = SanitizerSet::ADDRESS;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
Target {
// LLVM does not currently have a separate illumos target,
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers =
- SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::MEMORY | SanitizerSet::THREAD;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
Target {
llvm_target: "x86_64-unknown-linux-gnu".to_string(),
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
base.static_position_independent_executables = true;
- base.supported_sanitizers =
- SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::MEMORY | SanitizerSet::THREAD;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
Target {
llvm_target: "x86_64-unknown-linux-musl".to_string(),
base.pre_link_args.entry(LinkerFlavor::Gcc).or_default().push("-m64".to_string());
// don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
base.stack_probes = StackProbeType::Call;
- base.supported_sanitizers =
- SanitizerSet::ADDRESS | SanitizerSet::LEAK | SanitizerSet::MEMORY | SanitizerSet::THREAD;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
Target {
llvm_target: "x86_64-unknown-netbsd".to_string(),
#![feature(crate_visibility_modifier)]
#![feature(control_flow_enum)]
#![recursion_limit = "512"] // For rustdoc
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate rustc_macros;
-use crate::traits::{self, ObligationCause, PredicateObligation};
+use crate::traits;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::Lrc;
-use rustc_hir as hir;
-use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::def_id::DefId;
use rustc_infer::infer::error_reporting::unexpected_hidden_region_diagnostic;
-use rustc_infer::infer::opaque_types::OpaqueTypeDecl;
-use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
-use rustc_infer::infer::{InferCtxt, InferOk};
-use rustc_middle::ty::fold::{BottomUpFolder, TypeFoldable, TypeFolder, TypeVisitor};
-use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst};
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts};
use rustc_middle::ty::{self, OpaqueTypeKey, Ty, TyCtxt};
use rustc_span::Span;
-use std::ops::ControlFlow;
-
pub trait InferCtxtExt<'tcx> {
- fn instantiate_opaque_types<T: TypeFoldable<'tcx>>(
- &self,
- body_id: hir::HirId,
- param_env: ty::ParamEnv<'tcx>,
- value: T,
- value_span: Span,
- ) -> InferOk<'tcx, T>;
-
- fn constrain_opaque_types(&self);
-
- fn constrain_opaque_type(
- &self,
- opaque_type_key: OpaqueTypeKey<'tcx>,
- opaque_defn: &OpaqueTypeDecl<'tcx>,
- );
-
- /*private*/
- fn generate_member_constraint(
- &self,
- concrete_ty: Ty<'tcx>,
- opaque_defn: &OpaqueTypeDecl<'tcx>,
- opaque_type_key: OpaqueTypeKey<'tcx>,
- first_own_region_index: usize,
- );
-
fn infer_opaque_definition_from_instantiation(
&self,
opaque_type_key: OpaqueTypeKey<'tcx>,
}
impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
- /// Replaces all opaque types in `value` with fresh inference variables
- /// and creates appropriate obligations. For example, given the input:
- ///
- /// impl Iterator<Item = impl Debug>
- ///
- /// this method would create two type variables, `?0` and `?1`. It would
- /// return the type `?0` but also the obligations:
- ///
- /// ?0: Iterator<Item = ?1>
- /// ?1: Debug
- ///
- /// Moreover, it returns an `OpaqueTypeMap` that would map `?0` to
- /// info about the `impl Iterator<..>` type and `?1` to info about
- /// the `impl Debug` type.
- ///
- /// # Parameters
- ///
- /// - `parent_def_id` -- the `DefId` of the function in which the opaque type
- /// is defined
- /// - `body_id` -- the body-id with which the resulting obligations should
- /// be associated
- /// - `param_env` -- the in-scope parameter environment to be used for
- /// obligations
- /// - `value` -- the value within which we are instantiating opaque types
- /// - `value_span` -- the span where the value came from, used in error reporting
- fn instantiate_opaque_types<T: TypeFoldable<'tcx>>(
- &self,
- body_id: hir::HirId,
- param_env: ty::ParamEnv<'tcx>,
- value: T,
- value_span: Span,
- ) -> InferOk<'tcx, T> {
- debug!(
- "instantiate_opaque_types(value={:?}, body_id={:?}, \
- param_env={:?}, value_span={:?})",
- value, body_id, param_env, value_span,
- );
- let mut instantiator =
- Instantiator { infcx: self, body_id, param_env, value_span, obligations: vec![] };
- let value = instantiator.instantiate_opaque_types_in_map(value);
- InferOk { value, obligations: instantiator.obligations }
- }
-
- /// Given the map `opaque_types` containing the opaque
- /// `impl Trait` types whose underlying, hidden types are being
- /// inferred, this method adds constraints to the regions
- /// appearing in those underlying hidden types to ensure that they
- /// at least do not refer to random scopes within the current
- /// function. These constraints are not (quite) sufficient to
- /// guarantee that the regions are actually legal values; that
- /// final condition is imposed after region inference is done.
- ///
- /// # The Problem
- ///
- /// Let's work through an example to explain how it works. Assume
- /// the current function is as follows:
- ///
- /// ```text
- /// fn foo<'a, 'b>(..) -> (impl Bar<'a>, impl Bar<'b>)
- /// ```
- ///
- /// Here, we have two `impl Trait` types whose values are being
- /// inferred (the `impl Bar<'a>` and the `impl
- /// Bar<'b>`). Conceptually, this is sugar for a setup where we
- /// define underlying opaque types (`Foo1`, `Foo2`) and then, in
- /// the return type of `foo`, we *reference* those definitions:
- ///
- /// ```text
- /// type Foo1<'x> = impl Bar<'x>;
- /// type Foo2<'x> = impl Bar<'x>;
- /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. }
- /// // ^^^^ ^^
- /// // | |
- /// // | substs
- /// // def_id
- /// ```
- ///
- /// As indicating in the comments above, each of those references
- /// is (in the compiler) basically a substitution (`substs`)
- /// applied to the type of a suitable `def_id` (which identifies
- /// `Foo1` or `Foo2`).
- ///
- /// Now, at this point in compilation, what we have done is to
- /// replace each of the references (`Foo1<'a>`, `Foo2<'b>`) with
- /// fresh inference variables C1 and C2. We wish to use the values
- /// of these variables to infer the underlying types of `Foo1` and
- /// `Foo2`. That is, this gives rise to higher-order (pattern) unification
- /// constraints like:
- ///
- /// ```text
- /// for<'a> (Foo1<'a> = C1)
- /// for<'b> (Foo1<'b> = C2)
- /// ```
- ///
- /// For these equation to be satisfiable, the types `C1` and `C2`
- /// can only refer to a limited set of regions. For example, `C1`
- /// can only refer to `'static` and `'a`, and `C2` can only refer
- /// to `'static` and `'b`. The job of this function is to impose that
- /// constraint.
- ///
- /// Up to this point, C1 and C2 are basically just random type
- /// inference variables, and hence they may contain arbitrary
- /// regions. In fact, it is fairly likely that they do! Consider
- /// this possible definition of `foo`:
- ///
- /// ```text
- /// fn foo<'a, 'b>(x: &'a i32, y: &'b i32) -> (impl Bar<'a>, impl Bar<'b>) {
- /// (&*x, &*y)
- /// }
- /// ```
- ///
- /// Here, the values for the concrete types of the two impl
- /// traits will include inference variables:
- ///
- /// ```text
- /// &'0 i32
- /// &'1 i32
- /// ```
- ///
- /// Ordinarily, the subtyping rules would ensure that these are
- /// sufficiently large. But since `impl Bar<'a>` isn't a specific
- /// type per se, we don't get such constraints by default. This
- /// is where this function comes into play. It adds extra
- /// constraints to ensure that all the regions which appear in the
- /// inferred type are regions that could validly appear.
- ///
- /// This is actually a bit of a tricky constraint in general. We
- /// want to say that each variable (e.g., `'0`) can only take on
- /// values that were supplied as arguments to the opaque type
- /// (e.g., `'a` for `Foo1<'a>`) or `'static`, which is always in
- /// scope. We don't have a constraint quite of this kind in the current
- /// region checker.
- ///
- /// # The Solution
- ///
- /// We generally prefer to make `<=` constraints, since they
- /// integrate best into the region solver. To do that, we find the
- /// "minimum" of all the arguments that appear in the substs: that
- /// is, some region which is less than all the others. In the case
- /// of `Foo1<'a>`, that would be `'a` (it's the only choice, after
- /// all). Then we apply that as a least bound to the variables
- /// (e.g., `'a <= '0`).
- ///
- /// In some cases, there is no minimum. Consider this example:
- ///
- /// ```text
- /// fn baz<'a, 'b>() -> impl Trait<'a, 'b> { ... }
- /// ```
- ///
- /// Here we would report a more complex "in constraint", like `'r
- /// in ['a, 'b, 'static]` (where `'r` is some region appearing in
- /// the hidden type).
- ///
- /// # Constrain regions, not the hidden concrete type
- ///
- /// Note that generating constraints on each region `Rc` is *not*
- /// the same as generating an outlives constraint on `Tc` iself.
- /// For example, if we had a function like this:
- ///
- /// ```rust
- /// fn foo<'a, T>(x: &'a u32, y: T) -> impl Foo<'a> {
- /// (x, y)
- /// }
- ///
- /// // Equivalent to:
- /// type FooReturn<'a, T> = impl Foo<'a>;
- /// fn foo<'a, T>(..) -> FooReturn<'a, T> { .. }
- /// ```
- ///
- /// then the hidden type `Tc` would be `(&'0 u32, T)` (where `'0`
- /// is an inference variable). If we generated a constraint that
- /// `Tc: 'a`, then this would incorrectly require that `T: 'a` --
- /// but this is not necessary, because the opaque type we
- /// create will be allowed to reference `T`. So we only generate a
- /// constraint that `'0: 'a`.
- ///
- /// # The `free_region_relations` parameter
- ///
- /// The `free_region_relations` argument is used to find the
- /// "minimum" of the regions supplied to a given opaque type.
- /// It must be a relation that can answer whether `'a <= 'b`,
- /// where `'a` and `'b` are regions that appear in the "substs"
- /// for the opaque type references (the `<'a>` in `Foo1<'a>`).
- ///
- /// Note that we do not impose the constraints based on the
- /// generic regions from the `Foo1` definition (e.g., `'x`). This
- /// is because the constraints we are imposing here is basically
- /// the concern of the one generating the constraining type C1,
- /// which is the current function. It also means that we can
- /// take "implied bounds" into account in some cases:
- ///
- /// ```text
- /// trait SomeTrait<'a, 'b> { }
- /// fn foo<'a, 'b>(_: &'a &'b u32) -> impl SomeTrait<'a, 'b> { .. }
- /// ```
- ///
- /// Here, the fact that `'b: 'a` is known only because of the
- /// implied bounds from the `&'a &'b u32` parameter, and is not
- /// "inherent" to the opaque type definition.
- ///
- /// # Parameters
- ///
- /// - `opaque_types` -- the map produced by `instantiate_opaque_types`
- /// - `free_region_relations` -- something that can be used to relate
- /// the free regions (`'a`) that appear in the impl trait.
- fn constrain_opaque_types(&self) {
- let opaque_types = self.inner.borrow().opaque_types.clone();
- for (opaque_type_key, opaque_defn) in opaque_types {
- self.constrain_opaque_type(opaque_type_key, &opaque_defn);
- }
- }
-
- /// See `constrain_opaque_types` for documentation.
- #[instrument(level = "debug", skip(self))]
- fn constrain_opaque_type(
- &self,
- opaque_type_key: OpaqueTypeKey<'tcx>,
- opaque_defn: &OpaqueTypeDecl<'tcx>,
- ) {
- let def_id = opaque_type_key.def_id;
-
- let tcx = self.tcx;
-
- let concrete_ty = self.resolve_vars_if_possible(opaque_defn.concrete_ty);
-
- debug!(?concrete_ty);
-
- let first_own_region = match opaque_defn.origin {
- hir::OpaqueTyOrigin::FnReturn | hir::OpaqueTyOrigin::AsyncFn => {
- // We lower
- //
- // fn foo<'l0..'ln>() -> impl Trait<'l0..'lm>
- //
- // into
- //
- // type foo::<'p0..'pn>::Foo<'q0..'qm>
- // fn foo<l0..'ln>() -> foo::<'static..'static>::Foo<'l0..'lm>.
- //
- // For these types we only iterate over `'l0..lm` below.
- tcx.generics_of(def_id).parent_count
- }
- // These opaque type inherit all lifetime parameters from their
- // parent, so we have to check them all.
- hir::OpaqueTyOrigin::TyAlias => 0,
- };
-
- // The regions that appear in the hidden type must be equal to
- // one of the regions in scope for the opaque type.
- self.generate_member_constraint(
- concrete_ty,
- opaque_defn,
- opaque_type_key,
- first_own_region,
- );
- }
-
- /// As a fallback, we sometimes generate an "in constraint". For
- /// a case like `impl Foo<'a, 'b>`, where `'a` and `'b` cannot be
- /// related, we would generate a constraint `'r in ['a, 'b,
- /// 'static]` for each region `'r` that appears in the hidden type
- /// (i.e., it must be equal to `'a`, `'b`, or `'static`).
- ///
- /// `conflict1` and `conflict2` are the two region bounds that we
- /// detected which were unrelated. They are used for diagnostics.
- fn generate_member_constraint(
- &self,
- concrete_ty: Ty<'tcx>,
- opaque_defn: &OpaqueTypeDecl<'tcx>,
- opaque_type_key: OpaqueTypeKey<'tcx>,
- first_own_region: usize,
- ) {
- // Create the set of choice regions: each region in the hidden
- // type can be equal to any of the region parameters of the
- // opaque type definition.
- let choice_regions: Lrc<Vec<ty::Region<'tcx>>> = Lrc::new(
- opaque_type_key.substs[first_own_region..]
- .iter()
- .filter_map(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(r) => Some(r),
- GenericArgKind::Type(_) | GenericArgKind::Const(_) => None,
- })
- .chain(std::iter::once(self.tcx.lifetimes.re_static))
- .collect(),
- );
-
- concrete_ty.visit_with(&mut ConstrainOpaqueTypeRegionVisitor {
- tcx: self.tcx,
- op: |r| {
- self.member_constraint(
- opaque_type_key.def_id,
- opaque_defn.definition_span,
- concrete_ty,
- r,
- &choice_regions,
- )
- },
- });
- }
-
/// Given the fully resolved, instantiated type for an opaque
/// type, i.e., the value of an inference variable like C1 or C2
/// (*), computes the "definition type" for an opaque type
/// purpose of this function is to do that translation.
///
/// (*) C1 and C2 were introduced in the comments on
- /// `constrain_opaque_types`. Read that comment for more context.
+ /// `constrain_opaque_type`. Read that comment for more context.
///
/// # Parameters
///
}
}
-// Visitor that requires that (almost) all regions in the type visited outlive
-// `least_region`. We cannot use `push_outlives_components` because regions in
-// closure signatures are not included in their outlives components. We need to
-// ensure all regions outlive the given bound so that we don't end up with,
-// say, `ReVar` appearing in a return type and causing ICEs when other
-// functions end up with region constraints involving regions from other
-// functions.
-//
-// We also cannot use `for_each_free_region` because for closures it includes
-// the regions parameters from the enclosing item.
-//
-// We ignore any type parameters because impl trait values are assumed to
-// capture all the in-scope type parameters.
-struct ConstrainOpaqueTypeRegionVisitor<'tcx, OP> {
- tcx: TyCtxt<'tcx>,
- op: OP,
-}
-
-impl<'tcx, OP> TypeVisitor<'tcx> for ConstrainOpaqueTypeRegionVisitor<'tcx, OP>
-where
- OP: FnMut(ty::Region<'tcx>),
-{
- fn tcx_for_anon_const_substs(&self) -> Option<TyCtxt<'tcx>> {
- Some(self.tcx)
- }
-
- fn visit_binder<T: TypeFoldable<'tcx>>(
- &mut self,
- t: &ty::Binder<'tcx, T>,
- ) -> ControlFlow<Self::BreakTy> {
- t.as_ref().skip_binder().visit_with(self);
- ControlFlow::CONTINUE
- }
-
- fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
- match *r {
- // ignore bound regions, keep visiting
- ty::ReLateBound(_, _) => ControlFlow::CONTINUE,
- _ => {
- (self.op)(r);
- ControlFlow::CONTINUE
- }
- }
- }
-
- fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
- // We're only interested in types involving regions
- if !ty.flags().intersects(ty::TypeFlags::HAS_POTENTIAL_FREE_REGIONS) {
- return ControlFlow::CONTINUE;
- }
-
- match ty.kind() {
- ty::Closure(_, ref substs) => {
- // Skip lifetime parameters of the enclosing item(s)
-
- substs.as_closure().tupled_upvars_ty().visit_with(self);
- substs.as_closure().sig_as_fn_ptr_ty().visit_with(self);
- }
-
- ty::Generator(_, ref substs, _) => {
- // Skip lifetime parameters of the enclosing item(s)
- // Also skip the witness type, because that has no free regions.
-
- substs.as_generator().tupled_upvars_ty().visit_with(self);
- substs.as_generator().return_ty().visit_with(self);
- substs.as_generator().yield_ty().visit_with(self);
- substs.as_generator().resume_ty().visit_with(self);
- }
- _ => {
- ty.super_visit_with(self);
- }
- }
-
- ControlFlow::CONTINUE
- }
-}
-
struct ReverseMapper<'tcx> {
tcx: TyCtxt<'tcx>,
}
}
-struct Instantiator<'a, 'tcx> {
- infcx: &'a InferCtxt<'a, 'tcx>,
- body_id: hir::HirId,
- param_env: ty::ParamEnv<'tcx>,
- value_span: Span,
- obligations: Vec<PredicateObligation<'tcx>>,
-}
-
-impl<'a, 'tcx> Instantiator<'a, 'tcx> {
- fn instantiate_opaque_types_in_map<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T {
- let tcx = self.infcx.tcx;
- value.fold_with(&mut BottomUpFolder {
- tcx,
- ty_op: |ty| {
- if ty.references_error() {
- return tcx.ty_error();
- } else if let ty::Opaque(def_id, substs) = ty.kind() {
- // Check that this is `impl Trait` type is
- // declared by `parent_def_id` -- i.e., one whose
- // value we are inferring. At present, this is
- // always true during the first phase of
- // type-check, but not always true later on during
- // NLL. Once we support named opaque types more fully,
- // this same scenario will be able to arise during all phases.
- //
- // Here is an example using type alias `impl Trait`
- // that indicates the distinction we are checking for:
- //
- // ```rust
- // mod a {
- // pub type Foo = impl Iterator;
- // pub fn make_foo() -> Foo { .. }
- // }
- //
- // mod b {
- // fn foo() -> a::Foo { a::make_foo() }
- // }
- // ```
- //
- // Here, the return type of `foo` references an
- // `Opaque` indeed, but not one whose value is
- // presently being inferred. You can get into a
- // similar situation with closure return types
- // today:
- //
- // ```rust
- // fn foo() -> impl Iterator { .. }
- // fn bar() {
- // let x = || foo(); // returns the Opaque assoc with `foo`
- // }
- // ```
- if let Some(def_id) = def_id.as_local() {
- let opaque_hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
- let parent_def_id = self.infcx.defining_use_anchor;
- let def_scope_default = || {
- let opaque_parent_hir_id = tcx.hir().get_parent_item(opaque_hir_id);
- parent_def_id == tcx.hir().local_def_id(opaque_parent_hir_id)
- };
- let (in_definition_scope, origin) =
- match tcx.hir().expect_item(opaque_hir_id).kind {
- // Anonymous `impl Trait`
- hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- impl_trait_fn: Some(parent),
- origin,
- ..
- }) => (parent == parent_def_id.to_def_id(), origin),
- // Named `type Foo = impl Bar;`
- hir::ItemKind::OpaqueTy(hir::OpaqueTy {
- impl_trait_fn: None,
- origin,
- ..
- }) => (
- may_define_opaque_type(tcx, parent_def_id, opaque_hir_id),
- origin,
- ),
- _ => (def_scope_default(), hir::OpaqueTyOrigin::TyAlias),
- };
- if in_definition_scope {
- let opaque_type_key =
- OpaqueTypeKey { def_id: def_id.to_def_id(), substs };
- return self.fold_opaque_ty(ty, opaque_type_key, origin);
- }
-
- debug!(
- "instantiate_opaque_types_in_map: \
- encountered opaque outside its definition scope \
- def_id={:?}",
- def_id,
- );
- }
- }
-
- ty
- },
- lt_op: |lt| lt,
- ct_op: |ct| ct,
- })
- }
-
- #[instrument(skip(self), level = "debug")]
- fn fold_opaque_ty(
- &mut self,
- ty: Ty<'tcx>,
- opaque_type_key: OpaqueTypeKey<'tcx>,
- origin: hir::OpaqueTyOrigin,
- ) -> Ty<'tcx> {
- let infcx = self.infcx;
- let tcx = infcx.tcx;
- let OpaqueTypeKey { def_id, substs } = opaque_type_key;
-
- // Use the same type variable if the exact same opaque type appears more
- // than once in the return type (e.g., if it's passed to a type alias).
- if let Some(opaque_defn) = infcx.inner.borrow().opaque_types.get(&opaque_type_key) {
- debug!("re-using cached concrete type {:?}", opaque_defn.concrete_ty.kind());
- return opaque_defn.concrete_ty;
- }
-
- let ty_var = infcx.next_ty_var(TypeVariableOrigin {
- kind: TypeVariableOriginKind::TypeInference,
- span: self.value_span,
- });
-
- // Ideally, we'd get the span where *this specific `ty` came
- // from*, but right now we just use the span from the overall
- // value being folded. In simple cases like `-> impl Foo`,
- // these are the same span, but not in cases like `-> (impl
- // Foo, impl Bar)`.
- let definition_span = self.value_span;
-
- {
- let mut infcx = self.infcx.inner.borrow_mut();
- infcx.opaque_types.insert(
- OpaqueTypeKey { def_id, substs },
- OpaqueTypeDecl { opaque_type: ty, definition_span, concrete_ty: ty_var, origin },
- );
- infcx.opaque_types_vars.insert(ty_var, ty);
- }
-
- debug!("generated new type inference var {:?}", ty_var.kind());
-
- let item_bounds = tcx.explicit_item_bounds(def_id);
-
- self.obligations.reserve(item_bounds.len());
- for (predicate, _) in item_bounds {
- debug!(?predicate);
- let predicate = predicate.subst(tcx, substs);
- debug!(?predicate);
-
- // We can't normalize associated types from `rustc_infer`, but we can eagerly register inference variables for them.
- let predicate = predicate.fold_with(&mut BottomUpFolder {
- tcx,
- ty_op: |ty| match ty.kind() {
- ty::Projection(projection_ty) => infcx.infer_projection(
- self.param_env,
- *projection_ty,
- ObligationCause::misc(self.value_span, self.body_id),
- 0,
- &mut self.obligations,
- ),
- _ => ty,
- },
- lt_op: |lt| lt,
- ct_op: |ct| ct,
- });
- debug!(?predicate);
-
- if let ty::PredicateKind::Projection(projection) = predicate.kind().skip_binder() {
- if projection.ty.references_error() {
- // No point on adding these obligations since there's a type error involved.
- return tcx.ty_error();
- }
- }
- // Change the predicate to refer to the type variable,
- // which will be the concrete type instead of the opaque type.
- // This also instantiates nested instances of `impl Trait`.
- let predicate = self.instantiate_opaque_types_in_map(predicate);
-
- let cause =
- traits::ObligationCause::new(self.value_span, self.body_id, traits::OpaqueType);
-
- // Require that the predicate holds for the concrete type.
- debug!(?predicate);
- self.obligations.push(traits::Obligation::new(cause, self.param_env, predicate));
- }
-
- ty_var
- }
-}
-
-/// Returns `true` if `opaque_hir_id` is a sibling or a child of a sibling of `def_id`.
-///
-/// Example:
-/// ```rust
-/// pub mod foo {
-/// pub mod bar {
-/// pub trait Bar { .. }
-///
-/// pub type Baz = impl Bar;
-///
-/// fn f1() -> Baz { .. }
-/// }
-///
-/// fn f2() -> bar::Baz { .. }
-/// }
-/// ```
-///
-/// Here, `def_id` is the `LocalDefId` of the defining use of the opaque type (e.g., `f1` or `f2`),
-/// and `opaque_hir_id` is the `HirId` of the definition of the opaque type `Baz`.
-/// For the above example, this function returns `true` for `f1` and `false` for `f2`.
-fn may_define_opaque_type(tcx: TyCtxt<'_>, def_id: LocalDefId, opaque_hir_id: hir::HirId) -> bool {
- let mut hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-
- // Named opaque types can be defined by any siblings or children of siblings.
- let scope = tcx.hir().get_defining_scope(opaque_hir_id);
- // We walk up the node tree until we hit the root or the scope of the opaque type.
- while hir_id != scope && hir_id != hir::CRATE_HIR_ID {
- hir_id = tcx.hir().get_parent_item(hir_id);
- }
- // Syntactically, we are allowed to define the concrete type if:
- let res = hir_id == scope;
- trace!(
- "may_define_opaque_type(def={:?}, opaque_node={:?}) = {}",
- tcx.hir().find(hir_id),
- tcx.hir().get(opaque_hir_id),
- res
- );
- res
-}
-
/// Given a set of predicates that apply to an object type, returns
/// the region bounds that the (erased) `Self` type must
/// outlive. Precisely *because* the `Self` type is erased, the
// Check if a bound would previously have been removed when normalizing
// the param_env so that it can be given the lowest priority. See
// #50825 for the motivation for this.
- let is_global =
- |cand: &ty::PolyTraitRef<'_>| cand.is_known_global() && !cand.has_late_bound_regions();
+ let is_global = |cand: &ty::PolyTraitRef<'tcx>| {
+ cand.is_global(self.infcx.tcx) && !cand.has_late_bound_regions()
+ };
// (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
// and `DiscriminantKindCandidate` to anything else.
type NeedsDropResult<T> = Result<T, AlwaysRequiresDrop>;
fn needs_drop_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
- let adt_components =
- move |adt_def: &ty::AdtDef, _| tcx.adt_drop_tys(adt_def.did).map(|tys| tys.iter());
-
// If we don't know a type doesn't need drop, for example if it's a type
// parameter without a `Copy` bound, then we conservatively return that it
// needs drop.
- let res =
- NeedsDropTypes::new(tcx, query.param_env, query.value, adt_components).next().is_some();
+ let adt_has_dtor =
+ |adt_def: &ty::AdtDef| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ let res = drop_tys_helper(tcx, query.value, query.param_env, adt_has_dtor).next().is_some();
debug!("needs_drop_raw({:?}) = {:?}", query, res);
res
tcx: TyCtxt<'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> bool {
- let significant_drop_fields = move |adt_def: &ty::AdtDef, _| {
- tcx.adt_significant_drop_tys(adt_def.did).map(|tys| tys.iter())
- };
- let res = NeedsDropTypes::new(tcx, query.param_env, query.value, significant_drop_fields)
- .next()
- .is_some();
+ let res =
+ drop_tys_helper(tcx, query.value, query.param_env, adt_consider_insignificant_dtor(tcx))
+ .next()
+ .is_some();
debug!("has_significant_drop_raw({:?}) = {:?}", query, res);
res
}
Ok(tys) => tys,
};
for required_ty in tys {
- let subst_ty = tcx.normalize_erasing_regions(
- self.param_env,
- required_ty.subst(tcx, substs),
- );
+ let subst_ty =
+ tcx.normalize_erasing_regions(self.param_env, required_ty);
queue_type(self, subst_ty);
}
}
// Depending on the implentation of `adt_has_dtor`, it is used to check if the
// ADT has a destructor or if the ADT only has a significant destructor. For
// understanding significant destructor look at `adt_significant_drop_tys`.
-fn adt_drop_tys_helper<'tcx>(
+fn drop_tys_helper<'tcx>(
tcx: TyCtxt<'tcx>,
- def_id: DefId,
+ ty: Ty<'tcx>,
+ param_env: rustc_middle::ty::ParamEnv<'tcx>,
adt_has_dtor: impl Fn(&ty::AdtDef) -> Option<DtorType>,
-) -> Result<&ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+) -> impl Iterator<Item = NeedsDropResult<Ty<'tcx>>> {
let adt_components = move |adt_def: &ty::AdtDef, substs: SubstsRef<'tcx>| {
if adt_def.is_manually_drop() {
- debug!("adt_drop_tys: `{:?}` is manually drop", adt_def);
+ debug!("drop_tys_helper: `{:?}` is manually drop", adt_def);
return Ok(Vec::new().into_iter());
} else if let Some(dtor_info) = adt_has_dtor(adt_def) {
match dtor_info {
DtorType::Significant => {
- debug!("adt_drop_tys: `{:?}` implements `Drop`", adt_def);
+ debug!("drop_tys_helper: `{:?}` implements `Drop`", adt_def);
return Err(AlwaysRequiresDrop);
}
DtorType::Insignificant => {
- debug!("adt_drop_tys: `{:?}` drop is insignificant", adt_def);
+ debug!("drop_tys_helper: `{:?}` drop is insignificant", adt_def);
// Since the destructor is insignificant, we just want to make sure all of
// the passed in type parameters are also insignificant.
}
}
} else if adt_def.is_union() {
- debug!("adt_drop_tys: `{:?}` is a union", adt_def);
+ debug!("drop_tys_helper: `{:?}` is a union", adt_def);
return Ok(Vec::new().into_iter());
}
- Ok(adt_def.all_fields().map(|field| tcx.type_of(field.did)).collect::<Vec<_>>().into_iter())
+ Ok(adt_def
+ .all_fields()
+ .map(|field| {
+ let r = tcx.type_of(field.did).subst(tcx, substs);
+ debug!("drop_tys_helper: Subst into {:?} with {:?} getting {:?}", field, substs, r);
+ r
+ })
+ .collect::<Vec<_>>()
+ .into_iter())
};
- let adt_ty = tcx.type_of(def_id);
- let param_env = tcx.param_env(def_id);
- let res: Result<Vec<_>, _> =
- NeedsDropTypes::new(tcx, param_env, adt_ty, adt_components).collect();
-
- debug!("adt_drop_tys(`{}`) = `{:?}`", tcx.def_path_str(def_id), res);
- res.map(|components| tcx.intern_type_list(&components))
+ NeedsDropTypes::new(tcx, param_env, ty, adt_components)
}
-fn adt_drop_tys(tcx: TyCtxt<'_>, def_id: DefId) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
- // This is for the "needs_drop" query, that considers all `Drop` impls, therefore all dtors are
- // significant.
- let adt_has_dtor =
- |adt_def: &ty::AdtDef| adt_def.destructor(tcx).map(|_| DtorType::Significant);
- adt_drop_tys_helper(tcx, def_id, adt_has_dtor)
-}
-
-fn adt_significant_drop_tys(
- tcx: TyCtxt<'_>,
- def_id: DefId,
-) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
- let adt_has_dtor = |adt_def: &ty::AdtDef| {
+fn adt_consider_insignificant_dtor<'tcx>(
+ tcx: TyCtxt<'tcx>,
+) -> impl Fn(&ty::AdtDef) -> Option<DtorType> + 'tcx {
+ move |adt_def: &ty::AdtDef| {
let is_marked_insig = tcx.has_attr(adt_def.did, sym::rustc_insignificant_dtor);
if is_marked_insig {
// In some cases like `std::collections::HashMap` where the struct is a wrapper around
// treat this as the simple case of Drop impl for type.
None
}
- };
- adt_drop_tys_helper(tcx, def_id, adt_has_dtor)
+ }
+}
+
+fn adt_drop_tys(tcx: TyCtxt<'_>, def_id: DefId) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
+ // This is for the "adt_drop_tys" query, which considers all `Drop` impls, therefore all dtors are
+ // significant.
+ let adt_has_dtor =
+ |adt_def: &ty::AdtDef| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ drop_tys_helper(tcx, tcx.type_of(def_id), tcx.param_env(def_id), adt_has_dtor)
+ .collect::<Result<Vec<_>, _>>()
+ .map(|components| tcx.intern_type_list(&components))
+}
+
+fn adt_significant_drop_tys(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
+ drop_tys_helper(
+ tcx,
+ tcx.type_of(def_id),
+ tcx.param_env(def_id),
+ adt_consider_insignificant_dtor(tcx),
+ )
+ .collect::<Result<Vec<_>, _>>()
+ .map(|components| tcx.intern_type_list(&components))
}
pub(crate) fn provide(providers: &mut ty::query::Providers) {
use rustc_infer::traits::Obligation;
use rustc_middle::ty::{self, ToPredicate, Ty, TyS};
use rustc_span::{MultiSpan, Span};
-use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use rustc_trait_selection::traits::{
IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
use rustc_span::symbol::sym;
use rustc_span::{self, MultiSpan, Span};
use rustc_target::spec::abi::Abi;
-use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use rustc_trait_selection::traits;
use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
use rustc_ty_utils::representability::{self, Representability};
use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{self, BytePos, MultiSpan, Span};
use rustc_trait_selection::infer::InferCtxtExt as _;
-use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
use rustc_trait_selection::traits::{
self, ObligationCause, ObligationCauseCode, StatementAsExpression, TraitEngine, TraitEngineExt,
self.tcx.type_of(def_id)
};
let substs = self.infcx.fresh_substs_for_item(span, def_id);
- self.write_substs(hir_id, substs);
let ty = item_ty.subst(self.tcx, substs);
self.write_resolution(hir_id, Ok((def_kind, def_id)));
use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness};
use rustc_span::lev_distance;
use rustc_span::symbol::{kw, sym, Ident};
-use rustc_span::{source_map, FileName, MultiSpan, Span};
+use rustc_span::{source_map, FileName, MultiSpan, Span, Symbol};
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
use rustc_trait_selection::traits::{FulfillmentError, Obligation};
let mut candidates = valid_out_of_scope_traits;
candidates.sort();
candidates.dedup();
+
+ // `TryFrom` and `FromIterator` have no methods
+ let edition_fix = candidates
+ .iter()
+ .find(|did| self.tcx.is_diagnostic_item(sym::TryInto, **did))
+ .map(|&d| d);
+
err.help("items from traits can only be used if the trait is in scope");
let msg = format!(
"the following {traits_are} implemented but not in scope; \
);
self.suggest_use_candidates(err, msg, candidates);
+ if let Some(did) = edition_fix {
+ err.note(&format!(
+ "'{}' is included in the prelude starting in Edition 2021",
+ with_crate_prefix(|| self.tcx.def_path_str(did))
+ ));
+ }
+
true
} else {
false
self.tcx.lang_items().deref_trait(),
self.tcx.lang_items().deref_mut_trait(),
self.tcx.lang_items().drop_trait(),
+ self.tcx.get_diagnostic_item(sym::AsRef),
];
// Try alternative arbitrary self types that could fulfill this call.
// FIXME: probe for all types that *could* be arbitrary self-types, not
// We don't want to suggest a container type when the missing
// method is `.clone()` or `.deref()` otherwise we'd suggest
// `Arc::new(foo).clone()`, which is far from what the user wants.
- let skip = skippable.contains(&did);
+ // Explicitly ignore the `Pin::as_ref()` method as `Pin` does not
+ // implement the `AsRef` trait.
+ let skip = skippable.contains(&did)
+ || (("Pin::new" == *pre)
+ && (Symbol::intern("as_ref") == item_name.name));
// Make sure the method is defined for the *actual* receiver: we don't
// want to treat `Box<Self>` as a receiver if it only works because of
// an autoderef to `&self`
use rustc_middle::ty::adjustment;
use rustc_middle::ty::{self, Ty};
use rustc_span::Span;
-use rustc_trait_selection::opaque_types::InferCtxtExt as _;
use std::ops::Deref;
// a variation on try that just returns unit
self.link_fn_params(body.params);
self.visit_body(body);
self.visit_region_obligations(body_id.hir_id);
-
- self.constrain_opaque_types();
}
fn visit_region_obligations(&mut self, hir_id: hir::HirId) {
for item in list.iter() {
if item.has_name(sym::address) {
codegen_fn_attrs.no_sanitize |= SanitizerSet::ADDRESS;
+ } else if item.has_name(sym::cfi) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI;
} else if item.has_name(sym::memory) {
codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
} else if item.has_name(sym::thread) {
// Getting this wrong can lead to ICE and unsoundness, so we assert it here.
for arg in substs.iter() {
let allowed_flags = ty::TypeFlags::MAY_NEED_DEFAULT_CONST_SUBSTS
- | ty::TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+ | ty::TypeFlags::STILL_FURTHER_SPECIALIZABLE
+ | ty::TypeFlags::HAS_ERROR;
assert!(!arg.has_type_flags(!allowed_flags));
}
substs
#![feature(slice_partition_dedup)]
#![feature(control_flow_enum)]
#![recursion_limit = "256"]
-#![cfg_attr(not(bootstrap), allow(rustc::potential_query_instability))]
#[macro_use]
extern crate tracing;
let mut c = 0;
for i in 0..BENCH_RANGE_SIZE {
for j in i + 1..BENCH_RANGE_SIZE {
- black_box(map.range(f(i, j)));
+ let _ = black_box(map.range(f(i, j)));
c += 1;
}
}
let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
b.iter(|| {
for _ in 0..repeats {
- black_box(map.iter());
+ let _ = black_box(map.iter());
}
});
}
/// let vec = heap.into_sorted_vec();
/// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
/// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
pub fn into_sorted_vec(mut self) -> Vec<T> {
let mut end = self.len();
///
/// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), vec![5, 4]);
/// ```
- #[must_use = "`self` will be dropped if the result is not used"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
IntoIterSorted { inner: self }
/// # Time complexity
///
/// Cost is *O*(1) in the worst case.
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn peek(&self) -> Option<&T> {
self.data.get(0)
/// assert!(heap.capacity() >= 100);
/// heap.push(4);
/// ```
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
self.data.capacity()
/// documentation for more.
///
/// [`iter`]: BinaryHeap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
iter: slice::Iter<'a, T>,
}
}
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
#[derive(Clone, Debug)]
pub struct IntoIterSorted<T> {
/// performance on *small* nodes of elements which are cheap to compare. However in the future we
/// would like to further explore choosing the optimal search strategy based on the choice of B,
/// and possibly other factors. Using linear search, searching for a random element is expected
-/// to take O(B * log(n)) comparisons, which is generally worse than a BST. In practice,
+/// to take B * log(n) comparisons, which is generally worse than a BST. In practice,
/// however, performance is excellent.
///
/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
/// documentation for more.
///
/// [`iter`]: BTreeMap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, K: 'a, V: 'a> {
range: LazyLeafRange<marker::Immut<'a>, K, V>,
_marker: PhantomData<&'a mut (K, V)>,
}
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// documentation for more.
///
/// [`keys`]: BTreeMap::keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Keys<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
/// documentation for more.
///
/// [`values`]: BTreeMap::values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Values<'a, K: 'a, V: 'a> {
inner: Iter<'a, K, V>,
/// documentation for more.
///
/// [`values_mut`]: BTreeMap::values_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "map_values_mut", since = "1.10.0")]
pub struct ValuesMut<'a, K: 'a, V: 'a> {
inner: IterMut<'a, K, V>,
/// See its documentation for more.
///
/// [`into_keys`]: BTreeMap::into_keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoKeys<K, V> {
inner: IntoIter<K, V>,
/// See its documentation for more.
///
/// [`into_values`]: BTreeMap::into_values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoValues<K, V> {
inner: IntoIter<K, V>,
/// documentation for more.
///
/// [`range`]: BTreeMap::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, K: 'a, V: 'a> {
inner: LeafRange<marker::Immut<'a>, K, V>,
/// documentation for more.
///
/// [`range_mut`]: BTreeMap::range_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct RangeMut<'a, K: 'a, V: 'a> {
inner: LeafRange<marker::ValMut<'a>, K, V>,
/// assert_eq!(keys, [1, 2]);
/// ```
#[inline]
- #[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_keys(self) -> IntoKeys<K, V> {
IntoKeys { inner: self.into_iter() }
/// assert_eq!(values, ["hello", "goodbye"]);
/// ```
#[inline]
- #[must_use = "`self` will be dropped if the result is not used"]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_values(self) -> IntoValues<K, V> {
IntoValues { inner: self.into_iter() }
/// map.entry("poneyland").or_insert(12);
/// assert_eq!(map.entry("poneyland").key(), &"poneyland");
/// ```
+ #[must_use]
#[stable(feature = "map_entry_keys", since = "1.10.0")]
pub fn key(&self) -> &K {
self.handle.reborrow().into_kv().0
/// assert_eq!(o.get(), &12);
/// }
/// ```
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self) -> &V {
self.handle.reborrow().into_kv().1
#[should_panic]
fn test_range_equal_excluded() {
let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
- map.range((Excluded(2), Excluded(2)));
+ let _ = map.range((Excluded(2), Excluded(2)));
}
#[test]
#[should_panic]
fn test_range_backwards_1() {
let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
- map.range((Included(3), Included(2)));
+ let _ = map.range((Included(3), Included(2)));
}
#[test]
#[should_panic]
fn test_range_backwards_2() {
let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
- map.range((Included(3), Excluded(2)));
+ let _ = map.range((Included(3), Excluded(2)));
}
#[test]
#[should_panic]
fn test_range_backwards_3() {
let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
- map.range((Excluded(3), Included(2)));
+ let _ = map.range((Excluded(3), Included(2)));
}
#[test]
#[should_panic]
fn test_range_backwards_4() {
let map: BTreeMap<_, _> = (0..5).map(|i| (i, i)).collect();
- map.range((Excluded(3), Excluded(2)));
+ let _ = map.range((Excluded(3), Excluded(2)));
}
#[test]
// we cause a different panic than `test_range_backwards_1` does.
// A more refined `should_panic` would be welcome.
if Cyclic3::C < Cyclic3::A {
- map.range(Cyclic3::C..=Cyclic3::A);
+ let _ = map.range(Cyclic3::C..=Cyclic3::A);
}
}
}
let map = (0..12).map(|i| (CompositeKey(i, EvilTwin(i)), ())).collect::<BTreeMap<_, _>>();
- map.range(EvilTwin(5)..=EvilTwin(7));
+ let _ = map.range(EvilTwin(5)..=EvilTwin(7));
}
#[test]
#[allow(dead_code)]
fn get<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
- v.get(t);
+ let _ = v.get(t);
}
#[allow(dead_code)]
fn get_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
- v.get_mut(t);
+ let _ = v.get_mut(t);
}
#[allow(dead_code)]
fn get_key_value<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
- v.get_key_value(t);
+ let _ = v.get_key_value(t);
}
#[allow(dead_code)]
fn contains_key<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
- v.contains_key(t);
+ let _ = v.contains_key(t);
}
#[allow(dead_code)]
fn range<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: T) {
- v.range(t..);
+ let _ = v.range(t..);
}
#[allow(dead_code)]
fn range_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: T) {
- v.range_mut(t..);
+ let _ = v.range_mut(t..);
}
#[allow(dead_code)]
/// See its documentation for more.
///
/// [`iter`]: BTreeSet::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
iter: Keys<'a, T, ()>,
/// See its documentation for more.
///
/// [`range`]: BTreeSet::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Debug)]
#[stable(feature = "btree_range", since = "1.17.0")]
pub struct Range<'a, T: 'a> {
/// set.insert(2);
/// assert_eq!(set.first(), Some(&1));
/// ```
+ #[must_use]
#[unstable(feature = "map_first_last", issue = "62924")]
pub fn first(&self) -> Option<&T>
where
/// set.insert(2);
/// assert_eq!(set.last(), Some(&2));
/// ```
+ #[must_use]
#[unstable(feature = "map_first_last", issue = "62924")]
pub fn last(&self) -> Option<&T>
where
set.is_empty();
set.len();
set.clear();
- set.iter();
- set.into_iter();
+ let _ = set.iter();
+ let _ = set.into_iter();
}
fn set_debug<K: Debug>(set: BTreeSet<K>) {
///
/// This `struct` is created by [`LinkedList::iter()`]. See its
/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
head: Option<NonNull<Node<T>>>,
///
/// This `struct` is created by [`LinkedList::iter_mut()`]. See its
/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
head: Option<NonNull<Node<T>>>,
///
/// The cursor is pointing to the "ghost" non-element if the list is empty.
#[inline]
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_front(&self) -> Cursor<'_, T> {
Cursor { index: 0, current: self.head, list: self }
///
/// The cursor is pointing to the "ghost" non-element if the list is empty.
#[inline]
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T> {
CursorMut { index: 0, current: self.head, list: self }
///
/// The cursor is pointing to the "ghost" non-element if the list is empty.
#[inline]
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_back(&self) -> Cursor<'_, T> {
Cursor { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
///
/// The cursor is pointing to the "ghost" non-element if the list is empty.
#[inline]
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T> {
CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
/// assert_eq!(dl.front(), Some(&1));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
unsafe { self.head.as_ref().map(|node| &node.as_ref().element) }
/// assert_eq!(dl.front(), Some(&5));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
unsafe { self.head.as_mut().map(|node| &mut node.as_mut().element) }
/// assert_eq!(dl.back(), Some(&1));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
unsafe { self.tail.as_ref().map(|node| &node.as_ref().element) }
///
/// This returns `None` if the cursor is currently pointing to the
/// "ghost" non-element.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn index(&self) -> Option<usize> {
let _ = self.current?;
///
/// This returns `None` if the cursor is currently pointing to the
/// "ghost" non-element.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn current(&self) -> Option<&'a T> {
unsafe { self.current.map(|current| &(*current.as_ptr()).element) }
/// If the cursor is pointing to the "ghost" non-element then this returns
/// the first element of the `LinkedList`. If it is pointing to the last
/// element of the `LinkedList` then this returns `None`.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn peek_next(&self) -> Option<&'a T> {
unsafe {
/// If the cursor is pointing to the "ghost" non-element then this returns
/// the last element of the `LinkedList`. If it is pointing to the first
/// element of the `LinkedList` then this returns `None`.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn peek_prev(&self) -> Option<&'a T> {
unsafe {
/// Provides a reference to the front element of the cursor's parent list,
/// or None if the list is empty.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn front(&self) -> Option<&'a T> {
self.list.front()
/// Provides a reference to the back element of the cursor's parent list,
/// or None if the list is empty.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn back(&self) -> Option<&'a T> {
self.list.back()
///
/// This returns `None` if the cursor is currently pointing to the
/// "ghost" non-element.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn index(&self) -> Option<usize> {
let _ = self.current?;
///
/// This returns `None` if the cursor is currently pointing to the
/// "ghost" non-element.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn current(&mut self) -> Option<&mut T> {
unsafe { self.current.map(|current| &mut (*current.as_ptr()).element) }
/// Provides a reference to the front element of the cursor's parent list,
/// or None if the list is empty.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn front(&self) -> Option<&T> {
self.list.front()
/// Provides a mutable reference to the front element of the cursor's
/// parent list, or None if the list is empty.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn front_mut(&mut self) -> Option<&mut T> {
self.list.front_mut()
/// Provides a reference to the back element of the cursor's parent list,
/// or None if the list is empty.
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn back(&self) -> Option<&T> {
self.list.back()
/// assert_eq!(contents.next(), Some(0));
/// assert_eq!(contents.next(), None);
/// ```
+ #[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn back_mut(&mut self) -> Option<&mut T> {
self.list.back_mut()
impl TryReserveError {
/// Details about the allocation that caused the error
#[inline]
+ #[must_use]
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
+#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
/// Gets the number of strong (`Rc`) pointers pointing to this allocation.
///
/// If `self` was created using [`Weak::new`], this will return 0.
+ #[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn strong_count(&self) -> usize {
if let Some(inner) = self.inner() { inner.strong() } else { 0 }
/// Gets the number of `Weak` pointers pointing to this allocation.
///
/// If no strong pointers remain, this will return zero.
+ #[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn weak_count(&self) -> usize {
self.inner()
/// assert!(!first.ptr_eq(&third));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
self.ptr.as_ptr() == other.ptr.as_ptr()
/// assert_eq!(*boxed_bytes, *s.as_bytes());
/// ```
#[stable(feature = "str_box_extras", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
self.into()
/// assert_eq!(boxed_str.into_string(), string);
/// ```
#[stable(feature = "box_str", since = "1.4.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
#[inline]
pub fn into_string(self: Box<str>) -> String {
let slice = Box::<[u8]>::from(self);
///
/// ```should_panic
/// // this will panic at runtime
- /// "0123456789abcdef".repeat(usize::MAX);
+ /// let huge = "0123456789abcdef".repeat(usize::MAX);
/// ```
#[cfg(not(no_global_oom_handling))]
+ #[must_use]
#[stable(feature = "repeat_str", since = "1.16.0")]
pub fn repeat(&self, n: usize) -> String {
unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
/// assert!(s.capacity() >= 10);
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
self.vec.capacity()
///
/// # Safety
///
- /// This function is unsafe because it does not check that the bytes passed
- /// to it are valid UTF-8. If this constraint is violated, it may cause
- /// memory unsafety issues with future users of the `String`, as the rest of
- /// the standard library assumes that `String`s are valid UTF-8.
+ /// This function is unsafe because the returned `&mut Vec` allows writing
+ /// bytes which are not valid UTF-8. If this constraint is violated, using
+ /// the original `String` after dropping the `&mut Vec` may violate memory
+ /// safety, as the rest of the standard library assumes that `String`s are
+ /// valid UTF-8.
///
/// # Examples
///
/// // the first byte is invalid here
/// assert_eq!(1, error.valid_up_to());
/// ```
+ #[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn utf8_error(&self) -> Utf8Error {
self.error
/// assert_eq!(1, Arc::weak_count(&five));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "arc_counts", since = "1.15.0")]
pub fn weak_count(this: &Self) -> usize {
let cnt = this.inner().weak.load(SeqCst);
/// assert_eq!(2, Arc::strong_count(&five));
/// ```
#[inline]
+ #[must_use]
#[stable(feature = "arc_counts", since = "1.15.0")]
pub fn strong_count(this: &Self) -> usize {
this.inner().strong.load(SeqCst)
drop(Weak { ptr: self.ptr });
}
- #[inline]
- #[stable(feature = "ptr_eq", since = "1.17.0")]
/// Returns `true` if the two `Arc`s point to the same allocation
/// (in a vein similar to [`ptr::eq`]).
///
/// ```
///
/// [`ptr::eq`]: core::ptr::eq "ptr::eq"
+ #[inline]
+ #[must_use]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.ptr.as_ptr() == other.ptr.as_ptr()
}
/// Gets the number of strong (`Arc`) pointers pointing to this allocation.
///
/// If `self` was created using [`Weak::new`], this will return 0.
+ #[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn strong_count(&self) -> usize {
if let Some(inner) = self.inner() { inner.strong.load(SeqCst) } else { 0 }
/// Due to implementation details, the returned value can be off by 1 in
/// either direction when other threads are manipulating any `Arc`s or
/// `Weak`s pointing to the same allocation.
+ #[must_use]
#[stable(feature = "weak_counts", since = "1.41.0")]
pub fn weak_count(&self) -> usize {
self.inner()
///
/// [`ptr::eq`]: core::ptr::eq "ptr::eq"
#[inline]
+ #[must_use]
#[stable(feature = "weak_ptr_eq", since = "1.39.0")]
pub fn ptr_eq(&self, other: &Self) -> bool {
self.ptr.as_ptr() == other.ptr.as_ptr()
/// Returns a reference to the underlying allocator.
#[unstable(feature = "allocator_api", issue = "32838")]
+ #[must_use]
#[inline]
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
forward_ref_unop!(impl $imp, $method for $t,
#[stable(feature = "rust1", since = "1.0.0")]);
};
+ (impl const $imp:ident, $method:ident for $t:ty) => {
+ forward_ref_unop!(impl const $imp, $method for $t,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp for &$t {
+ type Output = <$t as $imp>::Output;
+
+ #[inline]
+ fn $method(self) -> <$t as $imp>::Output {
+ $imp::$method(*self)
+ }
+ }
+ };
(impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => {
#[$attr]
impl $imp for &$t {
forward_ref_binop!(impl $imp, $method for $t, $u,
#[stable(feature = "rust1", since = "1.0.0")]);
};
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty) => {
+ forward_ref_binop!(impl const $imp, $method for $t, $u,
+ #[stable(feature = "rust1", since = "1.0.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl<'a> const $imp<$u> for &'a $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, other)
+ }
+ }
+
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for $t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(self, *other)
+ }
+ }
+
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for &$t {
+ type Output = <$t as $imp<$u>>::Output;
+
+ #[inline]
+ fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
+ $imp::$method(*self, *other)
+ }
+ }
+ };
(impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
#[$attr]
impl<'a> $imp<$u> for &'a $t {
forward_ref_op_assign!(impl $imp, $method for $t, $u,
#[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
};
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty) => {
+ forward_ref_op_assign!(impl const $imp, $method for $t, $u,
+ #[stable(feature = "op_assign_builtins_by_ref", since = "1.22.0")]);
+ };
+ // Equivalent to the non-const version, with the addition of `rustc_const_unstable`
+ (impl const $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
+ #[$attr]
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const $imp<&$u> for $t {
+ #[inline]
+ fn $method(&mut self, other: &$u) {
+ $imp::$method(self, *other);
+ }
+ }
+ };
(impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => {
#[$attr]
impl $imp<&$u> for $t {
#![feature(const_caller_location)]
#![feature(const_cell_into_inner)]
#![feature(const_discriminant)]
+#![cfg_attr(not(bootstrap), feature(const_eval_select))]
#![feature(const_float_bits_conv)]
#![feature(const_float_classify)]
+#![feature(const_fmt_arguments_new)]
#![feature(const_heap)]
#![feature(const_inherent_unchecked_arith)]
#![feature(const_int_unchecked_arith)]
#![feature(const_maybe_uninit_as_ptr)]
#![feature(const_maybe_uninit_assume_init)]
#![feature(const_num_from_num)]
+#![feature(const_ops)]
#![feature(const_option)]
#![feature(const_pin)]
#![feature(const_replace)]
without modifying the original"]
#[inline]
pub const fn checked_div(self, rhs: Self) -> Option<Self> {
- // Using `&` helps LLVM see that it is the same check made in division.
- if unlikely!(rhs == 0 || ((self == Self::MIN) & (rhs == -1))) {
+ if unlikely!(rhs == 0 || ((self == Self::MIN) && (rhs == -1))) {
None
} else {
// SAFETY: div by zero and by INT_MIN have been checked above
without modifying the original"]
#[inline]
pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
- // Using `&` helps LLVM see that it is the same check made in division.
- if unlikely!(rhs == 0 || ((self == Self::MIN) & (rhs == -1))) {
+ if unlikely!(rhs == 0 || ((self == Self::MIN) && (rhs == -1))) {
None
} else {
// SAFETY: div by zero and by INT_MIN have been checked above
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
- impl BitOr for $Ty {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: Self) -> Self::Output {
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
- impl BitOr<$Int> for $Ty {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr<$Int> for $Ty {
type Output = Self;
#[inline]
fn bitor(self, rhs: $Int) -> Self::Output {
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
- impl BitOr<$Ty> for $Int {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr<$Ty> for $Int {
type Output = $Ty;
#[inline]
fn bitor(self, rhs: $Ty) -> Self::Output {
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
- impl BitOrAssign for $Ty {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for $Ty {
#[inline]
fn bitor_assign(&mut self, rhs: Self) {
*self = *self | rhs;
}
#[stable(feature = "nonzero_bitor", since = "1.45.0")]
- impl BitOrAssign<$Int> for $Ty {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign<$Int> for $Ty {
#[inline]
fn bitor_assign(&mut self, rhs: $Int) {
*self = *self | rhs;
( $( $Ty: ident($Int: ty); )+ ) => {
$(
#[stable(feature = "nonzero_div", since = "1.51.0")]
- impl Div<$Ty> for $Int {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div<$Ty> for $Int {
type Output = $Int;
/// This operation rounds towards zero,
/// truncating any fractional part of the exact result, and cannot panic.
}
#[stable(feature = "nonzero_div", since = "1.51.0")]
- impl Rem<$Ty> for $Int {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem<$Ty> for $Int {
type Output = $Int;
/// This operation satisfies `n % d == n - (n / d) * d`, and cannot panic.
#[inline]
// to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic
let (a, b) = self.overflowing_add(rhs);
let (c, d) = a.overflowing_add(carry as $SelfT);
- (c, b | d)
+ (c, b || d)
}
/// Calculates `self` + `rhs` with a signed `rhs`
// to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic
let (a, b) = self.overflowing_sub(rhs);
let (c, d) = a.overflowing_sub(borrow as $SelfT);
- (c, b | d)
+ (c, b || d)
}
/// Computes the absolute difference between `self` and `other`.
macro_rules! sh_impl_signed {
($t:ident, $f:ident) => {
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shl<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
}
}
}
- forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
+ forward_ref_binop! { impl const Shl, shl for Wrapping<$t>, $f,
#[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShlAssign<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for Wrapping<$t> {
#[inline]
fn shl_assign(&mut self, other: $f) {
*self = *self << other;
}
}
- forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for Wrapping<$t>, $f }
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shr<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
}
}
}
- forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
+ forward_ref_binop! { impl const Shr, shr for Wrapping<$t>, $f,
#[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShrAssign<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for Wrapping<$t> {
#[inline]
fn shr_assign(&mut self, other: $f) {
*self = *self >> other;
}
}
- forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for Wrapping<$t>, $f }
};
}
macro_rules! sh_impl_unsigned {
($t:ident, $f:ident) => {
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shl<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_shl((other & self::shift_max::$t as $f) as u32))
}
}
- forward_ref_binop! { impl Shl, shl for Wrapping<$t>, $f,
+ forward_ref_binop! { impl const Shl, shl for Wrapping<$t>, $f,
#[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShlAssign<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for Wrapping<$t> {
#[inline]
fn shl_assign(&mut self, other: $f) {
*self = *self << other;
}
}
- forward_ref_op_assign! { impl ShlAssign, shl_assign for Wrapping<$t>, $f }
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for Wrapping<$t>, $f }
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shr<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_shr((other & self::shift_max::$t as $f) as u32))
}
}
- forward_ref_binop! { impl Shr, shr for Wrapping<$t>, $f,
+ forward_ref_binop! { impl const Shr, shr for Wrapping<$t>, $f,
#[stable(feature = "wrapping_ref_ops", since = "1.39.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShrAssign<$f> for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for Wrapping<$t> {
#[inline]
fn shr_assign(&mut self, other: $f) {
*self = *self >> other;
}
}
- forward_ref_op_assign! { impl ShrAssign, shr_assign for Wrapping<$t>, $f }
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for Wrapping<$t>, $f }
};
}
macro_rules! wrapping_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Add for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Add for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_add(other.0))
}
}
- forward_ref_binop! { impl Add, add for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const Add, add for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl AddAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const AddAssign for Wrapping<$t> {
#[inline]
fn add_assign(&mut self, other: Wrapping<$t>) {
*self = *self + other;
}
}
- forward_ref_op_assign! { impl AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const AddAssign, add_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "rust1", since = "1.0.0")]
- impl Sub for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Sub for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_sub(other.0))
}
}
- forward_ref_binop! { impl Sub, sub for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const Sub, sub for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl SubAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const SubAssign for Wrapping<$t> {
#[inline]
fn sub_assign(&mut self, other: Wrapping<$t>) {
*self = *self - other;
}
}
- forward_ref_op_assign! { impl SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const SubAssign, sub_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "rust1", since = "1.0.0")]
- impl Mul for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Mul for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl MulAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const MulAssign for Wrapping<$t> {
#[inline]
fn mul_assign(&mut self, other: Wrapping<$t>) {
*self = *self * other;
}
}
- forward_ref_op_assign! { impl MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const MulAssign, mul_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "wrapping_div", since = "1.3.0")]
- impl Div for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_div(other.0))
}
}
- forward_ref_binop! { impl Div, div for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const Div, div for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl DivAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const DivAssign for Wrapping<$t> {
#[inline]
fn div_assign(&mut self, other: Wrapping<$t>) {
*self = *self / other;
}
}
- forward_ref_op_assign! { impl DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const DivAssign, div_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "wrapping_impls", since = "1.7.0")]
- impl Rem for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0.wrapping_rem(other.0))
}
}
- forward_ref_binop! { impl Rem, rem for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const Rem, rem for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl RemAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const RemAssign for Wrapping<$t> {
#[inline]
fn rem_assign(&mut self, other: Wrapping<$t>) {
*self = *self % other;
}
}
- forward_ref_op_assign! { impl RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const RemAssign, rem_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "rust1", since = "1.0.0")]
- impl Not for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Not for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(!self.0)
}
}
- forward_ref_unop! { impl Not, not for Wrapping<$t>,
+ forward_ref_unop! { impl const Not, not for Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitXor for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXor for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0 ^ other.0)
}
}
- forward_ref_binop! { impl BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const BitXor, bitxor for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitXorAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXorAssign for Wrapping<$t> {
#[inline]
fn bitxor_assign(&mut self, other: Wrapping<$t>) {
*self = *self ^ other;
}
}
- forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const BitXorAssign, bitxor_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitOr for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0 | other.0)
}
}
- forward_ref_binop! { impl BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const BitOr, bitor for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitOrAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for Wrapping<$t> {
#[inline]
fn bitor_assign(&mut self, other: Wrapping<$t>) {
*self = *self | other;
}
}
- forward_ref_op_assign! { impl BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const BitOrAssign, bitor_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitAnd for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAnd for Wrapping<$t> {
type Output = Wrapping<$t>;
#[inline]
Wrapping(self.0 & other.0)
}
}
- forward_ref_binop! { impl BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
+ forward_ref_binop! { impl const BitAnd, bitand for Wrapping<$t>, Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitAndAssign for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAndAssign for Wrapping<$t> {
#[inline]
fn bitand_assign(&mut self, other: Wrapping<$t>) {
*self = *self & other;
}
}
- forward_ref_op_assign! { impl BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> }
+ forward_ref_op_assign! { impl const BitAndAssign, bitand_assign for Wrapping<$t>, Wrapping<$t> }
#[stable(feature = "wrapping_neg", since = "1.10.0")]
- impl Neg for Wrapping<$t> {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Neg for Wrapping<$t> {
type Output = Self;
#[inline]
fn neg(self) -> Self {
Wrapping(0) - self
}
}
- forward_ref_unop! { impl Neg, neg for Wrapping<$t>,
+ forward_ref_unop! { impl const Neg, neg for Wrapping<$t>,
#[stable(feature = "wrapping_ref", since = "1.14.0")] }
)*)
macro_rules! add_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Add for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Add for $t {
type Output = $t;
#[inline]
fn add(self, other: $t) -> $t { self + other }
}
- forward_ref_binop! { impl Add, add for $t, $t }
+ forward_ref_binop! { impl const Add, add for $t, $t }
)*)
}
macro_rules! sub_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Sub for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Sub for $t {
type Output = $t;
#[inline]
fn sub(self, other: $t) -> $t { self - other }
}
- forward_ref_binop! { impl Sub, sub for $t, $t }
+ forward_ref_binop! { impl const Sub, sub for $t, $t }
)*)
}
macro_rules! mul_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Mul for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Mul for $t {
type Output = $t;
#[inline]
fn mul(self, other: $t) -> $t { self * other }
}
- forward_ref_binop! { impl Mul, mul for $t, $t }
+ forward_ref_binop! { impl const Mul, mul for $t, $t }
)*)
}
///
#[doc = $panic]
#[stable(feature = "rust1", since = "1.0.0")]
- impl Div for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for $t {
type Output = $t;
#[inline]
fn div(self, other: $t) -> $t { self / other }
}
- forward_ref_binop! { impl Div, div for $t, $t }
+ forward_ref_binop! { impl const Div, div for $t, $t }
)*)*)
}
macro_rules! div_impl_float {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Div for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Div for $t {
type Output = $t;
#[inline]
fn div(self, other: $t) -> $t { self / other }
}
- forward_ref_binop! { impl Div, div for $t, $t }
+ forward_ref_binop! { impl const Div, div for $t, $t }
)*)
}
///
#[doc = $panic]
#[stable(feature = "rust1", since = "1.0.0")]
- impl Rem for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for $t {
type Output = $t;
#[inline]
fn rem(self, other: $t) -> $t { self % other }
}
- forward_ref_binop! { impl Rem, rem for $t, $t }
+ forward_ref_binop! { impl const Rem, rem for $t, $t }
)*)*)
}
/// assert_eq!(x % y, remainder);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- impl Rem for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Rem for $t {
type Output = $t;
#[inline]
fn rem(self, other: $t) -> $t { self % other }
}
- forward_ref_binop! { impl Rem, rem for $t, $t }
+ forward_ref_binop! { impl const Rem, rem for $t, $t }
)*)
}
macro_rules! neg_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Neg for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Neg for $t {
type Output = $t;
#[inline]
fn neg(self) -> $t { -self }
}
- forward_ref_unop! { impl Neg, neg for $t }
+ forward_ref_unop! { impl const Neg, neg for $t }
)*)
}
macro_rules! add_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl AddAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const AddAssign for $t {
#[inline]
#[rustc_inherit_overflow_checks]
fn add_assign(&mut self, other: $t) { *self += other }
}
- forward_ref_op_assign! { impl AddAssign, add_assign for $t, $t }
+ forward_ref_op_assign! { impl const AddAssign, add_assign for $t, $t }
)+)
}
macro_rules! sub_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl SubAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const SubAssign for $t {
#[inline]
#[rustc_inherit_overflow_checks]
fn sub_assign(&mut self, other: $t) { *self -= other }
}
- forward_ref_op_assign! { impl SubAssign, sub_assign for $t, $t }
+ forward_ref_op_assign! { impl const SubAssign, sub_assign for $t, $t }
)+)
}
macro_rules! mul_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl MulAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const MulAssign for $t {
#[inline]
#[rustc_inherit_overflow_checks]
fn mul_assign(&mut self, other: $t) { *self *= other }
}
- forward_ref_op_assign! { impl MulAssign, mul_assign for $t, $t }
+ forward_ref_op_assign! { impl const MulAssign, mul_assign for $t, $t }
)+)
}
macro_rules! div_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl DivAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const DivAssign for $t {
#[inline]
fn div_assign(&mut self, other: $t) { *self /= other }
}
- forward_ref_op_assign! { impl DivAssign, div_assign for $t, $t }
+ forward_ref_op_assign! { impl const DivAssign, div_assign for $t, $t }
)+)
}
macro_rules! rem_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl RemAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const RemAssign for $t {
#[inline]
fn rem_assign(&mut self, other: $t) { *self %= other }
}
- forward_ref_op_assign! { impl RemAssign, rem_assign for $t, $t }
+ forward_ref_op_assign! { impl const RemAssign, rem_assign for $t, $t }
)+)
}
macro_rules! not_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl Not for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Not for $t {
type Output = $t;
#[inline]
fn not(self) -> $t { !self }
}
- forward_ref_unop! { impl Not, not for $t }
+ forward_ref_unop! { impl const Not, not for $t }
)*)
}
macro_rules! bitand_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitAnd for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAnd for $t {
type Output = $t;
#[inline]
fn bitand(self, rhs: $t) -> $t { self & rhs }
}
- forward_ref_binop! { impl BitAnd, bitand for $t, $t }
+ forward_ref_binop! { impl const BitAnd, bitand for $t, $t }
)*)
}
macro_rules! bitor_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitOr for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOr for $t {
type Output = $t;
#[inline]
fn bitor(self, rhs: $t) -> $t { self | rhs }
}
- forward_ref_binop! { impl BitOr, bitor for $t, $t }
+ forward_ref_binop! { impl const BitOr, bitor for $t, $t }
)*)
}
macro_rules! bitxor_impl {
($($t:ty)*) => ($(
#[stable(feature = "rust1", since = "1.0.0")]
- impl BitXor for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXor for $t {
type Output = $t;
#[inline]
fn bitxor(self, other: $t) -> $t { self ^ other }
}
- forward_ref_binop! { impl BitXor, bitxor for $t, $t }
+ forward_ref_binop! { impl const BitXor, bitxor for $t, $t }
)*)
}
macro_rules! shl_impl {
($t:ty, $f:ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shl<$f> for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shl<$f> for $t {
type Output = $t;
#[inline]
}
}
- forward_ref_binop! { impl Shl, shl for $t, $f }
+ forward_ref_binop! { impl const Shl, shl for $t, $f }
};
}
macro_rules! shr_impl {
($t:ty, $f:ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
- impl Shr<$f> for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const Shr<$f> for $t {
type Output = $t;
#[inline]
}
}
- forward_ref_binop! { impl Shr, shr for $t, $f }
+ forward_ref_binop! { impl const Shr, shr for $t, $f }
};
}
macro_rules! bitand_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitAndAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitAndAssign for $t {
#[inline]
fn bitand_assign(&mut self, other: $t) { *self &= other }
}
- forward_ref_op_assign! { impl BitAndAssign, bitand_assign for $t, $t }
+ forward_ref_op_assign! { impl const BitAndAssign, bitand_assign for $t, $t }
)+)
}
macro_rules! bitor_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitOrAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitOrAssign for $t {
#[inline]
fn bitor_assign(&mut self, other: $t) { *self |= other }
}
- forward_ref_op_assign! { impl BitOrAssign, bitor_assign for $t, $t }
+ forward_ref_op_assign! { impl const BitOrAssign, bitor_assign for $t, $t }
)+)
}
macro_rules! bitxor_assign_impl {
($($t:ty)+) => ($(
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl BitXorAssign for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const BitXorAssign for $t {
#[inline]
fn bitxor_assign(&mut self, other: $t) { *self ^= other }
}
- forward_ref_op_assign! { impl BitXorAssign, bitxor_assign for $t, $t }
+ forward_ref_op_assign! { impl const BitXorAssign, bitxor_assign for $t, $t }
)+)
}
macro_rules! shl_assign_impl {
($t:ty, $f:ty) => {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShlAssign<$f> for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShlAssign<$f> for $t {
#[inline]
#[rustc_inherit_overflow_checks]
fn shl_assign(&mut self, other: $f) {
}
}
- forward_ref_op_assign! { impl ShlAssign, shl_assign for $t, $f }
+ forward_ref_op_assign! { impl const ShlAssign, shl_assign for $t, $f }
};
}
macro_rules! shr_assign_impl {
($t:ty, $f:ty) => {
#[stable(feature = "op_assign_traits", since = "1.8.0")]
- impl ShrAssign<$f> for $t {
+ #[rustc_const_unstable(feature = "const_ops", issue = "90080")]
+ impl const ShrAssign<$f> for $t {
#[inline]
#[rustc_inherit_overflow_checks]
fn shr_assign(&mut self, other: $f) {
}
}
- forward_ref_op_assign! { impl ShrAssign, shr_assign for $t, $f }
+ forward_ref_op_assign! { impl const ShrAssign, shr_assign for $t, $f }
};
}
// never inline unless panic_immediate_abort to avoid code
// bloat at the call sites as much as possible
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic"] // needed by codegen for panic on overflow and other `Assert` MIR terminators
-pub fn panic(expr: &'static str) -> ! {
- if cfg!(feature = "panic_immediate_abort") {
- super::intrinsics::abort()
- }
-
+pub const fn panic(expr: &'static str) -> ! {
// Use Arguments::new_v1 instead of format_args!("{}", expr) to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
#[inline]
#[track_caller]
-#[lang = "panic_str"] // needed for const-evaluated panics
-pub fn panic_str(expr: &str) -> ! {
- panic_fmt(format_args!("{}", expr));
+#[lang = "panic_str"] // needed for `non-fmt-panics` lint
+pub const fn panic_str(expr: &str) -> ! {
+ panic_display(&expr);
}
#[inline]
#[track_caller]
#[lang = "panic_display"] // needed for const-evaluated panics
-pub fn panic_display<T: fmt::Display>(x: &T) -> ! {
+#[rustc_do_not_const_check] // hooked by const-eval
+pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
}
#[cfg_attr(feature = "panic_immediate_abort", inline)]
#[track_caller]
#[lang = "panic_fmt"] // needed for const-evaluated panics
-pub fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
+#[rustc_do_not_const_check] // hooked by const-eval
+pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
if cfg!(feature = "panic_immediate_abort") {
super::intrinsics::abort()
}
//! Free functions to create `&[T]` and `&mut [T]`.
use crate::array;
-use crate::intrinsics::is_aligned_and_not_null;
-use crate::mem;
use crate::ptr;
/// Forms a slice from a pointer and a length.
/// [`NonNull::dangling()`]: ptr::NonNull::dangling
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
- debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
- debug_assert!(
- mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
- "attempt to create slice covering at least half the address space"
- );
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
+ debug_check_data_len(data, len);
+
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe { &*ptr::slice_from_raw_parts(data, len) }
}
/// [`NonNull::dangling()`]: ptr::NonNull::dangling
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
- debug_assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
- debug_assert!(
- mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
- "attempt to create slice covering at least half the address space"
- );
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
+ debug_check_data_len(data as _, len);
+
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
}
+// In debug builds, checks that the `data` pointer is aligned and non-null, and that a slice of the given `len` would cover less than half the address space
+#[cfg(all(not(bootstrap), debug_assertions))]
+#[unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+const fn debug_check_data_len<T>(data: *const T, len: usize) {
+ fn rt_check<T>(data: *const T) {
+ use crate::intrinsics::is_aligned_and_not_null;
+
+ assert!(is_aligned_and_not_null(data), "attempt to create unaligned or null slice");
+ }
+
+ const fn noop<T>(_: *const T) {}
+
+ // SAFETY:
+ //
+ // `rt_check` is just a debug assert to hint users that they are causing UB,
+ // it is not required for safety (the safety must be guaranteed by
+ // the `from_raw_parts[_mut]` caller).
+ //
+ // Since the checks are not required, we ignore them in CTFE as they can't
+ // be done there (alignment does not make much sense there).
+ unsafe {
+ crate::intrinsics::const_eval_select((data,), noop, rt_check);
+ }
+
+ assert!(
+ crate::mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
+ "attempt to create slice covering at least half the address space"
+ );
+}
+
+#[cfg(not(all(not(bootstrap), debug_assertions)))]
+const fn debug_check_data_len<T>(_data: *const T, _len: usize) {}
+
/// Converts a reference to T into a slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_slice_from_ref", issue = "90206")]
/// If the pattern allows a reverse search but its results might differ
/// from a forward search, the [`rmatch_indices`] method can be used.
///
- /// [`rmatch_indices`]: str::match_indices
+ /// [`rmatch_indices`]: str::rmatch_indices
///
/// # Examples
///
/// Implements substring slicing with syntax `&self[.. end]` or `&mut
/// self[.. end]`.
///
-/// Returns a slice of the given string from the byte range [`0`, `end`).
+/// Returns a slice of the given string from the byte range \[0, `end`).
/// Equivalent to `&self[0 .. end]` or `&mut self[0 .. end]`.
///
/// This operation is *O*(1).
/// Implements substring slicing with syntax `&self[begin ..]` or `&mut
/// self[begin ..]`.
///
-/// Returns a slice of the given string from the byte range [`begin`,
-/// `len`). Equivalent to `&self[begin .. len]` or `&mut self[begin ..
-/// len]`.
+/// Returns a slice of the given string from the byte range \[`begin`, `len`).
+/// Equivalent to `&self[begin .. len]` or `&mut self[begin .. len]`.
///
/// This operation is *O*(1).
///
/// Implements substring slicing with syntax `&self[..= end]` or `&mut
/// self[..= end]`.
///
-/// Returns a slice of the given string from the byte range [0, `end`].
+/// Returns a slice of the given string from the byte range \[0, `end`\].
/// Equivalent to `&self [0 .. end + 1]`, except if `end` has the maximum
/// value for `usize`.
///
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.offset(1));
- if zu | zv {
+ if zu || zv {
break;
}
}
/// # Examples
/// ```
/// #![feature(duration_checked_float)]
- ///
/// use std::time::Duration;
///
/// let dur = Duration::try_from_secs_f64(2.7);
/// # Examples
/// ```
/// #![feature(duration_checked_float)]
- ///
/// use std::time::Duration;
///
/// let dur = Duration::try_from_secs_f32(2.7);
///
/// ```
/// #![feature(duration_checked_float)]
-///
/// use std::time::Duration;
///
/// if let Err(e) = Duration::try_from_secs_f32(-1.0) {
/// println!("key: {} val: {}", key, val);
/// }
/// ```
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, K, V> {
Iter { base: self.base.iter() }
/// println!("key: {} val: {}", key, val);
/// }
/// ```
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
IterMut { base: self.base.iter_mut() }
/// assert!(a.is_empty());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain(&mut self) -> Drain<'_, K, V> {
Drain { base: self.base.drain() }
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[unstable(feature = "hash_drain_filter", issue = "59618")]
pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F>
where
/// assert_eq!(map.len(), 4);
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "retain_hash_collection", since = "1.18.0")]
pub fn retain<F>(&mut self, f: F)
where
/// assert_eq!(vec, ["a", "b", "c"]);
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_keys(self) -> IntoKeys<K, V> {
IntoKeys { inner: self.into_iter() }
/// assert_eq!(vec, [1, 2, 3]);
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_values(self) -> IntoValues<K, V> {
IntoValues { inner: self.into_iter() }
type IntoIter = Iter<'a, K, V>;
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
fn into_iter(self) -> Iter<'a, K, V> {
self.iter()
}
type IntoIter = IterMut<'a, K, V>;
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
fn into_iter(self) -> IterMut<'a, K, V> {
self.iter_mut()
}
/// let vec: Vec<(&str, i32)> = map.into_iter().collect();
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
fn into_iter(self) -> IntoIter<K, V> {
IntoIter { base: self.base.into_iter() }
}
/// }
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<'_, T> {
Iter { base: self.base.iter() }
/// assert!(set.is_empty());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain(&mut self) -> Drain<'_, T> {
Drain { base: self.base.drain() }
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[unstable(feature = "hash_drain_filter", issue = "59618")]
pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, T, F>
where
/// assert_eq!(diff, [4].iter().collect());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn difference<'a>(&'a self, other: &'a HashSet<T, S>) -> Difference<'a, T, S> {
Difference { iter: self.iter(), other }
/// assert_eq!(diff1, [1, 4].iter().collect());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn symmetric_difference<'a>(
&'a self,
/// assert_eq!(intersection, [2, 3].iter().collect());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn intersection<'a>(&'a self, other: &'a HashSet<T, S>) -> Intersection<'a, T, S> {
if self.len() <= other.len() {
/// assert_eq!(union, [1, 2, 3, 4].iter().collect());
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn union<'a>(&'a self, other: &'a HashSet<T, S>) -> Union<'a, T, S> {
if self.len() >= other.len() {
/// set.retain(|&k| k % 2 == 0);
/// assert_eq!(set.len(), 3);
/// ```
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
#[stable(feature = "retain_hash_collection", since = "1.18.0")]
pub fn retain<F>(&mut self, f: F)
where
type IntoIter = Iter<'a, T>;
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
/// }
/// ```
#[inline]
- #[cfg_attr(not(bootstrap), rustc_lint_query_instability)]
fn into_iter(self) -> IntoIter<T> {
IntoIter { base: self.base.into_iter() }
}
pub fn atanh(self) -> f32 {
0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
}
-
- /// Linear interpolation between `start` and `end`.
- ///
- /// This enables linear interpolation between `start` and `end`, where start is represented by
- /// `self == 0.0` and `end` is represented by `self == 1.0`. This is the basis of all
- /// "transition", "easing", or "step" functions; if you change `self` from 0.0 to 1.0
- /// at a given rate, the result will change from `start` to `end` at a similar rate.
- ///
- /// Values below 0.0 or above 1.0 are allowed, allowing you to extrapolate values outside the
- /// range from `start` to `end`. This also is useful for transition functions which might
- /// move slightly past the end or start for a desired effect. Mathematically, the values
- /// returned are equivalent to `start + self * (end - start)`, although we make a few specific
- /// guarantees that are useful specifically to linear interpolation.
- ///
- /// These guarantees are:
- ///
- /// * If `start` and `end` are [finite], the value at 0.0 is always `start` and the
- /// value at 1.0 is always `end`. (exactness)
- /// * If `start` and `end` are [finite], the values will always move in the direction from
- /// `start` to `end` (monotonicity)
- /// * If `self` is [finite] and `start == end`, the value at any point will always be
- /// `start == end`. (consistency)
- ///
- /// [finite]: #method.is_finite
- #[must_use = "method returns a new number and does not mutate the original value"]
- #[unstable(feature = "float_interpolation", issue = "86269")]
- pub fn lerp(self, start: f32, end: f32) -> f32 {
- // consistent
- if start == end {
- start
-
- // exact/monotonic
- } else {
- self.mul_add(end, (-self).mul_add(start, start))
- }
- }
}
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f32::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
-
-#[test]
-fn test_lerp_exact() {
- // simple values
- assert_eq!(f32::lerp(0.0, 2.0, 4.0), 2.0);
- assert_eq!(f32::lerp(1.0, 2.0, 4.0), 4.0);
-
- // boundary values
- assert_eq!(f32::lerp(0.0, f32::MIN, f32::MAX), f32::MIN);
- assert_eq!(f32::lerp(1.0, f32::MIN, f32::MAX), f32::MAX);
-}
-
-#[test]
-fn test_lerp_consistent() {
- assert_eq!(f32::lerp(f32::MAX, f32::MIN, f32::MIN), f32::MIN);
- assert_eq!(f32::lerp(f32::MIN, f32::MAX, f32::MAX), f32::MAX);
-
- // as long as t is finite, a/b can be infinite
- assert_eq!(f32::lerp(f32::MAX, f32::NEG_INFINITY, f32::NEG_INFINITY), f32::NEG_INFINITY);
- assert_eq!(f32::lerp(f32::MIN, f32::INFINITY, f32::INFINITY), f32::INFINITY);
-}
-
-#[test]
-fn test_lerp_nan_infinite() {
- // non-finite t is not NaN if a/b different
- assert!(!f32::lerp(f32::INFINITY, f32::MIN, f32::MAX).is_nan());
- assert!(!f32::lerp(f32::NEG_INFINITY, f32::MIN, f32::MAX).is_nan());
-}
-
-#[test]
-fn test_lerp_values() {
- // just a few basic values
- assert_eq!(f32::lerp(0.25, 1.0, 2.0), 1.25);
- assert_eq!(f32::lerp(0.50, 1.0, 2.0), 1.50);
- assert_eq!(f32::lerp(0.75, 1.0, 2.0), 1.75);
-}
-
-#[test]
-fn test_lerp_monotonic() {
- // near 0
- let below_zero = f32::lerp(-f32::EPSILON, f32::MIN, f32::MAX);
- let zero = f32::lerp(0.0, f32::MIN, f32::MAX);
- let above_zero = f32::lerp(f32::EPSILON, f32::MIN, f32::MAX);
- assert!(below_zero <= zero);
- assert!(zero <= above_zero);
- assert!(below_zero <= above_zero);
-
- // near 0.5
- let below_half = f32::lerp(0.5 - f32::EPSILON, f32::MIN, f32::MAX);
- let half = f32::lerp(0.5, f32::MIN, f32::MAX);
- let above_half = f32::lerp(0.5 + f32::EPSILON, f32::MIN, f32::MAX);
- assert!(below_half <= half);
- assert!(half <= above_half);
- assert!(below_half <= above_half);
-
- // near 1
- let below_one = f32::lerp(1.0 - f32::EPSILON, f32::MIN, f32::MAX);
- let one = f32::lerp(1.0, f32::MIN, f32::MAX);
- let above_one = f32::lerp(1.0 + f32::EPSILON, f32::MIN, f32::MAX);
- assert!(below_one <= one);
- assert!(one <= above_one);
- assert!(below_one <= above_one);
-}
0.5 * ((2.0 * self) / (1.0 - self)).ln_1p()
}
- /// Linear interpolation between `start` and `end`.
- ///
- /// This enables linear interpolation between `start` and `end`, where start is represented by
- /// `self == 0.0` and `end` is represented by `self == 1.0`. This is the basis of all
- /// "transition", "easing", or "step" functions; if you change `self` from 0.0 to 1.0
- /// at a given rate, the result will change from `start` to `end` at a similar rate.
- ///
- /// Values below 0.0 or above 1.0 are allowed, allowing you to extrapolate values outside the
- /// range from `start` to `end`. This also is useful for transition functions which might
- /// move slightly past the end or start for a desired effect. Mathematically, the values
- /// returned are equivalent to `start + self * (end - start)`, although we make a few specific
- /// guarantees that are useful specifically to linear interpolation.
- ///
- /// These guarantees are:
- ///
- /// * If `start` and `end` are [finite], the value at 0.0 is always `start` and the
- /// value at 1.0 is always `end`. (exactness)
- /// * If `start` and `end` are [finite], the values will always move in the direction from
- /// `start` to `end` (monotonicity)
- /// * If `self` is [finite] and `start == end`, the value at any point will always be
- /// `start == end`. (consistency)
- ///
- /// [finite]: #method.is_finite
- #[must_use = "method returns a new number and does not mutate the original value"]
- #[unstable(feature = "float_interpolation", issue = "86269")]
- pub fn lerp(self, start: f64, end: f64) -> f64 {
- // consistent
- if start == end {
- start
-
- // exact/monotonic
- } else {
- self.mul_add(end, (-self).mul_add(start, start))
- }
- }
-
// Solaris/Illumos requires a wrapper around log, log2, and log10 functions
// because of their non-standard behavior (e.g., log(-n) returns -Inf instead
// of expected NaN).
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&f64::INFINITY));
assert_eq!(Ordering::Less, (-s_nan()).total_cmp(&s_nan()));
}
-
-#[test]
-fn test_lerp_exact() {
- // simple values
- assert_eq!(f64::lerp(0.0, 2.0, 4.0), 2.0);
- assert_eq!(f64::lerp(1.0, 2.0, 4.0), 4.0);
-
- // boundary values
- assert_eq!(f64::lerp(0.0, f64::MIN, f64::MAX), f64::MIN);
- assert_eq!(f64::lerp(1.0, f64::MIN, f64::MAX), f64::MAX);
-}
-
-#[test]
-fn test_lerp_consistent() {
- assert_eq!(f64::lerp(f64::MAX, f64::MIN, f64::MIN), f64::MIN);
- assert_eq!(f64::lerp(f64::MIN, f64::MAX, f64::MAX), f64::MAX);
-
- // as long as t is finite, a/b can be infinite
- assert_eq!(f64::lerp(f64::MAX, f64::NEG_INFINITY, f64::NEG_INFINITY), f64::NEG_INFINITY);
- assert_eq!(f64::lerp(f64::MIN, f64::INFINITY, f64::INFINITY), f64::INFINITY);
-}
-
-#[test]
-fn test_lerp_nan_infinite() {
- // non-finite t is not NaN if a/b different
- assert!(!f64::lerp(f64::INFINITY, f64::MIN, f64::MAX).is_nan());
- assert!(!f64::lerp(f64::NEG_INFINITY, f64::MIN, f64::MAX).is_nan());
-}
-
-#[test]
-fn test_lerp_values() {
- // just a few basic values
- assert_eq!(f64::lerp(0.25, 1.0, 2.0), 1.25);
- assert_eq!(f64::lerp(0.50, 1.0, 2.0), 1.50);
- assert_eq!(f64::lerp(0.75, 1.0, 2.0), 1.75);
-}
-
-#[test]
-fn test_lerp_monotonic() {
- // near 0
- let below_zero = f64::lerp(-f64::EPSILON, f64::MIN, f64::MAX);
- let zero = f64::lerp(0.0, f64::MIN, f64::MAX);
- let above_zero = f64::lerp(f64::EPSILON, f64::MIN, f64::MAX);
- assert!(below_zero <= zero);
- assert!(zero <= above_zero);
- assert!(below_zero <= above_zero);
-
- // near 1
- let below_one = f64::lerp(1.0 - f64::EPSILON, f64::MIN, f64::MAX);
- let one = f64::lerp(1.0, f64::MIN, f64::MAX);
- let above_one = f64::lerp(1.0 + f64::EPSILON, f64::MIN, f64::MAX);
- assert!(below_one <= one);
- assert!(one <= above_one);
- assert!(below_one <= above_one);
-}
#[inline]
#[must_use]
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
- #[rustc_const_unstable(feature = "const_cstr_unchecked", issue = "none")]
+ #[rustc_const_unstable(feature = "const_cstr_unchecked", issue = "90343")]
pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
// SAFETY: Casting to CStr is safe because its internal representation
// is a [u8] too (safe only inside std).
// "hard_link" should still appear as a symlink.
assert!(check!(fs::symlink_metadata(tmpdir.join("hard_link"))).file_type().is_symlink());
}
+
+/// Ensure `fs::create_dir` works on Windows with longer paths.
+#[test]
+#[cfg(windows)]
+fn create_dir_long_paths() {
+ use crate::{ffi::OsStr, iter, os::windows::ffi::OsStrExt};
+ const PATH_LEN: usize = 247;
+
+ let tmpdir = tmpdir();
+ let mut path = tmpdir.path().to_path_buf();
+ path.push("a");
+ let mut path = path.into_os_string();
+
+ let utf16_len = path.encode_wide().count();
+ if utf16_len >= PATH_LEN {
+ // Skip the test in the unlikely event the local user has a long temp directory path.
+ // This should not affect CI.
+ return;
+ }
+ // Increase the length of the path.
+ path.extend(iter::repeat(OsStr::new("a")).take(PATH_LEN - utf16_len));
+
+ // This should succeed.
+ fs::create_dir(&path).unwrap();
+
+ // This will fail if the path isn't converted to verbatim.
+ path.push("a");
+ fs::create_dir(&path).unwrap();
+}
#![feature(const_cstr_unchecked)]
#![feature(const_fn_floating_point_arithmetic)]
#![feature(const_fn_fn_ptr_basics)]
+#![feature(const_fn_trait_bound)]
#![feature(const_format_args)]
#![feature(const_io_structs)]
#![feature(const_ip)]
#![feature(exact_size_is_empty)]
#![feature(exhaustive_patterns)]
#![feature(extend_one)]
-#![feature(float_interpolation)]
#![feature(fn_traits)]
#![feature(format_args_nl)]
#![feature(gen_future)]
#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[cold]
#[track_caller]
-pub fn begin_panic<M: Any + Send>(msg: M) -> ! {
+#[rustc_do_not_const_check] // hooked by const-eval
+pub const fn begin_panic<M: Any + Send>(msg: M) -> ! {
if cfg!(feature = "panic_immediate_abort") {
intrinsics::abort()
}
#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(align(64))]
-pub(super) struct Aligner;
-
-#[derive(Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub(super) struct CacheAligned<T>(pub T, pub Aligner);
+pub(super) struct CacheAligned<T>(pub T);
impl<T> Deref for CacheAligned<T> {
type Target = T;
impl<T> CacheAligned<T> {
pub(super) fn new(t: T) -> Self {
- CacheAligned(t, Aligner)
+ CacheAligned(t)
}
}
mutex.lock();
}
- pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
- panic!("wait_timeout not supported on hermit");
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ self.counter.fetch_add(1, SeqCst);
+ mutex.unlock();
+ let millis = dur.as_millis().min(u32::MAX as u128) as u32;
+
+ let res = if millis > 0 {
+ abi::sem_timedwait(self.sem1, millis)
+ } else {
+ abi::sem_trywait(self.sem1)
+ };
+
+ abi::sem_post(self.sem2);
+ mutex.lock();
+ res == 0
}
pub unsafe fn destroy(&self) {
cchCount2: c_int,
bIgnoreCase: BOOL,
) -> c_int;
+ pub fn GetFullPathNameW(
+ lpFileName: LPCWSTR,
+ nBufferLength: DWORD,
+ lpBuffer: LPWSTR,
+ lpFilePart: *mut LPWSTR,
+ ) -> DWORD;
}
#[link(name = "ws2_32")]
use crate::sys::{c, cvt};
use crate::sys_common::{AsInner, FromInner, IntoInner};
+use super::path::maybe_verbatim;
use super::to_u16s;
pub struct File {
impl File {
pub fn open(path: &Path, opts: &OpenOptions) -> io::Result<File> {
- let path = to_u16s(path)?;
+ let path = maybe_verbatim(path)?;
let handle = unsafe {
c::CreateFileW(
path.as_ptr(),
}
pub fn mkdir(&self, p: &Path) -> io::Result<()> {
- let p = to_u16s(p)?;
+ let p = maybe_verbatim(p)?;
cvt(unsafe { c::CreateDirectoryW(p.as_ptr(), ptr::null_mut()) })?;
Ok(())
}
pub fn readdir(p: &Path) -> io::Result<ReadDir> {
let root = p.to_path_buf();
let star = p.join("*");
- let path = to_u16s(&star)?;
+ let path = maybe_verbatim(&star)?;
unsafe {
let mut wfd = mem::zeroed();
}
pub fn unlink(p: &Path) -> io::Result<()> {
- let p_u16s = to_u16s(p)?;
+ let p_u16s = maybe_verbatim(p)?;
cvt(unsafe { c::DeleteFileW(p_u16s.as_ptr()) })?;
Ok(())
}
pub fn rename(old: &Path, new: &Path) -> io::Result<()> {
- let old = to_u16s(old)?;
- let new = to_u16s(new)?;
+ let old = maybe_verbatim(old)?;
+ let new = maybe_verbatim(new)?;
cvt(unsafe { c::MoveFileExW(old.as_ptr(), new.as_ptr(), c::MOVEFILE_REPLACE_EXISTING) })?;
Ok(())
}
pub fn rmdir(p: &Path) -> io::Result<()> {
- let p = to_u16s(p)?;
+ let p = maybe_verbatim(p)?;
cvt(unsafe { c::RemoveDirectoryW(p.as_ptr()) })?;
Ok(())
}
pub fn symlink_inner(original: &Path, link: &Path, dir: bool) -> io::Result<()> {
let original = to_u16s(original)?;
- let link = to_u16s(link)?;
+ let link = maybe_verbatim(link)?;
let flags = if dir { c::SYMBOLIC_LINK_FLAG_DIRECTORY } else { 0 };
// Formerly, symlink creation required the SeCreateSymbolicLink privilege. For the Windows 10
// Creators Update, Microsoft loosened this to allow unprivileged symlink creation if the
#[cfg(not(target_vendor = "uwp"))]
pub fn link(original: &Path, link: &Path) -> io::Result<()> {
- let original = to_u16s(original)?;
- let link = to_u16s(link)?;
+ let original = maybe_verbatim(original)?;
+ let link = maybe_verbatim(link)?;
cvt(unsafe { c::CreateHardLinkW(link.as_ptr(), original.as_ptr(), ptr::null_mut()) })?;
Ok(())
}
}
pub fn set_perm(p: &Path, perm: FilePermissions) -> io::Result<()> {
- let p = to_u16s(p)?;
+ let p = maybe_verbatim(p)?;
unsafe {
cvt(c::SetFileAttributesW(p.as_ptr(), perm.attrs))?;
Ok(())
}
c::PROGRESS_CONTINUE
}
- let pfrom = to_u16s(from)?;
- let pto = to_u16s(to)?;
+ let pfrom = maybe_verbatim(from)?;
+ let pto = maybe_verbatim(to)?;
let mut size = 0i64;
cvt(unsafe {
c::CopyFileExW(
+use super::{c, fill_utf16_buf, to_u16s};
use crate::ffi::OsStr;
+use crate::io;
use crate::mem;
+use crate::path::Path;
use crate::path::Prefix;
+use crate::ptr;
#[cfg(test)]
mod tests;
None => (path, OsStr::new("")),
}
}
+
+/// Returns a UTF-16 encoded path capable of bypassing the legacy `MAX_PATH` limits.
+///
+/// This path may or may not have a verbatim prefix.
+pub(crate) fn maybe_verbatim(path: &Path) -> io::Result<Vec<u16>> {
+ // Normally the MAX_PATH is 260 UTF-16 code units (including the NULL).
+ // However, for APIs such as CreateDirectory[1], the limit is 248.
+ //
+ // [1]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createdirectorya#parameters
+ const LEGACY_MAX_PATH: usize = 248;
+ // UTF-16 encoded code points, used in parsing and building UTF-16 paths.
+ // All of these are in the ASCII range so they can be cast directly to `u16`.
+ const SEP: u16 = b'\\' as _;
+ const ALT_SEP: u16 = b'/' as _;
+ const QUERY: u16 = b'?' as _;
+ const COLON: u16 = b':' as _;
+ const DOT: u16 = b'.' as _;
+ const U: u16 = b'U' as _;
+ const N: u16 = b'N' as _;
+ const C: u16 = b'C' as _;
+
+ // \\?\
+ const VERBATIM_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP];
+ // \??\
+ const NT_PREFIX: &[u16] = &[SEP, QUERY, QUERY, SEP];
+ // \\?\UNC\
+ const UNC_PREFIX: &[u16] = &[SEP, SEP, QUERY, SEP, U, N, C, SEP];
+
+ let mut path = to_u16s(path)?;
+ if path.starts_with(VERBATIM_PREFIX) || path.starts_with(NT_PREFIX) {
+ // Early return for paths that are already verbatim.
+ return Ok(path);
+ } else if path.len() < LEGACY_MAX_PATH {
+ // Early return if an absolute path is shorter than `LEGACY_MAX_PATH` UTF-16 code units.
+ // This is an optimization to avoid calling `GetFullPathNameW` unnecessarily.
+ match path.as_slice() {
+ // Starts with `D:`, `D:\`, `D:/`, etc.
+ // Does not match if the path starts with a `\` or `/`.
+ [drive, COLON, 0] | [drive, COLON, SEP | ALT_SEP, ..]
+ if *drive != SEP && *drive != ALT_SEP =>
+ {
+ return Ok(path);
+ }
+ // Starts with `\\`, `//`, etc
+ [SEP | ALT_SEP, SEP | ALT_SEP, ..] => return Ok(path),
+ _ => {}
+ }
+ }
+
+ // Firstly, get the absolute path using `GetFullPathNameW`.
+ // https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfullpathnamew
+ let lpfilename = path.as_ptr();
+ fill_utf16_buf(
+ // SAFETY: `fill_utf16_buf` ensures the `buffer` and `size` are valid.
+ // `lpfilename` is a pointer to a null terminated string that is not
+ // invalidated until after `GetFullPathNameW` returns successfully.
+ |buffer, size| unsafe {
+ // While the docs for `GetFullPathNameW` have the standard note
+ // about needing a `\\?\` path for a long lpfilename, this does not
+ // appear to be true in practice.
+ // See:
+ // https://stackoverflow.com/questions/38036943/getfullpathnamew-and-long-windows-file-paths
+ // https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
+ c::GetFullPathNameW(lpfilename, size, buffer, ptr::null_mut())
+ },
+ |mut absolute| {
+ path.clear();
+
+ // Secondly, add the verbatim prefix. This is easier here because we know the
+ // path is now absolute and fully normalized (e.g. `/` has been changed to `\`).
+ let prefix = match absolute {
+ // C:\ => \\?\C:\
+ [_, COLON, SEP, ..] => VERBATIM_PREFIX,
+ // \\.\ => \\?\
+ [SEP, SEP, DOT, SEP, ..] => {
+ absolute = &absolute[4..];
+ VERBATIM_PREFIX
+ }
+ // Leave \\?\ and \??\ as-is.
+ [SEP, SEP, QUERY, SEP, ..] | [SEP, QUERY, QUERY, SEP, ..] => &[],
+ // \\ => \\?\UNC\
+ [SEP, SEP, ..] => {
+ absolute = &absolute[2..];
+ UNC_PREFIX
+ }
+ // Anything else we leave alone.
+ _ => &[],
+ };
+
+ path.reserve_exact(prefix.len() + absolute.len() + 1);
+ path.extend_from_slice(prefix);
+ path.extend_from_slice(absolute);
+ path.push(0);
+ },
+ )?;
+ Ok(path)
+}
(OsStr::new(r"server"), OsStr::new(r"\\\\\\\\\\\\\share"))
);
}
+
+#[test]
+fn verbatim() {
+ use crate::path::Path;
+ fn check(path: &str, expected: &str) {
+ let verbatim = maybe_verbatim(Path::new(path)).unwrap();
+ let verbatim = String::from_utf16_lossy(verbatim.strip_suffix(&[0]).unwrap());
+ assert_eq!(&verbatim, expected, "{}", path);
+ }
+
+ // Ensure long paths are correctly prefixed.
+ check(
+ r"C:\Program Files\Rust\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\C:\Program Files\Rust\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ check(
+ r"\\server\share\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\UNC\server\share\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ check(
+ r"\\.\PIPE\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\PIPE\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ // `\\?\` prefixed paths are left unchanged...
+ check(
+ r"\\?\verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+ // But `//?/` is not a verbatim prefix so it will be normalized.
+ check(
+ r"//?/E:/verbatim.\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ r"\\?\E:\verbatim\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\foo.txt",
+ );
+
+ // For performance, short absolute paths are left unchanged.
+ check(r"C:\Program Files\Rust", r"C:\Program Files\Rust");
+ check(r"\\server\share", r"\\server\share");
+ check(r"\\.\COM1", r"\\.\COM1");
+
+ // Check that paths of length 247 are converted to verbatim.
+ // This is necessary for `CreateDirectory`.
+ check(
+ r"C:\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ r"\\?\C:\aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ );
+
+ // Make sure opening a drive will work.
+ check("Z:", "Z:");
+
+ // An empty path or a path that contains null are not valid paths.
+ assert!(maybe_verbatim(Path::new("")).is_err());
+ assert!(maybe_verbatim(Path::new("\0")).is_err());
+}
}
}
-/// Returns a slice of the given string for the byte range [`begin`..`end`).
+/// Returns a slice of the given string for the byte range \[`begin`..`end`).
///
/// # Panics
///
/// [`unwrap`]: crate::result::Result::unwrap
/// [naming-threads]: ./index.html#naming-threads
/// [stack-size]: ./index.html#stack-size
+#[must_use = "must eventually spawn the thread"]
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct Builder {
#![feature(native_link_modifiers_bundle)]
#![feature(nll)]
#![feature(staged_api)]
-#![feature(static_nobundle)]
#![feature(c_unwind)]
#![cfg_attr(not(target_env = "msvc"), feature(libc))]
<tr>
<td>Forward-edge control flow protection
</td>
- <td>No
+ <td>Yes
</td>
- <td>
+ <td>Nightly
</td>
</tr>
<tr>
commercially available [grsecurity/PaX Reuse Attack Protector
(RAP)](https://grsecurity.net/rap_faq).
-The Rust compiler does not support forward-edge control flow protection on
-Linux<sup id="fnref:6" role="doc-noteref"><a href="#fn:6"
-class="footnote">6</a></sup>. There is work currently ongoing to add support
-for the [sanitizers](https://github.com/google/sanitizers)[40], which may or
-may not include support for LLVM CFI.
+The Rust compiler supports forward-edge control flow protection on nightly
+builds[40]-[41] <sup id="fnref:6" role="doc-noteref"><a href="#fn:6"
+class="footnote">6</a></sup>.
```text
-$ readelf -s target/release/hello-rust | grep __cfi_init
+$ readelf -s -W target/debug/rust-cfi | grep "\.cfi"
+ 12: 0000000000005170 46 FUNC LOCAL DEFAULT 14 _RNvCsjaOHoaNjor6_8rust_cfi7add_one.cfi
+ 15: 00000000000051a0 16 FUNC LOCAL DEFAULT 14 _RNvCsjaOHoaNjor6_8rust_cfi7add_two.cfi
+ 17: 0000000000005270 396 FUNC LOCAL DEFAULT 14 _RNvCsjaOHoaNjor6_8rust_cfi4main.cfi
+...
```
-Fig. 15. Checking if LLVM CFI is enabled for a given binary.
+Fig. 15. Checking if LLVM CFI is enabled for a given binary[41].
-The presence of the `__cfi_init` symbol (and references to `__cfi_check`)
-indicates that LLVM CFI (i.e., forward-edge control flow protection) is
-enabled for a given binary. Conversely, the absence of the `__cfi_init`
-symbol (and references to `__cfi_check`) indicates that LLVM CFI is not
-enabled for a given binary (see Fig. 15).
+The presence of symbols suffixed with ".cfi" or the `__cfi_init` symbol (and
+references to `__cfi_check`) indicates that LLVM CFI (i.e., forward-edge control
+flow protection) is enabled for a given binary. Conversely, the absence of
+symbols suffixed with ".cfi" or the `__cfi_init` symbol (and references to
+`__cfi_check`) indicates that LLVM CFI is not enabled for a given binary (see
+Fig. 15).
-<small id="fn:6">6\. It supports Control Flow Guard (CFG) on Windows (see
+<small id="fn:6">6\. It also supports Control Flow Guard (CFG) on Windows (see
<https://github.com/rust-lang/rust/issues/68793>). <a href="#fnref:6"
class="reversefootnote" role="doc-backlink">↩</a></small>
39. A. Crichton. “Remove the alloc\_jemalloc crate #55238.” GitHub.
<https://github.com/rust-lang/rust/pull/55238>.
-40. J. Aparicio. 2017. “Tracking issue for sanitizer support #39699.”
- <https://github.com/rust-lang/rust/issues/39699>.
+40. R. de C Valle. “Tracking Issue for LLVM Control Flow Integrity (CFI) Support
+ for Rust #89653.” GitHub. <https://github.com/rust-lang/rust/issues/89653>.
+
+41. “ControlFlowIntegrity.” The Rust Unstable Book.
+ <https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html#controlflowintegrity>.
These forms of the `#[doc]` attribute are used on individual items, to control how
they are documented.
-## `#[doc(no_inline)]`/`#[doc(inline)]`
+### `inline` and `no_inline`
+
+<span id="docno_inlinedocinline"></span>
These attributes are used on `use` statements, and control where the documentation shows
up. For example, consider this Rust code:
One special case: In Rust 2018 and later, if you `pub use` one of your dependencies, `rustdoc` will
not eagerly inline it as a module unless you add `#[doc(inline)]`.
-## `#[doc(hidden)]`
+### `hidden`
+
+<span id="dochidden"></span>
Any item annotated with `#[doc(hidden)]` will not appear in the documentation, unless
the `strip-hidden` pass is removed.
+
+### `alias`
+
+This attribute adds an alias in the search index.
+
+Let's take an example:
+
+```rust,no_run
+#[doc(alias = "TheAlias")]
+pub struct SomeType;
+```
+
+So now, if you enter "TheAlias" in the search, it'll display `SomeType`.
+Of course, if you enter `SomeType` it'll return `SomeType` as expected!
+
+#### FFI example
+
+This doc attribute is especially useful when writing bindings for a C library.
+For example, let's say we have a C function that looks like this:
+
+```c
+int lib_name_do_something(Obj *obj);
+```
+
+It takes a pointer to an `Obj` type and returns an integer. In Rust, it might
+be written like this:
+
+```ignore (using non-existing ffi types)
+pub struct Obj {
+ inner: *mut ffi::Obj,
+}
+
+impl Obj {
+ pub fn do_something(&mut self) -> i32 {
+ unsafe { ffi::lib_name_do_something(self.inner) }
+ }
+}
+```
+
+The function has been turned into a method to make it more convenient to use.
+However, if you want to look for the Rust equivalent of `lib_name_do_something`,
+you have no way to do so.
+
+To get around this limitation, we just add `#[doc(alias = "lib_name_do_something")]`
+on the `do_something` method and then it's all good!
+Users can now look for `lib_name_do_something` in our crate directly and find
+`Obj::do_something`.
# `sanitizer`
-The tracking issue for this feature is: [#39699](https://github.com/rust-lang/rust/issues/39699).
+The tracking issues for this feature are:
+
+* [#39699](https://github.com/rust-lang/rust/issues/39699).
+* [#89653](https://github.com/rust-lang/rust/issues/89653).
------------------------
This feature allows for use of one of following sanitizers:
* [AddressSanitizer][clang-asan] a fast memory error detector.
+* [ControlFlowIntegrity][clang-cfi] LLVM Control Flow Integrity (CFI) provides
+ forward-edge control flow protection.
* [HWAddressSanitizer][clang-hwasan] a memory error detector similar to
AddressSanitizer, but based on partial hardware assistance.
* [LeakSanitizer][clang-lsan] a run-time memory leak detector.
* [MemorySanitizer][clang-msan] a detector of uninitialized reads.
* [ThreadSanitizer][clang-tsan] a fast data race detector.
-To enable a sanitizer compile with `-Zsanitizer=address`,
+To enable a sanitizer, compile with `-Zsanitizer=address`, `-Zsanitizer=cfi`,
`-Zsanitizer=hwaddress`, `-Zsanitizer=leak`, `-Zsanitizer=memory` or
`-Zsanitizer=thread`.
==39249==ABORTING
```
+# ControlFlowIntegrity
+
+The LLVM Control Flow Integrity (CFI) support in the Rust compiler initially
+provides forward-edge control flow protection for Rust-compiled code only by
+aggregating function pointers in groups identified by their number of arguments.
+
+Forward-edge control flow protection for C or C++ and Rust -compiled code "mixed
+binaries" (i.e., for when C or C++ and Rust -compiled code share the same
+virtual address space) will be provided in later work by defining and using
+compatible type identifiers (see Type metadata in the design document in the
+tracking issue [#89653](https://github.com/rust-lang/rust/issues/89653)).
+
+LLVM CFI can be enabled with `-Zsanitizer=cfi` and requires LTO (i.e., `-Clto`).
+
+## Example
+
+```text
+#![feature(asm, naked_functions)]
+
+use std::mem;
+
+fn add_one(x: i32) -> i32 {
+ x + 1
+}
+
+#[naked]
+pub extern "C" fn add_two(x: i32) {
+ // x + 2 preceded by a landing pad/nop block
+ unsafe {
+ asm!(
+ "
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ lea rax, [rdi+2]
+ ret
+ ",
+ options(noreturn)
+ );
+ }
+}
+
+fn do_twice(f: fn(i32) -> i32, arg: i32) -> i32 {
+ f(arg) + f(arg)
+}
+
+fn main() {
+ let answer = do_twice(add_one, 5);
+
+ println!("The answer is: {}", answer);
+
+ println!("With CFI enabled, you should not see the next answer");
+ let f: fn(i32) -> i32 = unsafe {
+ // Offsets 0-8 make it land in the landing pad/nop block, and offsets 1-8 are
+ // invalid branch/call destinations (i.e., within the body of the function).
+ mem::transmute::<*const u8, fn(i32) -> i32>((add_two as *const u8).offset(5))
+ };
+ let next_answer = do_twice(f, 5);
+
+ println!("The next answer is: {}", next_answer);
+}
+```
+Fig. 1. Modified example from the [Advanced Functions and
+Closures][rust-book-ch19-05] chapter of the [The Rust Programming
+Language][rust-book] book.
+
+[//]: # (FIXME: Replace with output from cargo using nightly when #89652 is merged)
+
+```shell
+$ rustc rust_cfi.rs -o rust_cfi
+$ ./rust_cfi
+The answer is: 12
+With CFI enabled, you should not see the next answer
+The next answer is: 14
+$
+```
+Fig. 2. Build and execution of the modified example with LLVM CFI disabled.
+
+[//]: # (FIXME: Replace with output from cargo using nightly when #89652 is merged)
+
+```shell
+$ rustc -Clto -Zsanitizer=cfi rust_cfi.rs -o rust_cfi
+$ ./rust_cfi
+The answer is: 12
+With CFI enabled, you should not see the next answer
+Illegal instruction
+$
+```
+Fig. 3. Build and execution of the modified example with LLVM CFI enabled.
+
+When LLVM CFI is enabled, if there are any attempts to change/hijack control
+flow using an indirect branch/call to an invalid destination, the execution is
+terminated (see Fig. 3).
+
+```rust
+use std::mem;
+
+fn add_one(x: i32) -> i32 {
+ x + 1
+}
+
+fn add_two(x: i32, _y: i32) -> i32 {
+ x + 2
+}
+
+fn do_twice(f: fn(i32) -> i32, arg: i32) -> i32 {
+ f(arg) + f(arg)
+}
+
+fn main() {
+ let answer = do_twice(add_one, 5);
+
+ println!("The answer is: {}", answer);
+
+ println!("With CFI enabled, you should not see the next answer");
+ let f: fn(i32) -> i32 =
+ unsafe { mem::transmute::<*const u8, fn(i32) -> i32>(add_two as *const u8) };
+ let next_answer = do_twice(f, 5);
+
+ println!("The next answer is: {}", next_answer);
+}
+```
+Fig. 4. Another modified example from the [Advanced Functions and
+Closures][rust-book-ch19-05] chapter of the [The Rust Programming
+Language][rust-book] book.
+
+[//]: # (FIXME: Replace with output from cargo using nightly when #89652 is merged)
+
+```shell
+$ rustc rust_cfi.rs -o rust_cfi
+$ ./rust_cfi
+The answer is: 12
+With CFI enabled, you should not see the next answer
+The next answer is: 14
+$
+```
+Fig. 5. Build and execution of the modified example with LLVM CFI disabled.
+
+[//]: # (FIXME: Replace with output from cargo using nightly when #89652 is merged)
+
+```shell
+$ rustc -Clto -Zsanitizer=cfi rust_cfi.rs -o rust_cfi
+$ ./rust_cfi
+The answer is: 12
+With CFI enabled, you should not see the next answer
+Illegal instruction
+$
+```
+Fig. 6. Build and execution of the modified example with LLVM CFI enabled.
+
+When LLVM CFI is enabled, if there are any attempts to change/hijack control
+flow using an indirect branch/call to a function with a different number of
+arguments than intended/passed in the call/branch site, the execution is also
+terminated (see Fig. 6).
+
+Forward-edge control flow protection that aggregates function pointers in
+groups identified not only by their number of arguments, but also by their
+argument types, will also be provided in later work by defining and using
+compatible type identifiers (see Type metadata in the design document in the
+tracking issue [#89653](https://github.com/rust-lang/rust/issues/89653)).
+
+[rust-book-ch19-05]: https://doc.rust-lang.org/book/ch19-05-advanced-functions-and-closures.html
+[rust-book]: https://doc.rust-lang.org/book/title-page.html
+
# HWAddressSanitizer
HWAddressSanitizer is a newer variant of AddressSanitizer that consumes much
* [Sanitizers project page](https://github.com/google/sanitizers/wiki/)
* [AddressSanitizer in Clang][clang-asan]
+* [ControlFlowIntegrity in Clang][clang-cfi]
* [HWAddressSanitizer in Clang][clang-hwasan]
* [LeakSanitizer in Clang][clang-lsan]
* [MemorySanitizer in Clang][clang-msan]
* [ThreadSanitizer in Clang][clang-tsan]
[clang-asan]: https://clang.llvm.org/docs/AddressSanitizer.html
+[clang-cfi]: https://clang.llvm.org/docs/ControlFlowIntegrity.html
[clang-hwasan]: https://clang.llvm.org/docs/HardwareAssistedAddressSanitizerDesign.html
[clang-lsan]: https://clang.llvm.org/docs/LeakSanitizer.html
[clang-msan]: https://clang.llvm.org/docs/MemorySanitizer.html
This will write the value `5` into the `u64` variable `x`.
You can see that the string literal we use to specify instructions is actually a template string.
It is governed by the same rules as Rust [format strings][format-syntax].
-The arguments that are inserted into the template however look a bit different then you may
+The arguments that are inserted into the template however look a bit different than you may
be familiar with. First we need to specify if the variable is an input or an output of the
inline assembly. In this case it is an output. We declared this by writing `out`.
We also need to specify in what kind of register the assembly expects the variable.
Second, we can see that inputs are declared by writing `in` instead of `out`.
Third, one of our operands has a type we haven't seen yet, `const`.
-This tells the compiler to expand this argument to value directly inside the assembly template.
+This tells the compiler to expand this argument to a value directly inside the assembly template.
This is only possible for constants and literals.
Fourth, we can see that we can specify an argument number, or name as in any format string.
arrayvec = { version = "0.7", default-features = false }
pulldown-cmark = { version = "0.8", default-features = false }
minifier = "0.0.41"
-rayon = { version = "0.3.0", package = "rustc-rayon" }
+rayon = "1.3.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
smallvec = "1.6.1"
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Symbol};
-use crate::clean::{
- self, utils, Attributes, AttributesExt, GetDefId, ItemId, NestedAttributesExt, Type,
-};
+use crate::clean::{self, utils, Attributes, AttributesExt, ItemId, NestedAttributesExt, Type};
use crate::core::DocContext;
use crate::formats::item_type::ItemType;
}
}
-/// Builds a specific implementation of a type. The `did` could be a type method or trait method.
+/// Inline an `impl`, inherent or of a trait. The `did` must be for an `impl`.
crate fn build_impl(
cx: &mut DocContext<'_>,
parent_module: impl Into<Option<DefId>>,
return;
}
+ let _prof_timer = cx.tcx.sess.prof.generic_activity("build_extern_trait_impl");
+
let tcx = cx.tcx;
let associated_trait = tcx.impl_trait_ref(did);
// Only inline impl if the implementing type is
// reachable in rustdoc generated documentation
if !did.is_local() {
- if let Some(did) = for_.def_id() {
+ if let Some(did) = for_.def_id(&cx.cache) {
if !cx.cache.access_levels.is_public(did) {
return;
}
}
while let Some(ty) = stack.pop() {
- if let Some(did) = ty.def_id() {
+ if let Some(did) = ty.def_id(&cx.cache) {
if tcx.get_attrs(did).lists(sym::doc).has_word(sym::hidden) {
return;
}
let (merged_attrs, cfg) = merge_attrs(cx, parent_module.into(), load_attrs(cx, did), attrs);
trace!("merged_attrs={:?}", merged_attrs);
- trace!("build_impl: impl {:?} for {:?}", trait_.as_ref().map(|t| t.def_id()), for_.def_id());
+ trace!(
+ "build_impl: impl {:?} for {:?}",
+ trait_.as_ref().map(|t| t.def_id()),
+ for_.def_id(&cx.cache)
+ );
ret.push(clean::Item::from_def_id_and_attrs_and_parts(
did,
None,
let self_type = self.self_ty().clean(cx);
Type::QPath {
name: cx.tcx.associated_item(self.item_def_id).ident.name,
- self_def_id: self_type.def_id(),
+ self_def_id: self_type.def_id(&cx.cache),
self_type: box self_type,
trait_,
}
}
let for_ = impl_.self_ty.clean(cx);
- let type_alias = for_.def_id().and_then(|did| match tcx.def_kind(did) {
+ let type_alias = for_.def_id(&cx.cache).and_then(|did| match tcx.def_kind(did) {
DefKind::TyAlias => Some(tcx.type_of(did).clean(cx)),
_ => None,
});
DefaultReturn,
}
-impl GetDefId for FnRetTy {
- fn def_id(&self) -> Option<DefId> {
- match *self {
- Return(ref ty) => ty.def_id(),
- DefaultReturn => None,
- }
- }
-
- fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
- match *self {
- Return(ref ty) => ty.def_id_full(cache),
+impl FnRetTy {
+ crate fn as_return(&self) -> Option<&Type> {
+ match self {
+ Return(ret) => Some(ret),
DefaultReturn => None,
}
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Type, 72);
-crate trait GetDefId {
- /// Use this method to get the [`DefId`] of a [`clean`] AST node.
- /// This will return [`None`] when called on a primitive [`clean::Type`].
- /// Use [`Self::def_id_full`] if you want to include primitives.
- ///
- /// [`clean`]: crate::clean
- /// [`clean::Type`]: crate::clean::Type
- // FIXME: get rid of this function and always use `def_id_full`
- fn def_id(&self) -> Option<DefId>;
-
- /// Use this method to get the [DefId] of a [clean] AST node, including [PrimitiveType]s.
- ///
- /// See [`Self::def_id`] for more.
- ///
- /// [clean]: crate::clean
- fn def_id_full(&self, cache: &Cache) -> Option<DefId>;
-}
-
-impl<T: GetDefId> GetDefId for Option<T> {
- fn def_id(&self) -> Option<DefId> {
- self.as_ref().and_then(|d| d.def_id())
- }
-
- fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
- self.as_ref().and_then(|d| d.def_id_full(cache))
- }
-}
-
impl Type {
crate fn primitive_type(&self) -> Option<PrimitiveType> {
match *self {
QPath { ref self_type, .. } => return self_type.inner_def_id(cache),
Generic(_) | Infer | ImplTrait(_) => return None,
};
- cache.and_then(|c| Primitive(t).def_id_full(c))
+ cache.and_then(|c| Primitive(t).def_id(c))
}
-}
-impl GetDefId for Type {
- fn def_id(&self) -> Option<DefId> {
- self.inner_def_id(None)
+ /// Use this method to get the [DefId] of a [clean] AST node, including [PrimitiveType]s.
+ ///
+ /// See [`Self::def_id_no_primitives`] for more.
+ ///
+ /// [clean]: crate::clean
+ crate fn def_id(&self, cache: &Cache) -> Option<DefId> {
+ self.inner_def_id(Some(cache))
}
- fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
- self.inner_def_id(Some(cache))
+ /// Use this method to get the [`DefId`] of a [`clean`] AST node.
+ /// This will return [`None`] when called on a primitive [`clean::Type`].
+ /// Use [`Self::def_id`] if you want to include primitives.
+ ///
+ /// [`clean`]: crate::clean
+ /// [`clean::Type`]: crate::clean::Type
+ // FIXME: get rid of this function and always use `def_id`
+ crate fn def_id_no_primitives(&self) -> Option<DefId> {
+ self.inner_def_id(None)
}
}
crate item_type: Option<Type>,
}
-impl GetDefId for Typedef {
- fn def_id(&self) -> Option<DefId> {
- self.type_.def_id()
- }
-
- fn def_id_full(&self, cache: &Cache) -> Option<DefId> {
- self.type_.def_id_full(cache)
- }
-}
-
#[derive(Clone, Debug)]
crate struct OpaqueTy {
crate bounds: Vec<GenericBound>,
use std::string::ToString;
use std::sync::mpsc::Sender;
-macro_rules! try_err {
- ($e:expr, $file:expr) => {
- match $e {
- Ok(e) => e,
- Err(e) => return Err(E::new(e, $file)),
- }
- };
-}
-
crate trait PathError {
fn new<S, P: AsRef<Path>>(e: S, path: P) -> Self
where
});
});
} else {
- try_err!(fs::write(&path, contents), path);
+ fs::write(&path, contents).map_err(|e| E::new(e, path))?;
}
Ok(())
}
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::sym;
-use crate::clean::{self, GetDefId, ItemId, PrimitiveType};
+use crate::clean::{self, ItemId, PrimitiveType};
use crate::config::RenderOptions;
use crate::fold::DocFolder;
use crate::formats::item_type::ItemType;
|| i.trait_
.as_ref()
.map_or(false, |t| self.cache.masked_crates.contains(&t.def_id().krate))
- || i.for_.def_id().map_or(false, |d| self.cache.masked_crates.contains(&d.krate))
+ || i.for_
+ .def_id(self.cache)
+ .map_or(false, |d| self.cache.masked_crates.contains(&d.krate))
{
return None;
}
if let Some(generics) = i.trait_.as_ref().and_then(|t| t.generics()) {
for bound in generics {
- if let Some(did) = bound.def_id() {
+ if let Some(did) = bound.def_id(self.cache) {
dids.insert(did);
}
}
ProcAttribute = 23,
ProcDerive = 24,
TraitAlias = 25,
+ Generic = 26,
}
impl Serialize for ItemType {
ItemType::ProcAttribute => "attr",
ItemType::ProcDerive => "derive",
ItemType::TraitAlias => "traitalias",
+ ItemType::Generic => "generic",
}
}
}
// Assume that '&' or '*' is the reference or dereference operator
// or a reference or pointer type. Unless, of course, it looks like
// a logical and or a multiplication operator: `&&` or `* `.
- TokenKind::Star => match self.peek() {
- Some(TokenKind::Whitespace) => Class::Op,
+ TokenKind::Star => match self.tokens.peek() {
+ Some((TokenKind::Whitespace, _)) => Class::Op,
+ Some((TokenKind::Ident, "mut")) => {
+ self.next();
+ sink(Highlight::Token { text: "*mut", class: Some(Class::RefKeyWord) });
+ return;
+ }
+ Some((TokenKind::Ident, "const")) => {
+ self.next();
+ sink(Highlight::Token { text: "*const", class: Some(Class::RefKeyWord) });
+ return;
+ }
_ => Class::RefKeyWord,
},
- TokenKind::And => match lookahead {
- Some(TokenKind::And) => {
+ TokenKind::And => match self.tokens.peek() {
+ Some((TokenKind::And, _)) => {
self.next();
sink(Highlight::Token { text: "&&", class: Some(Class::Op) });
return;
}
- Some(TokenKind::Eq) => {
+ Some((TokenKind::Eq, _)) => {
self.next();
sink(Highlight::Token { text: "&=", class: Some(Class::Op) });
return;
}
- Some(TokenKind::Whitespace) => Class::Op,
+ Some((TokenKind::Whitespace, _)) => Class::Op,
+ Some((TokenKind::Ident, "mut")) => {
+ self.next();
+ sink(Highlight::Token { text: "&mut", class: Some(Class::RefKeyWord) });
+ return;
+ }
_ => Class::RefKeyWord,
},
<span class="attribute">#[<span class="ident">cfg</span>(<span class="ident">target_os</span> <span class="op">=</span> <span class="string">"linux"</span>)]</span>
<span class="kw">fn</span> <span class="ident">main</span>() -> () {
<span class="kw">let</span> <span class="ident">foo</span> <span class="op">=</span> <span class="bool-val">true</span> <span class="op">&&</span> <span class="bool-val">false</span> <span class="op">|</span><span class="op">|</span> <span class="bool-val">true</span>;
- <span class="kw">let</span> <span class="kw">_</span>: <span class="kw-2">*</span><span class="kw">const</span> () <span class="op">=</span> <span class="number">0</span>;
+ <span class="kw">let</span> <span class="kw">_</span>: <span class="kw-2">*const</span> () <span class="op">=</span> <span class="number">0</span>;
<span class="kw">let</span> <span class="kw">_</span> <span class="op">=</span> <span class="kw-2">&</span><span class="ident">foo</span>;
<span class="kw">let</span> <span class="kw">_</span> <span class="op">=</span> <span class="op">&&</span><span class="ident">foo</span>;
<span class="kw">let</span> <span class="kw">_</span> <span class="op">=</span> <span class="kw-2">*</span><span class="ident">foo</span>;
- <span class="macro">mac!</span>(<span class="ident">foo</span>, <span class="kw-2">&</span><span class="kw-2">mut</span> <span class="ident">bar</span>);
+ <span class="macro">mac!</span>(<span class="ident">foo</span>, <span class="kw-2">&mut</span> <span class="ident">bar</span>);
<span class="macro">assert!</span>(<span class="self">self</span>.<span class="ident">length</span> <span class="op"><</span> <span class="ident">N</span> <span class="op">&&</span> <span class="ident">index</span> <span class="op"><</span><span class="op">=</span> <span class="self">self</span>.<span class="ident">length</span>);
<span class="ident">::std::env::var</span>(<span class="string">"gateau"</span>).<span class="ident">is_ok</span>();
<span class="attribute">#[<span class="ident">rustfmt::skip</span>]</span>
use std::collections::hash_map::Entry;
use std::collections::BTreeMap;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::Symbol;
use serde::ser::{Serialize, SerializeStruct, Serializer};
use crate::clean;
-use crate::clean::types::{
- FnDecl, FnRetTy, GenericBound, Generics, GetDefId, Type, WherePredicate,
-};
+use crate::clean::types::{FnDecl, FnRetTy, GenericBound, Generics, Type, WherePredicate};
use crate::formats::cache::Cache;
use crate::formats::item_type::ItemType;
use crate::html::markdown::short_markdown_summary;
item: &clean::Item,
tcx: TyCtxt<'tcx>,
) -> Option<IndexItemFunctionType> {
- let (all_types, ret_types) = match *item.kind {
+ let (mut inputs, mut output) = match *item.kind {
clean::FunctionItem(ref f) => get_all_types(&f.generics, &f.decl, tcx),
clean::MethodItem(ref m, _) => get_all_types(&m.generics, &m.decl, tcx),
clean::TyMethodItem(ref m) => get_all_types(&m.generics, &m.decl, tcx),
_ => return None,
};
- let inputs = all_types
- .iter()
- .map(|(ty, kind)| TypeWithKind::from((get_index_type(ty), *kind)))
- .filter(|a| a.ty.name.is_some())
- .collect();
- let output = ret_types
- .iter()
- .map(|(ty, kind)| TypeWithKind::from((get_index_type(ty), *kind)))
- .filter(|a| a.ty.name.is_some())
- .collect::<Vec<_>>();
+ inputs.retain(|a| a.ty.name.is_some());
+ output.retain(|a| a.ty.name.is_some());
let output = if output.is_empty() { None } else { Some(output) };
Some(IndexItemFunctionType { inputs, output })
}
-fn get_index_type(clean_type: &clean::Type) -> RenderType {
+fn get_index_type(clean_type: &clean::Type, generics: Vec<TypeWithKind>) -> RenderType {
RenderType {
name: get_index_type_name(clean_type, true).map(|s| s.as_str().to_ascii_lowercase()),
- generics: get_generics(clean_type),
+ generics: if generics.is_empty() { None } else { Some(generics) },
}
}
}
}
-/// Return a list of generic parameters for use in the search index.
-///
-/// This function replaces bounds with types, so that `T where T: Debug` just becomes `Debug`.
-/// It does return duplicates, and that's intentional, since search queries like `Result<usize, usize>`
-/// are supposed to match only results where both parameters are `usize`.
-fn get_generics(clean_type: &clean::Type) -> Option<Vec<String>> {
- clean_type.generics().and_then(|types| {
- let r = types
- .iter()
- .filter_map(|t| {
- get_index_type_name(t, false).map(|name| name.as_str().to_ascii_lowercase())
- })
- .collect::<Vec<_>>();
- if r.is_empty() { None } else { Some(r) }
- })
-}
-
/// The point of this function is to replace bounds with types.
///
/// i.e. `[T, U]` when you have the following bounds: `T: Display, U: Option<T>` will return
generics: &Generics,
arg: &Type,
tcx: TyCtxt<'tcx>,
- recurse: i32,
- res: &mut FxHashSet<(Type, ItemType)>,
-) -> usize {
- fn insert(res: &mut FxHashSet<(Type, ItemType)>, tcx: TyCtxt<'_>, ty: Type) -> usize {
- if let Some(kind) = ty.def_id().map(|did| tcx.def_kind(did).into()) {
- res.insert((ty, kind));
- 1
+ recurse: usize,
+ res: &mut Vec<TypeWithKind>,
+) {
+ fn insert_ty(
+ res: &mut Vec<TypeWithKind>,
+ tcx: TyCtxt<'_>,
+ ty: Type,
+ mut generics: Vec<TypeWithKind>,
+ ) {
+ let is_full_generic = ty.is_full_generic();
+
+ if is_full_generic && generics.len() == 1 {
+ // In this case, no need to go through an intermediate state if the generics
+ // contains only one element.
+ //
+ // For example:
+ //
+ // fn foo<T: Display>(r: Option<T>) {}
+ //
+ // In this case, it would contain:
+ //
+ // ```
+ // [{
+ // name: "option",
+ // generics: [{
+ // name: "",
+ // generics: [
+ // name: "Display",
+ // generics: []
+ // }]
+ // }]
+ // }]
+ // ```
+ //
+ // After removing the intermediate (unnecessary) full generic, it'll become:
+ //
+ // ```
+ // [{
+ // name: "option",
+ // generics: [{
+ // name: "Display",
+ // generics: []
+ // }]
+ // }]
+ // ```
+ //
+ // To be noted that it can work if there is ONLY ONE generic, otherwise we still
+ // need to keep it as is!
+ res.push(generics.pop().unwrap());
+ return;
+ }
+ let mut index_ty = get_index_type(&ty, generics);
+ if index_ty.name.as_ref().map(|s| s.is_empty()).unwrap_or(true) {
+ return;
+ }
+ if is_full_generic {
+ // We remove the name of the full generic because we have no use for it.
+ index_ty.name = Some(String::new());
+ res.push(TypeWithKind::from((index_ty, ItemType::Generic)));
+ } else if let Some(kind) = ty.def_id_no_primitives().map(|did| tcx.def_kind(did).into()) {
+ res.push(TypeWithKind::from((index_ty, kind)));
} else if ty.is_primitive() {
// This is a primitive, let's store it as such.
- res.insert((ty, ItemType::Primitive));
- 1
- } else {
- 0
+ res.push(TypeWithKind::from((index_ty, ItemType::Primitive)));
}
}
if recurse >= 10 {
// FIXME: remove this whole recurse thing when the recursion bug is fixed
- return 0;
+ return;
}
- let mut nb_added = 0;
if let Type::Generic(arg_s) = *arg {
if let Some(where_pred) = generics.where_predicates.iter().find(|g| match g {
- WherePredicate::BoundPredicate { ty, .. } => ty.def_id() == arg.def_id(),
+ WherePredicate::BoundPredicate { ty, .. } => {
+ ty.def_id_no_primitives() == arg.def_id_no_primitives()
+ }
_ => false,
}) {
+ let mut ty_generics = Vec::new();
let bounds = where_pred.get_bounds().unwrap_or_else(|| &[]);
for bound in bounds.iter() {
if let GenericBound::TraitBound(poly_trait, _) = bound {
continue;
}
if let Some(ty) = x.get_type() {
- let adds = get_real_types(generics, &ty, tcx, recurse + 1, res);
- nb_added += adds;
- if adds == 0 && !ty.is_full_generic() {
- nb_added += insert(res, tcx, ty);
- }
+ get_real_types(generics, &ty, tcx, recurse + 1, &mut ty_generics);
}
}
}
}
+ insert_ty(res, tcx, arg.clone(), ty_generics);
}
if let Some(bound) = generics.params.iter().find(|g| g.is_type() && g.name == arg_s) {
+ let mut ty_generics = Vec::new();
for bound in bound.get_bounds().unwrap_or(&[]) {
if let Some(path) = bound.get_trait_path() {
let ty = Type::ResolvedPath { did: path.def_id(), path };
- let adds = get_real_types(generics, &ty, tcx, recurse + 1, res);
- nb_added += adds;
- if adds == 0 && !ty.is_full_generic() {
- nb_added += insert(res, tcx, ty);
- }
+ get_real_types(generics, &ty, tcx, recurse + 1, &mut ty_generics);
}
}
+ insert_ty(res, tcx, arg.clone(), ty_generics);
}
} else {
- nb_added += insert(res, tcx, arg.clone());
- if let Some(gens) = arg.generics() {
- for gen in gens.iter() {
- if gen.is_full_generic() {
- nb_added += get_real_types(generics, gen, tcx, recurse + 1, res);
- } else {
- nb_added += insert(res, tcx, (*gen).clone());
- }
+ let mut ty_generics = Vec::new();
+ if let Some(arg_generics) = arg.generics() {
+ for gen in arg_generics.iter() {
+ get_real_types(generics, gen, tcx, recurse + 1, &mut ty_generics);
}
}
+ insert_ty(res, tcx, arg.clone(), ty_generics);
}
- nb_added
}
/// Return the full list of types when bounds have been resolved.
generics: &Generics,
decl: &FnDecl,
tcx: TyCtxt<'tcx>,
-) -> (Vec<(Type, ItemType)>, Vec<(Type, ItemType)>) {
- let mut all_types = FxHashSet::default();
+) -> (Vec<TypeWithKind>, Vec<TypeWithKind>) {
+ let mut all_types = Vec::new();
for arg in decl.inputs.values.iter() {
if arg.type_.is_self_type() {
continue;
}
- let mut args = FxHashSet::default();
+ // FIXME: performance wise, it'd be much better to move `args` declaration outside of the
+ // loop and replace this line with `args.clear()`.
+ let mut args = Vec::new();
get_real_types(generics, &arg.type_, tcx, 0, &mut args);
if !args.is_empty() {
+ // FIXME: once back to performance improvements, replace this line with:
+ // `all_types.extend(args.drain(..));`.
all_types.extend(args);
} else {
- if let Some(kind) = arg.type_.def_id().map(|did| tcx.def_kind(did).into()) {
- all_types.insert((arg.type_.clone(), kind));
+ if let Some(kind) = arg.type_.def_id_no_primitives().map(|did| tcx.def_kind(did).into())
+ {
+ all_types.push(TypeWithKind::from((get_index_type(&arg.type_, vec![]), kind)));
}
}
}
- let ret_types = match decl.output {
+ let mut ret_types = Vec::new();
+ match decl.output {
FnRetTy::Return(ref return_type) => {
- let mut ret = FxHashSet::default();
- get_real_types(generics, return_type, tcx, 0, &mut ret);
- if ret.is_empty() {
- if let Some(kind) = return_type.def_id().map(|did| tcx.def_kind(did).into()) {
- ret.insert((return_type.clone(), kind));
+ get_real_types(generics, return_type, tcx, 0, &mut ret_types);
+ if ret_types.is_empty() {
+ if let Some(kind) =
+ return_type.def_id_no_primitives().map(|did| tcx.def_kind(did).into())
+ {
+ ret_types.push(TypeWithKind::from((get_index_type(return_type, vec![]), kind)));
}
}
- ret.into_iter().collect()
}
- _ => Vec::new(),
+ _ => {}
};
- (all_types.into_iter().collect(), ret_types)
+ (all_types, ret_types)
}
use std::sync::mpsc::{channel, Receiver};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_span::edition::Edition;
BASIC_KEYWORDS,
};
-use crate::clean;
-use crate::clean::ExternalCrate;
+use crate::clean::{self, ExternalCrate};
use crate::config::RenderOptions;
use crate::docfs::{DocFS, PathError};
use crate::error::Error;
use crate::html::markdown::{self, plain_text_summary, ErrorCodes, IdMap};
use crate::html::{layout, sources};
use crate::scrape_examples::AllCallLocations;
+use crate::try_err;
/// Major driving force in all rustdoc rendering. This contains information
/// about where in the tree-like hierarchy rendering is occurring and controls
/// real location of an item. This is used to allow external links to
/// publicly reused items to redirect to the right location.
pub(super) render_redirect_pages: bool,
+ /// Tracks section IDs for `Deref` targets so they match in both the main
+ /// body and the sidebar.
+ pub(super) deref_id_map: RefCell<FxHashMap<DefId, String>>,
/// The map used to ensure all generated 'id=' attributes are unique.
pub(super) id_map: RefCell<IdMap>,
/// Shared mutable state.
// `Context` is cloned a lot, so we don't want the size to grow unexpectedly.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Context<'_>, 104);
+rustc_data_structures::static_assert_size!(Context<'_>, 144);
/// Shared mutable state used in [`Context`] and elsewhere.
crate struct SharedContext<'tcx> {
dst,
render_redirect_pages: false,
id_map: RefCell::new(id_map),
+ deref_id_map: RefCell::new(FxHashMap::default()),
shared: Rc::new(scx),
include_sources,
};
current: self.current.clone(),
dst: self.dst.clone(),
render_redirect_pages: self.render_redirect_pages,
+ deref_id_map: RefCell::new(FxHashMap::default()),
id_map: RefCell::new(IdMap::new()),
shared: Rc::clone(&self.shared),
include_sources: self.include_sources,
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
-use crate::clean::{self, GetDefId, ItemId, RenderedLink, SelfTy};
+use crate::clean::{self, ItemId, RenderedLink, SelfTy};
use crate::docfs::PathError;
use crate::error::Error;
use crate::formats::cache::Cache;
use crate::html::markdown::{HeadingOffset, Markdown, MarkdownHtml, MarkdownSummaryLine};
use crate::html::sources;
use crate::scrape_examples::CallData;
+use crate::try_none;
/// A pair of name and its optional document.
crate type NameDoc = (String, Option<String>);
#[derive(Debug)]
crate struct RenderType {
name: Option<String>,
- generics: Option<Vec<String>>,
+ generics: Option<Vec<TypeWithKind>>,
}
/// Full type of functions/methods in the search index.
containing_item: &clean::Item,
it: DefId,
what: AssocItemRender<'_>,
+) {
+ let mut derefs = FxHashSet::default();
+ derefs.insert(it);
+ render_assoc_items_inner(w, cx, containing_item, it, what, &mut derefs)
+}
+
+fn render_assoc_items_inner(
+ w: &mut Buffer,
+ cx: &Context<'_>,
+ containing_item: &clean::Item,
+ it: DefId,
+ what: AssocItemRender<'_>,
+ derefs: &mut FxHashSet<DefId>,
) {
info!("Documenting associated items of {:?}", containing_item.name);
let cache = cx.cache();
};
let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| i.inner_impl().trait_.is_none());
if !non_trait.is_empty() {
+ let mut tmp_buf = Buffer::empty_from(w);
let render_mode = match what {
AssocItemRender::All => {
- w.write_str(
+ tmp_buf.write_str(
"<h2 id=\"implementations\" class=\"small-section-header\">\
Implementations<a href=\"#implementations\" class=\"anchor\"></a>\
</h2>",
RenderMode::Normal
}
AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => {
+ let id =
+ cx.derive_id(small_url_encode(format!("deref-methods-{:#}", type_.print(cx))));
+ if let Some(def_id) = type_.def_id(cx.cache()) {
+ cx.deref_id_map.borrow_mut().insert(def_id, id.clone());
+ }
write!(
- w,
- "<h2 id=\"deref-methods\" class=\"small-section-header\">\
+ tmp_buf,
+ "<h2 id=\"{id}\" class=\"small-section-header\">\
<span>Methods from {trait_}<Target = {type_}></span>\
- <a href=\"#deref-methods\" class=\"anchor\"></a>\
+ <a href=\"#{id}\" class=\"anchor\"></a>\
</h2>",
+ id = id,
trait_ = trait_.print(cx),
type_ = type_.print(cx),
);
RenderMode::ForDeref { mut_: deref_mut_ }
}
};
+ let mut impls_buf = Buffer::empty_from(w);
for i in &non_trait {
render_impl(
- w,
+ &mut impls_buf,
cx,
i,
containing_item,
},
);
}
+ if !impls_buf.is_empty() {
+ w.push_buffer(tmp_buf);
+ w.push_buffer(impls_buf);
+ }
}
- if let AssocItemRender::DerefFor { .. } = what {
- return;
- }
+
if !traits.is_empty() {
let deref_impl =
traits.iter().find(|t| t.trait_did() == cx.tcx().lang_items().deref_trait());
if let Some(impl_) = deref_impl {
let has_deref_mut =
traits.iter().any(|t| t.trait_did() == cx.tcx().lang_items().deref_mut_trait());
- render_deref_methods(w, cx, impl_, containing_item, has_deref_mut);
+ render_deref_methods(w, cx, impl_, containing_item, has_deref_mut, derefs);
+ }
+
+ // If we were already one level into rendering deref methods, we don't want to render
+ // anything after recursing into any further deref methods above.
+ if let AssocItemRender::DerefFor { .. } = what {
+ return;
}
+
let (synthetic, concrete): (Vec<&&Impl>, Vec<&&Impl>) =
traits.iter().partition(|t| t.inner_impl().synthetic);
let (blanket_impl, concrete): (Vec<&&Impl>, _) =
impl_: &Impl,
container_item: &clean::Item,
deref_mut: bool,
+ derefs: &mut FxHashSet<DefId>,
) {
let cache = cx.cache();
let deref_type = impl_.inner_impl().trait_.as_ref().unwrap();
debug!("Render deref methods for {:#?}, target {:#?}", impl_.inner_impl().for_, target);
let what =
AssocItemRender::DerefFor { trait_: deref_type, type_: real_target, deref_mut_: deref_mut };
- if let Some(did) = target.def_id_full(cache) {
- if let Some(type_did) = impl_.inner_impl().for_.def_id_full(cache) {
+ if let Some(did) = target.def_id(cache) {
+ if let Some(type_did) = impl_.inner_impl().for_.def_id(cache) {
// `impl Deref<Target = S> for S`
- if did == type_did {
+ if did == type_did || !derefs.insert(did) {
// Avoid infinite cycles
return;
}
}
- render_assoc_items(w, cx, container_item, did, what);
+ render_assoc_items_inner(w, cx, container_item, did, what, derefs);
} else {
if let Some(prim) = target.primitive_type() {
if let Some(&did) = cache.primitive_locations.get(&prim) {
- render_assoc_items(w, cx, container_item, did, what);
+ render_assoc_items_inner(w, cx, container_item, did, what, derefs);
}
}
}
fn notable_traits_decl(decl: &clean::FnDecl, cx: &Context<'_>) -> String {
let mut out = Buffer::html();
- if let Some(did) = decl.output.def_id_full(cx.cache()) {
+ if let Some(did) = decl.output.as_return().and_then(|t| t.def_id(cx.cache())) {
if let Some(impls) = cx.cache().impls.get(&did) {
for i in impls {
let impl_ = i.inner_impl();
error_codes: cx.shared.codes,
edition: cx.shared.edition(),
playground: &cx.shared.playground,
- heading_offset: HeadingOffset::H2
+ heading_offset: HeadingOffset::H4
}
.into_string()
);
if let Some(impl_) =
v.iter().find(|i| i.trait_did() == cx.tcx().lang_items().deref_trait())
{
- sidebar_deref_methods(cx, out, impl_, v);
+ let mut derefs = FxHashSet::default();
+ derefs.insert(did);
+ sidebar_deref_methods(cx, out, impl_, v, &mut derefs);
}
let format_impls = |impls: Vec<&Impl>| {
}
}
-fn sidebar_deref_methods(cx: &Context<'_>, out: &mut Buffer, impl_: &Impl, v: &[Impl]) {
+fn sidebar_deref_methods(
+ cx: &Context<'_>,
+ out: &mut Buffer,
+ impl_: &Impl,
+ v: &[Impl],
+ derefs: &mut FxHashSet<DefId>,
+) {
let c = cx.cache();
debug!("found Deref: {:?}", impl_);
})
{
debug!("found target, real_target: {:?} {:?}", target, real_target);
- if let Some(did) = target.def_id_full(c) {
- if let Some(type_did) = impl_.inner_impl().for_.def_id_full(c) {
+ if let Some(did) = target.def_id(c) {
+ if let Some(type_did) = impl_.inner_impl().for_.def_id(c) {
// `impl Deref<Target = S> for S`
- if did == type_did {
+ if did == type_did || !derefs.insert(did) {
// Avoid infinite cycles
return;
}
}
let deref_mut = v.iter().any(|i| i.trait_did() == cx.tcx().lang_items().deref_mut_trait());
let inner_impl = target
- .def_id_full(c)
+ .def_id(c)
.or_else(|| {
target.primitive_type().and_then(|prim| c.primitive_locations.get(&prim).cloned())
})
})
.collect::<Vec<_>>();
if !ret.is_empty() {
+ let map;
+ let id = if let Some(target_def_id) = real_target.def_id(c) {
+ map = cx.deref_id_map.borrow();
+ map.get(&target_def_id).expect("Deref section without derived id")
+ } else {
+ "deref-methods"
+ };
write!(
out,
- "<h3 class=\"sidebar-title\"><a href=\"#deref-methods\">Methods from {}<Target={}></a></h3>",
+ "<h3 class=\"sidebar-title\"><a href=\"#{}\">Methods from {}<Target={}></a></h3>",
+ id,
Escape(&format!("{:#}", impl_.inner_impl().trait_.as_ref().unwrap().print(cx))),
Escape(&format!("{:#}", real_target.print(cx))),
);
out.push_str("</div>");
}
}
+
+ // Recurse into any further impls that might exist for `target`
+ if let Some(target_did) = target.def_id_no_primitives() {
+ if let Some(target_impls) = c.impls.get(&target_did) {
+ if let Some(target_deref_impl) = target_impls.iter().find(|i| {
+ i.inner_impl()
+ .trait_
+ .as_ref()
+ .map(|t| Some(t.def_id()) == cx.tcx().lang_items().deref_trait())
+ .unwrap_or(false)
+ }) {
+ sidebar_deref_methods(cx, out, target_deref_impl, target_impls, derefs);
+ }
+ }
+ }
}
}
let mut res = implementors
.iter()
.filter(|i| {
- i.inner_impl()
- .for_
- .def_id_full(cache)
- .map_or(false, |d| !cache.paths.contains_key(&d))
+ i.inner_impl().for_.def_id(cache).map_or(false, |d| !cache.paths.contains_key(&d))
})
.filter_map(|i| extract_for_impl_name(&i.impl_item, cx))
.collect::<Vec<_>>();
ItemType::ProcAttribute => ("attributes", "Attribute Macros"),
ItemType::ProcDerive => ("derives", "Derive Macros"),
ItemType::TraitAlias => ("trait-aliases", "Trait aliases"),
+ ItemType::Generic => unreachable!(),
}
}
render_impl, render_stability_since_raw, write_srclink, AssocItemLink, Context,
ImplRenderingParameters,
};
-use crate::clean::{self, GetDefId};
+use crate::clean;
use crate::formats::item_type::ItemType;
use crate::formats::{AssocItemRender, Impl, RenderMode};
use crate::html::escape::Escape;
}
let (local, foreign) = implementors.iter().partition::<Vec<_>, _>(|i| {
- i.inner_impl().for_.def_id_full(cache).map_or(true, |d| cache.paths.contains_key(&d))
+ i.inner_impl().for_.def_id(cache).map_or(true, |d| cache.paths.contains_key(&d))
});
let (mut synthetic, mut concrete): (Vec<&&Impl>, Vec<&&Impl>) =
if let Some(stability_class) = field.stability_class(cx.tcx()) {
write!(w, "<span class=\"stab {stab}\"></span>", stab = stability_class);
}
- document(w, cx, field, Some(it), HeadingOffset::H2);
+ document(w, cx, field, Some(it), HeadingOffset::H3);
}
}
let def_id = it.def_id.expect_def_id();
w.write_str("</code>");
render_stability_since(w, variant, it, cx.tcx());
w.write_str("</div>");
- document(w, cx, variant, Some(it), HeadingOffset::H2);
+ document(w, cx, variant, Some(it), HeadingOffset::H3);
document_non_exhaustive(w, variant);
use crate::clean::Variant;
f = field.name.as_ref().unwrap(),
t = ty.print(cx)
);
- document(w, cx, field, Some(variant), HeadingOffset::H2);
+ document(w, cx, field, Some(variant), HeadingOffset::H4);
}
_ => unreachable!(),
}
name = field_name,
ty = ty.print(cx)
);
- document(w, cx, field, Some(it), HeadingOffset::H2);
+ document(w, cx, field, Some(it), HeadingOffset::H3);
}
}
}
use crate::docfs::PathError;
use crate::error::Error;
use crate::html::{layout, static_files};
+use crate::{try_err, try_none};
static FILES_UNVERSIONED: Lazy<FxHashMap<&str, &[u8]>> = Lazy::new(|| {
map! {
"SourceCodePro-Semibold.ttf.woff" => static_files::source_code_pro::SEMIBOLD,
"SourceCodePro-It.ttf.woff" => static_files::source_code_pro::ITALIC,
"SourceCodePro-LICENSE.txt" => static_files::source_code_pro::LICENSE,
- "noto-sans-kr-regular.woff2" => static_files::noto_sans_kr::REGULAR2,
- "noto-sans-kr-regular.woff" => static_files::noto_sans_kr::REGULAR,
- "noto-sans-kr-LICENSE.txt" => static_files::noto_sans_kr::LICENSE,
+ "NanumBarunGothic.ttf.woff2" => static_files::nanum_barun_gothic::REGULAR2,
+ "NanumBarunGothic.ttf.woff" => static_files::nanum_barun_gothic::REGULAR,
+ "NanumBarunGothic-LICENSE.txt" => static_files::nanum_barun_gothic::LICENSE,
"LICENSE-MIT.txt" => static_files::LICENSE_MIT,
"LICENSE-APACHE.txt" => static_files::LICENSE_APACHE,
"COPYRIGHT.txt" => static_files::COPYRIGHT,
/* Avoid using legacy CJK serif fonts in Windows like Batang. */
@font-face {
- font-family: 'Noto Sans KR';
- src: url("noto-sans-kr-regular.woff2") format("woff2"),
- url("noto-sans-kr-regular.woff") format("woff");
+ font-family: 'NanumBarunGothic';
+ src: url("NanumBarunGothic.ttf.woff2") format("woff2"),
+ url("NanumBarunGothic.ttf.woff") format("woff");
font-display: swap;
- unicode-range: U+AC00-D7AF, U+3130-318F, U+1100-11FF, U+A960-A97F, U+D7B0-D7FF;
+ unicode-range: U+AC00-D7AF, U+1100-11FF, U+3130-318F, U+A960-A97F, U+D7B0-D7FF;
}
* {
/* General structure and fonts */
body {
- font: 16px/1.4 "Source Serif 4", "Noto Sans KR", serif;
+ font: 16px/1.4 "Source Serif 4", NanumBarunGothic, serif;
margin: 0;
position: relative;
padding: 10px 15px 20px 15px;
margin: 20px 0 15px 0;
padding-bottom: 6px;
}
-h5, h6 {
+.docblock h3, .docblock h4, h5, h6 {
margin: 15px 0 5px 0;
}
h1.fqn {
h1.fqn > .in-band > a:hover {
text-decoration: underline;
}
-h2, h3, h4 {
+/* The only headings that get underlines are:
+ Markdown-generated headings within the top-doc
+ Rustdoc-generated h2 section headings (e.g. "Implementations", "Required Methods", etc)
+ Underlines elsewhere in the documentation break up visual flow and tend to invert
+ section hierarchies. */
+h2,
+.top-doc h3,
+.top-doc h4 {
border-bottom: 1px solid;
}
h3.code-header {
.content ul.crate a.crate, a.srclink,
/* This selector is for the items listed in the "all items" page. */
#main > ul.docblock > li > a {
- font-family: "Fira Sans", Arial, sans-serif;
+ font-family: "Fira Sans", Arial, NanumBarunGothic, sans-serif;
}
.content ul.crate a.crate {
--- /dev/null
+Copyright (c) 2010, NAVER Corporation (https://www.navercorp.com/),
+
+with Reserved Font Name Nanum, Naver Nanum, NanumGothic, Naver NanumGothic,
+NanumMyeongjo, Naver NanumMyeongjo, NanumBrush, Naver NanumBrush, NanumPen,
+Naver NanumPen, Naver NanumGothicEco, NanumGothicEco, Naver NanumMyeongjoEco,
+NanumMyeongjoEco, Naver NanumGothicLight, NanumGothicLight, NanumBarunGothic,
+Naver NanumBarunGothic, NanumSquareRound, NanumBarunPen, MaruBuri
+
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
+++ /dev/null
-Copyright 2014, 2015 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name 'Source'. All Rights Reserved. Source is a trademark of Adobe Systems Incorporated in the United States and/or other countries.
-
-This Font Software is licensed under the SIL Open Font License, Version 1.1.
-
-This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL
-
-
------------------------------------------------------------
-SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
------------------------------------------------------------
-
-PREAMBLE
-The goals of the Open Font License (OFL) are to stimulate worldwide
-development of collaborative font projects, to support the font creation
-efforts of academic and linguistic communities, and to provide a free and
-open framework in which fonts may be shared and improved in partnership
-with others.
-
-The OFL allows the licensed fonts to be used, studied, modified and
-redistributed freely as long as they are not sold by themselves. The
-fonts, including any derivative works, can be bundled, embedded,
-redistributed and/or sold with any software provided that any reserved
-names are not used by derivative works. The fonts and derivatives,
-however, cannot be released under any other type of license. The
-requirement for fonts to remain under this license does not apply
-to any document created using the fonts or their derivatives.
-
-DEFINITIONS
-"Font Software" refers to the set of files released by the Copyright
-Holder(s) under this license and clearly marked as such. This may
-include source files, build scripts and documentation.
-
-"Reserved Font Name" refers to any names specified as such after the
-copyright statement(s).
-
-"Original Version" refers to the collection of Font Software components as
-distributed by the Copyright Holder(s).
-
-"Modified Version" refers to any derivative made by adding to, deleting,
-or substituting -- in part or in whole -- any of the components of the
-Original Version, by changing formats or by porting the Font Software to a
-new environment.
-
-"Author" refers to any designer, engineer, programmer, technical
-writer or other person who contributed to the Font Software.
-
-PERMISSION & CONDITIONS
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Font Software, to use, study, copy, merge, embed, modify,
-redistribute, and sell modified and unmodified copies of the Font
-Software, subject to the following conditions:
-
-1) Neither the Font Software nor any of its individual components,
-in Original or Modified Versions, may be sold by itself.
-
-2) Original or Modified Versions of the Font Software may be bundled,
-redistributed and/or sold with any software, provided that each copy
-contains the above copyright notice and this license. These can be
-included either as stand-alone text files, human-readable headers or
-in the appropriate machine-readable metadata fields within text or
-binary files as long as those fields can be easily viewed by the user.
-
-3) No Modified Version of the Font Software may use the Reserved Font
-Name(s) unless explicit written permission is granted by the corresponding
-Copyright Holder. This restriction only applies to the primary font name as
-presented to the users.
-
-4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
-Software shall not be used to promote, endorse or advertise any
-Modified Version, except to acknowledge the contribution(s) of the
-Copyright Holder(s) and the Author(s) or with their explicit written
-permission.
-
-5) The Font Software, modified or unmodified, in part or in whole,
-must be distributed entirely under this license, and must not be
-distributed under any other license. The requirement for fonts to
-remain under this license does not apply to any document created
-using the Font Software.
-
-TERMINATION
-This license becomes null and void if any of the above conditions are
-not met.
-
-DISCLAIMER
-THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
-OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
-COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
-DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
-OTHER DEALINGS IN THE FONT SOFTWARE.
var elems = Object.create(null);
var elength = obj[GENERICS_DATA].length;
for (var x = 0; x < elength; ++x) {
- if (!elems[obj[GENERICS_DATA][x]]) {
- elems[obj[GENERICS_DATA][x]] = 0;
+ if (!elems[obj[GENERICS_DATA][x][NAME]]) {
+ elems[obj[GENERICS_DATA][x][NAME]] = 0;
}
- elems[obj[GENERICS_DATA][x]] += 1;
+ elems[obj[GENERICS_DATA][x][NAME]] += 1;
}
var total = 0;
var done = 0;
// Check for type name and type generics (if any).
function checkType(obj, val, literalSearch) {
var lev_distance = MAX_LEV_DISTANCE + 1;
+ var tmp_lev = MAX_LEV_DISTANCE + 1;
var len, x, firstGeneric;
if (obj[NAME] === val.name) {
if (literalSearch) {
var elems = Object.create(null);
len = obj[GENERICS_DATA].length;
for (x = 0; x < len; ++x) {
- if (!elems[obj[GENERICS_DATA][x]]) {
- elems[obj[GENERICS_DATA][x]] = 0;
+ if (!elems[obj[GENERICS_DATA][x][NAME]]) {
+ elems[obj[GENERICS_DATA][x][NAME]] = 0;
}
- elems[obj[GENERICS_DATA][x]] += 1;
+ elems[obj[GENERICS_DATA][x][NAME]] += 1;
}
var allFound = true;
// If the type has generics but don't match, then it won't return at this point.
// Otherwise, `checkGenerics` will return 0 and it'll return.
if (obj.length > GENERICS_DATA && obj[GENERICS_DATA].length !== 0) {
- var tmp_lev = checkGenerics(obj, val);
+ tmp_lev = checkGenerics(obj, val);
if (tmp_lev <= MAX_LEV_DISTANCE) {
return tmp_lev;
}
if ((!val.generics || val.generics.length === 0) &&
obj.length > GENERICS_DATA && obj[GENERICS_DATA].length > 0) {
return obj[GENERICS_DATA].some(
- function(name) {
- return name === val.name;
+ function(gen) {
+ return gen[NAME] === val.name;
});
}
return false;
// a levenshtein distance value that isn't *this* good so it goes
// into the search results but not too high.
lev_distance = Math.ceil((checkGenerics(obj, val) + lev_distance) / 2);
- } else if (obj.length > GENERICS_DATA && obj[GENERICS_DATA].length > 0) {
+ }
+ if (obj.length > GENERICS_DATA && obj[GENERICS_DATA].length > 0) {
// We can check if the type we're looking for is inside the generics!
var olength = obj[GENERICS_DATA].length;
for (x = 0; x < olength; ++x) {
- lev_distance = Math.min(levenshtein(obj[GENERICS_DATA][x], val.name),
- lev_distance);
+ tmp_lev = Math.min(levenshtein(obj[GENERICS_DATA][x][NAME], val.name), tmp_lev);
+ }
+ if (tmp_lev !== 0) {
+ // If we didn't find a good enough result, we recursively check inside the
+ // generics of the generics.
+ for (x = 0; x < olength && tmp_lev !== 0; ++x) {
+ tmp_lev = Math.min(
+ checkType(obj[GENERICS_DATA][x], val, literalSearch),
+ tmp_lev
+ );
+ }
}
}
// Now whatever happens, the returned distance is "less good" so we should mark it
// as such, and so we add 1 to the distance to make it "less good".
- return lev_distance + 1;
+ return Math.min(lev_distance, tmp_lev) + 1;
}
function findArg(obj, val, literalSearch, typeFilter) {
crate static LICENSE: &[u8] = include_bytes!("static/fonts/SourceCodePro-LICENSE.txt");
}
-crate mod noto_sans_kr {
- /// The file `noto-sans-kr.woff`, the Regular variant of the Noto Sans KR font.
- crate static REGULAR: &[u8] = include_bytes!("static/fonts/noto-sans-kr-regular.woff");
-
- /// The file `noto-sans-kr.woff2`, the Regular variant of the Noto Sans KR font.
- crate static REGULAR2: &[u8] = include_bytes!("static/fonts/noto-sans-kr-regular.woff2");
-
- /// The file `noto-sans-kr-LICENSE.txt`, the license text of the Noto Sans KR font.
- crate static LICENSE: &[u8] = include_bytes!("static/fonts/noto-sans-kr-LICENSE.txt");
+/// Files related to the Nanum Barun Gothic font.
+///
+/// These files are used to avoid some legacy CJK serif fonts in Windows.
+///
+/// Note that the Noto Sans KR font, which was used previously but was not very readable on Windows,
+/// has been replaced by the Nanum Barun Gothic font. This is due to Windows' implementation of font
+/// rendering that distorts OpenType fonts too much.
+///
+/// The font files were generated with these commands:
+///
+/// ```sh
+/// pyftsubset NanumBarunGothic.ttf \
+/// --unicodes=U+AC00-D7AF,U+1100-11FF,U+3130-318F,U+A960-A97F,U+D7B0-D7FF \
+/// --output-file=NanumBarunGothic.ttf.woff --flavor=woff
+/// ```
+/// ```sh
+/// pyftsubset NanumBarunGothic.ttf \
+/// --unicodes=U+AC00-D7AF,U+1100-11FF,U+3130-318F,U+A960-A97F,U+D7B0-D7FF \
+/// --output-file=NanumBarunGothic.ttf.woff2 --flavor=woff2
+/// ```
+crate mod nanum_barun_gothic {
+ /// The file `NanumBarunGothic.ttf.woff`, the Regular variant of the Nanum Barun Gothic font.
+ crate static REGULAR: &[u8] = include_bytes!("static/fonts/NanumBarunGothic.ttf.woff");
+
+ /// The file `NanumBarunGothic.ttf.woff2`, the Regular variant of the Nanum Barun Gothic font.
+ crate static REGULAR2: &[u8] = include_bytes!("static/fonts/NanumBarunGothic.ttf.woff2");
+
+ /// The file `NanumBarunGothic-LICENSE.txt`, the license text of the Nanum Barun Gothic font.
+ crate static LICENSE: &[u8] = include_bytes!("static/fonts/NanumBarunGothic-LICENSE.txt");
}
/// Files related to the sidebar in rustdoc sources.
.map(|t| {
clean::GenericBound::TraitBound(t, rustc_hir::TraitBoundModifier::None)
})
- .chain(lt.into_iter().map(clean::GenericBound::Outlives))
+ .chain(lt.map(clean::GenericBound::Outlives))
.map(|bound| bound.into_tcx(tcx))
.collect(),
}
TraitAlias => ItemKind::TraitAlias,
ProcAttribute => ItemKind::ProcAttribute,
ProcDerive => ItemKind::ProcDerive,
+ Generic => unreachable!(),
}
}
}
}}
}
-#[macro_use]
-mod externalfiles;
-
mod clean;
mod config;
mod core;
mod docfs;
+mod doctest;
mod doctree;
-#[macro_use]
mod error;
-mod doctest;
+mod externalfiles;
mod fold;
mod formats;
// used by the error-index generator, so it needs to be public
use crate::core::DocContext;
use crate::fold::DocFolder;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def_id::DefId;
use rustc_middle::ty::DefIdTree;
use rustc_span::symbol::sym;
for &cnum in cx.tcx.crates(()).iter() {
for &(did, _) in cx.tcx.all_trait_implementations(cnum).iter() {
- cx.tcx.sess.prof.generic_activity("build_extern_trait_impl").run(|| {
- inline::build_impl(cx, None, did, None, &mut new_items);
- });
+ inline::build_impl(cx, None, did, None, &mut new_items);
}
}
}
let mut cleaner = BadImplStripper { prims, items: crate_items };
+ let mut type_did_to_deref_target: FxHashMap<DefId, &Type> = FxHashMap::default();
+
+ // Follow all `Deref` targets of included items and recursively add them as valid
+ fn add_deref_target(
+ map: &FxHashMap<DefId, &Type>,
+ cleaner: &mut BadImplStripper,
+ type_did: DefId,
+ ) {
+ if let Some(target) = map.get(&type_did) {
+ debug!("add_deref_target: type {:?}, target {:?}", type_did, target);
+ if let Some(target_prim) = target.primitive_type() {
+ cleaner.prims.insert(target_prim);
+ } else if let Some(target_did) = target.def_id_no_primitives() {
+ // `impl Deref<Target = S> for S`
+ if target_did == type_did {
+ // Avoid infinite cycles
+ return;
+ }
+ cleaner.items.insert(target_did.into());
+ add_deref_target(map, cleaner, target_did);
+ }
+ }
+ }
// scan through included items ahead of time to splice in Deref targets to the "valid" sets
for it in &new_items {
if let ImplItem(Impl { ref for_, ref trait_, ref items, .. }) = *it.kind {
- if cleaner.keep_impl(for_)
- && trait_.as_ref().map(|t| t.def_id()) == cx.tcx.lang_items().deref_trait()
+ if trait_.as_ref().map(|t| t.def_id()) == cx.tcx.lang_items().deref_trait()
+ && cleaner.keep_impl(for_, true)
{
let target = items
.iter()
if let Some(prim) = target.primitive_type() {
cleaner.prims.insert(prim);
- } else if let Some(did) = target.def_id() {
+ } else if let Some(did) = target.def_id(&cx.cache) {
cleaner.items.insert(did.into());
}
+ if let Some(for_did) = for_.def_id_no_primitives() {
+ if type_did_to_deref_target.insert(for_did, target).is_none() {
+ // Since only the `DefId` portion of the `Type` instances is known to be the same for both
+ // the `Deref` target type and the impl-for type positions, this map of types is keyed by
+ // `DefId` and, for convenience, uses a special cleaner that accepts `DefId`s directly.
+ if cleaner.keep_impl_with_def_id(for_did.into()) {
+ add_deref_target(&type_did_to_deref_target, &mut cleaner, for_did);
+ }
+ }
+ }
}
}
}
new_items.retain(|it| {
if let ImplItem(Impl { ref for_, ref trait_, ref blanket_impl, .. }) = *it.kind {
- cleaner.keep_impl(for_)
- || trait_
- .as_ref()
- .map_or(false, |t| cleaner.keep_impl_with_def_id(t.def_id().into()))
+ cleaner.keep_impl(
+ for_,
+ trait_.as_ref().map(|t| t.def_id()) == cx.tcx.lang_items().deref_trait(),
+ ) || trait_.as_ref().map_or(false, |t| cleaner.keep_impl_with_def_id(t.def_id().into()))
|| blanket_impl.is_some()
} else {
true
}
impl BadImplStripper {
- fn keep_impl(&self, ty: &Type) -> bool {
+ fn keep_impl(&self, ty: &Type, is_deref: bool) -> bool {
if let Generic(_) = ty {
// keep impls made on generics
true
} else if let Some(prim) = ty.primitive_type() {
self.prims.contains(&prim)
- } else if let Some(did) = ty.def_id() {
- self.keep_impl_with_def_id(did.into())
+ } else if let Some(did) = ty.def_id_no_primitives() {
+ is_deref || self.keep_impl_with_def_id(did.into())
} else {
false
}
use rustc_middle::middle::privacy::AccessLevels;
use std::mem;
-use crate::clean::{self, GetDefId, Item, ItemIdSet};
+use crate::clean::{self, Item, ItemIdSet};
use crate::fold::{strip_item, DocFolder};
crate struct Stripper<'a> {
if imp.trait_.is_none() && imp.items.is_empty() {
return None;
}
- if let Some(did) = imp.for_.def_id() {
+ if let Some(did) = imp.for_.def_id_no_primitives() {
if did.is_local() && !imp.for_.is_assoc_ty() && !self.retained.contains(&did.into())
{
debug!("ImplStripper: impl item for stripped type; removing");
}
if let Some(generics) = imp.trait_.as_ref().and_then(|t| t.generics()) {
for typaram in generics {
- if let Some(did) = typaram.def_id() {
+ if let Some(did) = typaram.def_id_no_primitives() {
if did.is_local() && !self.retained.contains(&did.into()) {
debug!(
"ImplStripper: stripped item in trait's generics; removing impl"
--- /dev/null
+// Verifies that "CFI Canonical Jump Tables" module flag is added.
+//
+// ignore-windows
+// needs-sanitizer-cfi
+// only-aarch64
+// only-x86_64
+// compile-flags: -Clto -Zsanitizer=cfi
+
+#![crate_type="lib"]
+
+pub fn foo() {
+}
+
+// CHECK: !{{[0-9]+}} = !{i32 2, !"CFI Canonical Jump Tables", i32 1}
--- /dev/null
+// Verifies that pointer type membership tests for indirect calls are emitted.
+//
+// ignore-windows
+// needs-sanitizer-cfi
+// only-aarch64
+// only-x86_64
+// compile-flags: -Clto -Cno-prepopulate-passes -Zsanitizer=cfi
+
+#![crate_type="lib"]
+
+pub fn foo(f: fn(i32) -> i32, arg: i32) -> i32 {
+ // CHECK-LABEL: define{{.*}}foo{{.*}}!type !{{[0-9]+}}
+ // CHECK: start:
+ // CHECK-NEXT: %0 = bitcast i32 (i32)* %f to i8*
+ // CHECK-NEXT: %1 = call i1 @llvm.type.test(i8* %0, metadata !"{{[[:print:]]+}}")
+ // CHECK-NEXT: br i1 %1, label %type_test.pass, label %type_test.fail
+ // CHECK: type_test.pass:
+ // CHECK-NEXT: %2 = call i32 %f(i32 %arg)
+ // CHECK-NEXT: br label %bb1
+ // CHECK: type_test.fail:
+ // CHECK-NEXT: call void @llvm.trap()
+ // CHECK-NEXT: unreachable
+ f(arg)
+}
--- /dev/null
+// Verifies that type metadata for functions are emitted.
+//
+// ignore-windows
+// needs-sanitizer-cfi
+// only-aarch64
+// only-x86_64
+// compile-flags: -Clto -Cno-prepopulate-passes -Zsanitizer=cfi
+
+#![crate_type="lib"]
+
+pub fn foo(f: fn(i32) -> i32, arg: i32) -> i32 {
+ // CHECK-LABEL: define{{.*}}foo{{.*}}!type !{{[0-9]+}}
+ // CHECK: %1 = call i1 @llvm.type.test(i8* %0, metadata !"typeid1")
+ f(arg)
+}
+
+pub fn bar(f: fn(i32, i32) -> i32, arg1: i32, arg2: i32) -> i32 {
+ // CHECK-LABEL: define{{.*}}bar{{.*}}!type !{{[0-9]+}}
+ // CHECK: %1 = call i1 @llvm.type.test(i8* %0, metadata !"typeid2")
+ f(arg1, arg2)
+}
+
+pub fn baz(f: fn(i32, i32, i32) -> i32, arg1: i32, arg2: i32, arg3: i32) -> i32 {
+ // CHECK-LABEL: define{{.*}}baz{{.*}}!type !{{[0-9]+}}
+ // CHECK: %1 = call i1 @llvm.type.test(i8* %0, metadata !"typeid3")
+ f(arg1, arg2, arg3)
+}
+
+// CHECK: !{{[0-9]+}} = !{i64 0, !"typeid2"}
+// CHECK: !{{[0-9]+}} = !{i64 0, !"typeid3"}
+// CHECK: !{{[0-9]+}} = !{i64 0, !"typeid4"}
// are caught by catch_unwind. Also tests that Rust panics can unwind through
// C++ code.
-// For linking libstdc++ on MinGW
-#![cfg_attr(all(windows, target_env = "gnu"), feature(static_nobundle))]
#![feature(c_unwind)]
use std::panic::{catch_unwind, AssertUnwindSafe};
# that it is compiled with the expectation that pthreads is dynamically
# linked as a DLL and will fail to link with a statically linked libpthread.
#
- # So we end up with the following hack: we link use static-nobundle to only
+ # So we end up with the following hack: we use static:-bundle to only
# link the parts of libstdc++ that we actually use, which doesn't include
# the dependency on the pthreads DLL.
- EXTRARSCXXFLAGS := -l static-nobundle=stdc++
+ EXTRARSCXXFLAGS := -l static:-bundle=stdc++ -Z unstable-options
endif
else
ifeq ($(UNAME),Darwin)
// Tests that linking to C++ code with global destructors works.
-// For linking libstdc++ on MinGW
-#![cfg_attr(all(windows, target_env = "gnu"), feature(static_nobundle))]
-
extern "C" {
fn get() -> u32;
}
--- /dev/null
+// This test checks that headers (a) have the correct heading level, (b) are the right size,
+// and (c) have the correct underlining (or absence of underlining).
+// The sizes may change as design changes, but try to make sure a lower header is never bigger than
+// its parent headers. Also make sure lower headers don't have underlines when their parents lack
+// an underline.
+// Most of these sizes are set in CSS in `em` units, so here's a conversion chart based on our
+// default 16px font size:
+// 24px 1.5em
+// 22.4px 1.4em
+// 20.8px 1.3em
+// 18.4px 1.15em
+// 17.6px 1.1em
+// 16px 1em
+// 15.2px 0.95em
+goto: file://|DOC_PATH|/test_docs/struct.HeavilyDocumentedStruct.html
+
+assert-css: ("h1.fqn", {"font-size": "24px"})
+assert-css: ("h1.fqn", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#top-doc-prose-title", {"font-size": "20.8px"})
+assert-css: ("h2#top-doc-prose-title", {"border-bottom-width": "1px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"font-size": "18.4px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"border-bottom-width": "1px"})
+assert-css: ("h4#top-doc-prose-sub-sub-heading", {"font-size": "17.6px"})
+assert-css: ("h4#top-doc-prose-sub-sub-heading", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#fields", {"font-size": "22.4px"})
+assert-css: ("h2#fields", {"border-bottom-width": "1px"})
+assert-css: ("h3#title-for-field", {"font-size": "20.8px"})
+assert-css: ("h3#title-for-field", {"border-bottom-width": "0px"})
+assert-css: ("h4#sub-heading-for-field", {"font-size": "16px"})
+assert-css: ("h4#sub-heading-for-field", {"border-bottom-width": "0px"})
+
+assert-css: ("h2#implementations", {"font-size": "22.4px"})
+assert-css: ("h2#implementations", {"border-bottom-width": "1px"})
+
+assert-css: ("#impl > h3.code-header", {"font-size": "17.6px"})
+assert-css: ("#impl > h3.code-header", {"border-bottom-width": "0px"})
+assert-css: ("#method\.do_nothing > h4.code-header", {"font-size": "16px"})
+assert-css: ("#method\.do_nothing > h4.code-header", {"border-bottom-width": "0px"})
+
+assert-css: ("h4#title-for-struct-impl-doc", {"font-size": "16px"})
+assert-css: ("h4#title-for-struct-impl-doc", {"border-bottom-width": "0px"})
+assert-css: ("h5#sub-heading-for-struct-impl-doc", {"font-size": "16px"})
+assert-css: ("h5#sub-heading-for-struct-impl-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-sub-heading-for-struct-impl-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-sub-heading-for-struct-impl-doc", {"border-bottom-width": "0px"})
+
+assert-css: ("h5#title-for-struct-impl-item-doc", {"font-size": "16px"})
+assert-css: ("h5#title-for-struct-impl-item-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-heading-for-struct-impl-item-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-heading-for-struct-impl-item-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-sub-heading-for-struct-impl-item-doc", {"font-size": "15.2px"})
+
+goto: file://|DOC_PATH|/test_docs/enum.HeavilyDocumentedEnum.html
+
+assert-css: ("h1.fqn", {"font-size": "24px"})
+assert-css: ("h1.fqn", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#top-doc-prose-title", {"font-size": "20.8px"})
+assert-css: ("h2#top-doc-prose-title", {"border-bottom-width": "1px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"font-size": "18.4px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"border-bottom-width": "1px"})
+assert-css: ("h4#top-doc-prose-sub-sub-heading", {"font-size": "17.6px"})
+assert-css: ("h4#top-doc-prose-sub-sub-heading", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#variants", {"font-size": "22.4px"})
+assert-css: ("h2#variants", {"border-bottom-width": "1px"})
+
+assert-css: ("h3#none-prose-title", {"font-size": "20.8px"})
+assert-css: ("h3#none-prose-title", {"border-bottom-width": "0px"})
+assert-css: ("h4#none-prose-sub-heading", {"font-size": "16px"})
+assert-css: ("h4#none-prose-sub-heading", {"border-bottom-width": "0px"})
+
+assert-css: ("h3#wrapped-prose-title", {"font-size": "20.8px"})
+assert-css: ("h3#wrapped-prose-title", {"border-bottom-width": "0px"})
+assert-css: ("h4#wrapped-prose-sub-heading", {"font-size": "16px"})
+assert-css: ("h4#wrapped-prose-sub-heading", {"border-bottom-width": "0px"})
+
+assert-css: ("h4#wrapped0-prose-title", {"font-size": "16px"})
+assert-css: ("h4#wrapped0-prose-title", {"border-bottom-width": "0px"})
+assert-css: ("h5#wrapped0-prose-sub-heading", {"font-size": "16px"})
+assert-css: ("h5#wrapped0-prose-sub-heading", {"border-bottom-width": "0px"})
+
+assert-css: ("h4#structy-prose-title", {"font-size": "16px"})
+assert-css: ("h4#structy-prose-title", {"border-bottom-width": "0px"})
+assert-css: ("h5#structy-prose-sub-heading", {"font-size": "16px"})
+assert-css: ("h5#structy-prose-sub-heading", {"border-bottom-width": "0px"})
+
+assert-css: ("h2#implementations", {"font-size": "22.4px"})
+assert-css: ("h2#implementations", {"border-bottom-width": "1px"})
+
+assert-css: ("#impl > h3.code-header", {"font-size": "17.6px"})
+assert-css: ("#impl > h3.code-header", {"border-bottom-width": "0px"})
+assert-css: ("#method\.do_nothing > h4.code-header", {"font-size": "16px"})
+assert-css: ("#method\.do_nothing > h4.code-header", {"border-bottom-width": "0px"})
+
+assert-css: ("h4#title-for-enum-impl-doc", {"font-size": "16px"})
+assert-css: ("h4#title-for-enum-impl-doc", {"border-bottom-width": "0px"})
+assert-css: ("h5#sub-heading-for-enum-impl-doc", {"font-size": "16px"})
+assert-css: ("h5#sub-heading-for-enum-impl-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-sub-heading-for-enum-impl-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-sub-heading-for-enum-impl-doc", {"border-bottom-width": "0px"})
+
+assert-css: ("h5#title-for-enum-impl-item-doc", {"font-size": "16px"})
+assert-css: ("h5#title-for-enum-impl-item-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-heading-for-enum-impl-item-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-heading-for-enum-impl-item-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-sub-heading-for-enum-impl-item-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-sub-heading-for-enum-impl-item-doc", {"border-bottom-width": "0px"})
+
+goto: file://|DOC_PATH|/test_docs/union.HeavilyDocumentedUnion.html
+
+assert-css: ("h1.fqn", {"font-size": "24px"})
+assert-css: ("h1.fqn", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#top-doc-prose-title", {"font-size": "20.8px"})
+assert-css: ("h2#top-doc-prose-title", {"border-bottom-width": "1px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"font-size": "18.4px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#fields", {"font-size": "22.4px"})
+assert-css: ("h2#fields", {"border-bottom-width": "1px"})
+
+assert-css: ("h3#title-for-union-variant", {"font-size": "20.8px"})
+assert-css: ("h3#title-for-union-variant", {"border-bottom-width": "0px"})
+assert-css: ("h4#sub-heading-for-union-variant", {"font-size": "16px"})
+assert-css: ("h4#sub-heading-for-union-variant", {"border-bottom-width": "0px"})
+
+assert-css: ("h2#implementations", {"font-size": "22.4px"})
+assert-css: ("h2#implementations", {"border-bottom-width": "1px"})
+
+assert-css: ("#impl > h3.code-header", {"font-size": "17.6px"})
+assert-css: ("#impl > h3.code-header", {"border-bottom-width": "0px"})
+assert-css: ("h4#title-for-union-impl-doc", {"font-size": "16px"})
+assert-css: ("h4#title-for-union-impl-doc", {"border-bottom-width": "0px"})
+assert-css: ("h5#sub-heading-for-union-impl-doc", {"font-size": "16px"})
+assert-css: ("h5#sub-heading-for-union-impl-doc", {"border-bottom-width": "0px"})
+
+assert-css: ("h5#title-for-union-impl-item-doc", {"font-size": "16px"})
+assert-css: ("h5#title-for-union-impl-item-doc", {"border-bottom-width": "0px"})
+assert-css: ("h6#sub-heading-for-union-impl-item-doc", {"font-size": "15.2px"})
+assert-css: ("h6#sub-heading-for-union-impl-item-doc", {"border-bottom-width": "0px"})
+
+goto: file://|DOC_PATH|/test_docs/macro.heavily_documented_macro.html
+
+assert-css: ("h1.fqn", {"font-size": "24px"})
+assert-css: ("h1.fqn", {"border-bottom-width": "1px"})
+
+assert-css: ("h2#top-doc-prose-title", {"font-size": "20.8px"})
+assert-css: ("h2#top-doc-prose-title", {"border-bottom-width": "1px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"font-size": "18.4px"})
+assert-css: ("h3#top-doc-prose-sub-heading", {"border-bottom-width": "1px"})
// This test checks that the correct font is used on module items (in index.html pages).
goto: file://|DOC_PATH|/test_docs/index.html
-assert-css: (".item-table .module-item a", {"font-family": '"Fira Sans", Arial, sans-serif'}, ALL)
-assert-css: (".item-table .docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'}, ALL)
+assert-css: (".item-table .module-item a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'}, ALL)
+assert-css: (".item-table .docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'}, ALL)
// modules
-assert-css: ("#modules + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#modules + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#modules + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#modules + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
// structs
-assert-css: ("#structs + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#structs + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#structs + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#structs + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
// enums
-assert-css: ("#enums + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#enums + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#enums + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#enums + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
// traits
-assert-css: ("#traits + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#traits + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#traits + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#traits + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
// functions
-assert-css: ("#functions + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#functions + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#functions + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#functions + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
// keywords
-assert-css: ("#keywords + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, sans-serif'})
-assert-css: ("#keywords + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", "Noto Sans KR", serif'})
+assert-css: ("#keywords + .item-table .item-left a", {"font-family": '"Fira Sans", Arial, NanumBarunGothic, sans-serif'})
+assert-css: ("#keywords + .item-table .item-right.docblock-short", {"font-family": '"Source Serif 4", NanumBarunGothic, serif'})
assert-text: (".sidebar-elems > .items > ul > li:nth-child(5)", "Traits")
assert-text: (".sidebar-elems > .items > ul > li:nth-child(6)", "Functions")
assert-text: (".sidebar-elems > .items > ul > li:nth-child(7)", "Type Definitions")
-assert-text: (".sidebar-elems > .items > ul > li:nth-child(8)", "Keywords")
+assert-text: (".sidebar-elems > .items > ul > li:nth-child(8)", "Unions")
+assert-text: (".sidebar-elems > .items > ul > li:nth-child(9)", "Keywords")
assert-text: ("#structs + .item-table .item-left > a", "Foo")
click: "#structs + .item-table .item-left > a"
}
pub use crate::repro as repro2;
+
+/// # Top-doc Prose title
+///
+/// Text below title.
+///
+/// ## Top-doc Prose sub-heading
+///
+/// Text below sub-heading.
+///
+/// ### Top-doc Prose sub-sub-heading
+///
+/// Text below sub-sub-heading
+pub struct HeavilyDocumentedStruct {
+ /// # Title for field
+ /// ## Sub-heading for field
+ pub nothing: (),
+}
+
+/// # Title for struct impl doc
+///
+/// Text below heading.
+///
+/// ## Sub-heading for struct impl doc
+///
+/// Text below sub-heading.
+///
+/// ### Sub-sub-heading for struct impl doc
+///
+/// Text below sub-sub-heading.
+///
+impl HeavilyDocumentedStruct {
+ /// # Title for struct impl-item doc
+ /// Text below title.
+ /// ## Sub-heading for struct impl-item doc
+ /// Text below sub-heading.
+ /// ### Sub-sub-heading for struct impl-item doc
+ /// Text below sub-sub-heading.
+ pub fn do_nothing() {}
+}
+
+/// # Top-doc Prose title
+///
+/// Text below title.
+///
+/// ## Top-doc Prose sub-heading
+///
+/// Text below sub-heading.
+///
+/// ### Top-doc Prose sub-sub-heading
+///
+/// Text below sub-sub-heading
+pub enum HeavilyDocumentedEnum {
+ /// # None prose title
+ /// ## None prose sub-heading
+ None,
+ /// # Wrapped prose title
+ /// ## Wrapped prose sub-heading
+ Wrapped(
+ /// # Wrapped.0 prose title
+ /// ## Wrapped.0 prose sub-heading
+ String,
+ String,
+ ),
+ Structy {
+ /// # Structy prose title
+ /// ## Structy prose sub-heading
+ alpha: String,
+ beta: String,
+ },
+}
+
+/// # Title for enum impl doc
+///
+/// Text below heading.
+///
+/// ## Sub-heading for enum impl doc
+///
+/// Text below sub-heading.
+///
+/// ### Sub-sub-heading for enum impl doc
+///
+/// Text below sub-sub-heading.
+///
+impl HeavilyDocumentedEnum {
+ /// # Title for enum impl-item doc
+ /// Text below title.
+ /// ## Sub-heading for enum impl-item doc
+ /// Text below sub-heading.
+ /// ### Sub-sub-heading for enum impl-item doc
+ /// Text below sub-sub-heading.
+ pub fn do_nothing() {}
+}
+
+/// # Top-doc prose title
+///
+/// Text below heading.
+///
+/// ## Top-doc prose sub-heading
+///
+/// Text below heading.
+pub union HeavilyDocumentedUnion {
+ /// # Title for union variant
+ /// ## Sub-heading for union variant
+ pub nothing: (),
+ pub something: f32,
+}
+
+/// # Title for union impl doc
+/// ## Sub-heading for union impl doc
+impl HeavilyDocumentedUnion {
+ /// # Title for union impl-item doc
+ /// ## Sub-heading for union impl-item doc
+ pub fn do_nothing() {}
+}
+
+/// # Top-doc prose title
+///
+/// Text below heading.
+///
+/// ## Top-doc prose sub-heading
+///
+/// Text below heading.
+#[macro_export]
+macro_rules! heavily_documented_macro {
+ () => {};
+}
// exact-check
const QUERY = [
- '"R<P>"',
- '"P"',
- 'P',
- '"ExtraCreditStructMulti<ExtraCreditInnerMulti, ExtraCreditInnerMulti>"',
+ '"R<P>"',
+ '"P"',
+ 'P',
+ '"ExtraCreditStructMulti<ExtraCreditInnerMulti, ExtraCreditInnerMulti>"',
+ 'TraitCat',
+ 'TraitDog',
];
const EXPECTED = [
{
'returned': [
{ 'path': 'generics', 'name': 'alef' },
+ { 'path': 'generics', 'name': 'bet' },
],
'in_args': [
{ 'path': 'generics', 'name': 'alpha' },
+ { 'path': 'generics', 'name': 'beta' },
],
},
{
],
'returned': [],
},
+ {
+ 'in_args': [
+ { 'path': 'generics', 'name': 'gamma' },
+ ],
+ },
+ {
+ 'in_args': [
+ { 'path': 'generics', 'name': 'gamma' },
+ ],
+ },
];
pub fn redherringmatchforextracredit(
_param: ExtraCreditStructMulti<ExtraCreditInnerMulti, ()>
) { loop {} }
+
+pub trait TraitCat {}
+pub trait TraitDog {}
+
+pub fn gamma<T: TraitCat + TraitDog>(t: T) {}
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
- = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information
error: this attribute can only be applied at the crate level
--> $DIR/invalid-doc-attr.rs:15:12
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
- = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information
error: aborting due to 6 previous errors
--- /dev/null
+// check-pass
+
+// ICE found in https://github.com/rust-lang/rust/issues/83123
+
+pub struct Attribute;
+
+pub struct Map<'hir> {}
+impl<'hir> Map<'hir> {
+ pub fn attrs(&self) -> &'hir [Attribute] { &[] }
+}
+
+pub struct List<T>(T);
+
+impl<T> std::ops::Deref for List<T> {
+ type Target = [T];
+ fn deref(&self) -> &[T] {
+ &[]
+ }
+}
--- /dev/null
+// #26207: Show all methods reachable via Deref impls, recursing through multiple dereferencing
+// levels and across multiple crates.
+// For `Deref` on non-foreign types, look at `deref-recursive.rs`.
+
+// @has 'foo/struct.Foo.html'
+// @has '-' '//*[@id="deref-methods-PathBuf"]' 'Methods from Deref<Target = PathBuf>'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.as_path"]' 'pub fn as_path(&self)'
+// @has '-' '//*[@id="deref-methods-Path"]' 'Methods from Deref<Target = Path>'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.exists"]' 'pub fn exists(&self)'
+// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods-PathBuf"]' 'Methods from Deref<Target=PathBuf>'
+// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.as_path"]' 'as_path'
+// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods-Path"]' 'Methods from Deref<Target=Path>'
+// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.exists"]' 'exists'
+
+#![crate_name = "foo"]
+
+use std::ops::Deref;
+use std::path::PathBuf;
+
+pub struct Foo(PathBuf);
+
+impl Deref for Foo {
+ type Target = PathBuf;
+ fn deref(&self) -> &PathBuf { &self.0 }
+}
--- /dev/null
+// #26207: Show all methods reachable via Deref impls, recursing through multiple dereferencing
+// levels if needed.
+// For `Deref` on foreign types, look at `deref-recursive-pathbuf.rs`.
+
+// @has 'foo/struct.Foo.html'
+// @has '-' '//*[@id="deref-methods-Bar"]' 'Methods from Deref<Target = Bar>'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.bar"]' 'pub fn bar(&self)'
+// @has '-' '//*[@id="deref-methods-Baz"]' 'Methods from Deref<Target = Baz>'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.baz"]' 'pub fn baz(&self)'
+// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods-Bar"]' 'Methods from Deref<Target=Bar>'
+// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.bar"]' 'bar'
+// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods-Baz"]' 'Methods from Deref<Target=Baz>'
+// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.baz"]' 'baz'
+
+#![crate_name = "foo"]
+
+use std::ops::Deref;
+
+pub struct Foo(Bar);
+pub struct Bar(Baz);
+pub struct Baz;
+
+impl Deref for Foo {
+ type Target = Bar;
+ fn deref(&self) -> &Bar { &self.0 }
+}
+
+impl Deref for Bar {
+ type Target = Baz;
+ fn deref(&self) -> &Baz { &self.0 }
+}
+
+impl Bar {
+ /// This appears under `Foo` methods
+ pub fn bar(&self) {}
+}
+
+impl Baz {
+ /// This should also appear in `Foo` methods when recursing
+ pub fn baz(&self) {}
+}
#![crate_name = "foo"]
// @has 'foo/struct.Bar.html'
-// @has '-' '//*[@id="deref-methods"]' 'Methods from Deref<Target = FooJ>'
+// @has '-' '//*[@id="deref-methods-FooJ"]' 'Methods from Deref<Target = FooJ>'
// @has '-' '//*[@class="impl-items"]//*[@id="method.foo_a"]' 'pub fn foo_a(&self)'
// @has '-' '//*[@class="impl-items"]//*[@id="method.foo_b"]' 'pub fn foo_b(&self)'
// @has '-' '//*[@class="impl-items"]//*[@id="method.foo_c"]' 'pub fn foo_c(&self)'
// @has '-' '//*[@class="impl-items"]//*[@id="method.foo_j"]' 'pub fn foo_j(&self)'
-// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods"]' 'Methods from Deref<Target=FooJ>'
+// @has '-' '//*[@class="sidebar-title"]/a[@href="#deref-methods-FooJ"]' 'Methods from Deref<Target=FooJ>'
// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.foo_a"]' 'foo_a'
// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.foo_b"]' 'foo_b'
// @has '-' '//*[@class="sidebar-links"]/a[@href="#method.foo_c"]' 'foo_c'
fn deref(&self) -> &B { todo!() }
}
-// @!has recursive_deref_sidebar/struct.A.html '//div[@class="sidebar-links"]' 'foo_c'
+// @has recursive_deref_sidebar/struct.A.html '//div[@class="sidebar-links"]' 'foo_c'
impl Deref for B {
type Target = C;
fn deref(&self) -> &C { todo!() }
use std::ops::Deref;
+// Cyclic deref with the parent (which is not the top parent).
pub struct A;
pub struct B;
+pub struct C;
+
+impl C {
+ pub fn c(&self) {}
+}
// @has recursive_deref/struct.A.html '//h3[@class="code-header in-band"]' 'impl Deref for A'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.c"]' 'pub fn c(&self)'
impl Deref for A {
type Target = B;
}
// @has recursive_deref/struct.B.html '//h3[@class="code-header in-band"]' 'impl Deref for B'
+// @has '-' '//*[@class="impl-items"]//*[@id="method.c"]' 'pub fn c(&self)'
impl Deref for B {
- type Target = A;
+ type Target = C;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// @has recursive_deref/struct.C.html '//h3[@class="code-header in-band"]' 'impl Deref for C'
+impl Deref for C {
+ type Target = B;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// Cyclic deref with the grand-parent (which is not the top parent).
+pub struct D;
+pub struct E;
+pub struct F;
+pub struct G;
+
+impl G {
+ // There is no "self" parameter so it shouldn't be listed!
+ pub fn g() {}
+}
+
+// @has recursive_deref/struct.D.html '//h3[@class="code-header in-band"]' 'impl Deref for D'
+// We also check that `G::g` method isn't rendered because there is no `self` argument.
+// @!has '-' '//*[@id="deref-methods-G"]'
+impl Deref for D {
+ type Target = E;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// @has recursive_deref/struct.E.html '//h3[@class="code-header in-band"]' 'impl Deref for E'
+// We also check that `G::g` method isn't rendered because there is no `self` argument.
+// @!has '-' '//*[@id="deref-methods-G"]'
+impl Deref for E {
+ type Target = F;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// @has recursive_deref/struct.F.html '//h3[@class="code-header in-band"]' 'impl Deref for F'
+// We also check that `G::g` method isn't rendered because there is no `self` argument.
+// @!has '-' '//*[@id="deref-methods-G"]'
+impl Deref for F {
+ type Target = G;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// @has recursive_deref/struct.G.html '//h3[@class="code-header in-band"]' 'impl Deref for G'
+impl Deref for G {
+ type Target = E;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// Cyclic deref with top parent.
+pub struct H;
+pub struct I;
+
+impl I {
+ // There is no "self" parameter so it shouldn't be listed!
+ pub fn i() {}
+}
+
+// @has recursive_deref/struct.H.html '//h3[@class="code-header in-band"]' 'impl Deref for H'
+// @!has '-' '//*[@id="deref-methods-I"]'
+impl Deref for H {
+ type Target = I;
+
+ fn deref(&self) -> &Self::Target {
+ panic!()
+ }
+}
+
+// @has recursive_deref/struct.I.html '//h3[@class="code-header in-band"]' 'impl Deref for I'
+impl Deref for I {
+ type Target = H;
fn deref(&self) -> &Self::Target {
panic!()
+++ /dev/null
-// compile-flags: -Z unstable-options
-
-#![feature(rustc_private)]
-#![deny(rustc::potential_query_instability)]
-
-extern crate rustc_data_structures;
-
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-
-fn main() {
- let mut x = FxHashMap::<u32, i32>::default();
-
- for _ in x.drain() {}
- //~^ ERROR using `drain` can result in unstable
-
- for _ in x.iter() {}
- //~^ ERROR using `iter`
-
- for _ in Some(&mut x).unwrap().iter_mut() {}
- //~^ ERROR using `iter_mut`
-
- for _ in x {}
- //~^ ERROR using `into_iter`
-}
+++ /dev/null
-error: using `drain` can result in unstable query results
- --> $DIR/query_stability.rs:13:16
- |
-LL | for _ in x.drain() {}
- | ^^^^^
- |
-note: the lint level is defined here
- --> $DIR/query_stability.rs:4:9
- |
-LL | #![deny(rustc::potential_query_instability)]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: if you believe this case to be fine, allow this lint and add a comment explaining your rationale
-
-error: using `iter` can result in unstable query results
- --> $DIR/query_stability.rs:16:16
- |
-LL | for _ in x.iter() {}
- | ^^^^
- |
- = note: if you believe this case to be fine, allow this lint and add a comment explaining your rationale
-
-error: using `iter_mut` can result in unstable query results
- --> $DIR/query_stability.rs:19:36
- |
-LL | for _ in Some(&mut x).unwrap().iter_mut() {}
- | ^^^^^^^^
- |
- = note: if you believe this case to be fine, allow this lint and add a comment explaining your rationale
-
-error: using `into_iter` can result in unstable query results
- --> $DIR/query_stability.rs:22:14
- |
-LL | for _ in x {}
- | ^
- |
- = note: if you believe this case to be fine, allow this lint and add a comment explaining your rationale
-
-error: aborting due to 4 previous errors
-
+++ /dev/null
-// compile-flags: -Z unstable-options
-
-#![feature(rustc_attrs)]
-
-#[rustc_lint_query_instability]
-//~^ ERROR attribute should be applied to a function
-struct Foo;
-
-impl Foo {
- #[rustc_lint_query_instability(a)]
- //~^ ERROR malformed `rustc_lint_query_instability`
- fn bar() {}
-}
-
-fn main() {}
+++ /dev/null
-error: malformed `rustc_lint_query_instability` attribute input
- --> $DIR/query_stability_incorrect.rs:10:5
- |
-LL | #[rustc_lint_query_instability(a)]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: must be of the form: `#[rustc_lint_query_instability]`
-
-error: attribute should be applied to a function
- --> $DIR/query_stability_incorrect.rs:5:1
- |
-LL | #[rustc_lint_query_instability]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-LL |
-LL | struct Foo;
- | ----------- not a function
-
-error: aborting due to 2 previous errors
-
--- /dev/null
+// build-pass
+// only-x86_64
+
+#![feature(asm, target_feature_11)]
+
+#[target_feature(enable = "avx")]
+fn main() {
+ unsafe {
+ asm!(
+ "/* {} */",
+ out(ymm_reg) _,
+ );
+ }
+}
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
- = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information
error: this attribute can only be applied at the crate level
--> $DIR/invalid-doc-attr.rs:15:12
|
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
- = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline for more information
error: aborting due to 6 previous errors
--- /dev/null
+// Test that rustc doesn't ICE as in #90024.
+// check-pass
+// edition:2018
+
+#![warn(rust_2021_incompatible_closure_captures)]
+
+// Checks there's no double-subst into the generic args, otherwise we get OOB
+// MCVE by @lqd
+pub struct Graph<N, E, Ix> {
+ _edges: E,
+ _nodes: N,
+ _ix: Vec<Ix>,
+}
+fn graph<N, E>() -> Graph<N, E, i32> {
+ todo!()
+}
+fn first_ice() {
+ let g = graph::<i32, i32>();
+ let _ = || g;
+}
+
+// Checks that there is a subst into the fields, otherwise we get normalization error
+// MCVE by @cuviper
+use std::iter::Empty;
+struct Foo<I: Iterator> {
+ data: Vec<I::Item>,
+}
+pub fn second_ice() {
+ let v = Foo::<Empty<()>> { data: vec![] };
+
+ (|| v.data[0])();
+}
+
+pub fn main() {
+ first_ice();
+ second_ice();
+}
--- /dev/null
+#![allow(incomplete_features)]
+#![feature(generic_const_exprs)]
+
+struct ConstAssert<const COND: bool>;
+trait True {}
+impl True for ConstAssert<true> {}
+
+struct Range<T: PartialOrd, const MIN: T, const MAX: T>(T)
+//~^ ERROR the type of const parameters must not depend on other generic parameters
+//~| ERROR the type of const parameters must not depend on other generic parameters
+where
+ ConstAssert<{ MIN <= MAX }>: True;
+
+fn main() {}
--- /dev/null
+error[E0770]: the type of const parameters must not depend on other generic parameters
+ --> $DIR/issue-88997.rs:8:40
+ |
+LL | struct Range<T: PartialOrd, const MIN: T, const MAX: T>(T)
+ | ^ the type must not depend on the parameter `T`
+
+error[E0770]: the type of const parameters must not depend on other generic parameters
+ --> $DIR/issue-88997.rs:8:54
+ |
+LL | struct Range<T: PartialOrd, const MIN: T, const MAX: T>(T)
+ | ^ the type must not depend on the parameter `T`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0770`.
--- /dev/null
+// check-pass
+
+#![feature(generic_const_exprs)]
+#![allow(incomplete_features)]
+
+struct GenericStruct<const T: usize> { val: i64 }
+
+impl<const T: usize> From<GenericStruct<T>> for GenericStruct<{T + 1}> {
+ fn from(other: GenericStruct<T>) -> Self {
+ Self { val: other.val }
+ }
+}
+
+impl<const T: usize> From<GenericStruct<{T + 1}>> for GenericStruct<T> {
+ fn from(other: GenericStruct<{T + 1}>) -> Self {
+ Self { val: other.val }
+ }
+}
+
+fn main() {}
--- /dev/null
+#![feature(generic_const_exprs)]
+#![allow(incomplete_features)]
+
+pub struct Foo<T, const H: T>(T)
+//~^ ERROR the type of const parameters must not depend on other generic parameters
+where
+ [(); 1]:;
+
+fn main() {}
--- /dev/null
+error[E0770]: the type of const parameters must not depend on other generic parameters
+ --> $DIR/issue-90364.rs:4:28
+ |
+LL | pub struct Foo<T, const H: T>(T)
+ | ^ the type must not depend on the parameter `T`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0770`.
--- /dev/null
+// compile-flags: --crate-type=lib
+#![feature(const_mut_refs)]
+#![feature(const_precise_live_drops)]
+#![feature(const_swap)]
+
+// Mutable borrow of a field with drop impl.
+pub const fn f() {
+ let mut a: (u32, Option<String>) = (0, None); //~ ERROR destructors cannot be evaluated
+ let _ = &mut a.1;
+}
+
+// Mutable borrow of a type with drop impl.
+pub const A1: () = {
+ let mut x = None; //~ ERROR destructors cannot be evaluated
+ let mut y = Some(String::new());
+ let a = &mut x;
+ let b = &mut y;
+ std::mem::swap(a, b);
+ std::mem::forget(y);
+};
+
+// Mutable borrow of a type with drop impl.
+pub const A2: () = {
+ let mut x = None;
+ let mut y = Some(String::new());
+ let a = &mut x;
+ let b = &mut y;
+ std::mem::swap(a, b);
+ std::mem::forget(y);
+ let _z = x; //~ ERROR destructors cannot be evaluated
+};
+
+// Shared borrow of a type that might be !Freeze and Drop.
+pub const fn g1<T>() {
+ let x: Option<T> = None; //~ ERROR destructors cannot be evaluated
+ let _ = x.is_some();
+}
+
+// Shared borrow of a type that might be !Freeze and Drop.
+pub const fn g2<T>() {
+ let x: Option<T> = None;
+ let _ = x.is_some();
+ let _y = x; //~ ERROR destructors cannot be evaluated
+}
--- /dev/null
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/qualif-indirect-mutation-fail.rs:8:9
+ |
+LL | let mut a: (u32, Option<String>) = (0, None);
+ | ^^^^^ constant functions cannot evaluate destructors
+
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/qualif-indirect-mutation-fail.rs:14:9
+ |
+LL | let mut x = None;
+ | ^^^^^ constants cannot evaluate destructors
+
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/qualif-indirect-mutation-fail.rs:30:9
+ |
+LL | let _z = x;
+ | ^^ constants cannot evaluate destructors
+
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/qualif-indirect-mutation-fail.rs:35:9
+ |
+LL | let x: Option<T> = None;
+ | ^ constant functions cannot evaluate destructors
+
+error[E0493]: destructors cannot be evaluated at compile-time
+ --> $DIR/qualif-indirect-mutation-fail.rs:43:9
+ |
+LL | let _y = x;
+ | ^^ constant functions cannot evaluate destructors
+
+error: aborting due to 5 previous errors
+
+For more information about this error, try `rustc --explain E0493`.
--- /dev/null
+// compile-flags: --crate-type=lib
+// check-pass
+#![feature(const_mut_refs)]
+#![feature(const_precise_live_drops)]
+
+pub const fn f() {
+ let mut x: (Option<String>, u32) = (None, 0);
+ let mut a = 10;
+ *(&mut a) = 11;
+ x.1 = a;
+}
+
+pub const fn g() {
+ let mut a: (u32, Option<String>) = (0, None);
+ let _ = &mut a.0;
+}
--- /dev/null
+// Checks that unions use type based qualification. Regression test for issue #90268.
+#![feature(untagged_unions)]
+use std::cell::Cell;
+
+union U { i: u32, c: Cell<u32> }
+
+const C1: Cell<u32> = {
+ unsafe { U { c: Cell::new(0) }.c }
+};
+
+const C2: Cell<u32> = {
+ unsafe { U { i : 0 }.c }
+};
+
+const C3: Cell<u32> = {
+ let mut u = U { i: 0 };
+ u.i = 1;
+ unsafe { u.c }
+};
+
+const C4: U = U { i: 0 };
+
+const C5: [U; 1] = [U {i : 0}; 1];
+
+fn main() {
+ // Interior mutability should prevent promotion.
+ let _: &'static _ = &C1; //~ ERROR temporary value dropped while borrowed
+ let _: &'static _ = &C2; //~ ERROR temporary value dropped while borrowed
+ let _: &'static _ = &C3; //~ ERROR temporary value dropped while borrowed
+ let _: &'static _ = &C4; //~ ERROR temporary value dropped while borrowed
+ let _: &'static _ = &C5; //~ ERROR temporary value dropped while borrowed
+}
--- /dev/null
+error[E0716]: temporary value dropped while borrowed
+ --> $DIR/qualif-union.rs:27:26
+ |
+LL | let _: &'static _ = &C1;
+ | ---------- ^^ creates a temporary which is freed while still in use
+ | |
+ | type annotation requires that borrow lasts for `'static`
+...
+LL | }
+ | - temporary value is freed at the end of this statement
+
+error[E0716]: temporary value dropped while borrowed
+ --> $DIR/qualif-union.rs:28:26
+ |
+LL | let _: &'static _ = &C2;
+ | ---------- ^^ creates a temporary which is freed while still in use
+ | |
+ | type annotation requires that borrow lasts for `'static`
+...
+LL | }
+ | - temporary value is freed at the end of this statement
+
+error[E0716]: temporary value dropped while borrowed
+ --> $DIR/qualif-union.rs:29:26
+ |
+LL | let _: &'static _ = &C3;
+ | ---------- ^^ creates a temporary which is freed while still in use
+ | |
+ | type annotation requires that borrow lasts for `'static`
+...
+LL | }
+ | - temporary value is freed at the end of this statement
+
+error[E0716]: temporary value dropped while borrowed
+ --> $DIR/qualif-union.rs:30:26
+ |
+LL | let _: &'static _ = &C4;
+ | ---------- ^^ creates a temporary which is freed while still in use
+ | |
+ | type annotation requires that borrow lasts for `'static`
+LL | let _: &'static _ = &C5;
+LL | }
+ | - temporary value is freed at the end of this statement
+
+error[E0716]: temporary value dropped while borrowed
+ --> $DIR/qualif-union.rs:31:26
+ |
+LL | let _: &'static _ = &C5;
+ | ---------- ^^ creates a temporary which is freed while still in use
+ | |
+ | type annotation requires that borrow lasts for `'static`
+LL | }
+ | - temporary value is freed at the end of this statement
+
+error: aborting due to 5 previous errors
+
+For more information about this error, try `rustc --explain E0716`.
--- /dev/null
+#![feature(type_alias_impl_trait)]
+#![feature(generic_associated_types)]
+
+// See https://github.com/rust-lang/rust/issues/87258#issuecomment-883293367
+
+trait Trait1 {}
+
+struct Struct<'b>(&'b ());
+
+impl<'d> Trait1 for Struct<'d> {}
+
+pub trait Trait2 {
+ type FooFuture<'a>: Trait1;
+ fn foo<'a>() -> Self::FooFuture<'a>;
+}
+
+impl<'c, S: Trait2> Trait2 for &'c mut S {
+ type FooFuture<'a> = impl Trait1;
+ fn foo<'a>() -> Self::FooFuture<'a> { //~ ERROR
+ Struct(unimplemented!())
+ }
+}
+
+fn main() {}
--- /dev/null
+error[E0700]: hidden type for `impl Trait` captures lifetime that does not appear in bounds
+ --> $DIR/issue-87258_a.rs:19:21
+ |
+LL | fn foo<'a>() -> Self::FooFuture<'a> {
+ | ^^^^^^^^^^^^^^^^^^^
+ |
+ = note: hidden type `Struct<'_>` captures lifetime '_#7r
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0700`.
--- /dev/null
+#![feature(type_alias_impl_trait)]
+#![feature(generic_associated_types)]
+
+// See https://github.com/rust-lang/rust/issues/87258#issuecomment-883293367
+
+trait Trait1 {}
+
+struct Struct<'b>(&'b ());
+
+impl<'d> Trait1 for Struct<'d> {}
+
+pub trait Trait2 {
+ type FooFuture<'a>: Trait1;
+ fn foo<'a>() -> Self::FooFuture<'a>;
+}
+
+type Helper<'xenon, 'yttrium, KABOOM: Trait2> = impl Trait1;
+
+impl<'c, S: Trait2> Trait2 for &'c mut S {
+ type FooFuture<'a> = Helper<'c, 'a, S>;
+ fn foo<'a>() -> Self::FooFuture<'a> { //~ ERROR
+ Struct(unimplemented!())
+ }
+}
+
+fn main() {}
--- /dev/null
+error[E0700]: hidden type for `impl Trait` captures lifetime that does not appear in bounds
+ --> $DIR/issue-87258_b.rs:21:21
+ |
+LL | fn foo<'a>() -> Self::FooFuture<'a> {
+ | ^^^^^^^^^^^^^^^^^^^
+ |
+ = note: hidden type `Struct<'_>` captures lifetime '_#7r
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0700`.
--- /dev/null
+#![feature(decl_macro)]
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum Field {
+ RootCtxt,
+ MacroCtxt,
+}
+
+#[rustfmt::skip]
+macro x(
+ $macro_name:ident,
+ $macro2_name:ident,
+ $type_name:ident,
+ $field_name:ident,
+ $const_name:ident
+) {
+ #[derive(Copy, Clone)]
+ pub struct $type_name {
+ pub field: Field,
+ pub $field_name: Field,
+ }
+
+ pub const $const_name: $type_name =
+ $type_name { field: Field::MacroCtxt, $field_name: Field::RootCtxt };
+
+ #[macro_export]
+ macro_rules! $macro_name {
+ (check_fields_of $e:expr) => {{
+ let e = $e;
+ assert_eq!(e.field, Field::MacroCtxt);
+ assert_eq!(e.$field_name, Field::RootCtxt);
+ }};
+ (check_fields) => {{
+ assert_eq!($const_name.field, Field::MacroCtxt);
+ assert_eq!($const_name.$field_name, Field::RootCtxt);
+ }};
+ (construct) => {
+ $type_name { field: Field::MacroCtxt, $field_name: Field::RootCtxt }
+ };
+ }
+
+ pub macro $macro2_name {
+ (check_fields_of $e:expr) => {{
+ let e = $e;
+ assert_eq!(e.field, Field::MacroCtxt);
+ assert_eq!(e.$field_name, Field::RootCtxt);
+ }},
+ (check_fields) => {{
+ assert_eq!($const_name.field, Field::MacroCtxt);
+ assert_eq!($const_name.$field_name, Field::RootCtxt);
+ }},
+ (construct) => {
+ $type_name { field: Field::MacroCtxt, $field_name: Field::RootCtxt }
+ }
+ }
+}
+
+x!(test_fields, test_fields2, MyStruct, field, MY_CONST);
+
+pub fn check_fields(s: MyStruct) {
+ test_fields!(check_fields_of s);
+}
+
+pub fn check_fields_local() {
+ test_fields!(check_fields);
+ test_fields2!(check_fields);
+
+ let s1 = test_fields!(construct);
+ test_fields!(check_fields_of s1);
+
+ let s2 = test_fields2!(construct);
+ test_fields2!(check_fields_of s2);
+}
--- /dev/null
+#![feature(decl_macro)]
+
+#[derive(PartialEq, Eq, Debug)]
+pub enum Method {
+ DefaultMacroCtxt,
+ DefaultRootCtxt,
+ OverrideMacroCtxt,
+ OverrideRootCtxt,
+}
+
+#[rustfmt::skip]
+macro x($macro_name:ident, $macro2_name:ident, $trait_name:ident, $method_name:ident) {
+ pub trait $trait_name {
+ fn method(&self) -> Method {
+ Method::DefaultMacroCtxt
+ }
+
+ fn $method_name(&self) -> Method {
+ Method::DefaultRootCtxt
+ }
+ }
+
+ impl $trait_name for () {}
+ impl $trait_name for bool {
+ fn method(&self) -> Method {
+ Method::OverrideMacroCtxt
+ }
+
+ fn $method_name(&self) -> Method {
+ Method::OverrideRootCtxt
+ }
+ }
+
+ #[macro_export]
+ macro_rules! $macro_name {
+ (check_resolutions) => {
+ assert_eq!(().method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&()), Method::DefaultMacroCtxt);
+ assert_eq!(().$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&()), Method::DefaultRootCtxt);
+
+ assert_eq!(false.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&false), Method::OverrideMacroCtxt);
+ assert_eq!(false.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&false), Method::OverrideRootCtxt);
+
+ assert_eq!('a'.method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&'a'), Method::DefaultMacroCtxt);
+ assert_eq!('a'.$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&'a'), Method::DefaultRootCtxt);
+
+ assert_eq!(1i32.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&1i32), Method::OverrideMacroCtxt);
+ assert_eq!(1i32.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&1i32), Method::OverrideRootCtxt);
+
+ assert_eq!(1i64.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&1i64), Method::OverrideMacroCtxt);
+ assert_eq!(1i64.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&1i64), Method::OverrideRootCtxt);
+ };
+ (assert_no_override $v:expr) => {
+ assert_eq!($v.method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&$v), Method::DefaultMacroCtxt);
+ assert_eq!($v.$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&$v), Method::DefaultRootCtxt);
+ };
+ (assert_override $v:expr) => {
+ assert_eq!($v.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&$v), Method::OverrideMacroCtxt);
+ assert_eq!($v.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&$v), Method::OverrideRootCtxt);
+ };
+ (impl for $t:ty) => {
+ impl $trait_name for $t {
+ fn method(&self) -> Method {
+ Method::OverrideMacroCtxt
+ }
+
+ fn $method_name(&self) -> Method {
+ Method::OverrideRootCtxt
+ }
+ }
+ };
+ }
+
+ pub macro $macro2_name {
+ (check_resolutions) => {
+ assert_eq!(().method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&()), Method::DefaultMacroCtxt);
+ assert_eq!(().$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&()), Method::DefaultRootCtxt);
+
+ assert_eq!(false.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&false), Method::OverrideMacroCtxt);
+ assert_eq!(false.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&false), Method::OverrideRootCtxt);
+
+ assert_eq!('a'.method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&'a'), Method::DefaultMacroCtxt);
+ assert_eq!('a'.$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&'a'), Method::DefaultRootCtxt);
+
+ assert_eq!(1i32.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&1i32), Method::OverrideMacroCtxt);
+ assert_eq!(1i32.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&1i32), Method::OverrideRootCtxt);
+
+ assert_eq!(1i64.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&1i64), Method::OverrideMacroCtxt);
+ assert_eq!(1i64.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&1i64), Method::OverrideRootCtxt);
+ },
+ (assert_no_override $v:expr) => {
+ assert_eq!($v.method(), Method::DefaultMacroCtxt);
+ assert_eq!($trait_name::method(&$v), Method::DefaultMacroCtxt);
+ assert_eq!($v.$method_name(), Method::DefaultRootCtxt);
+ assert_eq!($trait_name::$method_name(&$v), Method::DefaultRootCtxt);
+ },
+ (assert_override $v:expr) => {
+ assert_eq!($v.method(), Method::OverrideMacroCtxt);
+ assert_eq!($trait_name::method(&$v), Method::OverrideMacroCtxt);
+ assert_eq!($v.$method_name(), Method::OverrideRootCtxt);
+ assert_eq!($trait_name::$method_name(&$v), Method::OverrideRootCtxt);
+ },
+ (impl for $t:ty) => {
+ impl $trait_name for $t {
+ fn method(&self) -> Method {
+ Method::OverrideMacroCtxt
+ }
+
+ fn $method_name(&self) -> Method {
+ Method::OverrideRootCtxt
+ }
+ }
+ }
+ }
+}
+
+x!(test_trait, test_trait2, MyTrait, method);
+
+impl MyTrait for char {}
+test_trait!(impl for i32);
+test_trait2!(impl for i64);
+
+pub fn check_crate_local() {
+ test_trait!(check_resolutions);
+ test_trait2!(check_resolutions);
+}
+
+// Check that any comparison of idents at monomorphization time is correct
+pub fn check_crate_local_generic<T: MyTrait, U: MyTrait>(t: T, u: U) {
+ test_trait!(check_resolutions);
+ test_trait2!(check_resolutions);
+
+ test_trait!(assert_no_override t);
+ test_trait2!(assert_no_override t);
+ test_trait!(assert_override u);
+ test_trait2!(assert_override u);
+}
--- /dev/null
+#![feature(decl_macro)]
+
+macro x() {
+ pub struct MyStruct;
+}
+
+x!();
--- /dev/null
+#![feature(decl_macro)]
+
+macro x($macro_name:ident) {
+ #[macro_export]
+ macro_rules! $macro_name {
+ (define) => {
+ pub struct MyStruct;
+ };
+ (create) => {
+ MyStruct {}
+ };
+ }
+}
+
+x!(my_struct);
--- /dev/null
+#![feature(decl_macro)]
+
+#[rustfmt::skip]
+macro x($macro_name:ident, $macro2_name:ident, $type_name:ident, $variant_name:ident) {
+ #[repr(u8)]
+ pub enum $type_name {
+ Variant = 0,
+ $variant_name = 1,
+ }
+
+ #[macro_export]
+ macro_rules! $macro_name {
+ () => {{
+ assert_eq!($type_name::Variant as u8, 0);
+ assert_eq!($type_name::$variant_name as u8, 1);
+ assert_eq!(<$type_name>::Variant as u8, 0);
+ assert_eq!(<$type_name>::$variant_name as u8, 1);
+ }};
+ }
+
+ pub macro $macro2_name {
+ () => {{
+ assert_eq!($type_name::Variant as u8, 0);
+ assert_eq!($type_name::$variant_name as u8, 1);
+ assert_eq!(<$type_name>::Variant as u8, 0);
+ assert_eq!(<$type_name>::$variant_name as u8, 1);
+ }},
+ }
+}
+
+x!(test_variants, test_variants2, MyEnum, Variant);
+
+pub fn check_variants() {
+ test_variants!();
+ test_variants2!();
+}
--- /dev/null
+// Check that a macro from another crate can define an item in one expansion
+// and use it from another, without it being visible to everyone.
+// This requires that the definition of `my_struct` preserves the hygiene
+// information for the tokens in its definition.
+
+// check-pass
+// aux-build:use_by_macro.rs
+
+#![feature(type_name_of_val)]
+extern crate use_by_macro;
+
+use use_by_macro::*;
+
+enum MyStruct {}
+my_struct!(define);
+
+fn main() {
+ let x = my_struct!(create);
+}
--- /dev/null
+// Test that fields on a struct defined in another crate are resolved correctly
+// when their names differ only in `SyntaxContext`.
+
+// run-pass
+// aux-build:fields.rs
+
+extern crate fields;
+
+use fields::*;
+
+fn main() {
+ check_fields_local();
+
+ test_fields!(check_fields);
+ test_fields2!(check_fields);
+
+ let s1 = test_fields!(construct);
+ check_fields(s1);
+ test_fields!(check_fields_of s1);
+
+ let s2 = test_fields2!(construct);
+ check_fields(s2);
+ test_fields2!(check_fields_of s2);
+}
--- /dev/null
+// Check that globs cannot import hygienic identifiers from a macro expansion
+// in another crate. `my_struct` is a `macro_rules` macro, so the struct it
+// defines is only not imported because `my_struct` is defined by a macros 2.0
+// macro.
+
+// aux-build:use_by_macro.rs
+
+extern crate use_by_macro;
+
+use use_by_macro::*;
+
+mod m {
+ use use_by_macro::*;
+
+ my_struct!(define);
+}
+
+use m::*;
+
+fn main() {
+ let x = my_struct!(create);
+ //~^ ERROR cannot find struct, variant or union type `MyStruct` in this scope
+}
--- /dev/null
+error[E0422]: cannot find struct, variant or union type `MyStruct` in this scope
+ --> $DIR/cross-crate-glob-hygiene.rs:21:13
+ |
+LL | let x = my_struct!(create);
+ | ^^^^^^^^^^^^^^^^^^ not found in this scope
+ |
+ = note: this error originates in the macro `my_struct` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0422`.
--- /dev/null
+// Test that methods defined in another crate are resolved correctly when their
+// names differ only in `SyntaxContext`. This also checks that any name
+// resolution done when monomorphizing is correct.
+
+// run-pass
+// aux-build:methods.rs
+
+extern crate methods;
+
+use methods::*;
+
+struct A;
+struct B;
+struct C;
+
+impl MyTrait for A {}
+test_trait!(impl for B);
+test_trait2!(impl for C);
+
+fn main() {
+ check_crate_local();
+ check_crate_local_generic(A, B);
+ check_crate_local_generic(A, C);
+
+ test_trait!(check_resolutions);
+ test_trait2!(check_resolutions);
+ test_trait!(assert_no_override A);
+ test_trait2!(assert_no_override A);
+ test_trait!(assert_override B);
+ test_trait2!(assert_override B);
+ test_trait!(assert_override C);
+ test_trait2!(assert_override C);
+}
--- /dev/null
+// Check that two items defined in another crate that have identifiers that
+// only differ by `SyntaxContext` do not cause name collisions when imported
+// in another crate.
+
+// check-pass
+// aux-build:needs_hygiene.rs
+
+extern crate needs_hygiene;
+
+use needs_hygiene::*;
+
+fn main() {}
--- /dev/null
+// Check that an identifier from a 2.0 macro in another crate cannot be
+// resolved with an identifier that's not from a macro expansion.
+
+// aux-build:use_by_macro.rs
+
+extern crate use_by_macro;
+
+use use_by_macro::*;
+
+my_struct!(define);
+
+fn main() {
+ let x = MyStruct {};
+ //~^ ERROR cannot find struct, variant or union type `MyStruct` in this scope
+}
--- /dev/null
+error[E0422]: cannot find struct, variant or union type `MyStruct` in this scope
+ --> $DIR/cross-crate-name-hiding-2.rs:13:13
+ |
+LL | let x = MyStruct {};
+ | ^^^^^^^^ not found in this scope
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0422`.
--- /dev/null
+// Check that an item defined by a 2.0 macro in another crate cannot be used in
+// another crate.
+
+// aux-build:pub_hygiene.rs
+
+extern crate pub_hygiene;
+
+use pub_hygiene::*;
+
+fn main() {
+ let x = MyStruct {};
+ //~^ ERROR cannot find struct, variant or union type `MyStruct` in this scope
+}
--- /dev/null
+error[E0422]: cannot find struct, variant or union type `MyStruct` in this scope
+ --> $DIR/cross-crate-name-hiding.rs:11:13
+ |
+LL | let x = MyStruct {};
+ | ^^^^^^^^ not found in this scope
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0422`.
--- /dev/null
+// Check that items with identical `SyntaxContext` conflict even when that
+// context involves a mark from another crate.
+
+// aux-build:use_by_macro.rs
+
+extern crate use_by_macro;
+
+use use_by_macro::*;
+
+my_struct!(define);
+//~^ ERROR the name `MyStruct` is defined multiple times
+my_struct!(define);
+
+fn main() {}
--- /dev/null
+error[E0428]: the name `MyStruct` is defined multiple times
+ --> $DIR/cross-crate-redefine.rs:10:1
+ |
+LL | my_struct!(define);
+ | ^^^^^^^^^^^^^^^^^^ `MyStruct` redefined here
+LL |
+LL | my_struct!(define);
+ | ------------------ previous definition of the type `MyStruct` here
+ |
+ = note: `MyStruct` must be defined only once in the type namespace of this module
+ = note: this error originates in the macro `my_struct` (in Nightly builds, run with -Z macro-backtrace for more info)
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0428`.
--- /dev/null
+// Test that variants of an enum defined in another crate are resolved
+// correctly when their names differ only in `SyntaxContext`.
+
+// run-pass
+// aux-build:variants.rs
+
+extern crate variants;
+
+use variants::*;
+
+fn main() {
+ check_variants();
+
+ test_variants!();
+ test_variants2!();
+
+ assert_eq!(MyEnum::Variant as u8, 1);
+}
+++ /dev/null
-// check-pass
-// aux-build:needs_hygiene.rs
-
-extern crate needs_hygiene;
-
-use needs_hygiene::*;
-
-fn main() {}
--- /dev/null
+// force-host
+// no-prefer-dynamic
+
+#![crate_type = "proc-macro"]
+
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+
+#[proc_macro_derive(ICE)]
+pub fn derive(_: TokenStream) -> TokenStream {
+ r#"#[allow(missing_docs)] struct X { }"#.parse().unwrap()
+}
--- /dev/null
+// aux-build:issue-89971-outer-attr-following-inner-attr-ice.rs
+
+#[macro_use]
+extern crate issue_89971_outer_attr_following_inner_attr_ice;
+
+fn main() {
+ Mew();
+ X {};
+}
+
+#![deny(missing_docs)]
+//~^ ERROR an inner attribute is not permitted in this context
+#[derive(ICE)]
+#[deny(missing_docs)]
+struct Mew();
--- /dev/null
+error: an inner attribute is not permitted in this context
+ --> $DIR/issue-89971-outer-attr-following-inner-attr-ice.rs:11:1
+ |
+LL | #![deny(missing_docs)]
+ | ^^^^^^^^^^^^^^^^^^^^^^
+...
+LL | struct Mew();
+ | ------------- the inner attribute doesn't annotate this struct
+ |
+ = note: inner attributes, like `#![no_std]`, annotate the item enclosing them, and are usually found at the beginning of source files
+help: to annotate the struct, change the attribute from inner to outer style
+ |
+LL - #![deny(missing_docs)]
+LL + #[deny(missing_docs)]
+ |
+
+error: aborting due to previous error
+
crate0::{{expn2}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Bang, "produce_it")
crate0::{{expn3}}: parent: crate0::{{expn2}}, call_site_ctxt: #4, def_site_ctxt: #0, kind: Macro(Bang, "meta_macro::print_def_site")
crate0::{{expn4}}: parent: crate0::{{expn3}}, call_site_ctxt: #5, def_site_ctxt: #0, kind: Macro(Bang, "$crate::dummy")
+crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Attr, "derive")
+crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Attr, "derive")
crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Bang, "include")
crate2::{{expn1}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: AstPass(StdImports)
crate0::{{expn2}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Bang, "outer")
crate0::{{expn3}}: parent: crate0::{{expn2}}, call_site_ctxt: #4, def_site_ctxt: #4, kind: Macro(Bang, "inner")
crate0::{{expn4}}: parent: crate0::{{expn3}}, call_site_ctxt: #6, def_site_ctxt: #0, kind: Macro(Bang, "print_bang")
+crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Attr, "derive")
+crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Attr, "derive")
crate1::{{expnNNN}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Bang, "include")
crate2::{{expn1}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: AstPass(StdImports)
| the method is available for `Rc<u8>` here
|
= help: items from traits can only be used if the trait is in scope
+ = note: 'std::convert::TryInto' is included in the prelude starting in Edition 2021
help: consider wrapping the receiver expression with the appropriate type
|
LL | let _: u32 = Box::new(3u8).try_into().unwrap();
--- /dev/null
+// Make sure that trying to access `TryInto`, `TryFrom`, `FromIterator` in pre-2021 mentions
+// Edition 2021 change
+// edition:2018
+
+fn test() {
+ let _i: i16 = 0_i32.try_into().unwrap();
+ //~^ ERROR no method named `try_into` found for type `i32` in the current scope
+ //~| NOTE method not found in `i32`
+ //~| NOTE 'std::convert::TryInto' is included in the prelude starting in Edition 2021
+
+ let _i: i16 = TryFrom::try_from(0_i32).unwrap();
+ //~^ ERROR failed to resolve: use of undeclared type
+ //~| NOTE not found in this scope
+ //~| NOTE 'std::convert::TryFrom' is included in the prelude starting in Edition 2021
+ //~| NOTE 'core::convert::TryFrom' is included in the prelude starting in Edition 2021
+
+ let _i: i16 = TryInto::try_into(0_i32).unwrap();
+ //~^ ERROR failed to resolve: use of undeclared type
+ //~| NOTE not found in this scope
+ //~| NOTE 'std::convert::TryInto' is included in the prelude starting in Edition 2021
+ //~| NOTE 'core::convert::TryInto' is included in the prelude starting in Edition 2021
+
+ let _v: Vec<_> = FromIterator::from_iter(&[1]);
+ //~^ ERROR failed to resolve: use of undeclared type
+ //~| NOTE 'std::iter::FromIterator' is included in the prelude starting in Edition 2021
+ //~| NOTE 'core::iter::FromIterator' is included in the prelude starting in Edition 2021
+}
+
+fn main() {
+ test();
+}
--- /dev/null
+error[E0433]: failed to resolve: use of undeclared type `TryFrom`
+ --> $DIR/suggest-tryinto-edition-change.rs:11:19
+ |
+LL | let _i: i16 = TryFrom::try_from(0_i32).unwrap();
+ | ^^^^^^^ not found in this scope
+ |
+ = note: 'std::convert::TryFrom' is included in the prelude starting in Edition 2021
+ = note: 'core::convert::TryFrom' is included in the prelude starting in Edition 2021
+help: consider importing one of these items
+ |
+LL | use core::convert::TryFrom;
+ |
+LL | use std::convert::TryFrom;
+ |
+
+error[E0433]: failed to resolve: use of undeclared type `TryInto`
+ --> $DIR/suggest-tryinto-edition-change.rs:17:19
+ |
+LL | let _i: i16 = TryInto::try_into(0_i32).unwrap();
+ | ^^^^^^^ not found in this scope
+ |
+ = note: 'std::convert::TryInto' is included in the prelude starting in Edition 2021
+ = note: 'core::convert::TryInto' is included in the prelude starting in Edition 2021
+help: consider importing one of these items
+ |
+LL | use core::convert::TryInto;
+ |
+LL | use std::convert::TryInto;
+ |
+
+error[E0433]: failed to resolve: use of undeclared type `FromIterator`
+ --> $DIR/suggest-tryinto-edition-change.rs:23:22
+ |
+LL | let _v: Vec<_> = FromIterator::from_iter(&[1]);
+ | ^^^^^^^^^^^^
+ |
+ ::: $SRC_DIR/core/src/iter/traits/collect.rs:LL:COL
+ |
+LL | pub trait IntoIterator {
+ | ---------------------- similarly named trait `IntoIterator` defined here
+ |
+ = note: 'std::iter::FromIterator' is included in the prelude starting in Edition 2021
+ = note: 'core::iter::FromIterator' is included in the prelude starting in Edition 2021
+help: a trait with a similar name exists
+ |
+LL | let _v: Vec<_> = IntoIterator::from_iter(&[1]);
+ | ~~~~~~~~~~~~
+help: consider importing one of these items
+ |
+LL | use core::iter::FromIterator;
+ |
+LL | use std::iter::FromIterator;
+ |
+
+error[E0599]: no method named `try_into` found for type `i32` in the current scope
+ --> $DIR/suggest-tryinto-edition-change.rs:6:25
+ |
+LL | let _i: i16 = 0_i32.try_into().unwrap();
+ | ^^^^^^^^ method not found in `i32`
+ |
+ ::: $SRC_DIR/core/src/convert/mod.rs:LL:COL
+ |
+LL | fn try_into(self) -> Result<T, Self::Error>;
+ | -------- the method is available for `i32` here
+ |
+ = help: items from traits can only be used if the trait is in scope
+ = note: 'std::convert::TryInto' is included in the prelude starting in Edition 2021
+help: the following trait is implemented but not in scope; perhaps add a `use` for it:
+ |
+LL | use std::convert::TryInto;
+ |
+
+error: aborting due to 4 previous errors
+
+Some errors have detailed explanations: E0433, E0599.
+For more information about an error, try `rustc --explain E0433`.
--- /dev/null
+// check-pass
+
+pub fn foo<'a>(s: &'a mut ()) where &'a mut (): Clone {
+ <&mut () as Clone>::clone(&s);
+}
+
+fn main() {}
--- /dev/null
+// check-pass
+pub trait Archive {
+ type Archived;
+}
+
+impl<T> Archive for Option<T> {
+ type Archived = ();
+}
+pub type Archived<T> = <T as Archive>::Archived;
+
+pub trait Deserialize<D> {}
+
+const ARRAY_SIZE: usize = 32;
+impl<__D> Deserialize<__D> for ()
+where
+ Option<[u8; ARRAY_SIZE]>: Archive,
+ Archived<Option<[u8; ARRAY_SIZE]>>: Deserialize<__D>,
+{
+}
+fn main() {}
--- /dev/null
+// check-pass
+pub trait Archive {
+ type Archived;
+}
+
+impl<T> Archive for Option<T> {
+ type Archived = ();
+}
+pub type Archived<T> = <T as Archive>::Archived;
+
+pub trait Deserialize<D> {}
+
+const ARRAY_SIZE: usize = 32;
+impl<__D> Deserialize<__D> for ()
+where
+ Option<[u8; ARRAY_SIZE]>: Archive,
+ Option<[u8; ARRAY_SIZE]>: Archive,
+ Archived<Option<[u8; ARRAY_SIZE]>>: Deserialize<__D>,
+{
+}
+fn main() {}
--- /dev/null
+fn main() {
+ 0u8.as_ref(); //~ ERROR no method named `as_ref` found for type `u8` in the current scope
+}
--- /dev/null
+error[E0599]: no method named `as_ref` found for type `u8` in the current scope
+ --> $DIR/issue-89806.rs:2:9
+ |
+LL | 0u8.as_ref();
+ | ^^^^^^ method not found in `u8`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0599`.
--- /dev/null
+fn copy<R: Unpin, W>(_: R, _: W) {}
+
+fn f<T>(r: T) {
+ let w = ();
+ copy(r, w);
+ //~^ ERROR [E0277]
+}
+
+fn main() {}
--- /dev/null
+error[E0277]: `T` cannot be unpinned
+ --> $DIR/issue-90164.rs:5:10
+ |
+LL | copy(r, w);
+ | ---- ^ the trait `Unpin` is not implemented for `T`
+ | |
+ | required by a bound introduced by this call
+ |
+ = note: consider using `Box::pin`
+note: required by a bound in `copy`
+ --> $DIR/issue-90164.rs:1:12
+ |
+LL | fn copy<R: Unpin, W>(_: R, _: W) {}
+ | ^^^^^ required by this bound in `copy`
+help: consider restricting type parameter `T`
+ |
+LL | fn f<T: std::marker::Unpin>(r: T) {
+ | ++++++++++++++++++++
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0277`.
("core/slice/trait.SliceIndex.html", &["begin</code>, <code>end"]),
("alloc/slice/trait.SliceIndex.html", &["begin</code>, <code>end"]),
("std/slice/trait.SliceIndex.html", &["begin</code>, <code>end"]),
+ ("core/primitive.str.html", &["begin</code>, <code>end"]),
+ ("std/primitive.str.html", &["begin</code>, <code>end"]),
];
-Subproject commit fa91a89193d26e6a86e2eee1cbaa38cb28ccebe1
+Subproject commit 9c18177cd36fe07a3c251234240a9c77a4e66785