--- /dev/null
+---
+name: Library Tracking Issue
+about: A tracking issue for an unstable library feature.
+title: Tracking Issue for XXX
+labels: C-tracking-issue T-libs
+---
+<!--
+Thank you for creating a tracking issue!
+
+Tracking issues are for tracking a feature from implementation to stabilization.
+
+Make sure to include the relevant RFC for the feature if it has one.
+
+If the new feature is small, it may be fine to skip the RFC process. In that
+case, you can use `issue = "none"` in your initial implementation PR. The
+reviewer will ask you to open a tracking issue if they agree your feature can be
+added without an RFC.
+-->
+
+Feature gate: `#![feature(...)]`
+
+This is a tracking issue for ...
+
+<!--
+Include a short description of the feature.
+-->
+
+### Public API
+
+<!--
+For most library features, it'd be useful to include a summarized version of the public API.
+(E.g. just the public function signatures without their doc comments or implementation.)
+-->
+
+```rust
+...
+```
+
+### Steps / History
+
+<!--
+In the simplest case, this is a PR implementing the feature followed by a PR
+that stabilises the feature. However it's not uncommon for the feature to be
+changed before stabilization. For larger features, the implementation could be
+split up in multiple steps.
+-->
+
+- [ ] Implementation: ...
+- [ ] Stabilization PR
+
+### Unresolved Questions
+
+<!--
+Include any open questions that need to be answered before the feature can be
+stabilised. If multiple (unrelated) big questions come up, it can be a good idea
+to open a separate issue for each, to make it easier to keep track of the
+discussions.
+
+It's useful to link any relevant discussions and conclusions (whether on GitHub,
+Zulip, or the internals forum) here.
+-->
+
+- None yet.
[[package]]
name = "coverage_test_macros"
version = "0.0.0"
-dependencies = [
- "proc-macro2",
-]
[[package]]
name = "cpuid-bool"
"regex",
]
+[[package]]
+name = "gsgdt"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a0d876ce7262df96262a2a19531da6ff9a86048224d49580a585fc5c04617825"
+dependencies = [
+ "serde",
+]
+
[[package]]
name = "handlebars"
version = "3.4.0"
"rustc_session",
"rustc_span",
"rustc_target",
+ "rustc_type_ir",
"smallvec 1.4.2",
"tracing",
]
dependencies = [
"coverage_test_macros",
"either",
+ "gsgdt",
"itertools 0.9.0",
"polonius-engine",
"regex",
"tracing",
]
+[[package]]
+name = "rustc_type_ir"
+version = "0.0.0"
+dependencies = [
+ "bitflags",
+ "rustc_data_structures",
+ "rustc_index",
+ "rustc_serialize",
+]
+
[[package]]
name = "rustc_typeck"
version = "0.0.0"
}
fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> {
+ let pat = self.lower_pat(&arm.pat);
+ let guard = arm.guard.as_ref().map(|cond| {
+ if let ExprKind::Let(ref pat, ref scrutinee) = cond.kind {
+ hir::Guard::IfLet(self.lower_pat(pat), self.lower_expr(scrutinee))
+ } else {
+ hir::Guard::If(self.lower_expr(cond))
+ }
+ });
hir::Arm {
hir_id: self.next_id(),
attrs: self.lower_attrs(&arm.attrs),
- pat: self.lower_pat(&arm.pat),
- guard: match arm.guard {
- Some(ref x) => Some(hir::Guard::If(self.lower_expr(x))),
- _ => None,
- },
+ pat,
+ guard,
body: self.lower_expr(&arm.body),
span: arm.span,
}
if let Defaultness::Default(def_span) = defaultness {
let span = self.session.source_map().guess_head_span(span);
self.err_handler()
- .struct_span_err(span, "`default` is only allowed on items in `impl` definitions")
+ .struct_span_err(span, "`default` is only allowed on items in trait impls")
.span_label(def_span, "`default` because of this")
.emit();
}
}
}
-#[derive(Encodable, Decodable, Clone, HashStable_Generic)]
+#[derive(Debug, Encodable, Decodable, Clone, HashStable_Generic)]
pub struct Deprecation {
pub since: Option<Symbol>,
/// The note to issue a reason.
let mut ty_params = params
.iter()
- .filter_map(|param| match param.kind {
- ast::GenericParamKind::Type { .. } => Some(param),
- _ => None,
- })
+ .filter(|param| matches!(param.kind, ast::GenericParamKind::Type{..}))
.peekable();
if ty_params.peek().is_some() {
name,
kind,
object: Some(tmp_file),
+ dwarf_object: None,
bytecode: None,
},
work_product,
name: cgu.name().to_string(),
kind: ModuleKind::Regular,
object,
+ dwarf_object: None,
bytecode: None,
}
}
name: metadata_cgu_name,
kind: ModuleKind::Metadata,
object: Some(tmp_file),
+ dwarf_object: None,
bytecode: None,
})
} else {
use crate::{LlvmCodegenBackend, ModuleLlvm};
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
use rustc_codegen_ssa::back::symbol_export;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use rustc_codegen_ssa::back::write::{
+ CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig,
+};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
use rustc_data_structures::fx::FxHashMap;
cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
let diag_handler = cgcx.create_diag_handler();
- let tm = (cgcx.tm_factory.0)().map_err(|e| write::llvm_err(&diag_handler, &e))?;
+
+ let module_name = &thin_module.shared.module_names[thin_module.idx];
+ let split_dwarf_file = cgcx
+ .output_filenames
+ .split_dwarf_filename(cgcx.split_dwarf_kind, Some(module_name.to_str().unwrap()));
+ let tm_factory_config = TargetMachineFactoryConfig { split_dwarf_file };
+ let tm =
+ (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
// Right now the implementation we've got only works over serialized
// modules, so we create a fresh new LLVM context and parse the module
// crates but for locally codegened modules we may be able to reuse
// that LLVM Context and Module.
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
- let llmod_raw = parse_module(
- llcx,
- &thin_module.shared.module_names[thin_module.idx],
- thin_module.data(),
- &diag_handler,
- )? as *const _;
+ let llmod_raw =
+ parse_module(llcx, &module_name, thin_module.data(), &diag_handler)? as *const _;
let module = ModuleCodegen {
module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
name: thin_module.name().to_string(),
use crate::type_::Type;
use crate::LlvmCodegenBackend;
use crate::ModuleLlvm;
-use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::back::write::{
+ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
+ TargetMachineFactoryFn,
+};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{self, Lto, OutputType, Passes, SanitizerSet, SwitchWithOptPath};
+use rustc_session::config::{
+ self, Lto, OutputType, Passes, SanitizerSet, SplitDwarfKind, SwitchWithOptPath,
+};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::InnerSpan;
pm: &llvm::PassManager<'ll>,
m: &'ll llvm::Module,
output: &Path,
+ dwo_output: Option<&Path>,
file_type: llvm::FileType,
) -> Result<(), FatalError> {
unsafe {
let output_c = path_to_c_string(output);
- let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type);
+ let result = if let Some(dwo_output) = dwo_output {
+ let dwo_output_c = path_to_c_string(dwo_output);
+ llvm::LLVMRustWriteOutputFile(
+ target,
+ pm,
+ m,
+ output_c.as_ptr(),
+ dwo_output_c.as_ptr(),
+ file_type,
+ )
+ } else {
+ llvm::LLVMRustWriteOutputFile(
+ target,
+ pm,
+ m,
+ output_c.as_ptr(),
+ std::ptr::null(),
+ file_type,
+ )
+ };
result.into_result().map_err(|()| {
let msg = format!("could not write output to {}", output.display());
llvm_err(handler, &msg)
}
pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
- target_machine_factory(sess, config::OptLevel::No)()
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+ target_machine_factory(sess, config::OptLevel::No)(config)
.unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
}
-pub fn create_target_machine(tcx: TyCtxt<'_>) -> &'static mut llvm::TargetMachine {
- target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE))()
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+ let split_dwarf_file = tcx
+ .output_filenames(LOCAL_CRATE)
+ .split_dwarf_filename(tcx.sess.opts.debugging_opts.split_dwarf, Some(mod_name));
+ let config = TargetMachineFactoryConfig { split_dwarf_file };
+ target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE))(config)
.unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
}
pub fn target_machine_factory(
sess: &Session,
optlvl: config::OptLevel,
-) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
+) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
let reloc_model = to_llvm_relocation_model(sess.relocation_model());
let (opt_level, _) = to_llvm_opt_settings(optlvl);
let use_init_array =
!sess.opts.debugging_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
- Arc::new(move || {
+ Arc::new(move |config: TargetMachineFactoryConfig| {
+ let split_dwarf_file = config.split_dwarf_file.unwrap_or_default();
+ let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
+
let tm = unsafe {
llvm::LLVMRustCreateTargetMachine(
triple.as_ptr(),
emit_stack_size_section,
relax_elf_relocations,
use_init_array,
+ split_dwarf_file.as_ptr(),
)
};
llmod
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
- write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &path,
+ None,
+ llvm::FileType::AssemblyFile,
+ )
})?;
}
let _timer = cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
+
+ let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
+ let dwo_out = match cgcx.split_dwarf_kind {
+ // Don't change how DWARF is emitted in single mode (or when disabled).
+ SplitDwarfKind::None | SplitDwarfKind::Single => None,
+ // Emit (a subset of the) DWARF into a separate file in split mode.
+ SplitDwarfKind::Split => Some(dwo_out.as_path()),
+ };
+
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(
diag_handler,
cpm,
llmod,
&obj_out,
+ dwo_out,
llvm::FileType::ObjectFile,
)
})?;
Ok(module.into_compiled_module(
config.emit_obj != EmitObj::None,
+ cgcx.split_dwarf_kind == SplitDwarfKind::Split,
config.emit_bc,
&cgcx.output_filenames,
))
let producer = format!("clang LLVM ({})", rustc_producer);
let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
- let work_dir = tcx.sess.working_dir.0.to_string_lossy();
let flags = "\0";
- let split_name = "";
+
+ let out_dir = &tcx.output_filenames(LOCAL_CRATE).out_directory;
+ let split_name = tcx
+ .output_filenames(LOCAL_CRATE)
+ .split_dwarf_filename(tcx.sess.opts.debugging_opts.split_dwarf, Some(codegen_unit_name))
+ .unwrap_or_default();
+ let out_dir = out_dir.to_str().unwrap();
+ let split_name = split_name.to_str().unwrap();
// FIXME(#60020):
//
debug_context.builder,
name_in_debuginfo.as_ptr().cast(),
name_in_debuginfo.len(),
- work_dir.as_ptr().cast(),
- work_dir.len(),
+ out_dir.as_ptr().cast(),
+ out_dir.len(),
llvm::ChecksumKind::None,
ptr::null(),
0,
split_name.as_ptr().cast(),
split_name.len(),
kind,
+ 0,
+ tcx.sess.opts.debugging_opts.split_dwarf_inlining,
);
if tcx.sess.opts.debugging_opts.profile {
let state_arg = mir::Local::new(1);
for var in &body.var_debug_info {
- if var.place.local != state_arg {
+ let place = if let mir::VarDebugInfoContents::Place(p) = var.value { p } else { continue };
+ if place.local != state_arg {
continue;
}
- match var.place.projection[..] {
+ match place.projection[..] {
[
// Deref of the `Pin<&mut Self>` state argument.
mir::ProjectionElem::Field(..),
));
}
- if name_str.starts_with("simd_shuffle") {
- let n: u64 = name_str["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
+ if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
+ let n: u64 = stripped.parse().unwrap_or_else(|_| {
span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
});
pub use llvm_util::target_features;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use rustc_codegen_ssa::back::write::{
+ CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
use std::any::Any;
use std::ffi::CStr;
-use std::sync::Arc;
mod back {
pub mod archive;
&self,
sess: &Session,
optlvl: OptLevel,
- ) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
+ ) -> TargetMachineFactoryFn<Self> {
back::write::target_machine_factory(sess, optlvl)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
- ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx) }
+ ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
}
}
unsafe {
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?;
- let tm = match (cgcx.tm_factory.0)() {
+
+ let split_dwarf_file = cgcx
+ .output_filenames
+ .split_dwarf_filename(cgcx.split_dwarf_kind, Some(name.to_str().unwrap()));
+ let tm_factory_config = TargetMachineFactoryConfig { split_dwarf_file };
+
+ let tm = match (cgcx.tm_factory)(tm_factory_config) {
Ok(m) => m,
Err(e) => {
handler.struct_err(&e).emit();
SplitName: *const c_char,
SplitNameLen: size_t,
kind: DebugEmissionKind,
+ DWOId: u64,
+ SplitDebugInlining: bool,
) -> &'a DIDescriptor;
pub fn LLVMRustDIBuilderCreateFile(
EmitStackSizeSection: bool,
RelaxELFRelocations: bool,
UseInitArray: bool,
+ SplitDwarfFile: *const c_char,
) -> Option<&'static mut TargetMachine>;
pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
pub fn LLVMRustAddBuilderLibraryInfo(
PM: &PassManager<'a>,
M: &'a Module,
Output: *const c_char,
+ DwoOutput: *const c_char,
FileType: FileType,
) -> LLVMRustResult;
pub fn LLVMRustOptimizeWithNewPassManager(
use super::command::Command;
use super::linker::{self, Linker};
use super::rpath::{self, RPathConfig};
-use crate::{looks_like_rust_object_file, CodegenResults, CrateInfo, METADATA_FILENAME};
+use crate::{
+ looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, METADATA_FILENAME,
+};
use cc::windows_registry;
use tempfile::Builder as TempFileBuilder;
path.as_ref(),
target_cpu,
);
+ if sess.opts.debugging_opts.split_dwarf == config::SplitDwarfKind::Split {
+ link_dwarf_object(sess, &out_filename);
+ }
}
}
if sess.opts.json_artifact_notifications {
// Remove the temporary object file and metadata if we aren't saving temps
sess.time("link_binary_remove_temps", || {
if !sess.opts.cg.save_temps {
+ let remove_temps_from_module = |module: &CompiledModule| {
+ if let Some(ref obj) = module.object {
+ remove(sess, obj);
+ }
+
+ if let Some(ref obj) = module.dwarf_object {
+ remove(sess, obj);
+ }
+ };
+
if sess.opts.output_types.should_codegen()
&& !preserve_objects_for_their_debuginfo(sess)
{
- for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
- remove(sess, obj);
+ for module in &codegen_results.modules {
+ remove_temps_from_module(module);
}
}
+
if let Some(ref metadata_module) = codegen_results.metadata_module {
- if let Some(ref obj) = metadata_module.object {
- remove(sess, obj);
- }
+ remove_temps_from_module(metadata_module);
}
+
if let Some(ref allocator_module) = codegen_results.allocator_module {
- if let Some(ref obj) = allocator_module.object {
- remove(sess, obj);
- }
+ remove_temps_from_module(allocator_module);
}
}
});
out_filename
}
-// Create an 'rlib'
-//
-// An rlib in its current incarnation is essentially a renamed .a file. The
-// rlib primarily contains the object file of the crate, but it also contains
-// all of the object files from native libraries. This is done by unzipping
-// native libraries and inserting all of the contents into this archive.
+/// Create an 'rlib'.
+///
+/// An rlib in its current incarnation is essentially a renamed .a file. The rlib primarily contains
+/// the object file of the crate, but it also contains all of the object files from native
+/// libraries. This is done by unzipping native libraries and inserting all of the contents into
+/// this archive.
fn link_rlib<'a, B: ArchiveBuilder<'a>>(
sess: &'a Session,
codegen_results: &CodegenResults,
ab
}
-// Create a static archive
-//
-// This is essentially the same thing as an rlib, but it also involves adding
-// all of the upstream crates' objects into the archive. This will slurp in
-// all of the native libraries of upstream dependencies as well.
-//
-// Additionally, there's no way for us to link dynamic libraries, so we warn
-// about all dynamic library dependencies that they're not linked in.
-//
-// There's no need to include metadata in a static archive, so ensure to not
-// link in the metadata object file (and also don't prepare the archive with a
-// metadata file).
+/// Create a static archive.
+///
+/// This is essentially the same thing as an rlib, but it also involves adding all of the upstream
+/// crates' objects into the archive. This will slurp in all of the native libraries of upstream
+/// dependencies as well.
+///
+/// Additionally, there's no way for us to link dynamic libraries, so we warn about all dynamic
+/// library dependencies that they're not linked in.
+///
+/// There's no need to include metadata in a static archive, so ensure to not link in the metadata
+/// object file (and also don't prepare the archive with a metadata file).
fn link_staticlib<'a, B: ArchiveBuilder<'a>>(
sess: &'a Session,
codegen_results: &CodegenResults,
}
}
-// Create a dynamic library or executable
-//
-// This will invoke the system linker/cc to create the resulting file. This
-// links to all upstream files as well.
+fn escape_stdout_stderr_string(s: &[u8]) -> String {
+ str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+ let mut x = "Non-UTF-8 output: ".to_string();
+ x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+ x
+ })
+}
+
+const LLVM_DWP_EXECUTABLE: &'static str = "rust-llvm-dwp";
+
+/// Invoke `llvm-dwp` (shipped alongside rustc) to link `dwo` files from Split DWARF into a `dwp`
+/// file.
+fn link_dwarf_object<'a>(sess: &'a Session, executable_out_filename: &Path) {
+ info!("preparing dwp to {}.dwp", executable_out_filename.to_str().unwrap());
+
+ let dwp_out_filename = executable_out_filename.with_extension("dwp");
+ let mut cmd = Command::new(LLVM_DWP_EXECUTABLE);
+ cmd.arg("-e");
+ cmd.arg(executable_out_filename);
+ cmd.arg("-o");
+ cmd.arg(&dwp_out_filename);
+
+ let mut new_path = sess.host_filesearch(PathKind::All).get_tools_search_paths(false);
+ if let Some(path) = env::var_os("PATH") {
+ new_path.extend(env::split_paths(&path));
+ }
+ let new_path = env::join_paths(new_path).unwrap();
+ cmd.env("PATH", new_path);
+
+ info!("{:?}", &cmd);
+ match sess.time("run_dwp", || cmd.output()) {
+ Ok(prog) if !prog.status.success() => {
+ sess.struct_err(&format!(
+ "linking dwarf objects with `{}` failed: {}",
+ LLVM_DWP_EXECUTABLE, prog.status
+ ))
+ .note(&format!("{:?}", &cmd))
+ .note(&escape_stdout_stderr_string(&prog.stdout))
+ .note(&escape_stdout_stderr_string(&prog.stderr))
+ .emit();
+ info!("linker stderr:\n{}", escape_stdout_stderr_string(&prog.stderr));
+ info!("linker stdout:\n{}", escape_stdout_stderr_string(&prog.stdout));
+ }
+ Ok(_) => {}
+ Err(e) => {
+ let dwp_not_found = e.kind() == io::ErrorKind::NotFound;
+ let mut err = if dwp_not_found {
+ sess.struct_err(&format!("linker `{}` not found", LLVM_DWP_EXECUTABLE))
+ } else {
+ sess.struct_err(&format!("could not exec the linker `{}`", LLVM_DWP_EXECUTABLE))
+ };
+
+ err.note(&e.to_string());
+
+ if !dwp_not_found {
+ err.note(&format!("{:?}", &cmd));
+ }
+
+ err.emit();
+ }
+ }
+}
+
+/// Create a dynamic library or executable.
+///
+/// This will invoke the system linker/cc to create the resulting file. This links to all upstream
+/// files as well.
fn link_natively<'a, B: ArchiveBuilder<'a>>(
sess: &'a Session,
crate_type: CrateType,
prog.status
))
.note(&format!("{:?}", &cmd))
- .note(&escape_string(&output))
+ .note(&escape_stdout_stderr_string(&output))
.emit();
// If MSVC's `link.exe` was expected but the return code
sess.abort_if_errors();
}
- info!("linker stderr:\n{}", escape_string(&prog.stderr));
- info!("linker stdout:\n{}", escape_string(&prog.stdout));
+ info!("linker stderr:\n{}", escape_stdout_stderr_string(&prog.stderr));
+ info!("linker stdout:\n{}", escape_stdout_stderr_string(&prog.stdout));
}
Err(e) => {
let linker_not_found = e.kind() == io::ErrorKind::NotFound;
return false;
}
+ // Single mode keeps debuginfo in the same object file, but in such a way that it it skipped
+ // by the linker - so it's expected that when codegen units are linked together that this
+ // debuginfo would be lost without keeping around the temps.
+ if sess.opts.debugging_opts.split_dwarf == config::SplitDwarfKind::Single {
+ return true;
+ }
+
// If we're on OSX then the equivalent of split dwarf is turned on by
// default. The final executable won't actually have any debug information
// except it'll have pointers to elsewhere. Historically we've always run
cmd.take_cmd()
}
-// # Native library linking
-//
-// User-supplied library search paths (-L on the command line). These are
-// the same paths used to find Rust crates, so some of them may have been
-// added already by the previous crate linking code. This only allows them
-// to be found at compile time so it is still entirely up to outside
-// forces to make sure that library can be found at runtime.
-//
-// Also note that the native libraries linked here are only the ones located
-// in the current crate. Upstream crates with native library dependencies
-// may have their native library pulled in above.
+/// # Native library linking
+///
+/// User-supplied library search paths (-L on the command line). These are the same paths used to
+/// find Rust crates, so some of them may have been added already by the previous crate linking
+/// code. This only allows them to be found at compile time so it is still entirely up to outside
+/// forces to make sure that library can be found at runtime.
+///
+/// Also note that the native libraries linked here are only the ones located in the current crate.
+/// Upstream crates with native library dependencies may have their native library pulled in above.
fn add_local_native_libraries(
cmd: &mut dyn Linker,
sess: &Session,
}
}
-// # Rust Crate linking
-//
-// Rust crates are not considered at all when creating an rlib output. All
-// dependencies will be linked when producing the final output (instead of
-// the intermediate rlib version)
+/// # Rust Crate linking
+///
+/// Rust crates are not considered at all when creating an rlib output. All dependencies will be
+/// linked when producing the final output (instead of the intermediate rlib version).
fn add_upstream_rust_crates<'a, B: ArchiveBuilder<'a>>(
cmd: &mut dyn Linker,
sess: &'a Session,
}
}
-// Link in all of our upstream crates' native dependencies. Remember that
-// all of these upstream native dependencies are all non-static
-// dependencies. We've got two cases then:
-//
-// 1. The upstream crate is an rlib. In this case we *must* link in the
-// native dependency because the rlib is just an archive.
-//
-// 2. The upstream crate is a dylib. In order to use the dylib, we have to
-// have the dependency present on the system somewhere. Thus, we don't
-// gain a whole lot from not linking in the dynamic dependency to this
-// crate as well.
-//
-// The use case for this is a little subtle. In theory the native
-// dependencies of a crate are purely an implementation detail of the crate
-// itself, but the problem arises with generic and inlined functions. If a
-// generic function calls a native function, then the generic function must
-// be instantiated in the target crate, meaning that the native symbol must
-// also be resolved in the target crate.
+/// Link in all of our upstream crates' native dependencies. Remember that all of these upstream
+/// native dependencies are all non-static dependencies. We've got two cases then:
+///
+/// 1. The upstream crate is an rlib. In this case we *must* link in the native dependency because
+/// the rlib is just an archive.
+///
+/// 2. The upstream crate is a dylib. In order to use the dylib, we have to have the dependency
+/// present on the system somewhere. Thus, we don't gain a whole lot from not linking in the
+/// dynamic dependency to this crate as well.
+///
+/// The use case for this is a little subtle. In theory the native dependencies of a crate are
+/// purely an implementation detail of the crate itself, but the problem arises with generic and
+/// inlined functions. If a generic function calls a native function, then the generic function
+/// must be instantiated in the target crate, meaning that the native symbol must also be resolved
+/// in the target crate.
fn add_upstream_native_libraries(
cmd: &mut dyn Linker,
sess: &Session,
}
}
-// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`.
-pub struct TargetMachineFactory<B: WriteBackendMethods>(
- pub Arc<dyn Fn() -> Result<B::TargetMachine, String> + Send + Sync>,
-);
-
-impl<B: WriteBackendMethods> Clone for TargetMachineFactory<B> {
- fn clone(&self) -> Self {
- TargetMachineFactory(self.0.clone())
- }
+/// Configuration passed to the function returned by the `target_machine_factory`.
+pub struct TargetMachineFactoryConfig {
+ /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
+ /// so the path to the dwarf object has to be provided when we create the target machine.
+ /// This can be ignored by backends which do not need it for their Split DWARF support.
+ pub split_dwarf_file: Option<PathBuf>,
}
+pub type TargetMachineFactoryFn<B> = Arc<
+ dyn Fn(TargetMachineFactoryConfig) -> Result<<B as WriteBackendMethods>::TargetMachine, String>
+ + Send
+ + Sync,
+>;
+
pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportLevel)>>>;
/// Additional resources used by optimize_and_codegen (not module specific)
pub regular_module_config: Arc<ModuleConfig>,
pub metadata_module_config: Arc<ModuleConfig>,
pub allocator_module_config: Arc<ModuleConfig>,
- pub tm_factory: TargetMachineFactory<B>,
+ pub tm_factory: TargetMachineFactoryFn<B>,
pub msvc_imps_needed: bool,
pub is_pe_coff: bool,
pub target_pointer_width: u32,
pub target_arch: String,
pub debuginfo: config::DebugInfo,
+ pub split_dwarf_kind: config::SplitDwarfKind,
// Number of cgus excluding the allocator/metadata modules
pub total_cgus: usize,
}
}
+ if let Some(ref path) = module.dwarf_object {
+ if !keep_numbered_objects {
+ remove(sess, path);
+ }
+ }
+
if let Some(ref path) = module.bytecode {
if !keep_numbered_bitcode {
remove(sess, path);
name: module.name,
kind: ModuleKind::Regular,
object,
+ dwarf_object: None,
bytecode: None,
}))
}
regular_module_config: regular_config,
metadata_module_config: metadata_config,
allocator_module_config: allocator_config,
- tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)),
+ tm_factory: backend.target_machine_factory(tcx.sess, ol),
total_cgus,
msvc_imps_needed: msvc_imps_needed(tcx),
is_pe_coff: tcx.sess.target.is_like_windows,
target_pointer_width: tcx.sess.target.pointer_width,
target_arch: tcx.sess.target.arch.clone(),
debuginfo: tcx.sess.opts.debuginfo,
+ split_dwarf_kind: tcx.sess.opts.debugging_opts.split_dwarf,
};
// This is the "main loop" of parallel work happening for parallel codegen.
pub fn into_compiled_module(
self,
emit_obj: bool,
+ emit_dwarf_obj: bool,
emit_bc: bool,
outputs: &OutputFilenames,
) -> CompiledModule {
let object = emit_obj.then(|| outputs.temp_path(OutputType::Object, Some(&self.name)));
+ let dwarf_object = emit_dwarf_obj.then(|| outputs.temp_path_dwo(Some(&self.name)));
let bytecode = emit_bc.then(|| outputs.temp_path(OutputType::Bitcode, Some(&self.name)));
- CompiledModule { name: self.name.clone(), kind: self.kind, object, bytecode }
+ CompiledModule { name: self.name.clone(), kind: self.kind, object, dwarf_object, bytecode }
}
}
pub name: String,
pub kind: ModuleKind,
pub object: Option<PathBuf>,
+ pub dwarf_object: Option<PathBuf>,
pub bytecode: Option<PathBuf>,
}
dst: PlaceRef<'tcx, Bx::Value>,
) {
let src = self.codegen_operand(bx, src);
+
+ // Special-case transmutes between scalars as simple bitcasts.
+ match (&src.layout.abi, &dst.layout.abi) {
+ (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
+ // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
+ if (src_scalar.value == abi::Pointer) == (dst_scalar.value == abi::Pointer) {
+ assert_eq!(src.layout.size, dst.layout.size);
+
+ // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
+ // conversions allow handling `bool`s the same as `u8`s.
+ let src = bx.from_immediate(src.immediate());
+ let src_as_dst = bx.bitcast(src, bx.backend_type(dst.layout));
+ Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
+ return;
+ }
+ }
+ _ => {}
+ }
+
let llty = bx.backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
let align = src.layout.align.abi.min(dst.align);
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn eval_mir_constant_to_operand(
- &mut self,
+ &self,
bx: &mut Bx,
constant: &mir::Constant<'tcx>,
) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
}
pub fn eval_mir_constant(
- &mut self,
+ &self,
constant: &mir::Constant<'tcx>,
) -> Result<ConstValue<'tcx>, ErrorHandled> {
match self.monomorphize(constant.literal).val {
use rustc_span::{BytePos, Span};
use rustc_target::abi::{LayoutOf, Size};
-use super::operand::OperandValue;
+use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
span
}
+ fn spill_operand_to_stack(
+ operand: &OperandRef<'tcx, Bx::Value>,
+ name: Option<String>,
+ bx: &mut Bx,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ // "Spill" the value onto the stack, for debuginfo,
+ // without forcing non-debuginfo uses of the local
+ // to also load from the stack every single time.
+ // FIXME(#68817) use `llvm.dbg.value` instead,
+ // at least for the cases which LLVM handles correctly.
+ let spill_slot = PlaceRef::alloca(bx, operand.layout);
+ if let Some(name) = name {
+ bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+ }
+ operand.val.store(bx, spill_slot);
+ spill_slot
+ }
+
/// Apply debuginfo and/or name, after creating the `alloca` for a local,
/// or initializing the local with an operand (whichever applies).
pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
return;
}
- // "Spill" the value onto the stack, for debuginfo,
- // without forcing non-debuginfo uses of the local
- // to also load from the stack every single time.
- // FIXME(#68817) use `llvm.dbg.value` instead,
- // at least for the cases which LLVM handles correctly.
- let spill_slot = PlaceRef::alloca(bx, operand.layout);
- if let Some(name) = name {
- bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
- }
- operand.val.store(bx, spill_slot);
- spill_slot
+ Self::spill_operand_to_stack(operand, name, bx)
}
LocalRef::Place(place) => *place,
/// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
pub fn compute_per_local_var_debug_info(
&self,
+ bx: &mut Bx,
) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
} else {
None
};
+
let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
- let place = var.place;
- let var_ty = self.monomorphized_place_ty(place.as_ref());
- let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
- && place.projection.is_empty()
- && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
- {
- let arg_index = place.local.index() - 1;
-
- // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
- // offset in closures to account for the hidden environment?
- // Also, is this `+ 1` needed at all?
- VariableKind::ArgumentVariable(arg_index + 1)
- } else {
- VariableKind::LocalVariable
+ let (var_ty, var_kind) = match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ let var_ty = self.monomorphized_place_ty(place.as_ref());
+ let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+ && place.projection.is_empty()
+ && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+ {
+ let arg_index = place.local.index() - 1;
+
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ // Also, is this `+ 1` needed at all?
+ VariableKind::ArgumentVariable(arg_index + 1)
+ } else {
+ VariableKind::LocalVariable
+ };
+ (var_ty, var_kind)
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ let ty = self.monomorphize(c.literal.ty);
+ (ty, VariableKind::LocalVariable)
+ }
};
+
self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
});
- per_local[var.place.local].push(PerLocalVarDebugInfo {
- name: var.name,
- source_info: var.source_info,
- dbg_var,
- projection: var.place.projection,
- });
+ match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ per_local[place.local].push(PerLocalVarDebugInfo {
+ name: var.name,
+ source_info: var.source_info,
+ dbg_var,
+ projection: place.projection,
+ });
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ if let Some(dbg_var) = dbg_var {
+ let dbg_loc = match self.dbg_loc(var.source_info) {
+ Some(dbg_loc) => dbg_loc,
+ None => continue,
+ };
+
+ if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
+ let base = Self::spill_operand_to_stack(
+ &operand,
+ Some(var.name.to_string()),
+ bx,
+ );
+
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]);
+ }
+ }
+ }
+ }
}
Some(per_local)
}
return;
}
- sym::unreachable => {
- return;
- }
sym::va_start => bx.va_start(args[0].immediate()),
sym::va_end => bx.va_end(args[0].immediate()),
sym::size_of_val => {
bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
}
}
- sym::size_of
- | sym::pref_align_of
+ sym::pref_align_of
| sym::min_align_of
| sym::needs_drop
| sym::type_id
.unwrap();
OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
}
- // Effectively no-op
- sym::forget => {
- return;
- }
sym::offset => {
let ptr = args[0].immediate();
let offset = args[1].immediate();
sym::add_with_overflow
| sym::sub_with_overflow
| sym::mul_with_overflow
- | sym::wrapping_add
- | sym::wrapping_sub
- | sym::wrapping_mul
| sym::unchecked_div
| sym::unchecked_rem
| sym::unchecked_shl
return;
}
- sym::wrapping_add => bx.add(args[0].immediate(), args[1].immediate()),
- sym::wrapping_sub => bx.sub(args[0].immediate(), args[1].immediate()),
- sym::wrapping_mul => bx.mul(args[0].immediate(), args[1].immediate()),
sym::exact_div => {
if signed {
bx.exactsdiv(args[0].immediate(), args[1].immediate())
caller_location: None,
};
- fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info();
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
for const_ in &mir.required_consts {
if let Err(err) = fx.eval_mir_constant(const_) {
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(self.mir, bx.tcx());
+ let discr_ty = self.monomorphize(discr_ty);
let discr = self
.codegen_place(&mut bx, place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
use super::write::WriteBackendMethods;
use super::CodegenObject;
+use crate::back::write::TargetMachineFactoryFn;
use crate::{CodegenResults, ModuleCodegen};
use rustc_ast::expand::allocator::AllocatorKind;
pub use rustc_data_structures::sync::MetadataRef;
use std::any::Any;
-use std::sync::Arc;
pub trait BackendTypes {
type Value: CodegenObject;
&self,
sess: &Session,
opt_level: config::OptLevel,
- ) -> Arc<dyn Fn() -> Result<Self::TargetMachine, String> + Send + Sync>;
+ ) -> TargetMachineFactoryFn<Self>;
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
}
E0207: include_str!("./error_codes/E0207.md"),
E0210: include_str!("./error_codes/E0210.md"),
E0211: include_str!("./error_codes/E0211.md"),
+E0212: include_str!("./error_codes/E0212.md"),
E0214: include_str!("./error_codes/E0214.md"),
E0220: include_str!("./error_codes/E0220.md"),
E0221: include_str!("./error_codes/E0221.md"),
// E0196, // cannot determine a type for this closure
E0208,
// E0209, // builtin traits can only be implemented on structs or enums
- E0212, // cannot extract an associated type from a higher-ranked trait bound
// E0213, // associated types are not accepted in this context
// E0215, // angle-bracket notation is not stable with `Fn`
// E0216, // parenthetical notation is only stable with `Fn`
--- /dev/null
+Cannot use the associated type of
+a trait with uninferred generic parameters.
+
+Erroneous code example:
+
+```compile_fail,E0212
+pub trait Foo<T> {
+ type A;
+
+ fn get(&self, t: T) -> Self::A;
+}
+
+fn foo2<I : for<'x> Foo<&'x isize>>(
+ field: I::A) {} // error!
+```
+
+In this example, we have to instantiate `'x`, and
+we don't know what lifetime to instantiate it with.
+To fix this, spell out the precise lifetimes involved.
+Example:
+
+```
+pub trait Foo<T> {
+ type A;
+
+ fn get(&self, t: T) -> Self::A;
+}
+
+fn foo3<I : for<'x> Foo<&'x isize>>(
+ x: <I as Foo<&isize>>::A) {} // ok!
+
+
+fn foo4<'a, I : for<'x> Foo<&'x isize>>(
+ x: <I as Foo<&'a isize>>::A) {} // ok!
+```
mod tests;
#[cfg(test)]
mod parse {
- #[cfg(test)]
mod tests;
}
#[cfg(test)]
mod tokenstream {
- #[cfg(test)]
mod tests;
}
#[cfg(test)]
mod mut_visit {
- #[cfg(test)]
mod tests;
}
-//! This is an NFA-based parser, which calls out to the main rust parser for named non-terminals
+//! This is an NFA-based parser, which calls out to the main Rust parser for named non-terminals
//! (which it commits to fully when it hits one in a grammar). There's a set of current NFA threads
//! and a set of next ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional use of NFA or Earley parsing, but it's an easier
///
/// # Parameters
///
-/// - `sess`: the parsing session into which errors are emitted.
/// - `cur_items`: the set of current items to be processed. This should be empty by the end of a
/// successful execution of this function.
/// - `next_items`: the set of newly generated items. These are used to replenish `cur_items` in
/// - `eof_items`: the set of items that would be valid if this was the EOF.
/// - `bb_items`: the set of items that are waiting for the black-box parser.
/// - `token`: the current token of the parser.
-/// - `span`: the `Span` in the source code corresponding to the token trees we are trying to match
-/// against the matcher positions in `cur_items`.
///
/// # Returns
///
mod builtin_attrs;
mod removed;
+#[cfg(test)]
+mod tests;
+
use rustc_span::{edition::Edition, symbol::Symbol, Span};
use std::fmt;
use std::num::NonZeroU32;
AttributeType, BuiltinAttribute, GatedCfg, BUILTIN_ATTRIBUTES, BUILTIN_ATTRIBUTE_MAP,
};
pub use removed::{REMOVED_FEATURES, STABLE_REMOVED_FEATURES};
-
-#[cfg(test)]
-mod test {
- use super::UnstableFeatures;
-
- #[test]
- fn rustc_bootstrap_parsing() {
- let is_bootstrap = |env, krate| {
- std::env::set_var("RUSTC_BOOTSTRAP", env);
- matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Cheat)
- };
- assert!(is_bootstrap("1", None));
- assert!(is_bootstrap("1", Some("x")));
- // RUSTC_BOOTSTRAP allows specifying a specific crate
- assert!(is_bootstrap("x", Some("x")));
- // RUSTC_BOOTSTRAP allows multiple comma-delimited crates
- assert!(is_bootstrap("x,y,z", Some("x")));
- assert!(is_bootstrap("x,y,z", Some("y")));
- // Crate that aren't specified do not get unstable features
- assert!(!is_bootstrap("x", Some("a")));
- assert!(!is_bootstrap("x,y,z", Some("a")));
- assert!(!is_bootstrap("x,y,z", None));
-
- // this is technically a breaking change, but there are no stability guarantees for RUSTC_BOOTSTRAP
- assert!(!is_bootstrap("0", None));
- }
-}
--- /dev/null
+use super::UnstableFeatures;
+
+#[test]
+fn rustc_bootstrap_parsing() {
+ let is_bootstrap = |env, krate| {
+ std::env::set_var("RUSTC_BOOTSTRAP", env);
+ matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Cheat)
+ };
+ assert!(is_bootstrap("1", None));
+ assert!(is_bootstrap("1", Some("x")));
+ // RUSTC_BOOTSTRAP allows specifying a specific crate
+ assert!(is_bootstrap("x", Some("x")));
+ // RUSTC_BOOTSTRAP allows multiple comma-delimited crates
+ assert!(is_bootstrap("x,y,z", Some("x")));
+ assert!(is_bootstrap("x,y,z", Some("y")));
+    // Crates that aren't specified do not get unstable features
+ assert!(!is_bootstrap("x", Some("a")));
+ assert!(!is_bootstrap("x,y,z", Some("a")));
+ assert!(!is_bootstrap("x,y,z", None));
+
+    // This is technically a breaking change, but there are no stability guarantees for RUSTC_BOOTSTRAP.
+ assert!(!is_bootstrap("0", None));
+}
#[derive(Debug, HashStable_Generic)]
pub enum Guard<'hir> {
If(&'hir Expr<'hir>),
+ IfLet(&'hir Pat<'hir>, &'hir Expr<'hir>),
}
#[derive(Debug, HashStable_Generic)]
IfDesugar { contains_else_clause: bool },
/// An `if let _ = _ { .. }` (optionally with `else { .. }`).
IfLetDesugar { contains_else_clause: bool },
+ /// An `if let _ = _ => { .. }` match guard.
+ IfLetGuardDesugar,
/// A `while _ { .. }` (which was desugared to a `loop { match _ { .. } }`).
WhileDesugar,
/// A `while let _ = _ { .. }` (which was desugared to a
use MatchSource::*;
match self {
Normal => "match",
- IfDesugar { .. } | IfLetDesugar { .. } => "if",
+ IfDesugar { .. } | IfLetDesugar { .. } | IfLetGuardDesugar => "if",
WhileDesugar | WhileLetDesugar => "while",
ForLoopDesugar => "for",
TryDesugar => "?",
if let Some(ref g) = arm.guard {
match g {
Guard::If(ref e) => visitor.visit_expr(e),
+ Guard::IfLet(ref pat, ref e) => {
+ visitor.visit_pat(pat);
+ visitor.visit_expr(e);
+ }
}
}
visitor.visit_expr(&arm.body);
self.print_expr(&e);
self.s.space();
}
+ hir::Guard::IfLet(pat, e) => {
+ self.word_nbsp("if");
+ self.word_nbsp("let");
+ self.print_pat(&pat);
+ self.s.space();
+ self.word_space("=");
+ self.print_expr(&e);
+ self.s.space();
+ }
}
}
self.word_space("=>");
tracked!(share_generics, Some(true));
tracked!(show_span, Some(String::from("abc")));
tracked!(src_hash_algorithm, Some(SourceFileHashAlgorithm::Sha1));
- tracked!(symbol_mangling_version, SymbolManglingVersion::V0);
+ tracked!(symbol_mangling_version, Some(SymbolManglingVersion::V0));
tracked!(teach, true);
tracked!(thinlto, Some(true));
tracked!(tune_cpu, Some(String::from("abc")));
cmd.args(&components);
for lib in output(&mut cmd).split_whitespace() {
- let name = if lib.starts_with("-l") {
- &lib[2..]
- } else if lib.starts_with('-') {
- &lib[1..]
+ let name = if let Some(stripped) = lib.strip_prefix("-l") {
+ stripped
+ } else if let Some(stripped) = lib.strip_prefix('-') {
+ stripped
} else if Path::new(lib).exists() {
// On MSVC llvm-config will print the full name to libraries, but
// we're only interested in the name part
cmd.arg(llvm_link_arg).arg("--ldflags");
for lib in output(&mut cmd).split_whitespace() {
if is_crossed {
- if lib.starts_with("-LIBPATH:") {
- println!("cargo:rustc-link-search=native={}", lib[9..].replace(&host, &target));
- } else if lib.starts_with("-L") {
- println!("cargo:rustc-link-search=native={}", lib[2..].replace(&host, &target));
+ if let Some(stripped) = lib.strip_prefix("-LIBPATH:") {
+ println!("cargo:rustc-link-search=native={}", stripped.replace(&host, &target));
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped.replace(&host, &target));
}
- } else if lib.starts_with("-LIBPATH:") {
- println!("cargo:rustc-link-search=native={}", &lib[9..]);
- } else if lib.starts_with("-l") {
- println!("cargo:rustc-link-lib={}", &lib[2..]);
- } else if lib.starts_with("-L") {
- println!("cargo:rustc-link-search=native={}", &lib[2..]);
+ } else if let Some(stripped) = lib.strip_prefix("-LIBPATH:") {
+ println!("cargo:rustc-link-search=native={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-l") {
+ println!("cargo:rustc-link-lib={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped);
}
}
let llvm_linker_flags = tracked_env_var_os("LLVM_LINKER_FLAGS");
if let Some(s) = llvm_linker_flags {
for lib in s.into_string().unwrap().split_whitespace() {
- if lib.starts_with("-l") {
- println!("cargo:rustc-link-lib={}", &lib[2..]);
- } else if lib.starts_with("-L") {
- println!("cargo:rustc-link-search=native={}", &lib[2..]);
+ if let Some(stripped) = lib.strip_prefix("-l") {
+ println!("cargo:rustc-link-lib={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped);
}
}
}
bool AsmComments,
bool EmitStackSizeSection,
bool RelaxELFRelocations,
- bool UseInitArray) {
+ bool UseInitArray,
+ const char *SplitDwarfFile) {
auto OptLevel = fromRust(RustOptLevel);
auto RM = fromRust(RustReloc);
Options.MCOptions.AsmVerbose = AsmComments;
Options.MCOptions.PreserveAsmComments = AsmComments;
Options.MCOptions.ABIName = ABIStr;
+ if (SplitDwarfFile) {
+ Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
+ }
Options.RelaxELFRelocations = RelaxELFRelocations;
Options.UseInitArray = UseInitArray;
extern "C" LLVMRustResult
LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
- LLVMModuleRef M, const char *Path,
+ LLVMModuleRef M, const char *Path, const char *DwoPath,
LLVMRustFileType RustFileType) {
llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
auto FileType = fromRust(RustFileType);
}
buffer_ostream BOS(OS);
- unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
- PM->run(*unwrap(M));
+ if (DwoPath) {
+ raw_fd_ostream DOS(DwoPath, EC, sys::fs::F_None);
+ EC.clear();
+ if (EC)
+ ErrorInfo = EC.message();
+ if (ErrorInfo != "") {
+ LLVMRustSetLastError(ErrorInfo.c_str());
+ return LLVMRustResult::Failure;
+ }
+ buffer_ostream DBOS(DOS);
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, &DBOS, FileType, false);
+ PM->run(*unwrap(M));
+ } else {
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
+ PM->run(*unwrap(M));
+ }
// Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
// stream (OS), so the only real safe place to delete this is here? Don't we
const char *Producer, size_t ProducerLen, bool isOptimized,
const char *Flags, unsigned RuntimeVer,
const char *SplitName, size_t SplitNameLen,
- LLVMRustDebugEmissionKind Kind) {
+ LLVMRustDebugEmissionKind Kind,
+ uint64_t DWOId, bool SplitDebugInlining) {
auto *File = unwrapDI<DIFile>(FileRef);
return wrap(Builder->createCompileUnit(Lang, File, StringRef(Producer, ProducerLen),
isOptimized, Flags, RuntimeVer,
StringRef(SplitName, SplitNameLen),
- fromRust(Kind)));
+ fromRust(Kind), DWOId, SplitDebugInlining));
}
extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFile(
#[proc_macro]
pub fn symbols(input: TokenStream) -> TokenStream {
- symbols::symbols(input)
+ symbols::symbols(input.into()).into()
}
decl_derive!([HashStable, attributes(stable_hasher)] => hash_stable::hash_stable_derive);
-use proc_macro::TokenStream;
+//! Proc macro which builds the Symbol table
+//!
+//! # Debugging
+//!
+//! Since this proc-macro does some non-trivial work, debugging it is important.
+//! This proc-macro can be invoked as an ordinary unit test, like so:
+//!
+//! ```bash
+//! cd compiler/rustc_macros
+//! cargo test symbols::test_symbols -- --nocapture
+//! ```
+//!
+//! This unit test finds the `symbols!` invocation in `compiler/rustc_span/src/symbol.rs`
+//! and runs it. It verifies that the output token stream can be parsed as valid module
+//! items and that no errors were produced.
+//!
+//! You can also view the generated code by using `cargo expand`:
+//!
+//! ```bash
+//! cargo install cargo-expand # this is necessary only once
+//! cd compiler/rustc_span
+//! cargo expand > /tmp/rustc_span.rs # it's a big file
+//! ```
+
+use proc_macro2::{Span, TokenStream};
use quote::quote;
-use std::collections::HashSet;
+use std::collections::HashMap;
use syn::parse::{Parse, ParseStream, Result};
-use syn::{braced, parse_macro_input, Ident, LitStr, Token};
+use syn::{braced, punctuated::Punctuated, Ident, LitStr, Token};
+
+#[cfg(test)]
+mod tests;
mod kw {
syn::custom_keyword!(Keywords);
let name = input.parse()?;
input.parse::<Token![:]>()?;
let value = input.parse()?;
- input.parse::<Token![,]>()?;
Ok(Keyword { name, value })
}
Ok(_) => Some(input.parse()?),
Err(_) => None,
};
- input.parse::<Token![,]>()?;
Ok(Symbol { name, value })
}
}
-/// A type used to greedily parse another type until the input is empty.
-struct List<T>(Vec<T>);
-
-impl<T: Parse> Parse for List<T> {
- fn parse(input: ParseStream<'_>) -> Result<Self> {
- let mut list = Vec::new();
- while !input.is_empty() {
- list.push(input.parse()?);
- }
- Ok(List(list))
- }
-}
-
struct Input {
- keywords: List<Keyword>,
- symbols: List<Symbol>,
+ keywords: Punctuated<Keyword, Token![,]>,
+ symbols: Punctuated<Symbol, Token![,]>,
}
impl Parse for Input {
input.parse::<kw::Keywords>()?;
let content;
braced!(content in input);
- let keywords = content.parse()?;
+ let keywords = Punctuated::parse_terminated(&content)?;
input.parse::<kw::Symbols>()?;
let content;
braced!(content in input);
- let symbols = content.parse()?;
+ let symbols = Punctuated::parse_terminated(&content)?;
Ok(Input { keywords, symbols })
}
}
+#[derive(Default)]
+struct Errors {
+ list: Vec<syn::Error>,
+}
+
+impl Errors {
+ fn error(&mut self, span: Span, message: String) {
+ self.list.push(syn::Error::new(span, message));
+ }
+}
+
pub fn symbols(input: TokenStream) -> TokenStream {
- let input = parse_macro_input!(input as Input);
+ let (mut output, errors) = symbols_with_errors(input);
+
+ // If we generated any errors, then report them as compiler_error!() macro calls.
+ // This lets the errors point back to the most relevant span. It also allows us
+ // to report as many errors as we can during a single run.
+ output.extend(errors.into_iter().map(|e| e.to_compile_error()));
+
+ output
+}
+
+fn symbols_with_errors(input: TokenStream) -> (TokenStream, Vec<syn::Error>) {
+ let mut errors = Errors::default();
+
+ let input: Input = match syn::parse2(input) {
+ Ok(input) => input,
+ Err(e) => {
+ // This allows us to display errors at the proper span, while minimizing
+ // unrelated errors caused by bailing out (and not generating code).
+ errors.list.push(e);
+ Input { keywords: Default::default(), symbols: Default::default() }
+ }
+ };
let mut keyword_stream = quote! {};
let mut symbols_stream = quote! {};
let mut digits_stream = quote! {};
let mut prefill_stream = quote! {};
let mut counter = 0u32;
- let mut keys = HashSet::<String>::new();
- let mut prev_key: Option<String> = None;
- let mut errors = Vec::<String>::new();
-
- let mut check_dup = |str: &str, errors: &mut Vec<String>| {
- if !keys.insert(str.to_string()) {
- errors.push(format!("Symbol `{}` is duplicated", str));
+ let mut keys =
+ HashMap::<String, Span>::with_capacity(input.keywords.len() + input.symbols.len() + 10);
+ let mut prev_key: Option<(Span, String)> = None;
+
+ let mut check_dup = |span: Span, str: &str, errors: &mut Errors| {
+ if let Some(prev_span) = keys.get(str) {
+ errors.error(span, format!("Symbol `{}` is duplicated", str));
+ errors.error(*prev_span, format!("location of previous definition"));
+ } else {
+ keys.insert(str.to_string(), span);
}
};
- let mut check_order = |str: &str, errors: &mut Vec<String>| {
- if let Some(ref prev_str) = prev_key {
+ let mut check_order = |span: Span, str: &str, errors: &mut Errors| {
+ if let Some((prev_span, ref prev_str)) = prev_key {
if str < prev_str {
- errors.push(format!("Symbol `{}` must precede `{}`", str, prev_str));
+ errors.error(span, format!("Symbol `{}` must precede `{}`", str, prev_str));
+ errors.error(prev_span, format!("location of previous symbol `{}`", prev_str));
}
}
- prev_key = Some(str.to_string());
+ prev_key = Some((span, str.to_string()));
};
// Generate the listed keywords.
- for keyword in &input.keywords.0 {
+ for keyword in input.keywords.iter() {
let name = &keyword.name;
let value = &keyword.value;
- check_dup(&value.value(), &mut errors);
+ let value_string = value.value();
+ check_dup(keyword.name.span(), &value_string, &mut errors);
prefill_stream.extend(quote! {
#value,
});
}
// Generate the listed symbols.
- for symbol in &input.symbols.0 {
+ for symbol in input.symbols.iter() {
let name = &symbol.name;
let value = match &symbol.value {
Some(value) => value.value(),
None => name.to_string(),
};
- check_dup(&value, &mut errors);
- check_order(&name.to_string(), &mut errors);
+ check_dup(symbol.name.span(), &value, &mut errors);
+ check_order(symbol.name.span(), &name.to_string(), &mut errors);
+
prefill_stream.extend(quote! {
#value,
});
// Generate symbols for the strings "0", "1", ..., "9".
for n in 0..10 {
let n = n.to_string();
- check_dup(&n, &mut errors);
+ check_dup(Span::call_site(), &n, &mut errors);
prefill_stream.extend(quote! {
#n,
});
counter += 1;
}
- if !errors.is_empty() {
- for error in errors.into_iter() {
- eprintln!("error: {}", error)
- }
- panic!("errors in `Keywords` and/or `Symbols`");
- }
-
- let tt = TokenStream::from(quote! {
+ let output = quote! {
macro_rules! keywords {
() => {
#keyword_stream
])
}
}
- });
+ };
- // To see the generated code generated, uncomment this line, recompile, and
- // run the resulting output through `rustfmt`.
- //eprintln!("{}", tt);
+ (output, errors.list)
- tt
+ // To see the generated code, use the "cargo expand" command.
+ // Do this once to install:
+ // cargo install cargo-expand
+ //
+ // Then, cd to rustc_span and run:
+ // cargo expand > /tmp/rustc_span_expanded.rs
+ //
+ // and read that file.
}
--- /dev/null
+use super::*;
+
+// This test is mainly here for interactive development. Use this test while
+// you're working on the proc-macro defined in this file.
+#[test]
+fn test_symbols() {
+ // We textually include the symbol.rs file, which contains the list of all
+ // symbols, keywords, and common words. Then we search for the
+ // `symbols! { ... }` call.
+
+ static SYMBOL_RS_FILE: &str = include_str!("../../../rustc_span/src/symbol.rs");
+
+ let file = syn::parse_file(SYMBOL_RS_FILE).unwrap();
+ let symbols_path: syn::Path = syn::parse_quote!(symbols);
+
+ let m: &syn::ItemMacro = file
+ .items
+ .iter()
+ .filter_map(|i| {
+ if let syn::Item::Macro(m) = i {
+ if m.mac.path == symbols_path { Some(m) } else { None }
+ } else {
+ None
+ }
+ })
+ .next()
+ .expect("did not find `symbols!` macro invocation.");
+
+ let body_tokens = m.mac.tokens.clone();
+
+ test_symbols_macro(body_tokens, &[]);
+}
+
+fn test_symbols_macro(input: TokenStream, expected_errors: &[&str]) {
+ let (output, found_errors) = symbols_with_errors(input);
+
+ // It should always parse.
+ let _parsed_file = syn::parse2::<syn::File>(output).unwrap();
+
+ assert_eq!(
+ found_errors.len(),
+ expected_errors.len(),
+ "Macro generated a different number of errors than expected"
+ );
+
+ for (found_error, &expected_error) in found_errors.iter().zip(expected_errors.iter()) {
+ let found_error_str = format!("{}", found_error);
+ assert_eq!(found_error_str, expected_error);
+ }
+}
+
+#[test]
+fn check_dup_keywords() {
+ let input = quote! {
+ Keywords {
+ Crate: "crate",
+ Crate: "crate",
+ }
+ Symbols {}
+ };
+ test_symbols_macro(input, &["Symbol `crate` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_dup_symbol() {
+ let input = quote! {
+ Keywords {}
+ Symbols {
+ splat,
+ splat,
+ }
+ };
+ test_symbols_macro(input, &["Symbol `splat` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_dup_symbol_and_keyword() {
+ let input = quote! {
+ Keywords {
+ Splat: "splat",
+ }
+ Symbols {
+ splat,
+ }
+ };
+ test_symbols_macro(input, &["Symbol `splat` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_symbol_order() {
+ let input = quote! {
+ Keywords {}
+ Symbols {
+ zebra,
+ aardvark,
+ }
+ };
+ test_symbols_macro(
+ input,
+ &["Symbol `aardvark` must precede `zebra`", "location of previous symbol `zebra`"],
+ );
+}
self.inject_dependency_if(cnum, "a panic runtime", &|data| data.needs_panic_runtime());
}
- fn inject_profiler_runtime(&mut self) {
+ fn inject_profiler_runtime(&mut self, krate: &ast::Crate) {
if (self.sess.opts.debugging_opts.instrument_coverage
|| self.sess.opts.debugging_opts.profile
|| self.sess.opts.cg.profile_generate.enabled())
{
info!("loading profiler");
+ if self.sess.contains_name(&krate.attrs, sym::no_core) {
+ self.sess.err(
+ "`profiler_builtins` crate (required by compiler options) \
+ is not compatible with crate attribute `#![no_core]`",
+ );
+ }
+
let name = sym::profiler_builtins;
let cnum = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit, None);
let data = self.cstore.get_crate_data(cnum);
}
pub fn postprocess(&mut self, krate: &ast::Crate) {
- self.inject_profiler_runtime();
+ self.inject_profiler_runtime(krate);
self.inject_allocator_crate(krate);
self.inject_panic_runtime(krate);
self.opaque.position()
}
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
-
fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
&mut self.type_shorthands
}
no_builtins: tcx.sess.contains_name(&attrs, sym::no_builtins),
panic_runtime: tcx.sess.contains_name(&attrs, sym::panic_runtime),
profiler_runtime: tcx.sess.contains_name(&attrs, sym::profiler_runtime),
- symbol_mangling_version: tcx.sess.opts.debugging_opts.symbol_mangling_version,
+ symbol_mangling_version: tcx.sess.opts.debugging_opts.get_symbol_mangling_version(),
crate_deps,
dylib_dependency_formats,
smallvec = { version = "1.0", features = ["union", "may_dangle"] }
measureme = "9.0.0"
rustc_session = { path = "../rustc_session" }
+rustc_type_ir = { path = "../rustc_type_ir" }
fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
if let Some(cache) = self.queries.on_disk_cache.as_ref() {
- cache.register_reused_dep_path_hash(hash)
+ cache.register_reused_dep_path_hash(*self, hash)
}
}
use rustc_hir::HirId;
use rustc_target::abi::VariantIdx;
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ TyEncodable,
+ TyDecodable,
+ TypeFoldable,
+ HashStable
+)]
pub enum PlaceBase {
/// A temporary variable
Rvalue,
Upvar(ty::UpvarId),
}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ TyEncodable,
+ TyDecodable,
+ TypeFoldable,
+ HashStable
+)]
pub enum ProjectionKind {
/// A dereference of a pointer, reference or `Box<T>` of the given type
Deref,
Subslice,
}
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ TyEncodable,
+ TyDecodable,
+ TypeFoldable,
+ HashStable
+)]
pub struct Projection<'tcx> {
/// Type after the projection is being applied.
pub ty: Ty<'tcx>,
/// A `Place` represents how a value is located in memory.
///
/// This is an HIR version of `mir::Place`
-#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct Place<'tcx> {
/// The type of the `PlaceBase`
pub base_ty: Ty<'tcx>,
/// A `PlaceWithHirId` represents how a value is located in memory.
///
/// This is an HIR version of `mir::Place`
-#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct PlaceWithHirId<'tcx> {
/// `HirId` of the expression or pattern producing this value.
pub hir_id: HirId,
/// Checks whether an item marked with `deprecated(since="X")` is currently
/// deprecated (i.e., whether X is not greater than the current rustc version).
pub fn deprecation_in_effect(is_since_rustc_version: bool, since: Option<&str>) -> bool {
- let since = if let Some(since) = since {
- if is_since_rustc_version {
- since
- } else {
- // We assume that the deprecation is in effect if it's not a
- // rustc version.
- return true;
- }
- } else {
- // If since attribute is not set, then we're definitely in effect.
- return true;
- };
fn parse_version(ver: &str) -> Vec<u32> {
// We ignore non-integer components of the version (e.g., "nightly").
ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect()
}
- if let Some(rustc) = option_env!("CFG_RELEASE") {
- let since: Vec<u32> = parse_version(&since);
- let rustc: Vec<u32> = parse_version(rustc);
- // We simply treat invalid `since` attributes as relating to a previous
- // Rust version, thus always displaying the warning.
- if since.len() != 3 {
- return true;
- }
- since <= rustc
- } else {
- // By default, a deprecation warning applies to
- // the current version of the compiler.
- true
+ if !is_since_rustc_version {
+ // The `since` field doesn't have semantic purpose in the stable `deprecated`
+ // attribute, only in `rustc_deprecated`.
+ return true;
}
+
+ if let Some(since) = since {
+ if since == "TBD" {
+ return false;
+ }
+
+ if let Some(rustc) = option_env!("CFG_RELEASE") {
+ let since: Vec<u32> = parse_version(&since);
+ let rustc: Vec<u32> = parse_version(rustc);
+ // We simply treat invalid `since` attributes as relating to a previous
+ // Rust version, thus always displaying the warning.
+ if since.len() != 3 {
+ return true;
+ }
+ return since <= rustc;
+ }
+ };
+
+ // Assume deprecation is in effect if "since" field is missing
+ // or if we can't determine the current Rust version.
+ true
}
pub fn deprecation_suggestion(
}
pub fn deprecation_message(depr: &Deprecation, kind: &str, path: &str) -> (String, &'static Lint) {
- let (message, lint) = if deprecation_in_effect(
- depr.is_since_rustc_version,
- depr.since.map(Symbol::as_str).as_deref(),
- ) {
+ let since = depr.since.map(Symbol::as_str);
+ let (message, lint) = if deprecation_in_effect(depr.is_since_rustc_version, since.as_deref()) {
(format!("use of deprecated {} `{}`", kind, path), DEPRECATED)
} else {
(
- format!(
- "use of {} `{}` that will be deprecated in future version {}",
- kind,
- path,
- depr.since.unwrap()
- ),
+ if since.as_deref() == Some("TBD") {
+ format!(
+ "use of {} `{}` that will be deprecated in a future Rust version",
+ kind, path
+ )
+ } else {
+ format!(
+ "use of {} `{}` that will be deprecated in future version {}",
+ kind,
+ path,
+ since.unwrap()
+ )
+ },
DEPRECATED_IN_FUTURE,
)
};
/// Uniquely identifies one of the following:
/// - A constant
/// - A static
-/// - A const fn where all arguments (if any) are zero-sized types
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable, Lift)]
pub struct GlobalId<'tcx> {
}
}
+/// The data a user variable's debuginfo refers to: either a place in the
+/// MIR body or a constant value.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
+pub enum VarDebugInfoContents<'tcx> {
+    /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
+    /// based on a `Local`, not a `Static`, and contains no indexing.
+    Place(Place<'tcx>),
+    /// A constant; printed via `Display` (not `Debug`) in the impl below.
+    Const(Constant<'tcx>),
+}
+
+impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+        match self {
+            // Constants are shown with `Display`, places with `Debug`.
+            VarDebugInfoContents::Const(c) => write!(fmt, "{}", c),
+            VarDebugInfoContents::Place(p) => write!(fmt, "{:?}", p),
+        }
+    }
+}
+
/// Debug information pertaining to a user variable.
#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable)]
pub struct VarDebugInfo<'tcx> {
pub source_info: SourceInfo,
/// Where the data for this user variable is to be found.
- /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
- /// based on a `Local`, not a `Static`, and contains no indexing.
- pub place: Place<'tcx>,
+ pub value: VarDebugInfoContents<'tcx>,
}
///////////////////////////////////////////////////////////////////////////
pub fn as_ref(&self) -> PlaceRef<'tcx> {
PlaceRef { local: self.local, projection: &self.projection }
}
+
+    /// Iterate over the projections in evaluation order, i.e., the first element is the base with
+    /// its projection and then subsequently more projections are added.
+    /// As a concrete example, given the place a.b.c, this would yield:
+    /// - (a, .b)
+    /// - (a.b, .c)
+    /// Given a place without projections, the iterator is empty.
+    pub fn iter_projections(
+        self,
+    ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+        self.projection.iter().enumerate().map(move |(i, proj)| {
+            // The base for the `i`-th projection is the local together with
+            // all projections *before* it (`..i` excludes `proj` itself).
+            let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
+            (base, proj)
+        })
+    }
}
impl From<Local> for Place<'_> {
UseOfMutableStatic,
UseOfExternStatic,
DerefOfRawPointer,
- AssignToNonCopyUnionField,
+ AssignToDroppingUnionField,
AccessToUnionField,
MutationOfLayoutConstrainedField,
BorrowOfLayoutConstrainedField,
"raw pointers may be NULL, dangling or unaligned; they can violate aliasing rules \
and cause data races: all of these are undefined behavior",
),
- AssignToNonCopyUnionField => (
- "assignment to non-`Copy` union field",
+ AssignToDroppingUnionField => (
+ "assignment to union field that might need dropping",
"the previous content of the field will be dropped, which causes undefined \
behavior if the field was not properly initialized",
),
}
}
+impl<'tcx> PlaceRef<'tcx> {
+    /// Returns the type of this place reference, delegating to
+    /// `Place::ty_from` with this reference's local and projection list.
+    pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+    where
+        D: HasLocalDecls<'tcx>,
+    {
+        Place::ty_from(self.local, &self.projection, local_decls, tcx)
+    }
+}
+
pub enum RvalueInitializationState {
Shallow,
Deep,
let VarDebugInfo {
name: _,
source_info,
- place,
+ value,
} = var_debug_info;
self.visit_source_info(source_info);
let location = START_BLOCK.start_location();
- self.visit_place(
- place,
- PlaceContext::NonUse(NonUseContext::VarDebugInfo),
- location,
- );
+ match value {
+ VarDebugInfoContents::Const(c) => self.visit_constant(c, location),
+ VarDebugInfoContents::Place(place) =>
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo),
+ location
+ ),
+ }
}
fn super_source_scope(&mut self,
pub trait TyEncoder<'tcx>: Encoder {
const CLEAR_CROSS_CRATE: bool;
- fn tcx(&self) -> TyCtxt<'tcx>;
fn position(&self) -> usize;
fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize>;
fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::Predicate<'tcx>, usize>;
LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
}
+    /// For a given closure, returns the iterator of `ty::CapturedPlace`s that are captured
+    /// by the closure.
+    ///
+    /// Yields an empty iterator when `closure_def_id` has no entry in
+    /// `closure_min_captures`.
+    pub fn closure_min_captures_flattened(
+        &self,
+        closure_def_id: DefId,
+    ) -> impl Iterator<Item = &ty::CapturedPlace<'tcx>> {
+        self.closure_min_captures
+            .get(&closure_def_id)
+            // Flatten the per-root-variable capture lists into one stream.
+            .map(|closure_min_captures| closure_min_captures.values().flat_map(|v| v.iter()))
+            // `Option` -> iterator: a missing entry becomes an empty iterator.
+            .into_iter()
+            .flatten()
+    }
+
pub fn upvar_capture(&self, upvar_id: ty::UpvarId) -> ty::UpvarCapture<'tcx> {
self.upvar_capture_map[&upvar_id]
}
pub use self::sty::RegionKind;
pub use self::sty::RegionKind::*;
pub use self::sty::TyKind::*;
-pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar, DebruijnIndex, INNERMOST};
+pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar};
pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region};
pub use self::sty::{CanonicalPolyFnSig, FnSig, GenSig, PolyFnSig, PolyGenSig};
pub use self::sty::{ClosureSubsts, GeneratorSubsts, TypeAndMut, UpvarSubsts};
pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef};
pub use self::sty::{PolyTraitRef, TraitRef, TyKind};
pub use crate::ty::diagnostics::*;
+pub use rustc_type_ir::{DebruijnIndex, TypeFlags, INNERMOST};
pub use self::binding::BindingMode;
pub use self::binding::BindingMode::*;
pub pos: usize,
}
-bitflags! {
- /// Flags that we track on types. These flags are propagated upwards
- /// through the type during type construction, so that we can quickly check
- /// whether the type has various kinds of types in it without recursing
- /// over the type itself.
- pub struct TypeFlags: u32 {
- // Does this have parameters? Used to determine whether substitution is
- // required.
- /// Does this have [Param]?
- const HAS_TY_PARAM = 1 << 0;
- /// Does this have [ReEarlyBound]?
- const HAS_RE_PARAM = 1 << 1;
- /// Does this have [ConstKind::Param]?
- const HAS_CT_PARAM = 1 << 2;
-
- const NEEDS_SUBST = TypeFlags::HAS_TY_PARAM.bits
- | TypeFlags::HAS_RE_PARAM.bits
- | TypeFlags::HAS_CT_PARAM.bits;
-
- /// Does this have [Infer]?
- const HAS_TY_INFER = 1 << 3;
- /// Does this have [ReVar]?
- const HAS_RE_INFER = 1 << 4;
- /// Does this have [ConstKind::Infer]?
- const HAS_CT_INFER = 1 << 5;
-
- /// Does this have inference variables? Used to determine whether
- /// inference is required.
- const NEEDS_INFER = TypeFlags::HAS_TY_INFER.bits
- | TypeFlags::HAS_RE_INFER.bits
- | TypeFlags::HAS_CT_INFER.bits;
-
- /// Does this have [Placeholder]?
- const HAS_TY_PLACEHOLDER = 1 << 6;
- /// Does this have [RePlaceholder]?
- const HAS_RE_PLACEHOLDER = 1 << 7;
- /// Does this have [ConstKind::Placeholder]?
- const HAS_CT_PLACEHOLDER = 1 << 8;
-
- /// `true` if there are "names" of regions and so forth
- /// that are local to a particular fn/inferctxt
- const HAS_FREE_LOCAL_REGIONS = 1 << 9;
-
- /// `true` if there are "names" of types and regions and so forth
- /// that are local to a particular fn
- const HAS_FREE_LOCAL_NAMES = TypeFlags::HAS_TY_PARAM.bits
- | TypeFlags::HAS_CT_PARAM.bits
- | TypeFlags::HAS_TY_INFER.bits
- | TypeFlags::HAS_CT_INFER.bits
- | TypeFlags::HAS_TY_PLACEHOLDER.bits
- | TypeFlags::HAS_CT_PLACEHOLDER.bits
- | TypeFlags::HAS_FREE_LOCAL_REGIONS.bits;
-
- /// Does this have [Projection]?
- const HAS_TY_PROJECTION = 1 << 10;
- /// Does this have [Opaque]?
- const HAS_TY_OPAQUE = 1 << 11;
- /// Does this have [ConstKind::Unevaluated]?
- const HAS_CT_PROJECTION = 1 << 12;
-
- /// Could this type be normalized further?
- const HAS_PROJECTION = TypeFlags::HAS_TY_PROJECTION.bits
- | TypeFlags::HAS_TY_OPAQUE.bits
- | TypeFlags::HAS_CT_PROJECTION.bits;
-
- /// Is an error type/const reachable?
- const HAS_ERROR = 1 << 13;
-
- /// Does this have any region that "appears free" in the type?
- /// Basically anything but [ReLateBound] and [ReErased].
- const HAS_FREE_REGIONS = 1 << 14;
-
- /// Does this have any [ReLateBound] regions? Used to check
- /// if a global bound is safe to evaluate.
- const HAS_RE_LATE_BOUND = 1 << 15;
-
- /// Does this have any [ReErased] regions?
- const HAS_RE_ERASED = 1 << 16;
-
- /// Does this value have parameters/placeholders/inference variables which could be
- /// replaced later, in a way that would change the results of `impl` specialization?
- const STILL_FURTHER_SPECIALIZABLE = 1 << 17;
- }
-}
-
#[allow(rustc::usage_of_ty_tykind)]
pub struct TyS<'tcx> {
/// This field shouldn't be used directly and may be removed in the future.
#[rustc_diagnostic_item = "Ty"]
pub type Ty<'tcx> = &'tcx TyS<'tcx>;
-#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ TyEncodable,
+ TyDecodable,
+ TypeFoldable,
+ HashStable
+)]
pub struct UpvarPath {
pub hir_id: hir::HirId,
}
/// Upvars do not get their own `NodeId`. Instead, we use the pair of
/// the original var ID (that is, the root variable that is referenced
/// by the upvar) and the ID of the closure expression.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct UpvarId {
pub var_path: UpvarPath,
pub closure_expr_id: LocalDefId,
}
}
-#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, Copy, HashStable)]
+#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, TypeFoldable, Copy, HashStable)]
pub enum BorrowKind {
/// Data must be immutable and is aliasable.
ImmBorrow,
/// Information describing the capture of an upvar. This is computed
/// during `typeck`, specifically by `regionck`.
-#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub enum UpvarCapture<'tcx> {
/// Upvar is captured by value. This is always true when the
/// closure is labeled `move`, but can also be true in other cases
ByRef(UpvarBorrow<'tcx>),
}
-#[derive(PartialEq, Clone, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(PartialEq, Clone, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct UpvarBorrow<'tcx> {
/// The kind of borrow: by-ref upvars have access to shared
/// immutable borrows, which are not part of the normal language
pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>;
/// A `Place` and the corresponding `CaptureInfo`.
-#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct CapturedPlace<'tcx> {
pub place: HirPlace<'tcx>,
pub info: CaptureInfo<'tcx>,
/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move)
/// for a particular capture as well as identifying the part of the source code
/// that triggered this capture to occur.
-#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, TypeFoldable, HashStable)]
pub struct CaptureInfo<'tcx> {
/// Expr Id pointing to use that resulted in selecting the current capture kind
///
fn try_remap_cnum(&self, tcx: TyCtxt<'_>, cnum: u32) -> Option<CrateNum> {
let cnum_map =
self.cnum_map.get_or_init(|| Self::compute_cnum_map(tcx, &self.prev_cnums[..]));
+ debug!("try_remap_cnum({}): cnum_map={:?}", cnum, cnum_map);
cnum_map[CrateNum::from_u32(cnum)]
}
.insert(hash, RawDefId { krate: def_id.krate.as_u32(), index: def_id.index.as_u32() });
}
- pub fn register_reused_dep_path_hash(&self, hash: DefPathHash) {
- if let Some(old_id) = self.foreign_def_path_hashes.get(&hash) {
- self.latest_foreign_def_path_hashes.lock().insert(hash, *old_id);
+ /// If the given `hash` still exists in the current compilation,
+ /// calls `store_foreign_def_id` with its current `DefId`.
+ ///
+ /// Normally, `store_foreign_def_id_hash` can be called directly by
+ /// the dependency graph when we construct a `DepNode`. However,
+ /// when we re-use a deserialized `DepNode` from the previous compilation
+ /// session, we only have the `DefPathHash` available. This method is used
+/// to ensure that any `DepNode` that we re-use has a `DefPathHash` -> `RawId` written
+ /// out for usage in the next compilation session.
+ pub fn register_reused_dep_path_hash(&self, tcx: TyCtxt<'tcx>, hash: DefPathHash) {
+ // We can't simply copy the `RawDefId` from `foreign_def_path_hashes` to
+ // `latest_foreign_def_path_hashes`, since the `RawDefId` might have
+ // changed in the current compilation session (e.g. we've added/removed crates,
+ // or added/removed definitions before/after the target definition).
+ if let Some(def_id) = self.def_path_hash_to_def_id(tcx, hash) {
+ self.store_foreign_def_id_hash(def_id, hash);
}
}
match cache.entry(hash) {
Entry::Occupied(e) => *e.get(),
Entry::Vacant(e) => {
+ debug!("def_path_hash_to_def_id({:?})", hash);
// Check if the `DefPathHash` corresponds to a definition in the current
// crate
if let Some(def_id) = self.local_def_path_hash_to_def_id.get(&hash).cloned() {
// current compilation session, the crate is guaranteed to be the same
// (otherwise, we would compute a different `DefPathHash`).
let raw_def_id = self.get_raw_def_id(&hash)?;
+ debug!("def_path_hash_to_def_id({:?}): raw_def_id = {:?}", hash, raw_def_id);
// If the owning crate no longer exists, the corresponding definition definitely
// no longer exists.
let krate = self.try_remap_cnum(tcx, raw_def_id.krate)?;
+ debug!("def_path_hash_to_def_id({:?}): krate = {:?}", hash, krate);
// If our `DefPathHash` corresponded to a definition in the local crate,
// we should have either found it in `local_def_path_hash_to_def_id`, or
// never attempted to load it in the first place. Any query result or `DepNode`
// Try to find a definition in the current session, using the previous `DefIndex`
// as an initial guess.
let opt_def_id = tcx.cstore.def_path_hash_to_def_id(krate, raw_def_id.index, hash);
+ debug!("def_path_to_def_id({:?}): opt_def_id = {:?}", hash, opt_def_id);
e.insert(opt_def_id);
opt_def_id
}
{
const CLEAR_CROSS_CRATE: bool = false;
- fn tcx(&self) -> TyCtxt<'tcx> {
- self.tcx
- }
fn position(&self) -> usize {
self.encoder.encoder_position()
}
}
}
-rustc_index::newtype_index! {
- /// A [De Bruijn index][dbi] is a standard means of representing
- /// regions (and perhaps later types) in a higher-ranked setting. In
- /// particular, imagine a type like this:
- ///
- /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
- /// ^ ^ | | |
- /// | | | | |
- /// | +------------+ 0 | |
- /// | | |
- /// +----------------------------------+ 1 |
- /// | |
- /// +----------------------------------------------+ 0
- ///
- /// In this type, there are two binders (the outer fn and the inner
- /// fn). We need to be able to determine, for any given region, which
- /// fn type it is bound by, the inner or the outer one. There are
- /// various ways you can do this, but a De Bruijn index is one of the
- /// more convenient and has some nice properties. The basic idea is to
- /// count the number of binders, inside out. Some examples should help
- /// clarify what I mean.
- ///
- /// Let's start with the reference type `&'b isize` that is the first
- /// argument to the inner function. This region `'b` is assigned a De
- /// Bruijn index of 0, meaning "the innermost binder" (in this case, a
- /// fn). The region `'a` that appears in the second argument type (`&'a
- /// isize`) would then be assigned a De Bruijn index of 1, meaning "the
- /// second-innermost binder". (These indices are written on the arrays
- /// in the diagram).
- ///
- /// What is interesting is that De Bruijn index attached to a particular
- /// variable will vary depending on where it appears. For example,
- /// the final type `&'a char` also refers to the region `'a` declared on
- /// the outermost fn. But this time, this reference is not nested within
- /// any other binders (i.e., it is not an argument to the inner fn, but
- /// rather the outer one). Therefore, in this case, it is assigned a
- /// De Bruijn index of 0, because the innermost binder in that location
- /// is the outer fn.
- ///
- /// [dbi]: https://en.wikipedia.org/wiki/De_Bruijn_index
- #[derive(HashStable)]
- pub struct DebruijnIndex {
- DEBUG_FORMAT = "DebruijnIndex({})",
- const INNERMOST = 0,
- }
-}
-
pub type Region<'tcx> = &'tcx RegionKind;
/// Representation of regions. Note that the NLL checker uses a distinct
/// Region bound in a function scope, which will be substituted when the
/// function is called.
- ReLateBound(DebruijnIndex, BoundRegion),
+ ReLateBound(ty::DebruijnIndex, BoundRegion),
/// When checking a function body, the types of all arguments and so forth
/// that refer to bound region parameters are modified to refer to free
}
}
-impl DebruijnIndex {
- /// Returns the resulting index when this value is moved into
- /// `amount` number of new binders. So, e.g., if you had
- ///
- /// for<'a> fn(&'a x)
- ///
- /// and you wanted to change it to
- ///
- /// for<'a> fn(for<'b> fn(&'a x))
- ///
- /// you would need to shift the index for `'a` into a new binder.
- #[must_use]
- pub fn shifted_in(self, amount: u32) -> DebruijnIndex {
- DebruijnIndex::from_u32(self.as_u32() + amount)
- }
-
- /// Update this index in place by shifting it "in" through
- /// `amount` number of binders.
- pub fn shift_in(&mut self, amount: u32) {
- *self = self.shifted_in(amount);
- }
-
- /// Returns the resulting index when this value is moved out from
- /// `amount` number of new binders.
- #[must_use]
- pub fn shifted_out(self, amount: u32) -> DebruijnIndex {
- DebruijnIndex::from_u32(self.as_u32() - amount)
- }
-
- /// Update in place by shifting out from `amount` binders.
- pub fn shift_out(&mut self, amount: u32) {
- *self = self.shifted_out(amount);
- }
-
- /// Adjusts any De Bruijn indices so as to make `to_binder` the
- /// innermost binder. That is, if we have something bound at `to_binder`,
- /// it will now be bound at INNERMOST. This is an appropriate thing to do
- /// when moving a region out from inside binders:
- ///
- /// ```
- /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _)
- /// // Binder: D3 D2 D1 ^^
- /// ```
- ///
- /// Here, the region `'a` would have the De Bruijn index D3,
- /// because it is the bound 3 binders out. However, if we wanted
- /// to refer to that region `'a` in the second argument (the `_`),
- /// those two binders would not be in scope. In that case, we
- /// might invoke `shift_out_to_binder(D3)`. This would adjust the
- /// De Bruijn index of `'a` to D1 (the innermost binder).
- ///
- /// If we invoke `shift_out_to_binder` and the region is in fact
- /// bound by one of the binders we are shifting out of, that is an
- /// error (and should fail an assertion failure).
- pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self {
- self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32())
- }
-}
-
/// Region utilities
impl RegionKind {
/// Is this region named by the user?
}
}
- pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool {
+ pub fn bound_at_or_above_binder(&self, index: ty::DebruijnIndex) -> bool {
match *self {
ty::ReLateBound(debruijn, _) => debruijn >= index,
_ => false,
}
}
+    /// Returns the type of the `i`-th element of this tuple type, or `None`
+    /// if the tuple has fewer than `i + 1` elements.
+    ///
+    /// Panics when called on anything but a tuple type.
+    pub fn tuple_element_ty(&self, i: usize) -> Option<Ty<'tcx>> {
+        match self.kind() {
+            Tuple(substs) => substs.iter().nth(i).map(|field| field.expect_ty()),
+            _ => bug!("`tuple_element_ty` called on non-tuple: {:?}", self),
+        }
+    }
+
/// If the type contains variants, returns the valid range of variant indices.
//
// FIXME: This requires the optimized MIR in the case of generators.
}
/// Returns the type of the discriminant of this type.
- pub fn discriminant_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ pub fn discriminant_ty(&'tcx self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self.kind() {
ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx),
ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
- _ => {
- // This can only be `0`, for now, so `u8` will suffice.
- tcx.types.u8
+
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => {
+ let assoc_items =
+ tcx.associated_items(tcx.lang_items().discriminant_kind_trait().unwrap());
+ let discriminant_def_id = assoc_items.in_definition_order().next().unwrap().def_id;
+ tcx.mk_projection(discriminant_def_id, tcx.mk_substs([self.into()].iter()))
+ }
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Error(_)
+ | ty::Infer(IntVar(_) | FloatVar(_)) => tcx.types.u8,
+
+ ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`discriminant_ty` applied to unexpected type: {:?}", self)
}
}
}
[dependencies]
either = "1.5.0"
rustc_graphviz = { path = "../rustc_graphviz" }
+gsgdt = "0.1.2"
itertools = "0.9"
tracing = "0.1"
polonius-engine = "0.12.0"
use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::mir::{
traversal, Body, ClearCrossCrate, Local, Location, Mutability, Operand, Place, PlaceElem,
- PlaceRef,
+ PlaceRef, VarDebugInfoContents,
};
use rustc_middle::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
crate struct Upvar {
name: Symbol,
+ // FIXME(project-rfc-2229#8): This should use Place or something similar
var_hir_id: HirId,
/// If true, the capture is behind a reference.
let mut local_names = IndexVec::from_elem(None, &input_body.local_decls);
for var_debug_info in &input_body.var_debug_info {
- if let Some(local) = var_debug_info.place.as_local() {
- if let Some(prev_name) = local_names[local] {
- if var_debug_info.name != prev_name {
- span_bug!(
- var_debug_info.source_info.span,
- "local {:?} has many names (`{}` vs `{}`)",
- local,
- prev_name,
- var_debug_info.name
- );
+ if let VarDebugInfoContents::Place(place) = var_debug_info.value {
+ if let Some(local) = place.as_local() {
+ if let Some(prev_name) = local_names[local] {
+ if var_debug_info.name != prev_name {
+ span_bug!(
+ var_debug_info.source_info.span,
+ "local {:?} has many names (`{}` vs `{}`)",
+ local,
+ prev_name,
+ var_debug_info.name
+ );
+ }
}
+ local_names[local] = Some(var_debug_info.name);
}
- local_names[local] = Some(var_debug_info.name);
}
}
infcx.set_tainted_by_errors();
}
let upvars: Vec<_> = tables
- .closure_captures
- .get(&def.did.to_def_id())
- .into_iter()
- .flat_map(|v| v.values())
- .map(|upvar_id| {
- let var_hir_id = upvar_id.var_path.hir_id;
- let capture = tables.upvar_capture(*upvar_id);
+ .closure_min_captures_flattened(def.did.to_def_id())
+ .map(|captured_place| {
+ let var_hir_id = match captured_place.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => bug!("Expected upvar"),
+ };
+ let capture = captured_place.info.capture_kind;
let by_ref = match capture {
ty::UpvarCapture::ByValue(_) => false,
ty::UpvarCapture::ByRef(..) => true,
(&adt_def.variants[VariantIdx::new(0)], substs)
}
ty::Closure(_, substs) => {
- return match substs.as_closure().upvar_tys().nth(field.index()) {
+ return match substs
+ .as_closure()
+ .tupled_upvars_ty()
+ .tuple_element_ty(field.index())
+ {
Some(ty) => Ok(ty),
None => Err(FieldAccessError::OutOfRange {
field_count: substs.as_closure().upvar_tys().count(),
ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>),
Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+ Abort(String),
}
// The errors become `MachineStop` with plain strings when being raised.
Panic { msg, line, col, file } => {
write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
}
+ Abort(ref msg) => write!(f, "{}", msg),
}
}
}
) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
let tcx = *ecx.tcx;
+ assert!(
+ cid.promoted.is_some()
+ || matches!(
+ ecx.tcx.def_kind(cid.instance.def_id()),
+ DefKind::Const
+ | DefKind::Static
+ | DefKind::ConstParam
+ | DefKind::AnonConst
+ | DefKind::AssocConst
+ ),
+ "Unexpected DefKind: {:?}",
+ ecx.tcx.def_kind(cid.instance.def_id())
+ );
let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ret = ecx.allocate(layout, MemoryKind::Stack);
let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p));
trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);
- // Assert all args (if any) are zero-sized types; `eval_body_using_ecx` doesn't
- // make sense if the body is expecting nontrivial arguments.
- // (The alternative would be to use `eval_fn_call` with an args slice.)
- for arg in body.args_iter() {
- let decl = body.local_decls.get(arg).expect("arg missing from local_decls");
- let layout = ecx.layout_of(decl.ty.subst(tcx, cid.instance.substs))?;
- assert!(layout.is_zst())
- }
-
ecx.push_stack_frame(
cid.instance,
body,
use rustc_middle::mir;
-use rustc_middle::ty::layout::HasTyCtxt;
-use rustc_middle::ty::InstanceDef;
use rustc_middle::ty::{self, Ty};
use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use rustc_target::abi::{Align, Size};
use crate::interpret::{
- self, compile_time_machine, AllocId, Allocation, Frame, GlobalId, ImmTy, InterpCx,
- InterpResult, Memory, OpTy, PlaceTy, Pointer, Scalar,
+ self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, Memory,
+ OpTy, PlaceTy, Pointer, Scalar,
};
use super::error::*;
impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
- /// Evaluate a const function where all arguments (if any) are zero-sized types.
- /// The evaluation is memoized thanks to the query system.
- ///
- /// Returns `true` if the call has been evaluated.
- fn try_eval_const_fn_call(
- &mut self,
- instance: ty::Instance<'tcx>,
- ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
- args: &[OpTy<'tcx>],
- ) -> InterpResult<'tcx, bool> {
- trace!("try_eval_const_fn_call: {:?}", instance);
- // Because `#[track_caller]` adds an implicit non-ZST argument, we also cannot
- // perform this optimization on items tagged with it.
- if instance.def.requires_caller_location(self.tcx()) {
- return Ok(false);
- }
- // Only memoize instrinsics. This was added in #79594 while adding the `const_allocate` intrinsic.
- // We only memoize intrinsics because it would be unsound to memoize functions
- // which might interact with the heap.
- // Additionally, const_allocate intrinsic is impure and thus should not be memoized;
- // it will not be memoized because it has non-ZST args
- if !matches!(instance.def, InstanceDef::Intrinsic(_)) {
- return Ok(false);
- }
- // For the moment we only do this for functions which take no arguments
- // (or all arguments are ZSTs) so that we don't memoize too much.
- if args.iter().any(|a| !a.layout.is_zst()) {
- return Ok(false);
- }
-
- let dest = match ret {
- Some((dest, _)) => dest,
- // Don't memoize diverging function calls.
- None => return Ok(false),
- };
-
- let gid = GlobalId { instance, promoted: None };
-
- let place = self.eval_to_allocation(gid)?;
-
- self.copy_op(place.into(), dest)?;
-
- self.return_to_block(ret.map(|r| r.1))?;
- trace!("{:?}", self.dump_place(*dest));
- Ok(true)
- }
-
/// "Intercept" a function call to a panic-related function
/// because we have something special to do for it.
/// If this returns successfully (`Ok`), the function should just be evaluated normally.
ecx: &mut InterpCx<'mir, 'tcx, Self>,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
- ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
+ _ret: Option<(PlaceTy<'tcx>, mir::BasicBlock)>,
_unwind: Option<mir::BasicBlock>, // unwinding is not supported in consts
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
debug!("find_mir_or_eval_fn: {:?}", instance);
// Execution might have wandered off into other crates, so we cannot do a stability-
// sensitive check here. But we can at least rule out functions that are not const
// at all.
- if ecx.tcx.is_const_fn_raw(def.did) {
- // If this function is a `const fn` then under certain circumstances we
- // can evaluate call via the query system, thus memoizing all future calls.
- if ecx.try_eval_const_fn_call(instance, ret, args)? {
- return Ok(None);
- }
- } else {
+ if !ecx.tcx.is_const_fn_raw(def.did) {
// Some functions we support even if they are non-const -- but avoid testing
// that for const fn!
ecx.hook_panic_fn(instance, args)?;
Err(ConstEvalErrKind::AssertFailure(err).into())
}
+    /// Machine hook for aborting execution during const evaluation
+    /// (e.g. via the `abort` intrinsic); unconditionally reports the given
+    /// message as a `ConstEvalErrKind::Abort` error.
+    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+        Err(ConstEvalErrKind::Abort(msg).into())
+    }
+
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
Err(ConstEvalErrKind::NeedsRfc("pointer-to-integer cast".to_string()).into())
}
Ok(())
}
- /// Mark a storage as live, killing the previous content and returning it.
- /// Remember to deallocate that!
- pub fn storage_live(
- &mut self,
- local: mir::Local,
- ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
+ /// Mark a storage as live, killing the previous content.
+ pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
let local_val = LocalValue::Uninitialized;
- // StorageLive *always* kills the value that's currently stored.
- // However, we do not error if the variable already is live;
- // see <https://github.com/rust-lang/rust/issues/42371>.
- Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
+ // StorageLive expects the local to be dead, and marks it live.
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+ if !matches!(old, LocalValue::Dead) {
+ throw_ub_format!("StorageLive on a local that was already live");
+ }
+ Ok(())
}
- /// Returns the old value of the local.
- /// Remember to deallocate that!
- pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
+ pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
- mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
+ // It is entirely okay for this local to be already dead (at least that's how we currently generate MIR)
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+ self.deallocate_local(old)?;
+ Ok(())
}
- pub(super) fn deallocate_local(
- &mut self,
- local: LocalValue<M::PointerTag>,
- ) -> InterpResult<'tcx> {
- // FIXME: should we tell the user that there was a local which was never written to?
+ fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type.
ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
}
sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
- sym::size_of | sym::min_align_of | sym::pref_align_of => {
+ sym::min_align_of | sym::pref_align_of => {
let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
let n = match name {
sym::pref_align_of => layout.align.pref.bytes(),
sym::min_align_of => layout.align.abi.bytes(),
- sym::size_of => layout.size.bytes(),
_ => bug!(),
};
ConstValue::from_machine_usize(n, &tcx)
let (dest, ret) = match ret {
None => match intrinsic_name {
sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
- sym::unreachable => throw_ub!(Unreachable),
- sym::abort => M::abort(self)?,
+ sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
// Unsupported diverging intrinsic.
_ => return Ok(false),
},
sym::min_align_of
| sym::pref_align_of
| sym::needs_drop
- | sym::size_of
| sym::type_id
| sym::type_name
| sym::variant_count => {
let gid = GlobalId { instance, promoted: None };
let ty = match intrinsic_name {
- sym::min_align_of | sym::pref_align_of | sym::size_of | sym::variant_count => {
+ sym::min_align_of | sym::pref_align_of | sym::variant_count => {
self.tcx.types.usize
}
sym::needs_drop => self.tcx.types.bool,
let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
self.write_scalar(out_val, dest)?;
}
- sym::wrapping_add
- | sym::wrapping_sub
- | sym::wrapping_mul
- | sym::add_with_overflow
- | sym::sub_with_overflow
- | sym::mul_with_overflow => {
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
let lhs = self.read_immediate(args[0])?;
let rhs = self.read_immediate(args[1])?;
- let (bin_op, ignore_overflow) = match intrinsic_name {
- sym::wrapping_add => (BinOp::Add, true),
- sym::wrapping_sub => (BinOp::Sub, true),
- sym::wrapping_mul => (BinOp::Mul, true),
- sym::add_with_overflow => (BinOp::Add, false),
- sym::sub_with_overflow => (BinOp::Sub, false),
- sym::mul_with_overflow => (BinOp::Mul, false),
+ let bin_op = match intrinsic_name {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
_ => bug!("Already checked for int ops"),
};
- if ignore_overflow {
- self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
- } else {
- self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
- }
+ self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
}
sym::saturating_add | sym::saturating_sub => {
let l = self.read_immediate(args[0])?;
sym::transmute => {
self.copy_op_transmute(args[0], dest)?;
}
+ sym::assert_inhabited => {
+ let ty = instance.substs.type_at(0);
+ let layout = self.layout_of(ty)?;
+
+ if layout.abi.is_uninhabited() {
+ // The run-time intrinsic panics just to get a good backtrace; here we abort
+ // since there is no problem showing a backtrace even for aborts.
+ M::abort(
+ self,
+ format!(
+ "aborted execution: attempted to instantiate uninhabited type `{}`",
+ ty
+ ),
+ )?;
+ }
+ }
sym::simd_insert => {
let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
let elem = args[2];
use rustc_middle::mir;
use rustc_middle::ty::{self, Ty};
use rustc_span::def_id::DefId;
+use rustc_target::abi::Size;
use super::{
AllocId, Allocation, AllocationExtra, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult,
) -> InterpResult<'tcx>;
/// Called to evaluate `Abort` MIR terminator.
- fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx, !> {
+ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
throw_unsup_format!("aborting execution is not supported")
}
Ok(())
}
+ /// Called after initializing static memory using the interpreter.
+ fn after_static_mem_initialized(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _ptr: Pointer<Self::PointerTag>,
+ _size: Size,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
/// Executes a retagging operation
#[inline]
fn retag(
// Mark locals as alive
StorageLive(local) => {
- let old_val = self.storage_live(*local)?;
- self.deallocate_local(old_val)?;
+ self.storage_live(*local)?;
}
// Mark locals as dead
StorageDead(local) => {
- let old_val = self.storage_dead(*local);
- self.deallocate_local(old_val)?;
+ self.storage_dead(*local)?;
}
// No dynamic semantics attached to `FakeRead`; MIR
}
Abort => {
- M::abort(self)?;
+ M::abort(self, "the program aborted execution".to_owned())?;
}
// When we encounter Resume, we've finished unwinding
// If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in `rust_codegen_llvm/meth.rs`.
// /////////////////////////////////////////////////////////////////////////////////////////
- let vtable = self.memory.allocate(
- ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
- ptr_align,
- MemoryKind::Vtable,
- );
+ let vtable_size = ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap();
+ let vtable = self.memory.allocate(vtable_size, ptr_align, MemoryKind::Vtable);
let drop = Instance::resolve_drop_in_place(tcx, ty);
let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));
}
}
+ M::after_static_mem_initialized(self, vtable, vtable_size)?;
+
self.memory.mark_immutable(vtable.alloc_id)?;
assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());
#![feature(or_patterns)]
#![feature(once_cell)]
#![feature(control_flow_enum)]
+#![feature(str_split_once)]
#![recursion_limit = "256"]
#[macro_use]
self.check_mut_borrowing_layout_constrained_field(*place, context.is_mutating_use());
}
+ // Check for borrows to packed fields.
+ // `is_disaligned` already traverses the place to consider all projections after the last
+ // `Deref`, so this only needs to be called once at the top level.
if context.is_borrow() {
if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
self.require_unsafe(
}
}
- for (i, elem) in place.projection.iter().enumerate() {
- let proj_base = &place.projection[..i];
- if context.is_borrow() {
- if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
+ // Some checks below need the extra metainfo of the local declaration.
+ let decl = &self.body.local_decls[place.local];
+
+ // Check the base local: it might be an unsafe-to-access static. We only check derefs of the
+ // temporary holding the static pointer to avoid duplicate errors
+ // <https://github.com/rust-lang/rust/pull/78068#issuecomment-731753506>.
+ if decl.internal && place.projection.first() == Some(&ProjectionElem::Deref) {
+        // If the projection root is an artificial local that we introduced when
+ // desugaring `static`, give a more specific error message
+ // (avoid the general "raw pointer" clause below, that would only be confusing).
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+ if self.tcx.is_mutable_static(def_id) {
self.require_unsafe(
- UnsafetyViolationKind::BorrowPacked,
- UnsafetyViolationDetails::BorrowOfPackedField,
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::UseOfMutableStatic,
);
+ return;
+ } else if self.tcx.is_foreign_item(def_id) {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::UseOfExternStatic,
+ );
+ return;
}
}
- let source_info = self.source_info;
- if let [] = proj_base {
- let decl = &self.body.local_decls[place.local];
- if decl.internal {
- // If the projection root is an artifical local that we introduced when
- // desugaring `static`, give a more specific error message
- // (avoid the general "raw pointer" clause below, that would only be confusing).
- if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
- if self.tcx.is_mutable_static(def_id) {
- self.require_unsafe(
- UnsafetyViolationKind::General,
- UnsafetyViolationDetails::UseOfMutableStatic,
- );
- return;
- } else if self.tcx.is_foreign_item(def_id) {
- self.require_unsafe(
- UnsafetyViolationKind::General,
- UnsafetyViolationDetails::UseOfExternStatic,
- );
- return;
- }
- } else {
- // Internal locals are used in the `move_val_init` desugaring.
- // We want to check unsafety against the source info of the
- // desugaring, rather than the source info of the RHS.
- self.source_info = self.body.local_decls[place.local].source_info;
- }
+ }
+
+ // Check for raw pointer `Deref`.
+ for (base, proj) in place.iter_projections() {
+ if proj == ProjectionElem::Deref {
+ let source_info = self.source_info; // Backup source_info so we can restore it later.
+ if base.projection.is_empty() && decl.internal {
+ // Internal locals are used in the `move_val_init` desugaring.
+ // We want to check unsafety against the source info of the
+ // desugaring, rather than the source info of the RHS.
+ self.source_info = self.body.local_decls[place.local].source_info;
+ }
+ let base_ty = base.ty(self.body, self.tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ self.require_unsafe(
+ UnsafetyViolationKind::GeneralAndConstFn,
+ UnsafetyViolationDetails::DerefOfRawPointer,
+ )
}
+ self.source_info = source_info; // Restore backed-up source_info.
}
- let base_ty = Place::ty_from(place.local, proj_base, self.body, self.tcx).ty;
- match base_ty.kind() {
- ty::RawPtr(..) => self.require_unsafe(
- UnsafetyViolationKind::GeneralAndConstFn,
- UnsafetyViolationDetails::DerefOfRawPointer,
- ),
- ty::Adt(adt, _) => {
- if adt.is_union() {
- if context == PlaceContext::MutatingUse(MutatingUseContext::Store)
- || context == PlaceContext::MutatingUse(MutatingUseContext::Drop)
- || context == PlaceContext::MutatingUse(MutatingUseContext::AsmOutput)
- {
- let elem_ty = match elem {
- ProjectionElem::Field(_, ty) => ty,
- _ => span_bug!(
- self.source_info.span,
- "non-field projection {:?} from union?",
- place
- ),
- };
- if !elem_ty.is_copy_modulo_regions(
- self.tcx.at(self.source_info.span),
- self.param_env,
- ) {
- self.require_unsafe(
- UnsafetyViolationKind::GeneralAndConstFn,
- UnsafetyViolationDetails::AssignToNonCopyUnionField,
- )
- } else {
- // write to non-move union, safe
- }
- } else {
- self.require_unsafe(
- UnsafetyViolationKind::GeneralAndConstFn,
- UnsafetyViolationDetails::AccessToUnionField,
- )
- }
+ }
+
+ // Check for union fields. For this we traverse right-to-left, as the last `Deref` changes
+ // whether we *read* the union field or potentially *write* to it (if this place is being assigned to).
+ let mut saw_deref = false;
+ for (base, proj) in place.iter_projections().rev() {
+ if proj == ProjectionElem::Deref {
+ saw_deref = true;
+ continue;
+ }
+
+ let base_ty = base.ty(self.body, self.tcx).ty;
+ if base_ty.ty_adt_def().map_or(false, |adt| adt.is_union()) {
+ // If we did not hit a `Deref` yet and the overall place use is an assignment, the
+ // rules are different.
+ let assign_to_field = !saw_deref
+ && matches!(
+ context,
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Drop
+ | MutatingUseContext::AsmOutput
+ )
+ );
+ // If this is just an assignment, determine if the assigned type needs dropping.
+ if assign_to_field {
+ // We have to check the actual type of the assignment, as that determines if the
+ // old value is being dropped.
+ let assigned_ty = place.ty(&self.body.local_decls, self.tcx).ty;
+ // To avoid semver hazard, we only consider `Copy` and `ManuallyDrop` non-dropping.
+ let manually_drop = assigned_ty
+ .ty_adt_def()
+ .map_or(false, |adt_def| adt_def.is_manually_drop());
+ let nodrop = manually_drop
+ || assigned_ty.is_copy_modulo_regions(
+ self.tcx.at(self.source_info.span),
+ self.param_env,
+ );
+ if !nodrop {
+ self.require_unsafe(
+ UnsafetyViolationKind::GeneralAndConstFn,
+ UnsafetyViolationDetails::AssignToDroppingUnionField,
+ );
+ } else {
+ // write to non-drop union field, safe
}
+ } else {
+ self.require_unsafe(
+ UnsafetyViolationKind::GeneralAndConstFn,
+ UnsafetyViolationDetails::AccessToUnionField,
+ )
}
- _ => {}
}
- self.source_info = source_info;
}
}
}
--- /dev/null
+//! Finds locals which are assigned once to a const and unused except for debuginfo and converts
+//! their debuginfo to use the const directly, allowing the local to be removed.
+
+use rustc_middle::{
+ mir::{
+ visit::{PlaceContext, Visitor},
+ Body, Constant, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
+ },
+ ty::TyCtxt,
+};
+
+use crate::transform::MirPass;
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+
+/// MIR pass that rewrites debuginfo for locals that are assigned a constant
+/// exactly once, replacing the place-based debuginfo with the constant itself.
+/// This lets a later pass (`SimplifyLocals`) remove the now-unused local.
+pub struct ConstDebugInfo;
+
+impl<'tcx> MirPass<'tcx> for ConstDebugInfo {
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // Gated behind -Zunsound-mir-opts: this transform is still considered unsound.
+        if !tcx.sess.opts.debugging_opts.unsound_mir_opts {
+            return;
+        }
+
+        trace!("running ConstDebugInfo on {:?}", body.source);
+
+        for (local, constant) in find_optimization_oportunities(body) {
+            for debuginfo in &mut body.var_debug_info {
+                // Only rewrite debuginfo that refers to the whole local
+                // (no projections), so the constant is an exact substitute.
+                if let VarDebugInfoContents::Place(p) = debuginfo.value {
+                    if p.local == local && p.projection.is_empty() {
+                        trace!(
+                            "changing debug info for {:?} from place {:?} to constant {:?}",
+                            debuginfo.name,
+                            p,
+                            constant
+                        );
+                        debuginfo.value = VarDebugInfoContents::Const(constant);
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Collects, per local, how often it is mutated and where it was last assigned.
+struct LocalUseVisitor {
+    /// Number of mutating uses of each local, saturating at `u8::MAX`.
+    local_mutating_uses: IndexVec<Local, u8>,
+    /// Location of the (last seen) assignment to each local, if any.
+    local_assignment_locations: IndexVec<Local, Option<Location>>,
+}
+
+/// Finds locals that are eligible for the debuginfo-to-constant rewrite:
+/// each returned `(local, constant)` pair is a local that (a) appears in
+/// debuginfo as a whole place, (b) has exactly one mutating use, and (c) that
+/// single use is an assignment of a constant.
+// FIXME: the name has a typo ("oportunities" -> "opportunities"); kept as-is
+// because the call site lives elsewhere in this file.
+fn find_optimization_oportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, Constant<'tcx>)> {
+    let mut visitor = LocalUseVisitor {
+        local_mutating_uses: IndexVec::from_elem(0, &body.local_decls),
+        local_assignment_locations: IndexVec::from_elem(None, &body.local_decls),
+    };
+
+    visitor.visit_body(body);
+
+    // Only locals that some debuginfo entry refers to (without projections)
+    // are worth rewriting.
+    let mut locals_to_debuginfo = BitSet::new_empty(body.local_decls.len());
+    for debuginfo in &body.var_debug_info {
+        if let VarDebugInfoContents::Place(p) = debuginfo.value {
+            if let Some(l) = p.as_local() {
+                locals_to_debuginfo.insert(l);
+            }
+        }
+    }
+
+    let mut eligible_locals = Vec::new();
+    for (local, mutating_uses) in visitor.local_mutating_uses.drain_enumerated(..) {
+        // Exactly one mutating use means the single assignment is the only write.
+        if mutating_uses != 1 || !locals_to_debuginfo.contains(local) {
+            continue;
+        }
+
+        if let Some(location) = visitor.local_assignment_locations[local] {
+            let bb = &body[location.block];
+
+            // The value is assigned as the result of a call, not a constant
+            if bb.statements.len() == location.statement_index {
+                continue;
+            }
+
+            if let StatementKind::Assign(box (p, Rvalue::Use(Operand::Constant(box c)))) =
+                &bb.statements[location.statement_index].kind
+            {
+                if let Some(local) = p.as_local() {
+                    eligible_locals.push((local, *c));
+                }
+            }
+        }
+    }
+
+    eligible_locals
+}
+
+impl<'tcx> Visitor<'tcx> for LocalUseVisitor {
+    fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
+        if context.is_mutating_use() {
+            // Saturating add: we only care about "exactly one" vs. "more than
+            // one", so capping at u8::MAX is harmless.
+            self.local_mutating_uses[*local] = self.local_mutating_uses[*local].saturating_add(1);
+
+            // Remember where the (last) assignment happened so the caller can
+            // inspect that statement.
+            if context.is_place_assignment() {
+                self.local_assignment_locations[*local] = Some(location);
+            }
+        }
+    }
+}
if let Ok(env_debug_options) = std::env::var(RUSTC_COVERAGE_DEBUG_OPTIONS) {
for setting_str in env_debug_options.replace(" ", "").replace("-", "_").split(',') {
- let mut setting = setting_str.splitn(2, '=');
- match setting.next() {
- Some(option) if option == "allow_unused_expressions" => {
- allow_unused_expressions = bool_option_val(option, setting.next());
+ let (option, value) = match setting_str.split_once('=') {
+ None => (setting_str, None),
+ Some((k, v)) => (k, Some(v)),
+ };
+ match option {
+ "allow_unused_expressions" => {
+ allow_unused_expressions = bool_option_val(option, value);
debug!(
"{} env option `allow_unused_expressions` is set to {}",
RUSTC_COVERAGE_DEBUG_OPTIONS, allow_unused_expressions
);
}
- Some(option) if option == "counter_format" => {
- if let Some(strval) = setting.next() {
- counter_format = counter_format_option_val(strval);
- debug!(
- "{} env option `counter_format` is set to {:?}",
- RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
- );
- } else {
- bug!(
- "`{}` option in environment variable {} requires one or more \
- plus-separated choices (a non-empty subset of \
- `id+block+operation`)",
- option,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- );
- }
+ "counter_format" => {
+ match value {
+ None => {
+ bug!(
+ "`{}` option in environment variable {} requires one or more \
+ plus-separated choices (a non-empty subset of \
+ `id+block+operation`)",
+ option,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ );
+ }
+ Some(val) => {
+ counter_format = counter_format_option_val(val);
+ debug!(
+ "{} env option `counter_format` is set to {:?}",
+ RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
+ );
+ }
+ };
}
- Some("") => {}
- Some(invalid) => bug!(
- "Unsupported setting `{}` in environment variable {}",
- invalid,
- RUSTC_COVERAGE_DEBUG_OPTIONS
- ),
- None => {}
- }
+ _ => {
+ bug!(
+ "Unsupported setting `{}` in environment variable {}",
+ option,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ )
+ }
+ };
}
}
),
};
counters
- .insert(id.into(), DebugCounter::new(counter_kind.clone(), some_block_label))
+ .insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
.expect_none(
"attempt to add the same counter_kind to DebugCounters more than once",
);
if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
let counters = self.some_counters.as_ref().unwrap();
if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
- counters.get(&id.into())
+ counters.get(&id)
{
return if counter_format.id {
format!("{}#{}", block_label, id.index())
// Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
// equivalents. Note that since the BasicCoverageBlock graph has been fully simplified, the
- // each predecessor of a BCB leader_bb should be in a unique BCB, and each successor of a
- // BCB last_bb should be in its own unique BCB. Therefore, collecting the BCBs using
- // `bb_to_bcb` should work without requiring a deduplication step.
+ // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
+ // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
+ // de-duplication is required. This is done without reordering the successors.
+ let bcbs_len = bcbs.len();
+ let mut seen = IndexVec::from_elem_n(false, bcbs_len);
let successors = IndexVec::from_fn_n(
|bcb| {
+ for b in seen.iter_mut() {
+ *b = false;
+ }
let bcb_data = &bcbs[bcb];
- let bcb_successors =
+ let mut bcb_successors = Vec::new();
+ for successor in
bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
.filter_map(|&successor_bb| bb_to_bcb[successor_bb])
- .collect::<Vec<_>>();
- debug_assert!({
- let mut sorted = bcb_successors.clone();
- sorted.sort_unstable();
- let initial_len = sorted.len();
- sorted.dedup();
- sorted.len() == initial_len
- });
+ {
+ if !seen[successor] {
+ seen[successor] = true;
+ bcb_successors.push(successor);
+ }
+ }
bcb_successors
},
bcbs.len(),
return;
}
+ match mir_body.basic_blocks()[mir::START_BLOCK].terminator().kind {
+ TerminatorKind::Unreachable => {
+ trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
+ return;
+ }
+ _ => {}
+ }
+
trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
Instrumentor::new(&self.name(), tcx, mir_body).inject_counters();
trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
inject_statement(
self.mir_body,
counter_kind,
- self.bcb_last_bb(bcb),
+ self.bcb_leader_bb(bcb),
Some(make_code_region(file_name, &self.source_file, span, body_span)),
);
}
code_region: some_code_region,
}),
};
- data.statements.push(statement);
+ data.statements.insert(0, statement);
}
// Non-code expressions are injected into the coverage map, without generating executable code.
[lib]
proc-macro = true
doctest = false
-
-[dependencies]
-proc-macro2 = "1"
let discr = self.find_switch_discriminant_info(bb, switch)?;
// go through each target, finding a discriminant read, and a switch
- let results = discr.targets_with_values.iter().map(|(value, target)| {
- self.find_discriminant_switch_pairing(&discr, target.clone(), value.clone())
- });
+ let results = discr
+ .targets_with_values
+ .iter()
+ .map(|(value, target)| self.find_discriminant_switch_pairing(&discr, *target, *value));
// if the optimization did not apply for one of the targets, then abort
if results.clone().any(|x| x.is_none()) || results.len() == 0 {
return None;
}
+ // when the second place is a projection of the first one, it's not safe to calculate their discriminant values sequentially.
+ // for example, this should not be optimized:
+ //
+ // ```rust
+ // enum E<'a> { Empty, Some(&'a E<'a>), }
+ // let Some(Some(_)) = e;
+ // ```
+ //
+ // ```mir
+ // bb0: {
+ // _2 = discriminant(*_1)
+ // switchInt(_2) -> [...]
+ // }
+ // bb1: {
+ // _3 = discriminant(*(((*_1) as Some).0: &E))
+ // switchInt(_3) -> [...]
+ // }
+ // ```
+ let discr_place = discr_info.place_of_adt_discr_read;
+ let this_discr_place = this_bb_discr_info.place_of_adt_discr_read;
+ if discr_place.local == this_discr_place.local
+ && this_discr_place.projection.starts_with(discr_place.projection)
+ {
+ trace!("NO: one target is the projection of another");
+ return None;
+ }
+
// if we reach this point, the optimization applies, and we should be able to optimize this case
// store the info that is needed to apply the optimization
return;
}
- if tcx.sess.opts.debugging_opts.instrument_coverage {
- // The current implementation of source code coverage injects code region counters
- // into the MIR, and assumes a 1-to-1 correspondence between MIR and source-code-
- // based function.
- debug!("function inlining is disabled when compiling with `instrument_coverage`");
- return;
- }
-
if inline(tcx, body) {
debug!("running simplify cfg on {:?}", body.source);
CfgSimplifier::new(body).simplify();
terminator.kind = TerminatorKind::Goto { target };
}
}
+ sym::discriminant_value => {
+ if let (Some((destination, target)), Some(arg)) =
+ (*destination, args[0].place())
+ {
+ let arg = tcx.mk_place_deref(arg);
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(box (
+ destination,
+ Rvalue::Discriminant(arg),
+ )),
+ });
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ }
_ => {}
}
}
pub mod check_packed_ref;
pub mod check_unsafety;
pub mod cleanup_post_borrowck;
+pub mod const_debuginfo;
pub mod const_prop;
pub mod coverage;
pub mod deaggregator;
// `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
// but before optimizations begin.
&add_retag::AddRetag,
+ &lower_intrinsics::LowerIntrinsics,
&simplify::SimplifyCfg::new("elaborate-drops"),
// `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
// and it can help optimizations.
// The main optimizations that we do on MIR.
let optimizations: &[&dyn MirPass<'tcx>] = &[
- &lower_intrinsics::LowerIntrinsics,
&remove_unneeded_drops::RemoveUnneededDrops,
&match_branches::MatchBranchSimplification,
// inst combine is after MatchBranchSimplification to clean up Ne(_1, false)
&remove_noop_landing_pads::RemoveNoopLandingPads,
&simplify::SimplifyCfg::new("final"),
&nrvo::RenameReturnPlace,
+ &const_debuginfo::ConstDebugInfo,
&simplify::SimplifyLocals,
&multiple_return_terminators::MultipleReturnTerminators,
];
tmp_assigned_vars.insert(*r);
}
- let dbg_info_to_adjust: Vec<_> =
- debug_info
- .iter()
- .enumerate()
- .filter_map(|(i, var_info)| {
- if tmp_assigned_vars.contains(var_info.place.local) { Some(i) } else { None }
- })
- .collect();
+ let dbg_info_to_adjust: Vec<_> = debug_info
+ .iter()
+ .enumerate()
+ .filter_map(|(i, var_info)| {
+ if let VarDebugInfoContents::Place(p) = var_info.value {
+ if tmp_assigned_vars.contains(p.local) {
+ return Some(i);
+ }
+ }
+
+ None
+ })
+ .collect();
Some(ArmIdentityInfo {
local_temp_0: local_tmp_s0,
// Check that debug info only points to full Locals and not projections.
for dbg_idx in &opt_info.dbg_info_to_adjust {
let dbg_info = &var_debug_info[*dbg_idx];
- if !dbg_info.place.projection.is_empty() {
- trace!("NO: debug info for {:?} had a projection {:?}", dbg_info.name, dbg_info.place);
- return false;
+ if let VarDebugInfoContents::Place(p) = dbg_info.value {
+ if !p.projection.is_empty() {
+ trace!("NO: debug info for {:?} had a projection {:?}", dbg_info.name, p);
+ return false;
+ }
}
}
// Fix the debug info to point to the right local
for dbg_index in opt_info.dbg_info_to_adjust {
let dbg_info = &mut debug_info[dbg_index];
- assert!(dbg_info.place.projection.is_empty());
- dbg_info.place.local = opt_info.local_0;
- dbg_info.place.projection = opt_info.dbg_projection;
+ assert!(
+ matches!(dbg_info.value, VarDebugInfoContents::Place(_)),
+ "value was not a Place"
+ );
+ if let VarDebugInfoContents::Place(p) = &mut dbg_info.value {
+ assert!(p.projection.is_empty());
+ p.local = opt_info.local_0;
+ p.projection = opt_info.dbg_projection;
+ }
}
trace!("block is now {:?}", bb.statements);
--- /dev/null
+use gsgdt::{Edge, Graph, Node, NodeStyle};
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Convert an MIR function into a gsgdt Graph.
+///
+/// Builds one gsgdt `Node` per basic block and one `Edge` per terminator
+/// successor, labelled with the terminator's successor labels.
+pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Graph {
+    let def_id = body.source.def_id();
+    let def_name = graphviz_safe_def_name(def_id);
+    let graph_name = format!("Mir_{}", def_name);
+    let dark_mode = tcx.sess.opts.debugging_opts.graphviz_dark_mode;
+
+    // Nodes
+    let nodes: Vec<Node> = body
+        .basic_blocks()
+        .iter_enumerated()
+        .map(|(block, _)| bb_to_graph_node(block, body, dark_mode))
+        .collect();
+
+    // Edges: one per (source block, successor) pair, labelled by the
+    // terminator's successor labels (which are yielded in the same order).
+    let mut edges = Vec::new();
+    for (source, _) in body.basic_blocks().iter_enumerated() {
+        let terminator = body[source].terminator();
+        let labels = terminator.kind.fmt_successor_labels();
+
+        for (&target, label) in terminator.successors().zip(labels) {
+            let src = node(def_id, source);
+            let trg = node(def_id, target);
+            edges.push(Edge::new(src, trg, label.to_string()));
+        }
+    }
+
+    Graph::new(graph_name, nodes, edges)
+}
+
+/// Build the gsgdt `Node` for a single basic block: its statements (rendered
+/// via `Debug`), its terminator head, and a title styled by cleanup status.
+fn bb_to_graph_node(block: BasicBlock, body: &Body<'_>, dark_mode: bool) -> Node {
+    let def_id = body.source.def_id();
+    let data = &body[block];
+    let label = node(def_id, block);
+
+    // Cleanup blocks get a blue title background, normal blocks a gray one;
+    // colors differ between dark and light graphviz themes.
+    let (title, bgcolor) = if data.is_cleanup {
+        let color = if dark_mode { "royalblue" } else { "lightblue" };
+        (format!("{} (cleanup)", block.index()), color)
+    } else {
+        let color = if dark_mode { "dimgray" } else { "gray" };
+        (format!("{}", block.index()), color)
+    };
+
+    let style = NodeStyle { title_bg: Some(bgcolor.to_owned()), ..Default::default() };
+    let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{:?}", x)).collect();
+
+    // add the terminator to the stmts, gsgdt can print it out separately
+    let mut terminator_head = String::new();
+    data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+    stmts.push(terminator_head);
+
+    Node::new(stmts, label, title, style)
+}
+
+// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
+// it does not have to be user friendly.
+pub fn graphviz_safe_def_name(def_id: DefId) -> String {
+    // Combine the crate index and the def index into one identifier-safe name.
+    let krate = def_id.krate.index();
+    let index = def_id.index.index();
+    format!("{}_{}", krate, index)
+}
+
+/// Graphviz node id for `block` within the function identified by `def_id`.
+fn node(def_id: DefId, block: BasicBlock) -> String {
+    let fn_name = graphviz_safe_def_name(def_id);
+    format!("bb{}__{}", block.index(), fn_name)
+}
+use gsgdt::GraphvizSettings;
use rustc_graphviz as dot;
use rustc_hir::def_id::DefId;
-use rustc_index::vec::Idx;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
use std::fmt::Debug;
use std::io::{self, Write};
+use super::generic_graph::mir_fn_to_generic_graph;
use super::pretty::dump_mir_def_ids;
/// Write a graphviz DOT graph of a list of MIRs.
Ok(())
}
-// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
-// it does not have to be user friendly.
-pub fn graphviz_safe_def_name(def_id: DefId) -> String {
- format!("{}_{}", def_id.krate.index(), def_id.index.index(),)
-}
-
/// Write a graphviz DOT graph of the MIR.
pub fn write_mir_fn_graphviz<'tcx, W>(
tcx: TyCtxt<'tcx>,
where
W: Write,
{
- let def_id = body.source.def_id();
- let kind = if subgraph { "subgraph" } else { "digraph" };
- let cluster = if subgraph { "cluster_" } else { "" }; // Prints a border around MIR
- let def_name = graphviz_safe_def_name(def_id);
- writeln!(w, "{} {}Mir_{} {{", kind, cluster, def_name)?;
-
// Global graph properties
let font = format!(r#"fontname="{}""#, tcx.sess.opts.debugging_opts.graphviz_font);
let mut graph_attrs = vec![&font[..]];
content_attrs.push(r#"fontcolor="white""#);
}
- writeln!(w, r#" graph [{}];"#, graph_attrs.join(" "))?;
- let content_attrs_str = content_attrs.join(" ");
- writeln!(w, r#" node [{}];"#, content_attrs_str)?;
- writeln!(w, r#" edge [{}];"#, content_attrs_str)?;
-
// Graph label
- write_graph_label(tcx, body, w)?;
-
- // Nodes
- for (block, _) in body.basic_blocks().iter_enumerated() {
- write_node(block, body, dark_mode, w)?;
- }
-
- // Edges
- for (source, _) in body.basic_blocks().iter_enumerated() {
- write_edges(source, body, w)?;
- }
- writeln!(w, "}}")
-}
-
-/// Write a graphviz HTML-styled label for the given basic block, with
-/// all necessary escaping already performed. (This is suitable for
-/// emitting directly, as is done in this module, or for use with the
-/// LabelText::HtmlStr from librustc_graphviz.)
-///
-/// `init` and `fini` are callbacks for emitting additional rows of
-/// data (using HTML enclosed with `<tr>` in the emitted text).
-pub fn write_node_label<W: Write, INIT, FINI>(
- block: BasicBlock,
- body: &Body<'_>,
- dark_mode: bool,
- w: &mut W,
- num_cols: u32,
- init: INIT,
- fini: FINI,
-) -> io::Result<()>
-where
- INIT: Fn(&mut W) -> io::Result<()>,
- FINI: Fn(&mut W) -> io::Result<()>,
-{
- let data = &body[block];
-
- write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
-
- // Basic block number at the top.
- let (blk, bgcolor) = if data.is_cleanup {
- let color = if dark_mode { "royalblue" } else { "lightblue" };
- (format!("{} (cleanup)", block.index()), color)
- } else {
- let color = if dark_mode { "dimgray" } else { "gray" };
- (format!("{}", block.index()), color)
+ let mut label = String::from("");
+ // FIXME: remove this unwrap
+ write_graph_label(tcx, body, &mut label).unwrap();
+ let g = mir_fn_to_generic_graph(tcx, body);
+ let settings = GraphvizSettings {
+ graph_attrs: Some(graph_attrs.join(" ")),
+ node_attrs: Some(content_attrs.join(" ")),
+ edge_attrs: Some(content_attrs.join(" ")),
+ graph_label: Some(label),
};
- write!(
- w,
- r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
- attrs = r#"align="center""#,
- colspan = num_cols,
- blk = blk,
- bgcolor = bgcolor
- )?;
-
- init(w)?;
-
- // List of statements in the middle.
- if !data.statements.is_empty() {
- write!(w, r#"<tr><td align="left" balign="left">"#)?;
- for statement in &data.statements {
- write!(w, "{}<br/>", escape(statement))?;
- }
- write!(w, "</td></tr>")?;
- }
-
- // Terminator head at the bottom, not including the list of successor blocks. Those will be
- // displayed as labels on the edges between blocks.
- let mut terminator_head = String::new();
- data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
- write!(w, r#"<tr><td align="left">{}</td></tr>"#, dot::escape_html(&terminator_head))?;
-
- fini(w)?;
-
- // Close the table
- write!(w, "</table>")
-}
-
-/// Write a graphviz DOT node for the given basic block.
-fn write_node<W: Write>(
- block: BasicBlock,
- body: &Body<'_>,
- dark_mode: bool,
- w: &mut W,
-) -> io::Result<()> {
- let def_id = body.source.def_id();
- // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
- write!(w, r#" {} [shape="none", label=<"#, node(def_id, block))?;
- write_node_label(block, body, dark_mode, w, 1, |_| Ok(()), |_| Ok(()))?;
- // Close the node label and the node itself.
- writeln!(w, ">];")
-}
-
-/// Write graphviz DOT edges with labels between the given basic block and all of its successors.
-fn write_edges<W: Write>(source: BasicBlock, body: &Body<'_>, w: &mut W) -> io::Result<()> {
- let def_id = body.source.def_id();
- let terminator = body[source].terminator();
- let labels = terminator.kind.fmt_successor_labels();
-
- for (&target, label) in terminator.successors().zip(labels) {
- let src = node(def_id, source);
- let trg = node(def_id, target);
- writeln!(w, r#" {} -> {} [label="{}"];"#, src, trg, label)?;
- }
-
- Ok(())
+ g.to_dot(w, &settings, subgraph)
}
/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
/// all the variables and temporaries.
-fn write_graph_label<'tcx, W: Write>(
+fn write_graph_label<'tcx, W: std::fmt::Write>(
tcx: TyCtxt<'tcx>,
body: &Body<'_>,
w: &mut W,
-) -> io::Result<()> {
+) -> std::fmt::Result {
let def_id = body.source.def_id();
- write!(w, " label=<fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
+ write!(w, "fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
// fn argument types.
for (i, arg) in body.args_iter().enumerate() {
w,
r#"debug {} => {};<br align="left"/>"#,
var_debug_info.name,
- escape(&var_debug_info.place)
+ escape(&var_debug_info.value),
)?;
}
- writeln!(w, ">;")
-}
-
-fn node(def_id: DefId, block: BasicBlock) -> String {
- format!("bb{}__{}", block.index(), graphviz_safe_def_name(def_id))
+ Ok(())
}
fn escape<T: Debug>(t: &T) -> String {
mod alignment;
pub mod collect_writes;
mod find_self_call;
+mod generic_graph;
pub(crate) mod generic_graphviz;
mod graphviz;
pub(crate) mod pretty;
pub use self::aggregate::expand_aggregate;
pub use self::alignment::is_disaligned;
pub use self::find_self_call::find_self_call;
-pub use self::graphviz::write_node_label as write_graphviz_node_label;
-pub use self::graphviz::{graphviz_safe_def_name, write_mir_graphviz};
+pub use self::generic_graph::graphviz_safe_def_name;
+pub use self::graphviz::write_mir_graphviz;
pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere};
let indented_debug_info = format!(
"{0:1$}debug {2} => {3:?};",
- INDENT, indent, var_debug_info.name, var_debug_info.place,
+ INDENT, indent, var_debug_info.name, var_debug_info.value,
);
writeln!(
use crate::build::ForGuard::{OutsideGuard, RefWithinGuard};
use crate::build::{BlockAnd, BlockAndExtension, Builder};
use crate::thir::*;
+use rustc_hir::def_id::DefId;
+use rustc_hir::HirId;
use rustc_middle::middle::region;
+use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
use rustc_middle::mir::AssertKind::BoundsCheck;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt, Variance};
use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
use rustc_index::vec::Idx;
+/// The "outermost" place that holds this value.
+#[derive(Copy, Clone)]
+crate enum PlaceBase {
+ /// Denotes the start of a `Place`.
+ Local(Local),
+
+ /// When building place for an expression within a closure, the place might start off a
+ /// captured path. When `capture_disjoint_fields` is enabled, we might not know the capture
+ /// index (within the desugared closure) of the captured path until most of the projections
+ /// are applied. We use `PlaceBase::Upvar` to keep track of the root variable off of which the
+ /// captured path starts, the closure the capture belongs to and the trait the closure
+ /// implements.
+ ///
+ /// Once we have figured out the capture index, we can convert the place builder to start from
+ /// `PlaceBase::Local`.
+ ///
+ /// Consider the following example
+ /// ```rust
+ /// let t = (10, (10, (10, 10)));
+ ///
+ /// let c = || {
+ /// println!("{}", t.0.0.0);
+ /// };
+ /// ```
+ /// Here the THIR expression for `t.0.0.0` will be something like
+ ///
+ /// ```
+ /// * Field(0)
+ /// * Field(0)
+ /// * Field(0)
+ /// * UpvarRef(t)
+ /// ```
+ ///
+ /// When `capture_disjoint_fields` is enabled, `t.0.0.0` is captured and we won't be able to
+ /// figure out that it is captured until all the `Field` projections are applied.
+ Upvar {
+ /// HirId of the upvar
+ var_hir_id: HirId,
+ /// DefId of the closure
+ closure_def_id: DefId,
+ /// The trait closure implements, `Fn`, `FnMut`, `FnOnce`
+ closure_kind: ty::ClosureKind },
+}
+
/// `PlaceBuilder` is used to create places during MIR construction. It allows you to "build up" a
/// place by pushing more and more projections onto the end, and then convert the final set into a
/// place using the `into_place` method.
/// This is used internally when building a place for an expression like `a.b.c`. The fields `b`
/// and `c` can be progressively pushed onto the place builder that is created when converting `a`.
#[derive(Clone)]
-struct PlaceBuilder<'tcx> {
- local: Local,
+crate struct PlaceBuilder<'tcx> {
+ base: PlaceBase,
projection: Vec<PlaceElem<'tcx>>,
}
+/// Given a list of MIR projections, convert them to list of HIR ProjectionKind.
+/// The projections are truncated to represent a path that might be captured by a
+/// closure/generator. This implies the vector returned from this function doesn't contain
+/// ProjectionElems `Downcast`, `ConstantIndex`, `Index`, or `Subslice` because those will never be
+/// part of a path that is captured by a closure. We stop applying projections once we see the first
+/// projection that isn't captured by a closure.
+fn convert_to_hir_projections_and_truncate_for_capture<'tcx>(
+ mir_projections: &Vec<PlaceElem<'tcx>>,
+) -> Vec<HirProjectionKind> {
+
+ let mut hir_projections = Vec::new();
+
+ for mir_projection in mir_projections {
+ let hir_projection = match mir_projection {
+ ProjectionElem::Deref => HirProjectionKind::Deref,
+ ProjectionElem::Field(field, _) => {
+            // We will never encounter this for multivariant enums,
+ // read the comment for `Downcast`.
+ HirProjectionKind::Field(field.index() as u32, VariantIdx::new(0))
+ },
+ ProjectionElem::Downcast(..) => {
+                // These projections exist only for enums that have
+                // multiple variants. Since such enums are captured
+                // completely, we can stop here.
+ break
+ },
+ ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {
+ // We don't capture array-access projections.
+ // We can stop here as arrays are captured completely.
+ break
+ },
+ };
+
+ hir_projections.push(hir_projection);
+ }
+
+ hir_projections
+}
+
+/// Return true if the `proj_possible_ancestor` represents an ancestor path
+/// to `proj_capture` or `proj_possible_ancestor` is same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of
+/// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`.
+/// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections here, this function will return `bar.x` as a valid
+///    ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+///    lists are being applied to the same root variable.
+fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &Vec<HirProjectionKind>,
+ proj_capture: &Vec<HirProjectionKind>,
+) -> bool {
+ // We want to make sure `is_ancestor_or_same_capture("x.0.0", "x.0")` to return false.
+ // Therefore we can't just check if all projections are same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ proj_possible_ancestor.iter().zip(proj_capture).all(|(a, b)| a == b)
+}
+
+/// Computes the index of a capture within the desugared closure provided the closure's
+/// `closure_min_captures` and the capture's index of the capture in the
+/// `ty::MinCaptureList` of the root variable `var_hir_id`.
+fn compute_capture_idx<'tcx>(
+ closure_min_captures: &ty::RootVariableMinCaptureList<'tcx>,
+ var_hir_id: HirId,
+ root_var_idx: usize,
+) -> usize {
+ let mut res = 0;
+ for (var_id, capture_list) in closure_min_captures {
+ if *var_id == var_hir_id {
+ res += root_var_idx;
+ break;
+ } else {
+ res += capture_list.len();
+ }
+ }
+
+ res
+}
+
+/// Given a closure, returns the index of a capture within the desugared closure struct and the
+/// `ty::CapturedPlace` which is the ancestor of the Place represented using the `var_hir_id`
+/// and `projection`.
+///
+/// Note there will be at most one ancestor for any given Place.
+///
+/// Returns None, when the ancestor is not found.
+fn find_capture_matching_projections<'a, 'tcx>(
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ var_hir_id: HirId,
+ closure_def_id: DefId,
+ projections: &Vec<PlaceElem<'tcx>>,
+) -> Option<(usize, &'a ty::CapturedPlace<'tcx>)> {
+ let closure_min_captures = typeck_results.closure_min_captures.get(&closure_def_id)?;
+ let root_variable_min_captures = closure_min_captures.get(&var_hir_id)?;
+
+ let hir_projections = convert_to_hir_projections_and_truncate_for_capture(projections);
+
+ // If an ancestor is found, `idx` is the index within the list of captured places
+ // for root variable `var_hir_id` and `capture` is the `ty::CapturedPlace` itself.
+ let (idx, capture) = root_variable_min_captures.iter().enumerate().find(|(_, capture)| {
+ let possible_ancestor_proj_kinds =
+ capture.place.projections.iter().map(|proj| proj.kind).collect();
+ is_ancestor_or_same_capture(&possible_ancestor_proj_kinds, &hir_projections)
+ })?;
+
+    // Convert index to be from the perspective of the entire closure_min_captures map
+ // instead of just the root variable capture list
+ Some((compute_capture_idx(closure_min_captures, var_hir_id, idx), capture))
+}
+
+/// Takes a PlaceBuilder and resolves the upvar (if any) within it, so that the
+/// `PlaceBuilder` now starts from `PlaceBase::Local`.
+///
+/// Returns a Result with the error being the HirId of the Upvar that was not found.
+fn to_upvars_resolved_place_builder<'a, 'tcx>(
+ from_builder: PlaceBuilder<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+) -> Result<PlaceBuilder<'tcx>, HirId> {
+ match from_builder.base {
+ PlaceBase::Local(_) => Ok(from_builder),
+ PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind } => {
+ // Captures are represented using fields inside a structure.
+ // This represents accessing self in the closure structure
+ let mut upvar_resolved_place_builder = PlaceBuilder::from(Local::new(1));
+ match closure_kind {
+ ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
+ upvar_resolved_place_builder = upvar_resolved_place_builder.deref();
+ }
+ ty::ClosureKind::FnOnce => {}
+ }
+
+ let (capture_index, capture) =
+ if let Some(capture_details) = find_capture_matching_projections(
+ typeck_results,
+ var_hir_id,
+ closure_def_id,
+ &from_builder.projection,
+ ) {
+ capture_details
+ } else {
+ if !tcx.features().capture_disjoint_fields {
+ bug!(
+ "No associated capture found for {:?}[{:#?}] even though \
+ capture_disjoint_fields isn't enabled",
+ var_hir_id,
+ from_builder.projection
+ )
+ } else {
+ // FIXME(project-rfc-2229#24): Handle this case properly
+ debug!(
+ "No associated capture found for {:?}[{:#?}]",
+ var_hir_id,
+ from_builder.projection,
+ );
+ }
+ return Err(var_hir_id);
+ };
+
+ let closure_ty =
+ typeck_results.node_type(tcx.hir().local_def_id_to_hir_id(closure_def_id.expect_local()));
+
+ let substs = match closure_ty.kind() {
+ ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
+ ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ _ => bug!("Lowering capture for non-closure type {:?}", closure_ty),
+ };
+
+ // Access the capture by accessing the field within the Closure struct.
+ //
+ // We must have inferred the capture types since we are building MIR, therefore
+ // it's safe to call `tuple_element_ty` and we can unwrap here because
+ // we know that the capture exists and is the `capture_index`-th capture.
+ let var_ty = substs.tupled_upvars_ty().tuple_element_ty(capture_index).unwrap();
+
+ upvar_resolved_place_builder = upvar_resolved_place_builder.field(Field::new(capture_index), var_ty);
+
+ // If the variable is captured via ByRef(Immutable/Mutable) Borrow,
+ // we need to deref it
+ upvar_resolved_place_builder = match capture.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => upvar_resolved_place_builder.deref(),
+ ty::UpvarCapture::ByValue(_) => upvar_resolved_place_builder,
+ };
+
+ let next_projection = capture.place.projections.len();
+ let mut curr_projections = from_builder.projection;
+
+ // We used some of the projections to build the capture itself,
+ // now we apply the remaining to the upvar resolved place.
+ upvar_resolved_place_builder.projection.extend(
+ curr_projections.drain(next_projection..));
+
+ Ok(upvar_resolved_place_builder)
+ }
+ }
+}
+
impl<'tcx> PlaceBuilder<'tcx> {
- fn into_place(self, tcx: TyCtxt<'tcx>) -> Place<'tcx> {
- Place { local: self.local, projection: tcx.intern_place_elems(&self.projection) }
+ crate fn into_place<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Place<'tcx> {
+ if let PlaceBase::Local(local) = self.base {
+ Place { local, projection: tcx.intern_place_elems(&self.projection) }
+ } else {
+ self.expect_upvars_resolved(tcx, typeck_results).into_place(tcx, typeck_results)
+ }
+ }
+
+ fn expect_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> PlaceBuilder<'tcx> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results).unwrap()
+ }
+
+ crate fn base(&self) -> PlaceBase {
+ self.base
}
fn field(self, f: Field, ty: Ty<'tcx>) -> Self {
impl<'tcx> From<Local> for PlaceBuilder<'tcx> {
fn from(local: Local) -> Self {
- Self { local, projection: Vec::new() }
+ Self { base: PlaceBase::Local(local), projection: Vec::new() }
+ }
+}
+
+impl<'tcx> From<PlaceBase> for PlaceBuilder<'tcx> {
+ fn from(base: PlaceBase) -> Self {
+ Self { base, projection: Vec::new() }
}
}
M: Mirror<'tcx, Output = Expr<'tcx>>,
{
let place_builder = unpack!(block = self.as_place_builder(block, expr));
- block.and(place_builder.into_place(self.hir.tcx()))
+ block.and(place_builder.into_place(self.hir.tcx(), self.hir.typeck_results()))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
/// intermediate `Place` values until we know the full set of projections.
- fn as_place_builder<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<PlaceBuilder<'tcx>>
+ crate fn as_place_builder<M>(&mut self, block: BasicBlock, expr: M) -> BlockAnd<PlaceBuilder<'tcx>>
where
M: Mirror<'tcx, Output = Expr<'tcx>>,
{
M: Mirror<'tcx, Output = Expr<'tcx>>,
{
let place_builder = unpack!(block = self.as_read_only_place_builder(block, expr));
- block.and(place_builder.into_place(self.hir.tcx()))
+ block.and(place_builder.into_place(self.hir.tcx(), self.hir.typeck_results()))
}
/// This is used when constructing a compound `Place`, so that we can avoid creating
source_info,
),
ExprKind::UpvarRef { closure_def_id, var_hir_id } => {
- let capture = this
- .hir
- .typeck_results
- .closure_captures
- .get(&closure_def_id)
- .and_then(|captures| captures.get_full(&var_hir_id));
-
- if capture.is_none() {
- if !this.hir.tcx().features().capture_disjoint_fields {
- bug!(
- "No associated capture found for {:?} even though \
- capture_disjoint_fields isn't enabled",
- expr.kind
- )
- }
- // FIXME(project-rfc-2229#24): Handle this case properly
- }
-
- // Unwrap until the FIXME has been resolved
- let (capture_index, _, upvar_id) = capture.unwrap();
- this.lower_closure_capture(block, capture_index, *upvar_id)
+ let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id.expect_local());
+ this.lower_captured_upvar(block, upvar_id)
}
ExprKind::VarRef { id } => {
inferred_ty: expr.ty,
});
- let place = place_builder.clone().into_place(this.hir.tcx());
+ let place =
+ place_builder.clone().into_place(this.hir.tcx(), this.hir.typeck_results());
this.cfg.push(
block,
Statement {
}
}
- /// Lower a closure/generator capture by representing it as a field
- /// access within the desugared closure/generator.
- ///
- /// `capture_index` is the index of the capture within the desugared
- /// closure/generator.
- fn lower_closure_capture(
+ /// Lower a captured upvar. Note we might not know the actual capture index,
+ /// so we create a place starting from `PlaceBase::Upvar`, which will be resolved
+    /// once all projections that allow us to identify a capture have been applied.
+ fn lower_captured_upvar(
&mut self,
block: BasicBlock,
- capture_index: usize,
upvar_id: ty::UpvarId,
- ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
let closure_ty = self
.hir
.typeck_results()
.node_type(self.hir.tcx().hir().local_def_id_to_hir_id(upvar_id.closure_expr_id));
- // Captures are represented using fields inside a structure.
- // This represents accessing self in the closure structure
- let mut place_builder = PlaceBuilder::from(Local::new(1));
-
- // In case of Fn/FnMut closures we must deref to access the fields
- // Generators are considered FnOnce, so we ignore this step for them.
- if let ty::Closure(_, closure_substs) = closure_ty.kind() {
- match self.hir.infcx().closure_kind(closure_substs).unwrap() {
- ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
- place_builder = place_builder.deref();
- }
- ty::ClosureKind::FnOnce => {}
- }
- }
-
- let substs = match closure_ty.kind() {
- ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
- ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
- _ => bug!("Lowering capture for non-closure type {:?}", closure_ty)
+ let closure_kind = if let ty::Closure(_, closure_substs) = closure_ty.kind() {
+ self.hir.infcx().closure_kind(closure_substs).unwrap()
+ } else {
+ // Generators are considered FnOnce.
+ ty::ClosureKind::FnOnce
};
- // Access the capture by accessing the field within the Closure struct.
- //
- // We must have inferred the capture types since we are building MIR, therefore
- // it's safe to call `upvar_tys` and we can unwrap here because
- // we know that the capture exists and is the `capture_index`-th capture.
- let var_ty = substs.upvar_tys().nth(capture_index).unwrap();
- place_builder = place_builder.field(Field::new(capture_index), var_ty);
-
- // If the variable is captured via ByRef(Immutable/Mutable) Borrow,
- // we need to deref it
- match self.hir.typeck_results.upvar_capture(upvar_id) {
- ty::UpvarCapture::ByRef(_) => {
- block.and(place_builder.deref())
- }
- ty::UpvarCapture::ByValue(_) => block.and(place_builder),
- }
+ block.and(PlaceBuilder::from(PlaceBase::Upvar {
+ var_hir_id: upvar_id.var_path.hir_id,
+ closure_def_id: upvar_id.closure_expr_id.to_def_id(),
+ closure_kind,
+ }))
}
/// Lower an index expression
let is_outermost_index = fake_borrow_temps.is_none();
let fake_borrow_temps = fake_borrow_temps.unwrap_or(base_fake_borrow_temps);
- let base_place =
+ let mut base_place =
unpack!(block = self.expr_as_place(block, lhs, mutability, Some(fake_borrow_temps),));
// Making this a *fresh* temporary means we do not have to worry about
block = self.bounds_check(
block,
- base_place.clone().into_place(self.hir.tcx()),
+ base_place.clone().into_place(self.hir.tcx(), self.hir.typeck_results()),
idx,
expr_span,
source_info,
if is_outermost_index {
self.read_fake_borrows(block, fake_borrow_temps, source_info)
} else {
+ base_place = base_place.expect_upvars_resolved(self.hir.tcx(), self.hir.typeck_results());
self.add_fake_borrows_of_base(
&base_place,
block,
source_info: SourceInfo,
) {
let tcx = self.hir.tcx();
- let place_ty =
- Place::ty_from(base_place.local, &base_place.projection, &self.local_decls, tcx);
+ let local = match base_place.base {
+ PlaceBase::Local(local) => local,
+ PlaceBase::Upvar { .. } => bug!("Expected PlacseBase::Local found Upvar")
+ };
+
+ let place_ty = Place::ty_from(local, &base_place.projection, &self.local_decls, tcx);
if let ty::Slice(_) = place_ty.ty.kind() {
// We need to create fake borrows to ensure that the bounds
// check that we just did stays valid. Since we can't assign to
match elem {
ProjectionElem::Deref => {
let fake_borrow_deref_ty = Place::ty_from(
- base_place.local,
+ local,
&base_place.projection[..idx],
&self.local_decls,
tcx,
Rvalue::Ref(
tcx.lifetimes.re_erased,
BorrowKind::Shallow,
- Place { local: base_place.local, projection },
+ Place { local, projection },
),
);
fake_borrow_temps.push(fake_borrow_temp);
}
ProjectionElem::Index(_) => {
let index_ty = Place::ty_from(
- base_place.local,
+ local,
&base_place.projection[..idx],
&self.local_decls,
tcx,
use crate::build::expr::category::{Category, RvalueFunc};
use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use crate::build::expr::as_place::PlaceBase;
use crate::thir::*;
use rustc_middle::middle::region;
use rustc_middle::mir::AssertKind;
this.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
- let arg_place = unpack!(block = this.as_place(block, arg));
-
- let mutability = match arg_place.as_ref() {
- PlaceRef { local, projection: &[] } => this.local_decls[local].mutability,
- PlaceRef { local, projection: &[ProjectionElem::Deref] } => {
- debug_assert!(
- this.local_decls[local].is_ref_for_guard(),
- "Unexpected capture place",
- );
- this.local_decls[local].mutability
- }
- PlaceRef {
- local,
- projection: &[ref proj_base @ .., ProjectionElem::Field(upvar_index, _)],
- }
- | PlaceRef {
- local,
- projection:
- &[ref proj_base @ .., ProjectionElem::Field(upvar_index, _), ProjectionElem::Deref],
- } => {
- let place = PlaceRef { local, projection: proj_base };
-
- // Not projected from the implicit `self` in a closure.
- debug_assert!(
- match place.local_or_deref_local() {
- Some(local) => local == Local::new(1),
- None => false,
- },
- "Unexpected capture place"
- );
- // Not in a closure
- debug_assert!(
- this.upvar_mutbls.len() > upvar_index.index(),
- "Unexpected capture place"
- );
- this.upvar_mutbls[upvar_index.index()]
+ let arg_place_builder = unpack!(block = this.as_place_builder(block, arg));
+
+ let mutability = match arg_place_builder.base() {
+ // We are capturing a path that starts off a local variable in the parent.
+ // The mutability of the current capture is same as the mutability
+ // of the local declaration in the parent.
+ PlaceBase::Local(local) => this.local_decls[local].mutability,
+ // Parent is a closure and we are capturing a path that is captured
+ // by the parent itself. The mutability of the current capture
+ // is same as that of the capture in the parent closure.
+ PlaceBase::Upvar { .. } => {
+ let enclosing_upvars_resolved = arg_place_builder.clone().into_place(
+ this.hir.tcx(),
+ this.hir.typeck_results());
+
+ match enclosing_upvars_resolved.as_ref() {
+ PlaceRef { local, projection: &[ProjectionElem::Field(upvar_index, _), ..] }
+ | PlaceRef {
+ local,
+ projection: &[ProjectionElem::Deref, ProjectionElem::Field(upvar_index, _), ..] } => {
+ // Not in a closure
+ debug_assert!(
+ local == Local::new(1),
+ "Expected local to be Local(1), found {:?}",
+ local
+ );
+ // Not in a closure
+ debug_assert!(
+ this.upvar_mutbls.len() > upvar_index.index(),
+ "Unexpected capture place, upvar_mutbls={:#?}, upvar_index={:?}",
+ this.upvar_mutbls, upvar_index
+ );
+ this.upvar_mutbls[upvar_index.index()]
+ }
+ _ => bug!("Unexpected capture place"),
+ }
}
- _ => bug!("Unexpected capture place"),
};
let borrow_kind = match mutability {
Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
};
+ let arg_place = arg_place_builder.into_place(
+ this.hir.tcx(),
+ this.hir.typeck_results());
+
this.cfg.push_assign(
block,
source_info,
guard: Option<&Guard<'tcx>>,
fake_borrow_temps: &Vec<(Place<'tcx>, Local)>,
scrutinee_span: Span,
+ arm_span: Option<Span>,
arm_scope: Option<region::Scope>,
) -> BasicBlock {
if candidate.subcandidates.is_empty() {
guard,
fake_borrow_temps,
scrutinee_span,
+ arm_span,
true,
)
} else {
guard,
&fake_borrow_temps,
scrutinee_span,
+ arm_span,
schedule_drops,
);
if arm_scope.is_none() {
&fake_borrow_temps,
irrefutable_pat.span,
None,
+ None,
)
.unit()
}
/// For an example of a case where we set `otherwise_block`, even for an
/// exhaustive match consider:
///
+ /// ```rust
/// match x {
/// (true, true) => (),
/// (_, false) => (),
/// (false, true) => (),
/// }
+ /// ```
///
/// For this match, we check if `x.0` matches `true` (for the first
/// arm). If that's false, we check `x.1`. If it's `true` we check if
/// Link up matched candidates. For example, if we have something like
/// this:
///
+ /// ```rust
/// ...
/// Some(x) if cond => ...
/// Some(x) => ...
/// Some(x) if cond => ...
/// ...
+ /// ```
///
/// We generate real edges from:
/// * `start_block` to the `prebinding_block` of the first pattern,
/// Initializes each of the bindings from the candidate by
/// moving/copying/ref'ing the source as appropriate. Tests the guard, if
/// any, and then branches to the arm. Returns the block for the case where
- /// the guard fails.
+ /// the guard succeeds.
///
/// Note: we do not check earlier that if there is a guard,
/// there cannot be move bindings. We avoid a use-after-move by only
guard: Option<&Guard<'tcx>>,
fake_borrows: &Vec<(Place<'tcx>, Local)>,
scrutinee_span: Span,
+ arm_span: Option<Span>,
schedule_drops: bool,
) -> BasicBlock {
debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate);
self.cfg.push_assign(block, scrutinee_source_info, Place::from(temp), borrow);
}
- // the block to branch to if the guard fails; if there is no
- // guard, this block is simply unreachable
- let guard = match guard {
- Guard::If(e) => self.hir.mirror(e.clone()),
+ let (guard_span, (post_guard_block, otherwise_post_guard_block)) = match guard {
+ Guard::If(e) => {
+ let e = self.hir.mirror(e.clone());
+ let source_info = self.source_info(e.span);
+ (e.span, self.test_bool(block, e, source_info))
+ },
+ Guard::IfLet(pat, scrutinee) => {
+ let scrutinee_span = scrutinee.span();
+ let scrutinee_place = unpack!(block = self.lower_scrutinee(block, scrutinee.clone(), scrutinee_span));
+ let mut guard_candidate = Candidate::new(scrutinee_place, &pat, false);
+ let wildcard = Pat::wildcard_from_ty(pat.ty);
+ let mut otherwise_candidate = Candidate::new(scrutinee_place, &wildcard, false);
+ let fake_borrow_temps =
+ self.lower_match_tree(block, pat.span, false, &mut [&mut guard_candidate, &mut otherwise_candidate]);
+ self.declare_bindings(
+ None,
+ pat.span.to(arm_span.unwrap()),
+ pat,
+ ArmHasGuard(false),
+ Some((Some(&scrutinee_place), scrutinee.span())),
+ );
+ let post_guard_block = self.bind_pattern(
+ self.source_info(pat.span),
+ guard_candidate,
+ None,
+ &fake_borrow_temps,
+ scrutinee.span(),
+ None,
+ None,
+ );
+ let otherwise_post_guard_block = otherwise_candidate.pre_binding_block.unwrap();
+ (scrutinee_span, (post_guard_block, otherwise_post_guard_block))
+ }
};
- let source_info = self.source_info(guard.span);
- let guard_end = self.source_info(tcx.sess.source_map().end_point(guard.span));
- let (post_guard_block, otherwise_post_guard_block) =
- self.test_bool(block, guard, source_info);
+ let source_info = self.source_info(guard_span);
+ let guard_end = self.source_info(tcx.sess.source_map().end_point(guard_span));
let guard_frame = self.guard_context.pop().unwrap();
debug!("Exiting guard building context with locals: {:?}", guard_frame);
self.var_debug_info.push(VarDebugInfo {
name,
source_info: debug_source_info,
- place: for_arm_body.into(),
+ value: VarDebugInfoContents::Place(for_arm_body.into()),
});
let locals = if has_guard.0 {
let ref_for_guard = self.local_decls.push(LocalDecl::<'tcx> {
self.var_debug_info.push(VarDebugInfo {
name,
source_info: debug_source_info,
- place: ref_for_guard.into(),
+ value: VarDebugInfoContents::Place(ref_for_guard.into()),
});
LocalsForNode::ForGuard { ref_for_guard, for_arm_body }
} else {
use rustc_hir::{GeneratorKind, HirIdMap, Node};
use rustc_index::vec::{Idx, IndexVec};
use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
use rustc_middle::middle::region;
use rustc_middle::mir::*;
use rustc_middle::ty::subst::Subst;
self.var_debug_info.push(VarDebugInfo {
name: ident.name,
source_info,
- place: arg_local.into(),
+ value: VarDebugInfoContents::Place(arg_local.into()),
});
}
}
// with the closure's DefId. Here, we run through that vec of UpvarIds for
// the given closure and use the necessary information to create upvar
// debuginfo and to fill `self.upvar_mutbls`.
- if let Some(upvars) = hir_typeck_results.closure_captures.get(&fn_def_id) {
+ if hir_typeck_results.closure_min_captures.get(&fn_def_id).is_some() {
let closure_env_arg = Local::new(1);
let mut closure_env_projs = vec![];
let mut closure_ty = self.local_decls[closure_env_arg].ty;
ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
_ => span_bug!(self.fn_span, "upvars with non-closure env ty {:?}", closure_ty),
};
- let upvar_tys = upvar_substs.upvar_tys();
- let upvars_with_tys = upvars.iter().zip(upvar_tys);
- self.upvar_mutbls = upvars_with_tys
+ let capture_tys = upvar_substs.upvar_tys();
+ let captures_with_tys = hir_typeck_results
+ .closure_min_captures_flattened(fn_def_id)
+ .zip(capture_tys);
+
+ self.upvar_mutbls = captures_with_tys
.enumerate()
- .map(|(i, ((&var_id, &upvar_id), ty))| {
- let capture = hir_typeck_results.upvar_capture(upvar_id);
+ .map(|(i, (captured_place, ty))| {
+ let capture = captured_place.info.capture_kind;
+ let var_id = match captured_place.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => bug!("Expected an upvar")
+ };
let mut mutability = Mutability::Not;
+
+ // FIXME(project-rfc-2229#8): Store more precise information
let mut name = kw::Invalid;
if let Some(Node::Binding(pat)) = tcx_hir.find(var_id) {
if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
self.var_debug_info.push(VarDebugInfo {
name,
source_info: SourceInfo::outermost(tcx_hir.span(var_id)),
- place: Place {
+ value: VarDebugInfoContents::Place(Place {
local: closure_env_arg,
projection: tcx.intern_place_elems(&projs),
- },
+ }),
});
mutability
debug!("stmt_expr Break val block_context.push(SubExpr)");
self.block_context.push(BlockFrame::SubExpr);
unpack!(block = self.into(destination, dest_scope, block, value));
- dest_scope
- .map(|scope| self.unschedule_drop(scope, destination.as_local().unwrap()));
+ if let Some(scope) = dest_scope {
+ self.unschedule_drop(scope, destination.as_local().unwrap())
+ };
self.block_context.pop();
} else {
self.cfg.push_assign_unit(block, source_info, destination, self.hir.tcx())
arm.guard.as_ref(),
&fake_borrow_temps,
scrutinee_span,
+ Some(arm.span),
Some(arm.scope),
);
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
use rustc_index::vec::Idx;
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
+use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
use rustc_middle::mir::interpret::Scalar;
use rustc_middle::mir::BorrowKind;
use rustc_middle::ty::adjustment::{
span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
}
};
+
let upvars = cx
.typeck_results()
- .closure_captures
- .get(&def_id)
- .iter()
- .flat_map(|upvars| upvars.iter())
+ .closure_min_captures_flattened(def_id)
.zip(substs.upvar_tys())
- .map(|((&var_hir_id, _), ty)| capture_upvar(cx, expr, var_hir_id, ty))
+ .map(|(captured_place, ty)| capture_upvar(cx, expr, captured_place, ty))
.collect();
ExprKind::Closure { closure_id: def_id, substs, upvars, movability }
}
fn convert_arm<'tcx>(cx: &mut Cx<'_, 'tcx>, arm: &'tcx hir::Arm<'tcx>) -> Arm<'tcx> {
Arm {
pattern: cx.pattern_from_hir(&arm.pat),
- guard: match arm.guard {
- Some(hir::Guard::If(ref e)) => Some(Guard::If(e.to_ref())),
- _ => None,
- },
+ guard: arm.guard.as_ref().map(|g| match g {
+ hir::Guard::If(ref e) => Guard::If(e.to_ref()),
+ hir::Guard::IfLet(ref pat, ref e) => Guard::IfLet(cx.pattern_from_hir(pat), e.to_ref()),
+ }),
body: arm.body.to_ref(),
lint_level: LintLevel::Explicit(arm.hir_id),
scope: region::Scope { id: arm.hir_id.local_id, data: region::ScopeData::Node },
ExprKind::Deref { arg: ref_expr.to_ref() }
}
-fn capture_upvar<'tcx>(
+fn capture_upvar<'a, 'tcx>(
cx: &mut Cx<'_, 'tcx>,
closure_expr: &'tcx hir::Expr<'tcx>,
- var_hir_id: hir::HirId,
+ captured_place: &'a ty::CapturedPlace<'tcx>,
upvar_ty: Ty<'tcx>,
) -> ExprRef<'tcx> {
- let upvar_id = ty::UpvarId {
- var_path: ty::UpvarPath { hir_id: var_hir_id },
- closure_expr_id: cx.tcx.hir().local_def_id(closure_expr.hir_id),
- };
- let upvar_capture = cx.typeck_results().upvar_capture(upvar_id);
+ let upvar_capture = captured_place.info.capture_kind;
let temp_lifetime = cx.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id);
- let var_ty = cx.typeck_results().node_type(var_hir_id);
- let captured_var = Expr {
+ let var_ty = captured_place.place.base_ty;
+
+    // The result of capture analysis in `rustc_typeck/check/upvar.rs` represents a captured path
+ // as it's seen for use within the closure and not at the time of closure creation.
+ //
+    // That is, we expect to see it start from a captured upvar and not something that is local
+ // to the closure's parent.
+ let var_hir_id = match captured_place.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected an upvar, found {:?}", base),
+ };
+
+ let mut captured_place_expr = Expr {
temp_lifetime,
ty: var_ty,
span: closure_expr.span,
kind: convert_var(cx, var_hir_id),
};
+
+ for proj in captured_place.place.projections.iter() {
+ let kind = match proj.kind {
+ HirProjectionKind::Deref => ExprKind::Deref { arg: captured_place_expr.to_ref() },
+ HirProjectionKind::Field(field, ..) => {
+ // Variant index will always be 0, because for multi-variant
+ // enums, we capture the enum entirely.
+ ExprKind::Field {
+ lhs: captured_place_expr.to_ref(),
+ name: Field::new(field as usize),
+ }
+ }
+ HirProjectionKind::Index | HirProjectionKind::Subslice => {
+ // We don't capture these projections, so we can ignore them here
+ continue;
+ }
+ };
+
+ captured_place_expr = Expr { temp_lifetime, ty: proj.ty, span: closure_expr.span, kind };
+ }
+
match upvar_capture {
- ty::UpvarCapture::ByValue(_) => captured_var.to_ref(),
+ ty::UpvarCapture::ByValue(_) => captured_place_expr.to_ref(),
ty::UpvarCapture::ByRef(upvar_borrow) => {
let borrow_kind = match upvar_borrow.kind {
ty::BorrowKind::ImmBorrow => BorrowKind::Shared,
temp_lifetime,
ty: upvar_ty,
span: closure_expr.span,
- kind: ExprKind::Borrow { borrow_kind, arg: captured_var.to_ref() },
+ kind: ExprKind::Borrow { borrow_kind, arg: captured_place_expr.to_ref() },
}
.to_ref()
}
#[derive(Clone, Debug)]
crate enum Guard<'tcx> {
If(ExprRef<'tcx>),
+ IfLet(Pat<'tcx>, ExprRef<'tcx>),
}
#[derive(Copy, Clone, Debug)]
for arm in arms {
// Check the arm for some things unrelated to exhaustiveness.
self.check_patterns(&arm.pat);
+ if let Some(hir::Guard::IfLet(ref pat, _)) = arm.guard {
+ self.check_patterns(pat);
+ }
}
let mut cx = self.new_cx(scrut.hir_id);
+ for arm in arms {
+ if let Some(hir::Guard::IfLet(ref pat, _)) = arm.guard {
+ let tpat = self.lower_pattern(&mut cx, pat, &mut false).0;
+ check_if_let_guard(&mut cx, &tpat, pat.hir_id);
+ }
+ }
+
let mut have_errors = false;
let arms: Vec<_> = arms
let msg = match source {
hir::MatchSource::IfLetDesugar { .. } => "irrefutable if-let pattern",
hir::MatchSource::WhileLetDesugar => "irrefutable while-let pattern",
+ hir::MatchSource::IfLetGuardDesugar => "irrefutable if-let guard",
_ => bug!(),
};
lint.build(msg).emit()
});
}
+fn check_if_let_guard<'p, 'tcx>(
+ cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ pat: &'p super::Pat<'tcx>,
+ pat_id: HirId,
+) {
+ let arms = [MatchArm { pat, hir_id: pat_id, has_guard: false }];
+ let report = compute_match_usefulness(&cx, &arms, pat_id, pat.ty);
+ report_arm_reachability(&cx, &report, hir::MatchSource::IfLetGuardDesugar);
+
+ if report.non_exhaustiveness_witnesses.is_empty() {
+ // The match is exhaustive, i.e. the if let pattern is irrefutable.
+ irrefutable_let_pattern(cx.tcx, pat.span, pat_id, hir::MatchSource::IfLetGuardDesugar)
+ }
+}
+
/// Report unreachable arms, if any.
fn report_arm_reachability<'p, 'tcx>(
cx: &MatchCheckCtxt<'p, 'tcx>,
}
}
+ hir::MatchSource::IfLetGuardDesugar => {
+ assert_eq!(arm_index, 0);
+ unreachable_pattern(cx.tcx, arm.pat.span, arm.hir_id, None);
+ }
+
hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => {
unreachable_pattern(cx.tcx, arm.pat.span, arm.hir_id, catchall);
}
(ident, ItemKind::Static(ty, m, expr))
} else if let Const::Yes(const_span) = self.parse_constness() {
// CONST ITEM
- self.recover_const_mut(const_span);
- let (ident, ty, expr) = self.parse_item_global(None)?;
- (ident, ItemKind::Const(def(), ty, expr))
+ if self.token.is_keyword(kw::Impl) {
+ // recover from `const impl`, suggest `impl const`
+ self.recover_const_impl(const_span, attrs, def())?
+ } else {
+ self.recover_const_mut(const_span);
+ let (ident, ty, expr) = self.parse_item_global(None)?;
+ (ident, ItemKind::Const(def(), ty, expr))
+ }
} else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() {
// TRAIT ITEM
self.parse_item_trait(attrs, lo)?
}
}
+ /// Recover on `const impl` with `const` already eaten.
+ fn recover_const_impl(
+ &mut self,
+ const_span: Span,
+ attrs: &mut Vec<Attribute>,
+ defaultness: Defaultness,
+ ) -> PResult<'a, ItemInfo> {
+ let impl_span = self.token.span;
+ let mut err = self.expected_ident_found();
+ let mut impl_info = self.parse_item_impl(attrs, defaultness)?;
+ match impl_info.1 {
+ // only try to recover if this is implementing a trait for a type
+ ItemKind::Impl { of_trait: Some(ref trai), ref mut constness, .. } => {
+ *constness = Const::Yes(const_span);
+
+ let before_trait = trai.path.span.shrink_to_lo();
+ let const_up_to_impl = const_span.with_hi(impl_span.lo());
+ err.multipart_suggestion(
+ "you might have meant to write a const trait impl",
+ vec![(const_up_to_impl, "".to_owned()), (before_trait, "const ".to_owned())],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ ItemKind::Impl { .. } => return Err(err),
+ _ => unreachable!(),
+ }
+ Ok(impl_info)
+ }
+
/// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with
/// `["const" | ("static" "mut"?)]` already parsed and stored in `m`.
///
use rustc_ast::ptr::P;
use rustc_ast::token::{self, DelimToken, Token, TokenKind};
use rustc_ast::tokenstream::{self, DelimSpan, LazyTokenStream, Spacing};
-use rustc_ast::tokenstream::{CreateTokenStream, TokenStream, TokenTree};
+use rustc_ast::tokenstream::{CreateTokenStream, TokenStream, TokenTree, TreeAndSpacing};
use rustc_ast::DUMMY_NODE_ID;
use rustc_ast::{self as ast, AnonConst, AttrStyle, AttrVec, Const, CrateSugar, Extern, Unsafe};
use rustc_ast::{Async, Expr, ExprKind, MacArgs, MacDelimiter, Mutability, StrLit};
// Counts the number of calls to `next` or `next_desugared`,
// depending on whether `desugar_doc_comments` is set.
num_next_calls: usize,
+ // During parsing, we may sometimes need to 'unglue' a
+ // glued token into two component tokens
+    // (e.g. '>>' into '>' and '>'), so that the parser
+ // can consume them one at a time. This process
+ // bypasses the normal capturing mechanism
+ // (e.g. `num_next_calls` will not be incremented),
+    // since the 'unglued' tokens do not exist in
+ // the original `TokenStream`.
+ //
+ // If we end up consuming both unglued tokens,
+ // then this is not an issue - we'll end up
+ // capturing the single 'glued' token.
+ //
+ // However, in certain circumstances, we may
+ // want to capture just the first 'unglued' token.
+ // For example, capturing the `Vec<u8>`
+ // in `Option<Vec<u8>>` requires us to unglue
+ // the trailing `>>` token. The `append_unglued_token`
+ // field is used to track this token - it gets
+ // appended to the captured stream when
+ // we evaluate a `LazyTokenStream`
+ append_unglued_token: Option<TreeAndSpacing>,
}
#[derive(Clone)]
stack: Vec::new(),
num_next_calls: 0,
desugar_doc_comments,
+ append_unglued_token: None,
},
desugar_doc_comments,
unmatched_angle_bracket_count: 0,
self.token_cursor.next()
};
self.token_cursor.num_next_calls += 1;
+        // We've retrieved a token from the underlying
+ // cursor, so we no longer need to worry about
+ // an unglued token. See `break_and_eat` for more details
+ self.token_cursor.append_unglued_token = None;
if next.span.is_dummy() {
// Tweak the location for better diagnostics, but keep syntactic context intact.
next.span = fallback_span.with_ctxt(next.span.ctxt());
let first_span = self.sess.source_map().start_point(self.token.span);
let second_span = self.token.span.with_lo(first_span.hi());
self.token = Token::new(first, first_span);
+ // Keep track of this token - if we end token capturing now,
+ // we'll want to append this token to the captured stream.
+ //
+ // If we consume any additional tokens, then this token
+ // is not needed (we'll capture the entire 'glued' token),
+ // and `next_tok` will set this field to `None`
+ self.token_cursor.append_unglued_token =
+ Some((TokenTree::Token(self.token.clone()), Spacing::Alone));
// Use the spacing of the glued token as the spacing
// of the unglued second token.
self.bump_with((Token::new(second, second_span), self.token_spacing));
num_calls: usize,
desugar_doc_comments: bool,
trailing_semi: bool,
+ append_unglued_token: Option<TreeAndSpacing>,
}
impl CreateTokenStream for LazyTokenStreamImpl {
fn create_token_stream(&self) -> TokenStream {
}))
.take(num_calls);
- make_token_stream(tokens)
+ make_token_stream(tokens, self.append_unglued_token.clone())
}
fn add_trailing_semi(&self) -> Box<dyn CreateTokenStream> {
if self.trailing_semi {
panic!("Called `add_trailing_semi` twice!");
}
+ if self.append_unglued_token.is_some() {
+ panic!(
+ "Cannot call `add_trailing_semi` when we have an unglued token {:?}",
+ self.append_unglued_token
+ );
+ }
let mut new = self.clone();
new.trailing_semi = true;
Box::new(new)
cursor_snapshot,
desugar_doc_comments: self.desugar_doc_comments,
trailing_semi: false,
+ append_unglued_token: self.token_cursor.append_unglued_token.clone(),
};
Ok((ret, Some(LazyTokenStream::new(lazy_impl))))
}
/// Converts a flattened iterator of tokens (including open and close delimiter tokens)
/// into a `TokenStream`, creating a `TokenTree::Delimited` for each matching pair
/// of open and close delims.
-fn make_token_stream(tokens: impl Iterator<Item = (Token, Spacing)>) -> TokenStream {
+fn make_token_stream(
+ tokens: impl Iterator<Item = (Token, Spacing)>,
+ append_unglued_token: Option<TreeAndSpacing>,
+) -> TokenStream {
#[derive(Debug)]
struct FrameData {
open: Span,
.inner
.push((delimited, Spacing::Alone));
}
- token => stack
- .last_mut()
- .expect("Bottom token frame is missing!")
- .inner
- .push((TokenTree::Token(token), spacing)),
+ token => {
+ stack
+ .last_mut()
+ .expect("Bottom token frame is missing!")
+ .inner
+ .push((TokenTree::Token(token), spacing));
+ }
}
}
- let final_buf = stack.pop().expect("Missing final buf!");
+ let mut final_buf = stack.pop().expect("Missing final buf!");
+ final_buf.inner.extend(append_unglued_token);
assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
TokenStream::new(final_buf.inner)
}
return None;
}
+ Self::Match(IfLetGuardDesugar) => bug!("if-let guard outside a `match` expression"),
+
// All other expressions are allowed.
Self::Loop(Loop | While | WhileLet)
| Self::Match(
fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
self.add_from_pat(&arm.pat);
+ if let Some(hir::Guard::IfLet(ref pat, _)) = arm.guard {
+ self.add_from_pat(pat);
+ }
intravisit::walk_arm(self, arm);
}
for arm in arms {
let body_succ = self.propagate_through_expr(&arm.body, succ);
- let guard_succ = self.propagate_through_opt_expr(
- arm.guard.as_ref().map(|hir::Guard::If(e)| *e),
- body_succ,
- );
+ let guard_succ = arm.guard.as_ref().map_or(body_succ, |g| match g {
+ hir::Guard::If(e) => self.propagate_through_expr(e, body_succ),
+ hir::Guard::IfLet(pat, e) => {
+ let let_bind = self.define_bindings_in_pat(pat, body_succ);
+ self.propagate_through_expr(e, let_bind)
+ }
+ });
let arm_succ = self.define_bindings_in_pat(&arm.pat, guard_succ);
self.merge_from_succ(ln, arm_succ);
}
for (dep_v, stab_v) in
dep_since.as_str().split('.').zip(stab_since.as_str().split('.'))
{
- if let (Ok(dep_v), Ok(stab_v)) = (dep_v.parse::<u64>(), stab_v.parse()) {
- match dep_v.cmp(&stab_v) {
- Ordering::Less => {
- self.tcx.sess.span_err(
- item_sp,
- "An API can't be stabilized \
- after it is deprecated",
- );
+ match stab_v.parse::<u64>() {
+ Err(_) => {
+ self.tcx.sess.span_err(item_sp, "Invalid stability version found");
+ break;
+ }
+ Ok(stab_vp) => match dep_v.parse::<u64>() {
+ Ok(dep_vp) => match dep_vp.cmp(&stab_vp) {
+ Ordering::Less => {
+ self.tcx.sess.span_err(
+ item_sp,
+ "An API can't be stabilized after it is deprecated",
+ );
+ break;
+ }
+ Ordering::Equal => continue,
+ Ordering::Greater => break,
+ },
+ Err(_) => {
+ if dep_v != "TBD" {
+ self.tcx
+ .sess
+ .span_err(item_sp, "Invalid deprecation version found");
+ }
break;
}
- Ordering::Equal => continue,
- Ordering::Greater => break,
- }
- } else {
- // Act like it isn't less because the question is now nonsensical,
- // and this makes us not do anything else interesting.
- self.tcx.sess.span_err(
- item_sp,
- "Invalid stability or deprecation \
- version found",
- );
- break;
+ },
}
}
}
// an eval_always node, let's try to mark it green recursively.
if !dep_dep_node.kind.is_eval_always() {
debug!(
- "try_mark_previous_green({:?}) --- state of dependency {:?} \
+ "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
is unknown, trying to mark it green",
- dep_node, dep_dep_node
+ dep_node, dep_dep_node, dep_dep_node.hash,
);
let node_index = self.try_mark_previous_green(
Ok(ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)))
}
ast::VisibilityKind::Inherited => {
- Ok(ty::Visibility::Restricted(parent_scope.module.normal_ancestor_id))
+ if matches!(self.parent_scope.module.kind, ModuleKind::Def(DefKind::Enum, _, _)) {
+ // Any inherited visibility resolved directly inside an enum
+ // (e.g. variants or fields) inherits from the visibility of the enum.
+ let parent_enum = self.parent_scope.module.def_id().unwrap().expect_local();
+ Ok(self.r.visibilities[&parent_enum])
+ } else {
+ // If it's not in an enum, its visibility is restricted to the `mod` item
+ // that it's defined in.
+ Ok(ty::Visibility::Restricted(self.parent_scope.module.normal_ancestor_id))
+ }
}
ast::VisibilityKind::Restricted { ref path, id, .. } => {
// For visibilities we are not ready to provide correct implementation of "uniform
},
}
+#[derive(Debug)]
enum ModuleKind {
/// An anonymous module; e.g., just a block.
///
Full,
}
+/// Some debuginfo requires link-time relocation and some does not. LLVM can partition the debuginfo
+/// into sections depending on whether or not it requires link-time relocation. Split DWARF
+/// provides a mechanism which allows the linker to skip the sections which don't require link-time
+/// relocation - either by putting those sections into DWARF object files, or keeping them in the
+/// object file in such a way that the linker will skip them.
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum SplitDwarfKind {
+ /// Disabled.
+ None,
+ /// Sections which do not require relocation are written into the object file but ignored
+ /// by the linker.
+ Single,
+ /// Sections which do not require relocation are written into a DWARF object (`.dwo`) file,
+ /// which is skipped by the linker by virtue of being a different file.
+ Split,
+}
+
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
#[derive(Encodable, Decodable)]
pub enum OutputType {
pub const RLINK_EXT: &str = "rlink";
pub const RUST_CGU_EXT: &str = "rcgu";
+pub const DWARF_OBJECT_EXT: &str = "dwo";
impl OutputFilenames {
pub fn new(
self.temp_path_ext(extension, codegen_unit_name)
}
- /// Like temp_path, but also supports things where there is no corresponding
+ /// Like `temp_path`, but specifically for dwarf objects.
+ pub fn temp_path_dwo(&self, codegen_unit_name: Option<&str>) -> PathBuf {
+ self.temp_path_ext(DWARF_OBJECT_EXT, codegen_unit_name)
+ }
+
+ /// Like `temp_path`, but also supports things where there is no corresponding
/// OutputType, like noopt-bitcode or lto-bitcode.
pub fn temp_path_ext(&self, ext: &str, codegen_unit_name: Option<&str>) -> PathBuf {
let mut extension = String::new();
path.set_extension(extension);
path
}
+
+ /// Returns the name of the Split DWARF file - this can differ depending on which Split DWARF
+ /// mode is being used, which is the logic that this function is intended to encapsulate.
+ pub fn split_dwarf_filename(
+ &self,
+ split_dwarf_kind: SplitDwarfKind,
+ cgu_name: Option<&str>,
+ ) -> Option<PathBuf> {
+ self.split_dwarf_path(split_dwarf_kind, cgu_name)
+ .map(|path| path.strip_prefix(&self.out_directory).unwrap_or(&path).to_path_buf())
+ }
+
+ /// Returns the path for the Split DWARF file - this can differ depending on which Split DWARF
+ /// mode is being used, which is the logic that this function is intended to encapsulate.
+ pub fn split_dwarf_path(
+ &self,
+ split_dwarf_kind: SplitDwarfKind,
+ cgu_name: Option<&str>,
+ ) -> Option<PathBuf> {
+ let obj_out = self.temp_path(OutputType::Object, cgu_name);
+ let dwo_out = self.temp_path_dwo(cgu_name);
+ match split_dwarf_kind {
+ SplitDwarfKind::None => None,
+ // Single mode doesn't change how DWARF is emitted, but does add Split DWARF attributes
+ // (pointing at the path which is being determined here). Use the path to the current
+ // object file.
+ SplitDwarfKind::Single => Some(obj_out),
+ // Split mode emits the DWARF into a different file, use that path.
+ SplitDwarfKind::Split => Some(dwo_out),
+ }
+ }
}
pub fn host_triple() -> &'static str {
deduplicate_diagnostics: self.deduplicate_diagnostics,
}
}
+
+ pub fn get_symbol_mangling_version(&self) -> SymbolManglingVersion {
+ self.symbol_mangling_version.unwrap_or(SymbolManglingVersion::Legacy)
+ }
}
// The type of entry function, so users can have their own entry functions
if !debugging_opts.parse_only {
for list in matches.opt_strs("emit") {
for output_type in list.split(',') {
- let mut parts = output_type.splitn(2, '=');
- let shorthand = parts.next().unwrap();
+ let (shorthand, path) = match output_type.split_once('=') {
+ None => (output_type, None),
+ Some((shorthand, path)) => (shorthand, Some(PathBuf::from(path))),
+ };
let output_type = OutputType::from_shorthand(shorthand).unwrap_or_else(|| {
early_error(
error_format,
),
)
});
- let path = parts.next().map(PathBuf::from);
output_types.insert(output_type, path);
}
}
early_error(error_format, &format!("target file {:?} does not exist", path))
})
}
- Some(target) => TargetTriple::TargetTriple(target),
+ Some(target) => TargetTriple::from_alias(target),
_ => TargetTriple::from_triple(host_triple()),
}
}
let max_c = matches
.opt_strs_pos("C")
.into_iter()
- .flat_map(
- |(i, s)| {
- if let Some("opt-level") = s.splitn(2, '=').next() { Some(i) } else { None }
- },
- )
+ .flat_map(|(i, s)| {
+ // NB: This can match a string without `=`.
+ if let Some("opt-level") = s.splitn(2, '=').next() { Some(i) } else { None }
+ })
.max();
if max_o > max_c {
OptLevel::Default
let max_c = matches
.opt_strs_pos("C")
.into_iter()
- .flat_map(
- |(i, s)| {
- if let Some("debuginfo") = s.splitn(2, '=').next() { Some(i) } else { None }
- },
- )
+ .flat_map(|(i, s)| {
+ // NB: This can match a string without `=`.
+ if let Some("debuginfo") = s.splitn(2, '=').next() { Some(i) } else { None }
+ })
.max();
if max_g > max_c {
DebugInfo::Full
.map(|s| {
// Parse string of the form "[KIND=]lib[:new_name]",
// where KIND is one of "dylib", "framework", "static".
- let mut parts = s.splitn(2, '=');
- let kind = parts.next().unwrap();
- let (name, kind) = match (parts.next(), kind) {
- (None, name) => (name, NativeLibKind::Unspecified),
- (Some(name), "dylib") => (name, NativeLibKind::Dylib),
- (Some(name), "framework") => (name, NativeLibKind::Framework),
- (Some(name), "static") => (name, NativeLibKind::StaticBundle),
- (Some(name), "static-nobundle") => (name, NativeLibKind::StaticNoBundle),
- (_, s) => {
- early_error(
- error_format,
- &format!(
- "unknown library kind `{}`, expected \
- one of dylib, framework, or static",
- s
- ),
- );
+ let (name, kind) = match s.split_once('=') {
+ None => (s, NativeLibKind::Unspecified),
+ Some((kind, name)) => {
+ let kind = match kind {
+ "dylib" => NativeLibKind::Dylib,
+ "framework" => NativeLibKind::Framework,
+ "static" => NativeLibKind::StaticBundle,
+ "static-nobundle" => NativeLibKind::StaticNoBundle,
+ s => {
+ early_error(
+ error_format,
+ &format!(
+ "unknown library kind `{}`, expected \
+ one of dylib, framework, or static",
+ s
+ ),
+ );
+ }
+ };
+ (name.to_string(), kind)
}
};
if kind == NativeLibKind::StaticNoBundle
accepted on the nightly compiler",
);
}
- let mut name_parts = name.splitn(2, ':');
- let name = name_parts.next().unwrap();
- let new_name = name_parts.next();
- (name.to_owned(), new_name.map(|n| n.to_owned()), kind)
+ let (name, new_name) = match name.split_once(':') {
+ None => (name, None),
+ Some((name, new_name)) => (name.to_string(), Some(new_name.to_owned())),
+ };
+ (name, new_name, kind)
})
.collect()
}
let is_unstable_enabled = debugging_opts.unstable_options;
let mut externs: BTreeMap<String, ExternEntry> = BTreeMap::new();
for arg in matches.opt_strs("extern") {
- let mut parts = arg.splitn(2, '=');
- let name = parts
- .next()
- .unwrap_or_else(|| early_error(error_format, "--extern value must not be empty"));
- let path = parts.next().map(|s| s.to_string());
-
- let mut name_parts = name.splitn(2, ':');
- let first_part = name_parts.next();
- let second_part = name_parts.next();
- let (options, name) = match (first_part, second_part) {
- (Some(opts), Some(name)) => (Some(opts), name),
- (Some(name), None) => (None, name),
- (None, None) => early_error(error_format, "--extern name must not be empty"),
- _ => unreachable!(),
+ let (name, path) = match arg.split_once('=') {
+ None => (arg, None),
+ Some((name, path)) => (name.to_string(), Some(path.to_string())),
+ };
+ let (options, name) = match name.split_once(':') {
+ None => (None, name),
+ Some((opts, name)) => (Some(opts), name.to_string()),
};
let entry = externs.entry(name.to_owned());
matches
.opt_strs("remap-path-prefix")
.into_iter()
- .map(|remap| {
- let mut parts = remap.rsplitn(2, '='); // reverse iterator
- let to = parts.next();
- let from = parts.next();
- match (from, to) {
- (Some(from), Some(to)) => (PathBuf::from(from), PathBuf::from(to)),
- _ => early_error(
- error_format,
- "--remap-path-prefix must contain '=' between FROM and TO",
- ),
- }
+ .map(|remap| match remap.rsplit_once('=') {
+ None => early_error(
+ error_format,
+ "--remap-path-prefix must contain '=' between FROM and TO",
+ ),
+ Some((from, to)) => (PathBuf::from(from), PathBuf::from(to)),
})
.collect()
}
// and reversible name mangling. Note, LLVM coverage tools can analyze coverage over
// multiple runs, including some changes to source code; so mangled names must be consistent
// across compilations.
- debugging_opts.symbol_mangling_version = SymbolManglingVersion::V0;
+ match debugging_opts.symbol_mangling_version {
+ None => {
+ debugging_opts.symbol_mangling_version = Some(SymbolManglingVersion::V0);
+ }
+ Some(SymbolManglingVersion::Legacy) => {
+ early_warn(
+ error_format,
+ "-Z instrument-coverage requires symbol mangling version `v0`, \
+ but `-Z symbol-mangling-version=legacy` was specified",
+ );
+ }
+ Some(SymbolManglingVersion::V0) => {}
+ }
+
+ if debugging_opts.mir_opt_level > 1 {
+ early_warn(
+ error_format,
+ &format!(
+ "`-Z mir-opt-level={}` (any level > 1) enables function inlining, which \
+ limits the effectiveness of `-Z instrument-coverage`.",
+ debugging_opts.mir_opt_level,
+ ),
+ );
+ }
}
if let Ok(graphviz_font) = std::env::var("RUSTC_GRAPHVIZ_FONT") {
impl_dep_tracking_hash_via_hash!(Edition);
impl_dep_tracking_hash_via_hash!(LinkerPluginLto);
impl_dep_tracking_hash_via_hash!(SwitchWithOptPath);
- impl_dep_tracking_hash_via_hash!(SymbolManglingVersion);
+ impl_dep_tracking_hash_via_hash!(Option<SymbolManglingVersion>);
impl_dep_tracking_hash_via_hash!(Option<SourceFileHashAlgorithm>);
impl_dep_tracking_hash_via_hash!(TrimmedDefPaths);
#![feature(crate_visibility_modifier)]
#![feature(once_cell)]
#![feature(or_patterns)]
+#![feature(str_split_once)]
#[macro_use]
extern crate bitflags;
{
let mut op = $defaultfn();
for option in matches.opt_strs($prefix) {
- let mut iter = option.splitn(2, '=');
- let key = iter.next().unwrap();
- let value = iter.next();
+ let (key, value) = match option.split_once('=') {
+ None => (option, None),
+ Some((k, v)) => (k.to_string(), Some(v)),
+ };
let option_to_lookup = key.replace("-", "_");
let mut found = false;
for &(candidate, setter, type_desc, _) in $stat {
pub const parse_switch_with_opt_path: &str =
"an optional path to the profiling data output directory";
pub const parse_merge_functions: &str = "one of: `disabled`, `trampolines`, or `aliases`";
+ pub const parse_split_dwarf_kind: &str = "one of: `none`, `single` or `split`";
pub const parse_symbol_mangling_version: &str = "either `legacy` or `v0` (RFC 2603)";
pub const parse_src_file_hash: &str = "either `md5` or `sha1`";
pub const parse_relocation_model: &str =
true
}
+ fn parse_split_dwarf_kind(
+ slot: &mut SplitDwarfKind,
+ v: Option<&str>,
+ ) -> bool {
+ *slot = match v {
+ Some("none") => SplitDwarfKind::None,
+ Some("split") => SplitDwarfKind::Split,
+ Some("single") => SplitDwarfKind::Single,
+ _ => return false,
+ };
+ true
+ }
+
fn parse_symbol_mangling_version(
- slot: &mut SymbolManglingVersion,
+ slot: &mut Option<SymbolManglingVersion>,
v: Option<&str>,
) -> bool {
*slot = match v {
- Some("legacy") => SymbolManglingVersion::Legacy,
- Some("v0") => SymbolManglingVersion::V0,
+ Some("legacy") => Some(SymbolManglingVersion::Legacy),
+ Some("v0") => Some(SymbolManglingVersion::V0),
_ => return false,
};
true
"hash algorithm of source files in debug info (`md5`, `sha1`, or `sha256`)"),
strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
"tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
- symbol_mangling_version: SymbolManglingVersion = (SymbolManglingVersion::Legacy,
+ split_dwarf: SplitDwarfKind = (SplitDwarfKind::None, parse_split_dwarf_kind, [UNTRACKED],
+ "enable generation of split dwarf"),
+ split_dwarf_inlining: bool = (true, parse_bool, [UNTRACKED],
+ "provide minimal debug info in the object/executable to facilitate online \
+ symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF"),
+ symbol_mangling_version: Option<SymbolManglingVersion> = (None,
parse_symbol_mangling_version, [TRACKED],
- "which mangling version to use for symbol names"),
+ "which mangling version to use for symbol names ('legacy' (default) or 'v0')"),
teach: bool = (false, parse_bool, [TRACKED],
"show extended diagnostic help (default: no)"),
terminal_width: Option<usize> = (None, parse_opt_uint, [UNTRACKED],
}
pub fn link_dead_code(&self) -> bool {
- match self.opts.cg.link_dead_code {
- Some(explicitly_set) => explicitly_set,
- None => false,
- }
+ self.opts.cg.link_dead_code.unwrap_or(false)
}
pub fn mark_attr_known(&self, attr: &Attribute) {
document_private_items,
dotdot_in_tuple_patterns,
dotdoteq_in_patterns,
+ dreg,
+ dreg_low16,
+ dreg_low8,
drop,
drop_in_place,
drop_types_in_const,
format_args_capture,
format_args_nl,
freeze,
+ freg,
frem_fast,
from,
from_desugaring,
iter,
keyword,
kind,
+ kreg,
label,
label_break_value,
lang,
lint_reasons,
literal,
llvm_asm,
+ local,
local_inner_macros,
log10f32,
log10f64,
pub_restricted,
pure,
pushpop_unsafe,
+ qreg,
+ qreg_low4,
+ qreg_low8,
quad_precision_float,
question_mark,
quote,
reexport_test_harness_main,
reference,
reflect,
+ reg,
+ reg16,
+ reg32,
+ reg64,
+ reg_abcd,
+ reg_byte,
+ reg_thumb,
register_attr,
register_tool,
relaxed_adts,
spotlight,
sqrtf32,
sqrtf64,
+ sreg,
+ sreg_low16,
sse4a_target_feature,
stable,
staged_api,
volatile_load,
volatile_set_memory,
volatile_store,
+ vreg,
+ vreg_low16,
warn,
wasm_import_module,
wasm_target_feature,
wrapping_mul,
wrapping_sub,
write_bytes,
+ xmm_reg,
+ ymm_reg,
+ zmm_reg,
}
}
self.0.as_u32()
}
+ pub fn is_empty(self) -> bool {
+ self == kw::Invalid
+ }
+
/// This method is supposed to be used in error messages, so it's expected to be
/// identical to printing the original identifier token written in source code
/// (`token_to_string`, `Ident::to_string`), except that symbols don't keep the rawness flag
// 2. we favor `instantiating_crate` where possible (i.e. when `Some`)
let mangling_version_crate = instantiating_crate.unwrap_or(def_id.krate);
let mangling_version = if mangling_version_crate == LOCAL_CRATE {
- tcx.sess.opts.debugging_opts.symbol_mangling_version
+ tcx.sess.opts.debugging_opts.get_symbol_mangling_version()
} else {
tcx.symbol_mangling_version(mangling_version_crate)
};
}
impl $arch_regclass {
- pub fn name(self) -> &'static str {
+ pub fn name(self) -> rustc_span::Symbol {
match self {
- $(Self::$class => stringify!($class),)*
+ $(Self::$class => rustc_span::symbol::sym::$class,)*
}
}
- pub fn parse(_arch: super::InlineAsmArch, name: &str) -> Result<Self, &'static str> {
+ pub fn parse(_arch: super::InlineAsmArch, name: rustc_span::Symbol) -> Result<Self, &'static str> {
match name {
$(
- stringify!($class) => Ok(Self::$class),
+ rustc_span::sym::$class => Ok(Self::$class),
)*
_ => Err("unknown register class"),
}
}
impl InlineAsmRegClass {
- pub fn name(self) -> &'static str {
+ pub fn name(self) -> Symbol {
match self {
Self::X86(r) => r.name(),
Self::Arm(r) => r.name(),
}
pub fn parse(arch: InlineAsmArch, name: Symbol) -> Result<Self, &'static str> {
- // FIXME: use direct symbol comparison for register class names
- name.with(|name| {
- Ok(match arch {
- InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
- Self::X86(X86InlineAsmRegClass::parse(arch, name)?)
- }
- InlineAsmArch::Arm => Self::Arm(ArmInlineAsmRegClass::parse(arch, name)?),
- InlineAsmArch::AArch64 => {
- Self::AArch64(AArch64InlineAsmRegClass::parse(arch, name)?)
- }
- InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
- Self::RiscV(RiscVInlineAsmRegClass::parse(arch, name)?)
- }
- InlineAsmArch::Nvptx64 => Self::Nvptx(NvptxInlineAsmRegClass::parse(arch, name)?),
- InlineAsmArch::Hexagon => {
- Self::Hexagon(HexagonInlineAsmRegClass::parse(arch, name)?)
- }
- InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
- Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?)
- }
- InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(arch, name)?),
- InlineAsmArch::Wasm32 => Self::Wasm(WasmInlineAsmRegClass::parse(arch, name)?),
- })
+ Ok(match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ Self::X86(X86InlineAsmRegClass::parse(arch, name)?)
+ }
+ InlineAsmArch::Arm => Self::Arm(ArmInlineAsmRegClass::parse(arch, name)?),
+ InlineAsmArch::AArch64 => Self::AArch64(AArch64InlineAsmRegClass::parse(arch, name)?),
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ Self::RiscV(RiscVInlineAsmRegClass::parse(arch, name)?)
+ }
+ InlineAsmArch::Nvptx64 => Self::Nvptx(NvptxInlineAsmRegClass::parse(arch, name)?),
+ InlineAsmArch::Hexagon => Self::Hexagon(HexagonInlineAsmRegClass::parse(arch, name)?),
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
+ Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?)
+ }
+ InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(arch, name)?),
+ InlineAsmArch::Wasm32 => Self::Wasm(WasmInlineAsmRegClass::parse(arch, name)?),
})
}
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Reg(r) => write!(f, "\"{}\"", r.name()),
- Self::RegClass(r) => f.write_str(r.name()),
+ Self::RegClass(r) => write!(f, "{}", r.name()),
}
}
}
#![feature(never_type)]
#![feature(associated_type_bounds)]
#![feature(exhaustive_patterns)]
+#![feature(str_split_once)]
#[macro_use]
extern crate rustc_macros;
let deployment_target = env::var("MACOSX_DEPLOYMENT_TARGET").ok();
let version = deployment_target
.as_ref()
- .and_then(|s| {
- let mut i = s.splitn(2, '.');
- i.next().and_then(|a| i.next().map(|b| (a, b)))
- })
+ .and_then(|s| s.split_once('.'))
.and_then(|(a, b)| a.parse::<u32>().and_then(|a| b.parse::<u32>().map(|b| (a, b))).ok());
version.unwrap_or((10, 7))
Ok(TargetTriple::TargetPath(canonicalized_path))
}
+ /// Creates a target triple from its alias
+ pub fn from_alias(triple: String) -> Self {
+ macro_rules! target_aliases {
+ ( $(($alias:literal, $target:literal ),)+ ) => {
+ match triple.as_str() {
+ $( $alias => TargetTriple::from_triple($target), )+
+ _ => TargetTriple::TargetTriple(triple),
+ }
+ }
+ }
+
+ target_aliases! {
+            // `x86_64-pc-solaris` is an alias for `x86_64-sun-solaris` for backwards compatibility reasons.
+ // (See <https://github.com/rust-lang/rust/issues/40531>.)
+ ("x86_64-pc-solaris", "x86_64-sun-solaris"),
+ }
+ }
+
/// Returns a string triple for this target.
///
/// If this target is a path, the file name (without extension) is returned.
});
};
- typeck_results
+ if let Some(cause) = typeck_results
.generator_interior_types
.iter()
.find(|ty::GeneratorInteriorTypeCause { ty, .. }| ty_matches(ty))
- .map(|cause| {
- // Check to see if any awaited expressions have the target type.
- let from_awaited_ty = visitor
- .awaits
- .into_iter()
- .map(|id| hir.expect_expr(id))
- .find(|await_expr| {
- let ty = typeck_results.expr_ty_adjusted(&await_expr);
- debug!(
- "maybe_note_obligation_cause_for_async_await: await_expr={:?}",
- await_expr
- );
- ty_matches(ty)
- })
- .map(|expr| expr.span);
- let ty::GeneratorInteriorTypeCause { span, scope_span, yield_span, expr, .. } =
- cause;
+ {
+ // Check to see if any awaited expressions have the target type.
+ let from_awaited_ty = visitor
+ .awaits
+ .into_iter()
+ .map(|id| hir.expect_expr(id))
+ .find(|await_expr| {
+ let ty = typeck_results.expr_ty_adjusted(&await_expr);
+ debug!(
+ "maybe_note_obligation_cause_for_async_await: await_expr={:?}",
+ await_expr
+ );
+ ty_matches(ty)
+ })
+ .map(|expr| expr.span);
+ let ty::GeneratorInteriorTypeCause { span, scope_span, yield_span, expr, .. } = cause;
- interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(*span));
- interior_extra_info = Some((*scope_span, *yield_span, *expr, from_awaited_ty));
- });
+ interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(*span));
+ interior_extra_info = Some((*scope_span, *yield_span, *expr, from_awaited_ty));
+ };
debug!(
"maybe_note_obligation_cause_for_async_await: interior_or_upvar={:?} \
);
nested.push(Obligation::new(
obligation.cause.clone(),
- obligation.param_env.clone(),
+ obligation.param_env,
normalized_super_trait,
));
}
);
nested.push(Obligation::new(
obligation.cause.clone(),
- obligation.param_env.clone(),
+ obligation.param_env,
normalized_bound,
));
}
--- /dev/null
+[package]
+name = "rustc_type_ir"
+version = "0.0.0"
+authors = ["The Rust Project Developers"]
+edition = "2018"
+
+[lib]
+doctest = false
+
+[dependencies]
+bitflags = "1.2.1"
+rustc_index = { path = "../rustc_index" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_data_structures = { path = "../rustc_data_structures" }
--- /dev/null
+#![feature(never_type)]
+#![feature(const_panic)]
+#![feature(control_flow_enum)]
+
+#[macro_use]
+extern crate bitflags;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+bitflags! {
+ /// Flags that we track on types. These flags are propagated upwards
+ /// through the type during type construction, so that we can quickly check
+ /// whether the type has various kinds of types in it without recursing
+ /// over the type itself.
+ pub struct TypeFlags: u32 {
+ // Does this have parameters? Used to determine whether substitution is
+ // required.
+ /// Does this have `Param`?
+ const HAS_TY_PARAM = 1 << 0;
+ /// Does this have `ReEarlyBound`?
+ const HAS_RE_PARAM = 1 << 1;
+ /// Does this have `ConstKind::Param`?
+ const HAS_CT_PARAM = 1 << 2;
+
+ const NEEDS_SUBST = TypeFlags::HAS_TY_PARAM.bits
+ | TypeFlags::HAS_RE_PARAM.bits
+ | TypeFlags::HAS_CT_PARAM.bits;
+
+ /// Does this have `Infer`?
+ const HAS_TY_INFER = 1 << 3;
+ /// Does this have `ReVar`?
+ const HAS_RE_INFER = 1 << 4;
+ /// Does this have `ConstKind::Infer`?
+ const HAS_CT_INFER = 1 << 5;
+
+ /// Does this have inference variables? Used to determine whether
+ /// inference is required.
+ const NEEDS_INFER = TypeFlags::HAS_TY_INFER.bits
+ | TypeFlags::HAS_RE_INFER.bits
+ | TypeFlags::HAS_CT_INFER.bits;
+
+ /// Does this have `Placeholder`?
+ const HAS_TY_PLACEHOLDER = 1 << 6;
+ /// Does this have `RePlaceholder`?
+ const HAS_RE_PLACEHOLDER = 1 << 7;
+ /// Does this have `ConstKind::Placeholder`?
+ const HAS_CT_PLACEHOLDER = 1 << 8;
+
+ /// `true` if there are "names" of regions and so forth
+ /// that are local to a particular fn/inferctxt
+ const HAS_FREE_LOCAL_REGIONS = 1 << 9;
+
+ /// `true` if there are "names" of types and regions and so forth
+ /// that are local to a particular fn
+ const HAS_FREE_LOCAL_NAMES = TypeFlags::HAS_TY_PARAM.bits
+ | TypeFlags::HAS_CT_PARAM.bits
+ | TypeFlags::HAS_TY_INFER.bits
+ | TypeFlags::HAS_CT_INFER.bits
+ | TypeFlags::HAS_TY_PLACEHOLDER.bits
+ | TypeFlags::HAS_CT_PLACEHOLDER.bits
+ | TypeFlags::HAS_FREE_LOCAL_REGIONS.bits;
+
+ /// Does this have `Projection`?
+ const HAS_TY_PROJECTION = 1 << 10;
+ /// Does this have `Opaque`?
+ const HAS_TY_OPAQUE = 1 << 11;
+ /// Does this have `ConstKind::Unevaluated`?
+ const HAS_CT_PROJECTION = 1 << 12;
+
+ /// Could this type be normalized further?
+ const HAS_PROJECTION = TypeFlags::HAS_TY_PROJECTION.bits
+ | TypeFlags::HAS_TY_OPAQUE.bits
+ | TypeFlags::HAS_CT_PROJECTION.bits;
+
+ /// Is an error type/const reachable?
+ const HAS_ERROR = 1 << 13;
+
+ /// Does this have any region that "appears free" in the type?
+ /// Basically anything but `ReLateBound` and `ReErased`.
+ const HAS_FREE_REGIONS = 1 << 14;
+
+ /// Does this have any `ReLateBound` regions? Used to check
+ /// if a global bound is safe to evaluate.
+ const HAS_RE_LATE_BOUND = 1 << 15;
+
+ /// Does this have any `ReErased` regions?
+ const HAS_RE_ERASED = 1 << 16;
+
+ /// Does this value have parameters/placeholders/inference variables which could be
+ /// replaced later, in a way that would change the results of `impl` specialization?
+ const STILL_FURTHER_SPECIALIZABLE = 1 << 17;
+ }
+}
+
+rustc_index::newtype_index! {
+ /// A [De Bruijn index][dbi] is a standard means of representing
+ /// regions (and perhaps later types) in a higher-ranked setting. In
+ /// particular, imagine a type like this:
+ ///
+ /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
+ /// ^ ^ | | |
+ /// | | | | |
+ /// | +------------+ 0 | |
+ /// | | |
+ /// +----------------------------------+ 1 |
+ /// | |
+ /// +----------------------------------------------+ 0
+ ///
+ /// In this type, there are two binders (the outer fn and the inner
+ /// fn). We need to be able to determine, for any given region, which
+ /// fn type it is bound by, the inner or the outer one. There are
+ /// various ways you can do this, but a De Bruijn index is one of the
+ /// more convenient and has some nice properties. The basic idea is to
+ /// count the number of binders, inside out. Some examples should help
+ /// clarify what I mean.
+ ///
+ /// Let's start with the reference type `&'b isize` that is the first
+ /// argument to the inner function. This region `'b` is assigned a De
+ /// Bruijn index of 0, meaning "the innermost binder" (in this case, a
+ /// fn). The region `'a` that appears in the second argument type (`&'a
+ /// isize`) would then be assigned a De Bruijn index of 1, meaning "the
+ /// second-innermost binder". (These indices are written on the arrows
+ /// in the diagram).
+ ///
+ /// What is interesting is that De Bruijn index attached to a particular
+ /// variable will vary depending on where it appears. For example,
+ /// the final type `&'a char` also refers to the region `'a` declared on
+ /// the outermost fn. But this time, this reference is not nested within
+ /// any other binders (i.e., it is not an argument to the inner fn, but
+ /// rather the outer one). Therefore, in this case, it is assigned a
+ /// De Bruijn index of 0, because the innermost binder in that location
+ /// is the outer fn.
+ ///
+ /// [dbi]: https://en.wikipedia.org/wiki/De_Bruijn_index
+ pub struct DebruijnIndex {
+ DEBUG_FORMAT = "DebruijnIndex({})",
+ const INNERMOST = 0,
+ }
+}
+
+impl DebruijnIndex {
+ /// Returns the resulting index when this value is moved into
+ /// `amount` number of new binders. So, e.g., if you had
+ ///
+ /// for<'a> fn(&'a x)
+ ///
+ /// and you wanted to change it to
+ ///
+ /// for<'a> fn(for<'b> fn(&'a x))
+ ///
+ /// you would need to shift the index for `'a` into a new binder.
+ #[must_use]
+ pub fn shifted_in(self, amount: u32) -> DebruijnIndex {
+ DebruijnIndex::from_u32(self.as_u32() + amount)
+ }
+
+ /// Update this index in place by shifting it "in" through
+ /// `amount` number of binders.
+ pub fn shift_in(&mut self, amount: u32) {
+ *self = self.shifted_in(amount);
+ }
+
+ /// Returns the resulting index when this value is moved out from
+ /// `amount` number of new binders.
+ #[must_use]
+ pub fn shifted_out(self, amount: u32) -> DebruijnIndex {
+ DebruijnIndex::from_u32(self.as_u32() - amount)
+ }
+
+ /// Update in place by shifting out from `amount` binders.
+ pub fn shift_out(&mut self, amount: u32) {
+ *self = self.shifted_out(amount);
+ }
+
+ /// Adjusts any De Bruijn indices so as to make `to_binder` the
+ /// innermost binder. That is, if we have something bound at `to_binder`,
+ /// it will now be bound at INNERMOST. This is an appropriate thing to do
+ /// when moving a region out from inside binders:
+ ///
+ /// ```
+ /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _)
+ /// // Binder: D3 D2 D1 ^^
+ /// ```
+ ///
+ /// Here, the region `'a` would have the De Bruijn index D3,
+ /// because it is the bound 3 binders out. However, if we wanted
+ /// to refer to that region `'a` in the second argument (the `_`),
+ /// those two binders would not be in scope. In that case, we
+ /// might invoke `shift_out_to_binder(D3)`. This would adjust the
+ /// De Bruijn index of `'a` to D1 (the innermost binder).
+ ///
+ /// If we invoke `shift_out_to_binder` and the region is in fact
+    /// bound by one of the binders we are shifting out of, that is an
+    /// error (and should trigger an assertion failure).
+ pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self {
+ self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32())
+ }
+}
+
+impl<CTX> HashStable<CTX> for DebruijnIndex {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.as_u32().hash_stable(ctx, hasher);
+ }
+}
// FIXME(60707): Consider removing hack with principled solution.
self.check_expr_has_type_or_error(scrut, self.tcx.types.bool, |_| {})
} else {
- self.demand_scrutinee_type(arms, scrut)
+ self.demand_scrutinee_type(scrut, arms_contain_ref_bindings(arms), arms.is_empty())
};
// If there are no arms, that is a diverging match; a special case.
self.diverges.set(Diverges::Maybe);
match g {
hir::Guard::If(e) => {
- self.check_expr_has_type_or_error(e, tcx.types.bool, |_| {})
+ self.check_expr_has_type_or_error(e, tcx.types.bool, |_| {});
+ }
+ hir::Guard::IfLet(pat, e) => {
+ let scrutinee_ty = self.demand_scrutinee_type(
+ e,
+ pat.contains_explicit_ref_binding(),
+ false,
+ );
+ self.check_pat_top(&pat, scrutinee_ty, None, true);
}
};
}
fn demand_scrutinee_type(
&self,
- arms: &'tcx [hir::Arm<'tcx>],
scrut: &'tcx hir::Expr<'tcx>,
+ contains_ref_bindings: Option<hir::Mutability>,
+ no_arms: bool,
) -> Ty<'tcx> {
// Not entirely obvious: if matches may create ref bindings, we want to
// use the *precise* type of the scrutinee, *not* some supertype, as
// (once introduced) is populated by the time we get here.
//
// See #44848.
- let contains_ref_bindings = arms
- .iter()
- .filter_map(|a| a.pat.contains_explicit_ref_binding())
- .max_by_key(|m| match *m {
- hir::Mutability::Mut => 1,
- hir::Mutability::Not => 0,
- });
-
if let Some(m) = contains_ref_bindings {
self.check_expr_with_needs(scrut, Needs::maybe_mut_place(m))
- } else if arms.is_empty() {
+ } else if no_arms {
self.check_expr(scrut)
} else {
// ...but otherwise we want to use any supertype of the
}
}
}
+
+fn arms_contain_ref_bindings(arms: &'tcx [hir::Arm<'tcx>]) -> Option<hir::Mutability> {
+ arms.iter().filter_map(|a| a.pat.contains_explicit_ref_binding()).max_by_key(|m| match *m {
+ hir::Mutability::Mut => 1,
+ hir::Mutability::Not => 0,
+ })
+}
if no_accessible_remaining_fields {
self.report_no_accessible_fields(adt_ty, span);
} else {
- self.report_missing_field(adt_ty, span, remaining_fields);
+ self.report_missing_fields(adt_ty, span, remaining_fields);
}
}
///
/// error: aborting due to previous error
/// ```
- fn report_missing_field(
+ fn report_missing_fields(
&self,
adt_ty: Ty<'tcx>,
span: Span,
Guard::If(ref e) => {
self.visit_expr(e);
}
+ Guard::IfLet(ref pat, ref e) => {
+ self.visit_pat(pat);
+ self.visit_expr(e);
+ }
}
let mut scope_var_ids =
use rustc_hir::{ExprKind, Node, QPath};
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_middle::hir::map as hir_map;
+use rustc_middle::ty::fast_reject::simplify_type;
use rustc_middle::ty::print::with_crate_prefix;
use rustc_middle::ty::{
self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable, WithConstness,
} else {
"items from traits can only be used if the trait is implemented and in scope"
});
+ let candidates_len = candidates.len();
let message = |action| {
format!(
"the following {traits_define} an item `{name}`, perhaps you need to {action} \
{one_of_them}:",
traits_define =
- if candidates.len() == 1 { "trait defines" } else { "traits define" },
+ if candidates_len == 1 { "trait defines" } else { "traits define" },
action = action,
- one_of_them = if candidates.len() == 1 { "it" } else { "one of them" },
+ one_of_them = if candidates_len == 1 { "it" } else { "one of them" },
name = item_name,
)
};
// Obtain the span for `param` and use it for a structured suggestion.
- let mut suggested = false;
if let (Some(ref param), Some(ref table)) =
(param_type, self.in_progress_typeck_results)
{
Applicability::MaybeIncorrect,
);
}
- suggested = true;
+ return;
}
Node::Item(hir::Item {
kind: hir::ItemKind::Trait(.., bounds, _),
}),
Applicability::MaybeIncorrect,
);
- suggested = true;
+ return;
}
_ => {}
}
}
}
- if !suggested {
- let action = if let Some(param) = param_type {
- format!("restrict type parameter `{}` with", param)
- } else {
- // FIXME: it might only need to be imported into scope, not implemented.
- "implement".to_string()
- };
- let mut use_note = true;
- if let [trait_info] = &candidates[..] {
- if let Some(span) = self.tcx.hir().span_if_local(trait_info.def_id) {
- err.span_note(
- self.tcx.sess.source_map().guess_head_span(span),
- &format!(
- "`{}` defines an item `{}`, perhaps you need to {} it",
- self.tcx.def_path_str(trait_info.def_id),
- item_name,
- action
- ),
- );
- use_note = false
+ let (potential_candidates, explicitly_negative) = if param_type.is_some() {
+ // FIXME: Even though negative bounds are not implemented, we could maybe handle
+ // cases where a positive bound implies a negative impl.
+ (candidates, Vec::new())
+ } else if let Some(simp_rcvr_ty) = simplify_type(self.tcx, rcvr_ty, true) {
+ let mut potential_candidates = Vec::new();
+ let mut explicitly_negative = Vec::new();
+ for candidate in candidates {
+ // Check if there's a negative impl of `candidate` for `rcvr_ty`
+ if self
+ .tcx
+ .all_impls(candidate.def_id)
+ .filter(|imp_did| {
+ self.tcx.impl_polarity(*imp_did) == ty::ImplPolarity::Negative
+ })
+ .any(|imp_did| {
+ let imp = self.tcx.impl_trait_ref(imp_did).unwrap();
+ let imp_simp = simplify_type(self.tcx, imp.self_ty(), true);
+ imp_simp.map(|s| s == simp_rcvr_ty).unwrap_or(false)
+ })
+ {
+ explicitly_negative.push(candidate);
+ } else {
+ potential_candidates.push(candidate);
}
}
- if use_note {
+ (potential_candidates, explicitly_negative)
+ } else {
+            // We don't know enough about `rcvr_ty` to make proper suggestions.
+ (candidates, Vec::new())
+ };
+
+ let action = if let Some(param) = param_type {
+ format!("restrict type parameter `{}` with", param)
+ } else {
+ // FIXME: it might only need to be imported into scope, not implemented.
+ "implement".to_string()
+ };
+ match &potential_candidates[..] {
+ [] => {}
+ [trait_info] if trait_info.def_id.is_local() => {
+ let span = self.tcx.hir().span_if_local(trait_info.def_id).unwrap();
+ err.span_note(
+ self.tcx.sess.source_map().guess_head_span(span),
+ &format!(
+ "`{}` defines an item `{}`, perhaps you need to {} it",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name,
+ action
+ ),
+ );
+ }
+ trait_infos => {
let mut msg = message(action);
- for (i, trait_info) in candidates.iter().enumerate() {
+ for (i, trait_info) in trait_infos.iter().enumerate() {
msg.push_str(&format!(
"\ncandidate #{}: `{}`",
i + 1,
self.tcx.def_path_str(trait_info.def_id),
));
}
- err.note(&msg[..]);
+ err.note(&msg);
+ }
+ }
+ match &explicitly_negative[..] {
+ [] => {}
+ [trait_info] => {
+ let msg = format!(
+ "the trait `{}` defines an item `{}`, but is explicitely unimplemented",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name
+ );
+ err.note(&msg);
+ }
+ trait_infos => {
+ let mut msg = format!(
+ "the following traits define an item `{}`, but are explicitely unimplemented:",
+ item_name
+ );
+ for trait_info in trait_infos {
+ msg.push_str(&format!("\n{}", self.tcx.def_path_str(trait_info.def_id)));
+ }
+ err.note(&msg);
}
}
}
// inference algorithm will reject it).
// Equate the type variables for the upvars with the actual types.
- let final_upvar_tys = self.final_upvar_tys(closure_hir_id);
+ let final_upvar_tys = self.final_upvar_tys(closure_def_id);
debug!(
"analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}",
closure_hir_id, substs, final_upvar_tys
}
// Returns a list of `Ty`s for each upvar.
- fn final_upvar_tys(&self, closure_id: hir::HirId) -> Vec<Ty<'tcx>> {
+ fn final_upvar_tys(&self, closure_id: DefId) -> Vec<Ty<'tcx>> {
// Presently an unboxed closure type cannot "escape" out of a
// function, so we will only encounter ones that originated in the
// local crate or were inlined into it along with some function.
// This may change if abstract return types of some sort are
// implemented.
let tcx = self.tcx;
- let closure_def_id = tcx.hir().local_def_id(closure_id);
self.typeck_results
.borrow()
- .closure_captures
- .get(&closure_def_id.to_def_id())
- .iter()
- .flat_map(|upvars| {
- upvars.iter().map(|(&var_hir_id, _)| {
- let upvar_ty = self.node_ty(var_hir_id);
- let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id);
- let capture = self.typeck_results.borrow().upvar_capture(upvar_id);
-
- debug!("var_id={:?} upvar_ty={:?} capture={:?}", var_hir_id, upvar_ty, capture);
-
- match capture {
- ty::UpvarCapture::ByValue(_) => upvar_ty,
- ty::UpvarCapture::ByRef(borrow) => tcx.mk_ref(
- borrow.region,
- ty::TypeAndMut { ty: upvar_ty, mutbl: borrow.kind.to_mutbl_lossy() },
- ),
- }
- })
+ .closure_min_captures_flattened(closure_id)
+ .map(|captured_place| {
+ let upvar_ty = captured_place.place.ty();
+ let capture = captured_place.info.capture_kind;
+
+ debug!(
+ "place={:?} upvar_ty={:?} capture={:?}",
+ captured_place.place, upvar_ty, capture
+ );
+
+ match capture {
+ ty::UpvarCapture::ByValue(_) => upvar_ty,
+ ty::UpvarCapture::ByRef(borrow) => tcx.mk_ref(
+ borrow.region,
+ ty::TypeAndMut { ty: upvar_ty, mutbl: borrow.kind.to_mutbl_lossy() },
+ ),
+ }
})
.collect()
}
closure_captures.insert(*var_hir_id, upvar_id);
- let new_capture_kind = if let Some(capture_kind) =
- upvar_capture_map.get(&upvar_id)
- {
- // upvar_capture_map only stores the UpvarCapture (CaptureKind),
- // so we create a fake capture info with no expression.
- let fake_capture_info =
- ty::CaptureInfo { expr_id: None, capture_kind: capture_kind.clone() };
- determine_capture_info(fake_capture_info, capture_info).capture_kind
- } else {
- capture_info.capture_kind
- };
+ let new_capture_kind =
+ if let Some(capture_kind) = upvar_capture_map.get(&upvar_id) {
+ // upvar_capture_map only stores the UpvarCapture (CaptureKind),
+ // so we create a fake capture info with no expression.
+ let fake_capture_info =
+ ty::CaptureInfo { expr_id: None, capture_kind: *capture_kind };
+ determine_capture_info(fake_capture_info, capture_info).capture_kind
+ } else {
+ capture_info.capture_kind
+ };
upvar_capture_map.insert(upvar_id, new_capture_kind);
}
}
hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => (),
}
wbcx.visit_body(body);
+ wbcx.visit_min_capture_map();
wbcx.visit_upvar_capture_map();
wbcx.visit_closures();
wbcx.visit_liberated_fn_sigs();
}
impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn visit_min_capture_map(&mut self) {
+ let mut min_captures_wb = ty::MinCaptureInformationMap::with_capacity_and_hasher(
+ self.fcx.typeck_results.borrow().closure_min_captures.len(),
+ Default::default(),
+ );
+ for (closure_def_id, root_min_captures) in
+ self.fcx.typeck_results.borrow().closure_min_captures.iter()
+ {
+ let mut root_var_map_wb = ty::RootVariableMinCaptureList::with_capacity_and_hasher(
+ root_min_captures.len(),
+ Default::default(),
+ );
+ for (var_hir_id, min_list) in root_min_captures.iter() {
+ let min_list_wb = min_list
+ .iter()
+ .map(|captured_place| {
+ let locatable = captured_place.info.expr_id.unwrap_or(
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id.expect_local()),
+ );
+
+ self.resolve(captured_place.clone(), &locatable)
+ })
+ .collect();
+ root_var_map_wb.insert(*var_hir_id, min_list_wb);
+ }
+ min_captures_wb.insert(*closure_def_id, root_var_map_wb);
+ }
+
+ self.typeck_results.closure_min_captures = min_captures_wb;
+ }
+
fn visit_upvar_capture_map(&mut self) {
for (upvar_id, upvar_capture) in self.fcx.typeck_results.borrow().upvar_capture_map.iter() {
let new_upvar_capture = match *upvar_capture {
self.tcx().sess,
span,
E0212,
- "cannot extract an associated type from a higher-ranked trait bound \
- in this context"
+ "cannot use the associated type of a trait \
+ with uninferred generic parameters"
);
match self.node() {
// * It must be an associated type for this trait (*not* a
// supertrait).
if let ty::Projection(projection) = ty.kind() {
- if projection.substs == trait_identity_substs
+ projection.substs == trait_identity_substs
&& tcx.associated_item(projection.item_def_id).container.id() == def_id
- {
- true
- } else {
- false
- }
} else {
false
}
use rustc_infer::infer::InferCtxt;
use rustc_middle::hir::place::ProjectionKind;
use rustc_middle::ty::{self, adjustment, TyCtxt};
-use rustc_span::Span;
use rustc_target::abi::VariantIdx;
use crate::mem_categorization as mc;
}));
}
- /// Walk closure captures but using `closure_caputes` instead
- /// of `closure_min_captures`.
- ///
- /// This is needed because clippy uses `ExprUseVisitor` after TypeckResults
- /// are written back. We don't currently writeback min_captures to
- /// TypeckResults.
- fn walk_captures_closure_captures(&mut self, closure_expr: &hir::Expr<'_>) {
- // FIXME(arora-aman): Remove this function once rust-lang/project-rfc-2229#18
- // is completed.
- debug!("walk_captures_closure_captures({:?}), ", closure_expr);
-
- let closure_def_id = self.tcx().hir().local_def_id(closure_expr.hir_id).to_def_id();
- let cl_span = self.tcx().hir().span(closure_expr.hir_id);
-
- let captures = &self.mc.typeck_results.closure_captures[&closure_def_id];
-
- for (&var_id, &upvar_id) in captures {
- let upvar_capture = self.mc.typeck_results.upvar_capture(upvar_id);
- let captured_place =
- return_if_err!(self.cat_captured_var(closure_expr.hir_id, cl_span, var_id));
- match upvar_capture {
- ty::UpvarCapture::ByValue(_) => {
- let mode = copy_or_move(&self.mc, &captured_place);
- self.delegate.consume(&captured_place, captured_place.hir_id, mode);
- }
- ty::UpvarCapture::ByRef(upvar_borrow) => {
- self.delegate.borrow(&captured_place, captured_place.hir_id, upvar_borrow.kind);
- }
- }
- }
- }
-
/// Handle the case where the current body contains a closure.
///
/// When the current body being handled is a closure, then we must make sure that
let place = &captured_place.place;
let capture_info = captured_place.info;
- let upvar_id = if body_owner_is_closure {
+ let place_base = if body_owner_is_closure {
// Mark the place to be captured by the enclosing closure
- ty::UpvarId::new(*var_hir_id, self.body_owner)
+ PlaceBase::Upvar(ty::UpvarId::new(*var_hir_id, self.body_owner))
} else {
- ty::UpvarId::new(*var_hir_id, closure_def_id.expect_local())
+ // If the body owner isn't a closure then the variable must
+ // be a local variable
+ PlaceBase::Local(*var_hir_id)
};
let place_with_id = PlaceWithHirId::new(
capture_info.expr_id.unwrap_or(closure_expr.hir_id),
place.base_ty,
- PlaceBase::Upvar(upvar_id),
+ place_base,
place.projections.clone(),
);
}
}
}
- } else if self.mc.typeck_results.closure_captures.contains_key(&closure_def_id) {
- // Handle the case where clippy calls ExprUseVisitor after
- self.walk_captures_closure_captures(closure_expr)
}
}
-
- fn cat_captured_var(
- &mut self,
- closure_hir_id: hir::HirId,
- closure_span: Span,
- var_id: hir::HirId,
- ) -> mc::McResult<PlaceWithHirId<'tcx>> {
- // Create the place for the variable being borrowed, from the
- // perspective of the creator (parent) of the closure.
- let var_ty = self.mc.node_ty(var_id)?;
- self.mc.cat_res(closure_hir_id, closure_span, var_ty, Res::Local(var_id))
- }
}
fn copy_or_move<'a, 'tcx>(
/// Mutably borrows the owned node. Unlike `reborrow_mut`, this is safe,
/// because the return value cannot be used to destroy the node itself,
/// and there cannot be other references to the tree (except during the
- /// process of `into_iter` or `drop`, but that is a horrific already).
+ /// process of `into_iter` or `drop`, but that is horrific already).
pub fn borrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
NodeRef { height: self.height, node: self.node, _marker: PhantomData }
}
let internal_node = NodeRef { height: self.height, node: top, _marker: PhantomData };
*self = internal_node.first_edge().descend();
- self.borrow_mut().clear_parent_link();
+ self.clear_parent_link();
unsafe {
Global.deallocate(top.cast(), Layout::new::<InternalNode<K, V>>());
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
- /// Set or clear the node's link to its parent edge,
+ /// Sets the node's link to its parent edge,
/// without invalidating other references to the node.
fn set_parent_link(&mut self, parent: NonNull<InternalNode<K, V>>, parent_idx: usize) {
let leaf = Self::as_leaf_ptr(self);
unsafe { (*leaf).parent = Some(parent) };
unsafe { (*leaf).parent_idx.write(parent_idx as u16) };
}
+}
- /// Clear the node's link to its parent edge.
- /// This only makes sense when there are no other references to the node.
+impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
+ /// Clears the root's link to its parent edge.
fn clear_parent_link(&mut self) {
- let leaf = Self::as_leaf_mut(self);
+ let leaf = NodeRef::as_leaf_mut(&mut self.borrow_mut());
leaf.parent = None;
}
}
ForceResult::Internal(internal) => {
let node = ptr::read(internal.reborrow().edge_at(idx + 1));
let mut edge = Root { node, height: internal.height - 1, _marker: PhantomData };
- // In practice, clearing the parent is a waste of time, because we will
+ // Currently, clearing the parent link is superfluous, because we will
// insert the node elsewhere and set its parent link again.
- edge.borrow_mut().clear_parent_link();
+ edge.clear_parent_link();
Some(edge)
}
};
ForceResult::Internal(mut internal) => {
let node = slice_remove(internal.reborrow_mut().into_edge_area_slice(), 0);
let mut edge = Root { node, height: internal.height - 1, _marker: PhantomData };
- // In practice, clearing the parent is a waste of time, because we will
+ // Currently, clearing the parent link is superfluous, because we will
// insert the node elsewhere and set its parent link again.
- edge.borrow_mut().clear_parent_link();
+ edge.clear_parent_link();
internal.correct_childrens_parent_links(0..old_len);
(key, val)
}
}
+
+ /// Replace the key and value that the KV handle refers to.
+ pub fn replace_kv(&mut self, k: K, v: V) -> (K, V) {
+ let (key, val) = self.kv_mut();
+ (mem::replace(key, k), mem::replace(val, v))
+ }
}
impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
/// Chooses a balancing context involving the node as a child, thus between
/// the KV immediately to the left or to the right in the parent node.
/// Returns an `Err` if there is no parent.
+ /// Panics if the parent is empty.
///
- /// This method optimizes for a node that has fewer elements than its left
- /// and right siblings, if they exist, by preferring the left parent KV.
- /// Merging with the left sibling is faster, since we only need to move
+ /// Prefers the left side, to be optimal if the given node is somehow
+ /// underfull, meaning here only that it has fewer elements than its left
+ /// sibling and than its right sibling, if they exist. In that case,
+ /// merging with the left sibling is faster, since we only need to move
/// the node's N elements, instead of shifting them to the right and moving
/// more than N elements in front. Stealing from the left sibling is also
/// typically faster, since we only need to shift the node's N elements to
/// the left.
pub fn choose_parent_kv(self) -> Result<LeftOrRight<BalancingContext<'a, K, V>>, Self> {
match unsafe { ptr::read(&self) }.ascend() {
- Ok(parent) => match parent.left_kv() {
+ Ok(parent_edge) => match parent_edge.left_kv() {
Ok(left_parent_kv) => Ok(LeftOrRight::Left(BalancingContext {
parent: unsafe { ptr::read(&left_parent_kv) },
left_child: left_parent_kv.left_edge().descend(),
right_child: self,
})),
- Err(parent) => match parent.right_kv() {
+ Err(parent_edge) => match parent_edge.right_kv() {
Ok(right_parent_kv) => Ok(LeftOrRight::Right(BalancingContext {
parent: unsafe { ptr::read(&right_parent_kv) },
left_child: self,
right_child: right_parent_kv.right_edge().descend(),
})),
- Err(_) => unreachable!("empty non-root node"),
+ Err(_) => unreachable!("empty internal node"),
},
},
Err(root) => Err(root),
///
/// Panics unless we `.can_merge()`.
pub fn merge(
- mut self,
+ self,
track_edge_idx: Option<LeftOrRight<usize>>,
) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ let Handle { node: mut parent_node, idx: parent_idx, _marker } = self.parent;
+ let old_parent_len = parent_node.len();
let mut left_node = self.left_child;
- let left_len = left_node.len();
+ let old_left_len = left_node.len();
let right_node = self.right_child;
let right_len = right_node.len();
+ let new_left_len = old_left_len + 1 + right_len;
- assert!(left_len + right_len < CAPACITY);
+ assert!(new_left_len <= CAPACITY);
assert!(match track_edge_idx {
None => true,
- Some(LeftOrRight::Left(idx)) => idx <= left_len,
+ Some(LeftOrRight::Left(idx)) => idx <= old_left_len,
Some(LeftOrRight::Right(idx)) => idx <= right_len,
});
unsafe {
- *left_node.reborrow_mut().into_len_mut() += right_len as u16 + 1;
+ *left_node.reborrow_mut().into_len_mut() = new_left_len as u16;
- let parent_key = slice_remove(
- self.parent.node.reborrow_mut().into_key_area_slice(),
- self.parent.idx,
- );
- left_node.reborrow_mut().into_key_area_mut_at(left_len).write(parent_key);
+ let parent_key =
+ slice_remove(parent_node.reborrow_mut().into_key_area_slice(), parent_idx);
+ left_node.reborrow_mut().into_key_area_mut_at(old_left_len).write(parent_key);
ptr::copy_nonoverlapping(
right_node.reborrow().key_area().as_ptr(),
- left_node.reborrow_mut().into_key_area_slice().as_mut_ptr().add(left_len + 1),
+ left_node.reborrow_mut().into_key_area_slice().as_mut_ptr().add(old_left_len + 1),
right_len,
);
- let parent_val = slice_remove(
- self.parent.node.reborrow_mut().into_val_area_slice(),
- self.parent.idx,
- );
- left_node.reborrow_mut().into_val_area_mut_at(left_len).write(parent_val);
+ let parent_val =
+ slice_remove(parent_node.reborrow_mut().into_val_area_slice(), parent_idx);
+ left_node.reborrow_mut().into_val_area_mut_at(old_left_len).write(parent_val);
ptr::copy_nonoverlapping(
right_node.reborrow().val_area().as_ptr(),
- left_node.reborrow_mut().into_val_area_slice().as_mut_ptr().add(left_len + 1),
+ left_node.reborrow_mut().into_val_area_slice().as_mut_ptr().add(old_left_len + 1),
right_len,
);
- slice_remove(
- &mut self.parent.node.reborrow_mut().into_edge_area_slice(),
- self.parent.idx + 1,
- );
- let parent_old_len = self.parent.node.len();
- self.parent.node.correct_childrens_parent_links(self.parent.idx + 1..parent_old_len);
- *self.parent.node.reborrow_mut().into_len_mut() -= 1;
+ slice_remove(&mut parent_node.reborrow_mut().into_edge_area_slice(), parent_idx + 1);
+ parent_node.correct_childrens_parent_links(parent_idx + 1..old_parent_len);
+ *parent_node.reborrow_mut().into_len_mut() -= 1;
- if self.parent.node.height > 1 {
+ if parent_node.height > 1 {
// SAFETY: the height of the nodes being merged is one below the height
// of the node of this edge, thus above zero, so they are internal.
let mut left_node = left_node.reborrow_mut().cast_to_internal_unchecked();
let right_node = right_node.cast_to_internal_unchecked();
ptr::copy_nonoverlapping(
right_node.reborrow().edge_area().as_ptr(),
- left_node.reborrow_mut().into_edge_area_slice().as_mut_ptr().add(left_len + 1),
+ left_node
+ .reborrow_mut()
+ .into_edge_area_slice()
+ .as_mut_ptr()
+ .add(old_left_len + 1),
right_len + 1,
);
- left_node.correct_childrens_parent_links(left_len + 1..=left_len + 1 + right_len);
+ left_node.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1);
Global.deallocate(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
} else {
let new_idx = match track_edge_idx {
None => 0,
Some(LeftOrRight::Left(idx)) => idx,
- Some(LeftOrRight::Right(idx)) => left_len + 1 + idx,
+ Some(LeftOrRight::Right(idx)) => old_left_len + 1 + idx,
};
Handle::new_edge(left_node, new_idx)
}
unsafe {
let (k, v, edge) = self.left_child.pop();
- let k = mem::replace(self.parent.kv_mut().0, k);
- let v = mem::replace(self.parent.kv_mut().1, v);
+ let (k, v) = self.parent.replace_kv(k, v);
match self.right_child.reborrow_mut().force() {
ForceResult::Leaf(mut leaf) => leaf.push_front(k, v),
unsafe {
let (k, v, edge) = self.right_child.pop_front();
- let k = mem::replace(self.parent.kv_mut().0, k);
- let v = mem::replace(self.parent.kv_mut().1, v);
+ let (k, v) = self.parent.replace_kv(k, v);
match self.left_child.reborrow_mut().force() {
ForceResult::Leaf(mut leaf) => leaf.push(k, v),
/// This does stealing similar to `steal_left` but steals multiple elements at once.
pub fn bulk_steal_left(&mut self, count: usize) {
+ assert!(count > 0);
unsafe {
let left_node = &mut self.left_child;
let old_left_len = left_node.len();
/// The symmetric clone of `bulk_steal_left`.
pub fn bulk_steal_right(&mut self, count: usize) {
+ assert!(count > 0);
unsafe {
let left_node = &mut self.left_child;
let old_left_len = left_node.len();
use super::map::MIN_LEN;
use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef};
use super::unwrap_unchecked;
-use core::mem;
impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
/// Removes a key-value pair from the tree, and returns that pair, as well as
pos = unsafe { new_pos.cast_to_leaf_unchecked() };
// Only if we merged, the parent (if any) has shrunk, but skipping
- // the following step does not pay off in benchmarks.
+ // the following step otherwise does not pay off in benchmarks.
//
// SAFETY: We won't destroy or rearrange the leaf where `pos` is at
// by handling its parent recursively; at worst we will destroy or
// rearrange the parent through the grandparent, thus change the
- // leaf's parent pointer.
+ // link to the parent inside the leaf.
if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() {
parent.into_node().handle_shrunk_node_recursively(handle_emptied_internal_root);
}
// The internal node may have been stolen from or merged. Go back right
// to find where the original KV ended up.
let mut internal = unsafe { unwrap_unchecked(left_hole.next_kv().ok()) };
- let old_key = mem::replace(internal.kv_mut().0, left_kv.0);
- let old_val = mem::replace(internal.kv_mut().1, left_kv.1);
+ let old_kv = internal.replace_kv(left_kv.0, left_kv.1);
let pos = internal.next_leaf_edge();
- ((old_key, old_val), pos)
+ (old_kv, pos)
}
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
- /// Stocks up a possibly underfull internal node, recursively.
- /// Climbs up until it reaches an ancestor that has elements to spare or the root.
+ /// Stocks up a possibly underfull internal node and its ancestors,
+ /// until it reaches an ancestor that has elements to spare or is the root.
fn handle_shrunk_node_recursively<F: FnOnce()>(mut self, handle_emptied_internal_root: F) {
loop {
self = match self.len() {
) -> Option<NodeRef<marker::Mut<'a>, K, V, marker::Internal>> {
match self.forget_type().choose_parent_kv() {
Ok(Left(left_parent_kv)) => {
- debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1);
+ debug_assert_eq!(left_parent_kv.right_child_len(), MIN_LEN - 1);
if left_parent_kv.can_merge() {
let pos = left_parent_kv.merge(None);
let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) };
}
}
Ok(Right(right_parent_kv)) => {
- debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1);
+ debug_assert_eq!(right_parent_kv.left_child_len(), MIN_LEN - 1);
if right_parent_kv.can_merge() {
let pos = right_parent_kv.merge(None);
let parent_edge = unsafe { unwrap_unchecked(pos.into_node().ascend().ok()) };
/// ```
#[unstable(feature = "map_first_last", issue = "62924")]
pub fn pop_first(&mut self) -> Option<T> {
- self.map.first_entry().map(|entry| entry.remove_entry().0)
+ self.map.pop_first().map(|kv| kv.0)
}
/// Removes the last value from the set and returns it, if any.
/// ```
#[unstable(feature = "map_first_last", issue = "62924")]
pub fn pop_last(&mut self) -> Option<T> {
- self.map.last_entry().map(|entry| entry.remove_entry().0)
+ self.map.pop_last().map(|kv| kv.0)
}
/// Adds a value to the set.
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IterMut<'_, T> {}
-impl<T> IterMut<'_, T> {
- /// Inserts the given element just after the element most recently returned by `.next()`.
- /// The inserted element does not appear in the iteration.
- ///
- /// This method will be removed soon.
- #[inline]
- #[unstable(
- feature = "linked_list_extras",
- reason = "this is probably better handled by a cursor type -- we'll see",
- issue = "27794"
- )]
- #[rustc_deprecated(
- reason = "Deprecated in favor of CursorMut methods. This method will be removed soon.",
- since = "1.47.0"
- )]
- pub fn insert_next(&mut self, element: T) {
- match self.head {
- // `push_back` is okay with aliasing `element` references
- None => self.list.push_back(element),
- Some(head) => unsafe {
- let prev = match head.as_ref().prev {
- // `push_front` is okay with aliasing nodes
- None => return self.list.push_front(element),
- Some(prev) => prev,
- };
-
- let node = Some(
- Box::leak(box Node { next: Some(head), prev: Some(prev), element }).into(),
- );
-
- // Not creating references to entire nodes to not invalidate the
- // reference to `element` we handed to the user.
- (*prev.as_ptr()).next = node;
- (*head.as_ptr()).prev = node;
-
- self.list.len += 1;
- },
- }
- }
-
- /// Provides a reference to the next element, without changing the iterator.
- ///
- /// This method will be removed soon.
- #[inline]
- #[unstable(
- feature = "linked_list_extras",
- reason = "this is probably better handled by a cursor type -- we'll see",
- issue = "27794"
- )]
- #[rustc_deprecated(
- reason = "Deprecated in favor of CursorMut methods. This method will be removed soon.",
- since = "1.47.0"
- )]
- pub fn peek_next(&mut self) -> Option<&mut T> {
- if self.len == 0 {
- None
- } else {
- unsafe { self.head.as_mut().map(|node| &mut node.as_mut().element) }
- }
- }
-}
-
/// A cursor over a `LinkedList`.
///
/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
#[inline]
fn is_contiguous(&self) -> bool {
+ // FIXME: Should we consider `head == 0` to mean
+ // that `self` is contiguous?
self.tail <= self.head
}
if self.is_contiguous() {
let tail = self.tail;
let head = self.head;
- return unsafe { &mut self.buffer_as_mut_slice()[tail..head] };
+ return unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 };
}
let buf = self.buf.ptr();
self.tail = 0;
self.head = len;
}
- } else if free >= self.head {
+ } else if free > self.head {
+ // FIXME: We currently do not consider ....ABCDEFGH
+ // to be contiguous because `head` would be `0` in this
+ // case. While we probably want to change this it
+ // isn't trivial as a few places expect `is_contiguous`
+ // to mean that we can just slice using `buf[tail..head]`.
+
// there is enough free space to copy the head in one go,
// this means that we first shift the tail forwards, and then
// copy the head to the correct position.
// ...ABCDEFGH.
self.tail = self.head;
- self.head = self.tail + len;
+ self.head = self.wrap_add(self.tail, len);
}
} else {
// free is smaller than both head and tail,
let tail = self.tail;
let head = self.head;
- unsafe { &mut self.buffer_as_mut_slice()[tail..head] }
+ unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 }
}
/// Rotates the double-ended queue `mid` places to the left.
let len = other.len();
let cap = other.cap();
- if other.head != 0 {
+ if other.tail != 0 {
ptr::copy(buf.add(other.tail), buf, len);
}
Vec::from_raw_parts(buf, len, cap)
);
}
+#[test]
+fn make_contiguous_head_to_end() {
+ let mut dq = VecDeque::with_capacity(3);
+ dq.push_front('B');
+ dq.push_front('A');
+ dq.push_back('C');
+ dq.make_contiguous();
+ let expected_tail = 0;
+ let expected_head = 3;
+ assert_eq!(expected_tail, dq.tail);
+ assert_eq!(expected_head, dq.head);
+ assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
+}
+
#[test]
fn test_remove() {
// This test checks that every single combination of tail position, length, and
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
+/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
/// [`Vec`]: crate::vec::Vec
#[cfg(not(test))]
#[macro_export]
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
+#[inline(never)]
fn finish_grow<A>(
new_layout: Result<Layout, LayoutError>,
current_memory: Option<(NonNull<u8>, Layout)>,
self.len() == 0
}
- /// Splits the string into two at the given index.
+ /// Splits the string into two at the given byte index.
///
/// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
/// the returned `String` contains bytes `[at, len)`. `at` must be on the
I: TrustedLen<Item = T>,
{
fn from_iter(iterator: I) -> Self {
- let mut vector = Vec::new();
+ let mut vector = match iterator.size_hint() {
+ (_, Some(upper)) => Vec::with_capacity(upper),
+ _ => Vec::new(),
+ };
// must delegate to spec_extend() since extend() itself delegates
// to spec_from for empty Vecs
vector.spec_extend(iterator);
}
impl Ordering {
+ /// Returns `true` if the ordering is the `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_eq(), false);
+ /// assert_eq!(Ordering::Equal.is_eq(), true);
+ /// assert_eq!(Ordering::Greater.is_eq(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_eq(self) -> bool {
+ matches!(self, Equal)
+ }
+
+ /// Returns `true` if the ordering is not the `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_ne(), true);
+ /// assert_eq!(Ordering::Equal.is_ne(), false);
+ /// assert_eq!(Ordering::Greater.is_ne(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_ne(self) -> bool {
+ !matches!(self, Equal)
+ }
+
+ /// Returns `true` if the ordering is the `Less` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_lt(), true);
+ /// assert_eq!(Ordering::Equal.is_lt(), false);
+ /// assert_eq!(Ordering::Greater.is_lt(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_lt(self) -> bool {
+ matches!(self, Less)
+ }
+
+ /// Returns `true` if the ordering is the `Greater` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_gt(), false);
+ /// assert_eq!(Ordering::Equal.is_gt(), false);
+ /// assert_eq!(Ordering::Greater.is_gt(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_gt(self) -> bool {
+ matches!(self, Greater)
+ }
+
+ /// Returns `true` if the ordering is either the `Less` or `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_le(), true);
+ /// assert_eq!(Ordering::Equal.is_le(), true);
+ /// assert_eq!(Ordering::Greater.is_le(), false);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_le(self) -> bool {
+ !matches!(self, Greater)
+ }
+
+ /// Returns `true` if the ordering is either the `Greater` or `Equal` variant.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(ordering_helpers)]
+ /// use std::cmp::Ordering;
+ ///
+ /// assert_eq!(Ordering::Less.is_ge(), false);
+ /// assert_eq!(Ordering::Equal.is_ge(), true);
+ /// assert_eq!(Ordering::Greater.is_ge(), true);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "ordering_helpers", issue = "79885")]
+ pub const fn is_ge(self) -> bool {
+ !matches!(self, Less)
+ }
+
/// Reverses the `Ordering`.
///
/// * `Less` becomes `Greater`.
impl PartialOrd for bool {
#[inline]
fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
- (*self as u8).partial_cmp(&(*other as u8))
+ Some(self.cmp(other))
}
}
/// This will statically either panic, or do nothing.
///
/// This intrinsic does not have a stable counterpart.
+ #[rustc_const_unstable(feature = "const_assert_type", issue = "none")]
pub fn assert_inhabited<T>();
/// A guard for unsafe functions that cannot ever be executed if `T` does not permit
//! 2. If you're creating a collection, implementing [`IntoIterator`] for it
//! will allow your collection to be used with the `for` loop.
//!
+//! # Iterating by reference
+//!
+//! Since [`into_iter()`] takes `self` by value, using a `for` loop to iterate
+//! over a collection consumes that collection. Often, you may want to iterate
+//! over a collection without consuming it. Many collections offer methods that
+//! provide iterators over references, conventionally called `iter()` and
+//! `iter_mut()` respectively:
+//!
+//! ```
+//! let mut values = vec![41];
+//! for x in values.iter_mut() {
+//! *x += 1;
+//! }
+//! for x in values.iter() {
+//! assert_eq!(*x, 42);
+//! }
+//! assert_eq!(values.len(), 1); // `values` is still owned by this function.
+//! ```
+//!
+//! If a collection type `C` provides `iter()`, it usually also implements
+//! `IntoIterator` for `&C`, with an implementation that just calls `iter()`.
+//! Likewise, a collection `C` that provides `iter_mut()` generally implements
+//! `IntoIterator` for `&mut C` by delegating to `iter_mut()`. This enables a
+//! convenient shorthand:
+//!
+//! ```
+//! let mut values = vec![41];
+//! for x in &mut values { // same as `values.iter_mut()`
+//! *x += 1;
+//! }
+//! for x in &values { // same as `values.iter()`
+//! assert_eq!(*x, 42);
+//! }
+//! assert_eq!(values.len(), 1);
+//! ```
+//!
+//! While many collections offer `iter()`, not all offer `iter_mut()`. For
+//! example, mutating the keys of a [`HashSet<T>`] or [`HashMap<K, V>`] could
+//! put the collection into an inconsistent state if the key hashes change, so
+//! these collections only offer `iter()`.
+//!
+//! [`into_iter()`]: IntoIterator::into_iter
+//! [`HashSet<T>`]: ../../std/collections/struct.HashSet.html
+//! [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
+//!
//! # Adapters
//!
//! Functions which take an [`Iterator`] and return another [`Iterator`] are
/// assert_eq!(merged, "alphabetagamma");
/// ```
///
- /// Flattening once only removes one level of nesting:
+ /// Flattening only removes one level of nesting at a time:
///
/// ```
/// let d3 = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]];
///
/// Here we see that `flatten()` does not perform a "deep" flatten.
/// Instead, only one level of nesting is removed. That is, if you
- /// `flatten()` a three-dimensional array the result will be
+ /// `flatten()` a three-dimensional array, the result will be
/// two-dimensional and not one-dimensional. To get a one-dimensional
/// structure, you have to `flatten()` again.
///
#![feature(cfg_target_has_atomic)]
#![cfg_attr(not(bootstrap), feature(const_heap))]
#![feature(const_alloc_layout)]
+#![feature(const_assert_type)]
#![feature(const_discriminant)]
#![feature(const_cell_into_inner)]
#![feature(const_checked_int_methods)]
#![feature(const_ptr_offset)]
#![feature(const_ptr_offset_from)]
#![feature(const_raw_ptr_comparison)]
+#![feature(const_raw_ptr_deref)]
#![feature(const_slice_from_raw_parts)]
#![feature(const_slice_ptr_len)]
#![feature(const_size_of_val)]
#![feature(const_type_name)]
#![feature(const_likely)]
#![feature(const_unreachable_unchecked)]
+#![feature(const_maybe_uninit_assume_init)]
+#![feature(const_maybe_uninit_as_ptr)]
#![feature(custom_inner_attributes)]
#![feature(decl_macro)]
#![feature(doc_cfg)]
/// let data = read(&mut buf);
/// ```
#[unstable(feature = "maybe_uninit_uninit_array", issue = "none")]
+ #[rustc_const_unstable(feature = "maybe_uninit_uninit_array", issue = "none")]
#[inline(always)]
- pub fn uninit_array<const LEN: usize>() -> [Self; LEN] {
+ pub const fn uninit_array<const LEN: usize>() -> [Self; LEN] {
// SAFETY: An uninitialized `[MaybeUninit<_>; LEN]` is valid.
unsafe { MaybeUninit::<[MaybeUninit<T>; LEN]>::uninit().assume_init() }
}
/// skip running the destructor. For your convenience, this also returns a mutable
/// reference to the (now safely initialized) contents of `self`.
#[unstable(feature = "maybe_uninit_extra", issue = "63567")]
+ #[rustc_const_unstable(feature = "maybe_uninit_extra", issue = "63567")]
#[inline(always)]
- pub fn write(&mut self, val: T) -> &mut T {
+ pub const fn write(&mut self, val: T) -> &mut T {
*self = MaybeUninit::new(val);
// SAFETY: We just initialized this value.
unsafe { self.assume_init_mut() }
/// // `x` had not been initialized yet, so this last line caused undefined behavior. ⚠️
/// ```
#[stable(feature = "maybe_uninit", since = "1.36.0")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
#[inline(always)]
#[rustc_diagnostic_item = "assume_init"]
- pub unsafe fn assume_init(self) -> T {
+ pub const unsafe fn assume_init(self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
/// }
/// ```
#[unstable(feature = "maybe_uninit_ref", issue = "63568")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
#[inline(always)]
- pub unsafe fn assume_init_ref(&self) -> &T {
+ pub const unsafe fn assume_init_ref(&self) -> &T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
- &*self.value
+ &*self.as_ptr()
}
}
// to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make
// a final decision about the rules before stabilization.
#[unstable(feature = "maybe_uninit_ref", issue = "63568")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
#[inline(always)]
- pub unsafe fn assume_init_mut(&mut self) -> &mut T {
+ pub const unsafe fn assume_init_mut(&mut self) -> &mut T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
- &mut *self.value
+ &mut *self.as_mut_ptr()
}
}
///
/// [`assume_init_ref`]: MaybeUninit::assume_init_ref
#[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
#[inline(always)]
- pub unsafe fn slice_assume_init_ref(slice: &[Self]) -> &[T] {
+ pub const unsafe fn slice_assume_init_ref(slice: &[Self]) -> &[T] {
// SAFETY: casting slice to a `*const [T]` is safe since the caller guarantees that
// `slice` is initialized, and`MaybeUninit` is guaranteed to have the same layout as `T`.
// The pointer obtained is valid since it refers to memory owned by `slice` which is a
///
/// [`assume_init_mut`]: MaybeUninit::assume_init_mut
#[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "const_maybe_uninit_assume_init", issue = "none")]
#[inline(always)]
- pub unsafe fn slice_assume_init_mut(slice: &mut [Self]) -> &mut [T] {
+ pub const unsafe fn slice_assume_init_mut(slice: &mut [Self]) -> &mut [T] {
// SAFETY: similar to safety notes for `slice_get_ref`, but we have a
// mutable reference which is also guaranteed to be valid for writes.
unsafe { &mut *(slice as *mut [Self] as *mut [T]) }
/// Gets a pointer to the first element of the array.
#[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "maybe_uninit_slice", issue = "63569")]
#[inline(always)]
- pub fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
+ pub const fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
this.as_ptr() as *const T
}
/// Gets a mutable pointer to the first element of the array.
#[unstable(feature = "maybe_uninit_slice", issue = "63569")]
+ #[rustc_const_unstable(feature = "maybe_uninit_slice", issue = "63569")]
#[inline(always)]
- pub fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
+ pub const fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
this.as_mut_ptr() as *mut T
}
+
+ /// Copies the elements from `src` to `this`, returning a mutable reference to the now initialized contents of `this`.
+ ///
+ /// If `T` does not implement `Copy`, use [`write_slice_cloned`]
+ ///
+ /// This is similar to [`slice::copy_from_slice`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut dst = [MaybeUninit::uninit(); 32];
+ /// let src = [0; 32];
+ ///
+ /// let init = MaybeUninit::write_slice(&mut dst, &src);
+ ///
+ /// assert_eq!(init, src);
+ /// ```
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice, vec_spare_capacity)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut vec = Vec::with_capacity(32);
+ /// let src = [0; 16];
+ ///
+ /// MaybeUninit::write_slice(&mut vec.spare_capacity_mut()[..src.len()], &src);
+ ///
+ /// // SAFETY: we have just copied all the elements of len into the spare capacity
+ /// // the first src.len() elements of the vec are valid now.
+ /// unsafe {
+ /// vec.set_len(src.len());
+ /// }
+ ///
+ /// assert_eq!(vec, src);
+ /// ```
+ ///
+ /// [`write_slice_cloned`]: MaybeUninit::write_slice_cloned
+ /// [`slice::copy_from_slice`]: ../../std/primitive.slice.html#method.copy_from_slice
+ #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")]
+ pub fn write_slice<'a>(this: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T]
+ where
+ T: Copy,
+ {
+ // SAFETY: &[T] and &[MaybeUninit<T>] have the same layout
+ let uninit_src: &[MaybeUninit<T>] = unsafe { super::transmute(src) };
+
+ this.copy_from_slice(uninit_src);
+
+ // SAFETY: Valid elements have just been copied into `this` so it is initialized
+ unsafe { MaybeUninit::slice_assume_init_mut(this) }
+ }
+
+ /// Clones the elements from `src` to `this`, returning a mutable reference to the now initialized contents of `this`.
+ /// Any already initialized elements will not be dropped.
+ ///
+ /// If `T` implements `Copy`, use [`write_slice`]
+ ///
+ /// This is similar to [`slice::clone_from_slice`] but does not drop existing elements.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the two slices have different lengths, or if the implementation of `Clone` panics.
+ ///
+ /// If there is a panic, the already cloned elements will be dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut dst = [MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit(), MaybeUninit::uninit()];
+ /// let src = ["wibbly".to_string(), "wobbly".to_string(), "timey".to_string(), "wimey".to_string(), "stuff".to_string()];
+ ///
+ /// let init = MaybeUninit::write_slice_cloned(&mut dst, &src);
+ ///
+ /// assert_eq!(init, src);
+ /// ```
+ ///
+ /// ```
+ /// #![feature(maybe_uninit_write_slice, vec_spare_capacity)]
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let mut vec = Vec::with_capacity(32);
+ /// let src = ["rust", "is", "a", "pretty", "cool", "language"];
+ ///
+ /// MaybeUninit::write_slice_cloned(&mut vec.spare_capacity_mut()[..src.len()], &src);
+ ///
+ /// // SAFETY: we have just cloned all the elements of len into the spare capacity
+ /// // the first src.len() elements of the vec are valid now.
+ /// unsafe {
+ /// vec.set_len(src.len());
+ /// }
+ ///
+ /// assert_eq!(vec, src);
+ /// ```
+ ///
+ /// [`write_slice`]: MaybeUninit::write_slice
+ /// [`slice::clone_from_slice`]: ../../std/primitive.slice.html#method.clone_from_slice
+ #[unstable(feature = "maybe_uninit_write_slice", issue = "79995")]
+ pub fn write_slice_cloned<'a>(this: &'a mut [MaybeUninit<T>], src: &[T]) -> &'a mut [T]
+ where
+ T: Clone,
+ {
+ // unlike copy_from_slice this does not call clone_from_slice on the slice
+ // this is because `MaybeUninit<T: Clone>` does not implement Clone.
+
+ struct Guard<'a, T> {
+ slice: &'a mut [MaybeUninit<T>],
+ initialized: usize,
+ }
+
+ impl<'a, T> Drop for Guard<'a, T> {
+ fn drop(&mut self) {
+ let initialized_part = &mut self.slice[..self.initialized];
+ // SAFETY: this slice will contain only initialized objects,
+ // so it is sound to drop them in place.
+ unsafe {
+ crate::ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(initialized_part));
+ }
+ }
+ }
+
+ assert_eq!(this.len(), src.len(), "destination and source slices have different lengths");
+ // NOTE: We need to explicitly slice them to the same length
+ // for bounds checking to be elided, and the optimizer will
+ // generate memcpy for simple cases (for example T = u8).
+ let len = this.len();
+ let src = &src[..len];
+
+ // guard is needed because a panic might happen during a clone
+ let mut guard = Guard { slice: this, initialized: 0 };
+
+ for i in 0..len {
+ guard.slice[i].write(src[i].clone());
+ guard.initialized += 1;
+ }
+
+ super::forget(guard);
+
+ // SAFETY: Valid elements have just been written into `this` so it is initialized
+ unsafe { MaybeUninit::slice_assume_init_mut(this) }
+ }
}
"),
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_int_methods", since = "1.32.0")]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
#[inline]
pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
}
Note that this is *not* the same as a rotate-left; the RHS of a wrapping shift-left is restricted to
the range of the type, rather than the bits shifted out of the LHS being returned to the other end.
-The primitive integer types all implement a `[`rotate_left`](#method.rotate_left) function,
+The primitive integer types all implement a [`rotate_left`](#method.rotate_left) function,
which may be what you want instead.
# Examples
```"),
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_math", since = "1.32.0")]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
#[inline]
pub const fn count_ones(self) -> u32 {
intrinsics::ctpop(self as $ActualT) as u32
assert_eq!(n.count_ones(), 3);
```"),
#[inline]
+ #[doc(alias = "popcount")]
+ #[doc(alias = "popcnt")]
#[unstable(feature = "wrapping_int_impl", issue = "32463")]
pub const fn count_ones(self) -> u32 {
self.0.count_ones()
/// Converts from `Option<Option<T>>` to `Option<T>`
///
/// # Examples
+ ///
/// Basic usage:
+ ///
/// ```
/// let x: Option<Option<u32>> = Some(Some(6));
/// assert_eq!(Some(6), x.flatten());
/// let x: Option<Option<u32>> = None;
/// assert_eq!(None, x.flatten());
/// ```
- /// Flattening once only removes one level of nesting:
+ ///
+ /// Flattening only removes one level of nesting at a time:
+ ///
/// ```
/// let x: Option<Option<Option<u32>>> = Some(Some(Some(6)));
/// assert_eq!(Some(Some(6)), x.flatten());
/// Converts from `Result<Result<T, E>, E>` to `Result<T, E>`
///
/// # Examples
+ ///
/// Basic usage:
+ ///
/// ```
/// #![feature(result_flattening)]
/// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello"));
/// assert_eq!(Err(6), x.flatten());
/// ```
///
- /// Flattening once only removes one level of nesting:
+ /// Flattening only removes one level of nesting at a time:
///
/// ```
/// #![feature(result_flattening)]
#![feature(bound_cloned)]
#![feature(box_syntax)]
#![feature(cell_update)]
+#![feature(cfg_panic)]
#![feature(cfg_target_has_atomic)]
#![feature(const_assume)]
#![feature(const_cell_into_inner)]
+#![feature(const_maybe_uninit_assume_init)]
#![feature(core_intrinsics)]
#![feature(core_private_bignum)]
#![feature(core_private_diy_float)]
#![feature(raw)]
#![feature(sort_internals)]
#![feature(slice_partition_at_index)]
+#![feature(maybe_uninit_write_slice)]
#![feature(min_specialization)]
#![feature(step_trait)]
#![feature(step_trait_ext)]
use core::mem::*;
+use std::rc::Rc;
+
#[test]
fn size_of_basic() {
assert_eq!(size_of::<u8>(), 1);
is_send_sync::<Discriminant<Regular>>();
is_send_sync::<Discriminant<NotSendSync>>();
}
+
+#[test]
+#[cfg(not(bootstrap))]
+fn assume_init_good() {
+ const TRUE: bool = unsafe { MaybeUninit::<bool>::new(true).assume_init() };
+
+ assert!(TRUE);
+}
+
+#[test]
+fn uninit_write_slice() {
+ let mut dst = [MaybeUninit::new(255); 64];
+ let src = [0; 64];
+
+ assert_eq!(MaybeUninit::write_slice(&mut dst, &src), &src);
+}
+
+#[test]
+#[should_panic(expected = "source slice length (32) does not match destination slice length (64)")]
+fn uninit_write_slice_panic_lt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 32];
+
+ MaybeUninit::write_slice(&mut dst, &src);
+}
+
+#[test]
+#[should_panic(expected = "source slice length (128) does not match destination slice length (64)")]
+fn uninit_write_slice_panic_gt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 128];
+
+ MaybeUninit::write_slice(&mut dst, &src);
+}
+
+#[test]
+fn uninit_clone_from_slice() {
+ let mut dst = [MaybeUninit::new(255); 64];
+ let src = [0; 64];
+
+ assert_eq!(MaybeUninit::write_slice_cloned(&mut dst, &src), &src);
+}
+
+#[test]
+#[should_panic(expected = "destination and source slices have different lengths")]
+fn uninit_write_slice_cloned_panic_lt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 32];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+}
+
+#[test]
+#[should_panic(expected = "destination and source slices have different lengths")]
+fn uninit_write_slice_cloned_panic_gt() {
+ let mut dst = [MaybeUninit::uninit(); 64];
+ let src = [0; 128];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+}
+
+#[test]
+#[cfg(panic = "unwind")]
+fn uninit_write_slice_cloned_mid_panic() {
+ use std::panic;
+
+ enum IncrementOrPanic {
+ Increment(Rc<()>),
+ ExpectedPanic,
+ UnexpectedPanic,
+ }
+
+ impl Clone for IncrementOrPanic {
+ fn clone(&self) -> Self {
+ match self {
+ Self::Increment(rc) => Self::Increment(rc.clone()),
+ Self::ExpectedPanic => panic!("expected panic on clone"),
+ Self::UnexpectedPanic => panic!("unexpected panic on clone"),
+ }
+ }
+ }
+
+ let rc = Rc::new(());
+
+ let mut dst = [
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ MaybeUninit::uninit(),
+ ];
+
+ let src = [
+ IncrementOrPanic::Increment(rc.clone()),
+ IncrementOrPanic::Increment(rc.clone()),
+ IncrementOrPanic::ExpectedPanic,
+ IncrementOrPanic::UnexpectedPanic,
+ ];
+
+ let err = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+ }));
+
+ drop(src);
+
+ match err {
+ Ok(_) => unreachable!(),
+ Err(payload) => {
+ payload
+ .downcast::<&'static str>()
+ .and_then(|s| if *s == "expected panic on clone" { Ok(s) } else { Err(s) })
+ .unwrap_or_else(|p| panic::resume_unwind(p));
+
+ assert_eq!(Rc::strong_count(&rc), 1)
+ }
+ }
+}
+
+#[test]
+fn uninit_write_slice_cloned_no_drop() {
+ let rc = Rc::new(());
+
+ let mut dst = [MaybeUninit::uninit()];
+ let src = [rc.clone()];
+
+ MaybeUninit::write_slice_cloned(&mut dst, &src);
+
+ drop(src);
+
+ assert_eq!(Rc::strong_count(&rc), 2);
+}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error, yielding a more accurate result than an unfused multiply-add.
///
- /// Using `mul_add` can be more performant than an unfused multiply-add if
- /// the target architecture has a dedicated `fma` CPU instruction.
+ /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction. However,
+ /// this is not always true, and will be heavily dependent on designing
+ /// algorithms with specific target hardware in mind.
///
/// # Examples
///
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error, yielding a more accurate result than an unfused multiply-add.
///
- /// Using `mul_add` can be more performant than an unfused multiply-add if
- /// the target architecture has a dedicated `fma` CPU instruction.
+ /// Using `mul_add` *may* be more performant than an unfused multiply-add if
+ /// the target architecture has a dedicated `fma` CPU instruction. However,
+ /// this is not always true, and will be heavily dependent on designing
+ /// algorithms with specific target hardware in mind.
///
/// # Examples
///
/// Gets the underlying byte representation.
///
- /// Note: it is *crucial* that this API is private, to avoid
+ /// Note: it is *crucial* that this API is not externally public, to avoid
/// revealing the internal, platform-specific encodings.
#[inline]
- fn bytes(&self) -> &[u8] {
+ pub(crate) fn bytes(&self) -> &[u8] {
unsafe { &*(&self.inner as *const _ as *const [u8]) }
}
use crate::fmt;
use crate::io::{self, BufReader, Initializer, IoSlice, IoSliceMut, LineWriter};
use crate::lazy::SyncOnceCell;
+use crate::pin::Pin;
use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::{Arc, Mutex, MutexGuard};
use crate::sys::stdio;
// FIXME: this should be LineWriter or BufWriter depending on the state of
// stdout (tty or not). Note that if this is not line buffered it
// should also flush-on-panic or some form of flush-on-abort.
- inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
+ inner: Pin<&'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>>,
}
/// A locked reference to the `Stdout` handle.
pub fn stdout() -> Stdout {
static INSTANCE: SyncOnceCell<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> =
SyncOnceCell::new();
+
+ fn cleanup() {
+ if let Some(instance) = INSTANCE.get() {
+ // Flush the data and disable buffering during shutdown
+ // by replacing the line writer by one with zero
+ // buffering capacity.
+ // We use try_lock() instead of lock(), because someone
+ // might have leaked a StdoutLock, which would
+ // otherwise cause a deadlock here.
+ if let Some(lock) = Pin::static_ref(instance).try_lock() {
+ *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
+ }
+ }
+ }
+
Stdout {
- inner: INSTANCE.get_or_init(|| unsafe {
- let _ = sys_common::at_exit(|| {
- if let Some(instance) = INSTANCE.get() {
- // Flush the data and disable buffering during shutdown
- // by replacing the line writer by one with zero
- // buffering capacity.
- // We use try_lock() instead of lock(), because someone
- // might have leaked a StdoutLock, which would
- // otherwise cause a deadlock here.
- if let Some(lock) = instance.try_lock() {
- *lock.borrow_mut() = LineWriter::with_capacity(0, stdout_raw());
- }
- }
- });
- let r = ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())));
- r.init();
- r
- }),
+ inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
+ || unsafe {
+ let _ = sys_common::at_exit(cleanup);
+ ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))
+ },
+ |mutex| unsafe { mutex.init() },
+ ),
}
}
/// an error.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
- inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
+ inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
}
/// A locked reference to the `Stderr` handle.
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn stderr() -> Stderr {
- // Note that unlike `stdout()` we don't use `Lazy` here which registers a
- // destructor. Stderr is not buffered nor does the `stderr_raw` type consume
- // any owned resources, so there's no need to run any destructors at some
- // point in the future.
- //
- // This has the added benefit of allowing `stderr` to be usable during
- // process shutdown as well!
+ // Note that unlike `stdout()` we don't use `at_exit` here to register a
+ // destructor. Stderr is not buffered, so there's no need to run a
+ // destructor for flushing the buffer.
static INSTANCE: SyncOnceCell<ReentrantMutex<RefCell<StderrRaw>>> = SyncOnceCell::new();
Stderr {
- inner: INSTANCE.get_or_init(|| unsafe {
- let r = ReentrantMutex::new(RefCell::new(stderr_raw()));
- r.init();
- r
- }),
+ inner: Pin::static_ref(&INSTANCE).get_or_init_pin(
+ || unsafe { ReentrantMutex::new(RefCell::new(stderr_raw())) },
+ |mutex| unsafe { mutex.init() },
+ ),
}
}
///
/// For more information on for-loops, see the [Rust book] or the [Reference].
///
+/// See also, [`loop`], [`while`].
+///
/// [`in`]: keyword.in.html
/// [`impl`]: keyword.impl.html
+/// [`loop`]: keyword.loop.html
+/// [`while`]: keyword.while.html
/// [higher-ranked trait bounds]: ../reference/trait-bounds.html#higher-ranked-trait-bounds
/// [Rust book]:
/// ../book/ch03-05-control-flow.html#looping-through-a-collection-with-for
///
/// For more information on `while` and loops in general, see the [reference].
///
+/// See also, [`for`], [`loop`].
+///
/// [`for`]: keyword.for.html
/// [`loop`]: keyword.loop.html
/// [reference]: ../reference/expressions/loop-expr.html#predicate-loops
///
/// For more information on `loop` and loops in general, see the [Reference].
///
+/// See also, [`for`], [`while`].
+///
+/// [`for`]: keyword.for.html
+/// [`while`]: keyword.while.html
/// [Reference]: ../reference/expressions/loop-expr.html
mod loop_keyword {}
mem::MaybeUninit,
ops::{Deref, Drop},
panic::{RefUnwindSafe, UnwindSafe},
+ pin::Pin,
sync::Once,
};
Ok(unsafe { self.get_unchecked() })
}
+ /// Internal-only API that gets the contents of the cell, initializing it
+ /// in two steps with `f` and `g` if the cell was empty.
+ ///
+ /// `f` is called to construct the value, which is then moved into the cell
+ /// and given as a (pinned) mutable reference to `g` to finish
+ /// initialization.
+ ///
+ /// This allows `g` to inspect and manipulate the value after it has been
+ /// moved into its final place in the cell, but before the cell is
+ /// considered initialized.
+ ///
+ /// # Panics
+ ///
+ /// If `f` or `g` panics, the panic is propagated to the caller, and the
+ /// cell remains uninitialized.
+ ///
+ /// With the current implementation, if `g` panics, the value from `f` will
+ /// not be dropped. This should probably be fixed if this is ever used for
+ /// a type where this matters.
+ ///
+ /// It is an error to reentrantly initialize the cell from `f`. The exact
+ /// outcome is unspecified. Current implementation deadlocks, but this may
+ /// be changed to a panic in the future.
+ pub(crate) fn get_or_init_pin<F, G>(self: Pin<&Self>, f: F, g: G) -> Pin<&T>
+ where
+ F: FnOnce() -> T,
+ G: FnOnce(Pin<&mut T>),
+ {
+ if let Some(value) = self.get_ref().get() {
+ // SAFETY: The inner value was already initialized, and will not be
+ // moved anymore.
+ return unsafe { Pin::new_unchecked(value) };
+ }
+
+ let slot = &self.value;
+
+ // Ignore poisoning from other threads
+ // If another thread panics, then we'll be able to run our closure
+ self.once.call_once_force(|_| {
+ let value = f();
+ // SAFETY: We use the Once (self.once) to guarantee unique access
+ // to the UnsafeCell (slot).
+ let value: &mut T = unsafe { (&mut *slot.get()).write(value) };
+ // SAFETY: The value has been written to its final place in
+ // self.value. We will not move it anymore, which we promise here
+ // with a Pin<&mut T>.
+ g(unsafe { Pin::new_unchecked(value) });
+ });
+
+ // SAFETY: The inner value has been initialized, and will not be moved
+ // anymore.
+ unsafe { Pin::new_unchecked(self.get_ref().get_unchecked()) }
+ }
+
/// Consumes the `SyncOnceCell`, returning the wrapped value. Returns
/// `None` if the cell was empty.
///
#![feature(format_args_nl)]
#![feature(gen_future)]
#![feature(generator_trait)]
+#![feature(get_mut_unchecked)]
#![feature(global_asm)]
#![feature(hashmap_internals)]
#![feature(int_error_internals)]
#![feature(panic_info_message)]
#![feature(panic_internals)]
#![feature(panic_unwind)]
+#![feature(pin_static_ref)]
#![feature(prelude_import)]
#![feature(ptr_internals)]
#![feature(raw)]
#![feature(stdsimd)]
#![feature(stmt_expr_attributes)]
#![feature(str_internals)]
+#![feature(str_split_once)]
#![feature(test)]
#![feature(thread_local)]
#![feature(thread_local_internals)]
);
t!("\\\\.\\foo/bar",
- iter: ["\\\\.\\foo/bar", "\\"],
+ iter: ["\\\\.\\foo", "\\", "bar"],
has_root: true,
is_absolute: true,
- parent: None,
- file_name: None,
- file_stem: None,
+ parent: Some("\\\\.\\foo/"),
+ file_name: Some("bar"),
+ file_stem: Some("bar"),
extension: None
);
/// * A repeat expression `[x; N]`, which produces an array with `N` copies of `x`.
/// The type of `x` must be [`Copy`].
///
+/// Note that `[expr; 0]` is allowed, and produces an empty array.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
/// Arrays of *any* size implement the following traits if the element type allows it:
///
/// - [`Copy`]
use crate::os::unix::net::UnixStream;
use crate::process::{ChildStderr, ChildStdin, ChildStdout};
use crate::ptr;
-use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use crate::sys::cvt;
#[cfg(test)]
}
}
-/// linux-specific implementation that will attempt to use copy_file_range for copy offloading
-/// as the name says, it only works on regular files
+/// Invalid file descriptor.
+///
+/// Valid file descriptors are guaranteed to be non-negative numbers (see `open()` manpage)
+/// while negative values are used to indicate errors.
+/// Thus -1 will never overlap with a valid open file.
+const INVALID_FD: RawFd = -1;
+
+/// Linux-specific implementation that will attempt to use copy_file_range for copy offloading.
+/// As the name says, it only works on regular files.
///
/// Callers must handle fallback to a generic copy loop.
/// `Fallback` may indicate non-zero number of bytes already written
pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
use crate::cmp;
+ const NOT_PROBED: u8 = 0;
+ const UNAVAILABLE: u8 = 1;
+ const AVAILABLE: u8 = 2;
+
// Kernel prior to 4.5 don't have copy_file_range
// We store the availability in a global to avoid unnecessary syscalls
- static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true);
+ static HAS_COPY_FILE_RANGE: AtomicU8 = AtomicU8::new(NOT_PROBED);
syscall! {
fn copy_file_range(
) -> libc::ssize_t
}
- let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed);
- let mut written = 0u64;
- while written < max_len {
- let copy_result = if has_copy_file_range {
- let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
- // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
- // this allows us to copy large chunks without hitting EOVERFLOW,
- // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
- let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
- let copy_result = unsafe {
- // We actually don't have to adjust the offsets,
- // because copy_file_range adjusts the file offset automatically
- cvt(copy_file_range(
- reader,
- ptr::null_mut(),
- writer,
- ptr::null_mut(),
- bytes_to_copy,
- 0,
- ))
+ match HAS_COPY_FILE_RANGE.load(Ordering::Relaxed) {
+ NOT_PROBED => {
+ // EPERM can indicate seccomp filters or an immutable file.
+ // To distinguish these cases we probe with invalid file descriptors which should result in EBADF if the syscall is supported
+ // and some other error (ENOSYS or EPERM) if it's not available
+ let result = unsafe {
+ cvt(copy_file_range(INVALID_FD, ptr::null_mut(), INVALID_FD, ptr::null_mut(), 1, 0))
};
- if let Err(ref copy_err) = copy_result {
- match copy_err.raw_os_error() {
- Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => {
- HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed);
- }
- _ => {}
- }
+
+ if matches!(result.map_err(|e| e.raw_os_error()), Err(Some(libc::EBADF))) {
+ HAS_COPY_FILE_RANGE.store(AVAILABLE, Ordering::Relaxed);
+ } else {
+ HAS_COPY_FILE_RANGE.store(UNAVAILABLE, Ordering::Relaxed);
+ return CopyResult::Fallback(0);
}
- copy_result
- } else {
- Err(Error::from_raw_os_error(libc::ENOSYS))
+ }
+ UNAVAILABLE => return CopyResult::Fallback(0),
+ _ => {}
+ };
+
+ let mut written = 0u64;
+ while written < max_len {
+ let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
+ // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
+ // this allows us to copy large chunks without hitting EOVERFLOW,
+ // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
+ let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
+ let copy_result = unsafe {
+ // We actually don't have to adjust the offsets,
+ // because copy_file_range adjusts the file offset automatically
+ cvt(copy_file_range(reader, ptr::null_mut(), writer, ptr::null_mut(), bytes_to_copy, 0))
};
+
match copy_result {
Ok(0) if written == 0 => {
// fallback to work around several kernel bugs where copy_file_range will fail to
libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP,
) => {
// Try fallback io::copy if either:
- // - Kernel version is < 4.5 (ENOSYS)
+ // - Kernel version is < 4.5 (ENOSYS¹)
// - Files are mounted on different fs (EXDEV)
// - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP)
- // - copy_file_range is disallowed, for example by seccomp (EPERM)
+ // - copy_file_range file is immutable or syscall is blocked by seccomp¹ (EPERM)
// - copy_file_range cannot be used with pipes or device nodes (EINVAL)
+ //
+ // ¹ these cases should be detected by the initial probe but we handle them here
+ // anyway in case syscall interception changes during runtime
assert_eq!(written, 0);
CopyResult::Fallback(0)
}
-use crate::env::temp_dir;
use crate::fs::OpenOptions;
use crate::io;
use crate::io::Result;
use crate::io::SeekFrom;
use crate::io::{BufRead, Read, Seek, Write};
use crate::os::unix::io::AsRawFd;
+use crate::sys_common::io::test::tmpdir;
#[test]
fn copy_specialization() -> Result<()> {
use crate::io::{BufReader, BufWriter};
- let path = crate::env::temp_dir();
- let source_path = path.join("copy-spec.source");
- let sink_path = path.join("copy-spec.sink");
+ let tmp_path = tmpdir();
+ let source_path = tmp_path.join("copy-spec.source");
+ let sink_path = tmp_path.join("copy-spec.sink");
let result: Result<()> = try {
let mut source = crate::fs::OpenOptions::new()
#[bench]
fn bench_file_to_file_copy(b: &mut test::Bencher) {
const BYTES: usize = 128 * 1024;
- let src_path = temp_dir().join("file-copy-bench-src");
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("file-copy-bench-src");
let mut src = crate::fs::OpenOptions::new()
.create(true)
.truncate(true)
.unwrap();
src.write(&vec![0u8; BYTES]).unwrap();
- let sink_path = temp_dir().join("file-copy-bench-sink");
+ let sink_path = temp_path.join("file-copy-bench-sink");
let mut sink = crate::fs::OpenOptions::new()
.create(true)
.truncate(true)
#[bench]
fn bench_file_to_socket_copy(b: &mut test::Bencher) {
const BYTES: usize = 128 * 1024;
- let src_path = temp_dir().join("pipe-copy-bench-src");
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("pipe-copy-bench-src");
let mut src = OpenOptions::new()
.create(true)
.truncate(true)
#[bench]
fn bench_file_to_uds_copy(b: &mut test::Bencher) {
const BYTES: usize = 128 * 1024;
- let src_path = temp_dir().join("uds-copy-bench-src");
+ let temp_path = tmpdir();
+ let src_path = temp_path.join("uds-copy-bench-src");
let mut src = OpenOptions::new()
.create(true)
.truncate(true)
pub type CHAR = c_char;
pub type ULONG_PTR = usize;
pub type ULONG = c_ulong;
+pub type NTSTATUS = LONG;
+pub type ACCESS_MASK = DWORD;
pub type LPBOOL = *mut BOOL;
pub type LPBYTE = *mut BYTE;
pub const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+pub const STATUS_SUCCESS: NTSTATUS = 0x00000000;
+
#[repr(C)]
#[cfg(not(target_pointer_width = "64"))]
pub struct WSADATA {
panic!("rwlocks not available")
}
}
+compat_fn! {
+ "api-ms-win-core-synch-l1-2-0":
+ pub fn WaitOnAddress(
+ Address: LPVOID,
+ CompareAddress: LPVOID,
+ AddressSize: SIZE_T,
+ dwMilliseconds: DWORD
+ ) -> BOOL {
+ panic!("WaitOnAddress not available")
+ }
+ pub fn WakeByAddressSingle(Address: LPVOID) -> () {
+ // If this api is unavailable, there cannot be anything waiting, because
+ // WaitOnAddress would've panicked. So it's fine to do nothing here.
+ }
+}
+
+compat_fn! {
+ "ntdll":
+ pub fn NtCreateKeyedEvent(
+ KeyedEventHandle: LPHANDLE,
+ DesiredAccess: ACCESS_MASK,
+ ObjectAttributes: LPVOID,
+ Flags: ULONG
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+ pub fn NtReleaseKeyedEvent(
+ EventHandle: HANDLE,
+ Key: LPVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+ pub fn NtWaitForKeyedEvent(
+ EventHandle: HANDLE,
+ Key: LPVOID,
+ Alertable: BOOLEAN,
+ Timeout: PLARGE_INTEGER
+ ) -> NTSTATUS {
+ panic!("keyed events not available")
+ }
+}
)*) => ($(
$(#[$meta])*
pub mod $symbol {
+ #[allow(unused_imports)]
use super::*;
use crate::sync::atomic::{AtomicUsize, Ordering};
use crate::mem;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
+pub mod thread_parker;
pub mod time;
cfg_if::cfg_if! {
if #[cfg(not(target_vendor = "uwp"))] {
pub const MAIN_SEP_STR: &str = "\\";
pub const MAIN_SEP: char = '\\';
-// The unsafety here stems from converting between `&OsStr` and `&[u8]`
-// and back. This is safe to do because (1) we only look at ASCII
-// contents of the encoding and (2) new &OsStr values are produced
-// only from ASCII-bounded slices of existing &OsStr values.
-fn os_str_as_u8_slice(s: &OsStr) -> &[u8] {
- unsafe { mem::transmute(s) }
-}
-unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
- mem::transmute(s)
+// Safety: `bytes` must be a valid wtf8 encoded slice
+#[inline]
+unsafe fn bytes_as_os_str(bytes: &[u8]) -> &OsStr {
+ // &OsStr is layout compatible with &Slice, which is compatible with &Wtf8,
+ // which is compatible with &[u8].
+ mem::transmute(bytes)
}
#[inline]
b == b'\\'
}
-// In most DOS systems, it is not possible to have more than 26 drive letters.
-// See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>.
-pub fn is_valid_drive_letter(disk: u8) -> bool {
- disk.is_ascii_alphabetic()
-}
-
pub fn parse_prefix(path: &OsStr) -> Option<Prefix<'_>> {
use Prefix::{DeviceNS, Disk, Verbatim, VerbatimDisk, VerbatimUNC, UNC};
- let path = os_str_as_u8_slice(path);
-
- // \\
- if let Some(path) = path.strip_prefix(br"\\") {
- // \\?\
- if let Some(path) = path.strip_prefix(br"?\") {
- // \\?\UNC\server\share
- if let Some(path) = path.strip_prefix(br"UNC\") {
- let (server, share) = match get_first_two_components(path, is_verbatim_sep) {
- Some((server, share)) => unsafe {
- (u8_slice_as_os_str(server), u8_slice_as_os_str(share))
- },
- None => (unsafe { u8_slice_as_os_str(path) }, OsStr::new("")),
- };
- return Some(VerbatimUNC(server, share));
+ if let Some(path) = strip_prefix(path, r"\\") {
+ // \\
+ if let Some(path) = strip_prefix(path, r"?\") {
+ // \\?\
+ if let Some(path) = strip_prefix(path, r"UNC\") {
+ // \\?\UNC\server\share
+
+ let (server, path) = parse_next_component(path, true);
+ let (share, _) = parse_next_component(path, true);
+
+ Some(VerbatimUNC(server, share))
} else {
- // \\?\path
- match path {
- // \\?\C:\path
- [c, b':', b'\\', ..] if is_valid_drive_letter(*c) => {
- return Some(VerbatimDisk(c.to_ascii_uppercase()));
- }
- // \\?\cat_pics
- _ => {
- let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len());
- let slice = &path[..idx];
- return Some(Verbatim(unsafe { u8_slice_as_os_str(slice) }));
- }
+ let (prefix, _) = parse_next_component(path, true);
+
+ // in verbatim paths only recognize an exact drive prefix
+ if let Some(drive) = parse_drive_exact(prefix) {
+ // \\?\C:
+ Some(VerbatimDisk(drive))
+ } else {
+ // \\?\prefix
+ Some(Verbatim(prefix))
}
}
- } else if let Some(path) = path.strip_prefix(b".\\") {
+ } else if let Some(path) = strip_prefix(path, r".\") {
// \\.\COM42
- let idx = path.iter().position(|&b| b == b'\\').unwrap_or(path.len());
- let slice = &path[..idx];
- return Some(DeviceNS(unsafe { u8_slice_as_os_str(slice) }));
- }
- match get_first_two_components(path, is_sep_byte) {
- Some((server, share)) if !server.is_empty() && !share.is_empty() => {
+ let (prefix, _) = parse_next_component(path, false);
+ Some(DeviceNS(prefix))
+ } else {
+ let (server, path) = parse_next_component(path, false);
+ let (share, _) = parse_next_component(path, false);
+
+ if !server.is_empty() && !share.is_empty() {
// \\server\share
- return Some(unsafe { UNC(u8_slice_as_os_str(server), u8_slice_as_os_str(share)) });
+ Some(UNC(server, share))
+ } else {
+ // no valid prefix beginning with "\\" recognized
+ None
}
- _ => {}
}
- } else if let [c, b':', ..] = path {
+ } else if let Some(drive) = parse_drive(path) {
// C:
- if is_valid_drive_letter(*c) {
- return Some(Disk(c.to_ascii_uppercase()));
- }
+ Some(Disk(drive))
+ } else {
+ // no prefix
+ None
}
- None
}
-/// Returns the first two path components with predicate `f`.
-///
-/// The two components returned will be use by caller
-/// to construct `VerbatimUNC` or `UNC` Windows path prefix.
-///
-/// Returns [`None`] if there are no separators in path.
-fn get_first_two_components(path: &[u8], f: fn(u8) -> bool) -> Option<(&[u8], &[u8])> {
- let idx = path.iter().position(|&x| f(x))?;
- // Panic safe
- // The max `idx+1` is `path.len()` and `path[path.len()..]` is a valid index.
- let (first, path) = (&path[..idx], &path[idx + 1..]);
- let idx = path.iter().position(|&x| f(x)).unwrap_or(path.len());
- let second = &path[..idx];
- Some((first, second))
+// Parses a drive prefix, e.g. "C:" and "C:\whatever"
+fn parse_drive(prefix: &OsStr) -> Option<u8> {
+ // In most DOS systems, it is not possible to have more than 26 drive letters.
+ // See <https://en.wikipedia.org/wiki/Drive_letter_assignment#Common_assignments>.
+ fn is_valid_drive_letter(drive: &u8) -> bool {
+ drive.is_ascii_alphabetic()
+ }
+
+ match prefix.bytes() {
+ [drive, b':', ..] if is_valid_drive_letter(drive) => Some(drive.to_ascii_uppercase()),
+ _ => None,
+ }
+}
+
+// Parses a drive prefix exactly, e.g. "C:"
+fn parse_drive_exact(prefix: &OsStr) -> Option<u8> {
+ // only parse two bytes: the drive letter and the drive separator
+ if prefix.len() == 2 { parse_drive(prefix) } else { None }
+}
+
+fn strip_prefix<'a>(path: &'a OsStr, prefix: &str) -> Option<&'a OsStr> {
+ // `path` and `prefix` are valid wtf8 and utf8 encoded slices respectively, `path[prefix.len()]`
+ // is thus a code point boundary and `path[prefix.len()..]` is a valid wtf8 encoded slice.
+ match path.bytes().strip_prefix(prefix.as_bytes()) {
+ Some(path) => unsafe { Some(bytes_as_os_str(path)) },
+ None => None,
+ }
+}
+
+// Parse the next path component.
+//
+// Returns the next component and the rest of the path excluding the component and separator.
+// Does not recognize `/` as a separator character if `verbatim` is true.
+fn parse_next_component(path: &OsStr, verbatim: bool) -> (&OsStr, &OsStr) {
+ let separator = if verbatim { is_verbatim_sep } else { is_sep_byte };
+
+ match path.bytes().iter().position(|&x| separator(x)) {
+ Some(separator_start) => {
+ let mut separator_end = separator_start + 1;
+
+ // a series of multiple separator characters is treated as a single separator,
+ // except in verbatim paths
+ while !verbatim && separator_end < path.len() && separator(path.bytes()[separator_end])
+ {
+ separator_end += 1;
+ }
+
+ let component = &path.bytes()[..separator_start];
+
+ // Panic safe
+ // The max `separator_end` is `bytes.len()` and `bytes[bytes.len()..]` is a valid index.
+ let path = &path.bytes()[separator_end..];
+
+ // Safety: `path` is a valid wtf8 encoded slice and each of the separators ('/', '\')
+ // is encoded in a single byte, therefore `bytes[separator_start]` and
+ // `bytes[separator_end]` must be code point boundaries and thus
+ // `bytes[..separator_start]` and `bytes[separator_end..]` are valid wtf8 slices.
+ unsafe { (bytes_as_os_str(component), bytes_as_os_str(path)) }
+ }
+ None => (path, OsStr::new("")),
+ }
}
use super::*;
#[test]
-fn test_get_first_two_components() {
+fn test_parse_next_component() {
assert_eq!(
- get_first_two_components(br"server\share", is_verbatim_sep),
- Some((&b"server"[..], &b"share"[..])),
+ parse_next_component(OsStr::new(r"server\share"), true),
+ (OsStr::new(r"server"), OsStr::new(r"share"))
);
assert_eq!(
- get_first_two_components(br"server\", is_verbatim_sep),
- Some((&b"server"[..], &b""[..]))
+ parse_next_component(OsStr::new(r"server/share"), true),
+ (OsStr::new(r"server/share"), OsStr::new(r""))
);
assert_eq!(
- get_first_two_components(br"\server\", is_verbatim_sep),
- Some((&b""[..], &b"server"[..]))
+ parse_next_component(OsStr::new(r"server/share"), false),
+ (OsStr::new(r"server"), OsStr::new(r"share"))
);
- assert_eq!(get_first_two_components(br"there are no separators here", is_verbatim_sep), None,);
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server\"), false),
+ (OsStr::new(r"server"), OsStr::new(r""))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"\server\"), false),
+ (OsStr::new(r""), OsStr::new(r"server\"))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"servershare"), false),
+ (OsStr::new(r"servershare"), OsStr::new(""))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server/\//\/\\\\/////\/share"), false),
+ (OsStr::new(r"server"), OsStr::new(r"share"))
+ );
+
+ assert_eq!(
+ parse_next_component(OsStr::new(r"server\\\\\\\\\\\\\\share"), true),
+ (OsStr::new(r"server"), OsStr::new(r"\\\\\\\\\\\\\share"))
+ );
}
-use crate::mem;
+use crate::mem::ManuallyDrop;
use crate::ptr;
use crate::sync::atomic::AtomicPtr;
use crate::sync::atomic::Ordering::SeqCst;
}
unsafe fn register_dtor(key: Key, dtor: Dtor) {
- let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() });
+ let mut node = ManuallyDrop::new(Box::new(Node { key, dtor, next: ptr::null_mut() }));
let mut head = DTORS.load(SeqCst);
loop {
node.next = head;
- match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
- Ok(_) => {
- mem::forget(node);
- return;
- }
+ match DTORS.compare_exchange(head, &mut **node, SeqCst, SeqCst) {
+ Ok(_) => return, // nothing to drop, we successfully added the node to the list
Err(cur) => head = cur,
}
}
--- /dev/null
+// Thread parker implementation for Windows.
+//
+// This uses WaitOnAddress and WakeByAddressSingle if available (Windows 8+).
+// This modern API is exactly the same as the futex syscalls the Linux thread
+// parker uses. When these APIs are available, the implementation of this
+// thread parker matches the Linux thread parker exactly.
+//
+// However, when the modern API is not available, this implementation falls
+// back to NT Keyed Events, which are similar, but have some important
+// differences. These are available since Windows XP.
+//
+// WaitOnAddress first checks the state of the thread parker to make sure that no
+// WakeByAddressSingle calls can be missed between updating the parker state
+// and calling the function.
+//
+// NtWaitForKeyedEvent does not have this option, and unconditionally blocks
+// without checking the parker state first. Instead, NtReleaseKeyedEvent
+// (unlike WakeByAddressSingle) *blocks* until it has woken up a thread waiting
+// for it via NtWaitForKeyedEvent. This way, we can be sure no events are missed,
+// but we need to be careful not to block unpark() if park_timeout() was woken
+// up by a timeout instead of unpark().
+//
+// Unlike WaitOnAddress, NtWaitForKeyedEvent/NtReleaseKeyedEvent operate on a
+// HANDLE (created with NtCreateKeyedEvent). This means that we can be sure
+// a successfully awoken park() was awoken by unpark() and not a
+// NtReleaseKeyedEvent call from some other code, as these events are not only
+// matched by the key (address of the parker (state)), but also by this HANDLE.
+// We lazily allocate this handle the first time it is needed.
+//
+// The fast path (calling park() after unpark() was already called) and the
+// possible states are the same for both implementations. This is used here to
+// make sure the fast path does not even check which API to use, but can return
+// right away, independent of the used API. Only the slow paths (which will
+// actually block/wake a thread) check which API is available and have
+// different implementations.
+//
+// Unfortunately, NT Keyed Events are an undocumented Windows API. However:
+// - This API is relatively simple with obvious behaviour, and there are
+// several (unofficial) articles documenting the details. [1]
+// - `parking_lot` has been using this API for years (on Windows versions
+// before Windows 8). [2] Many big projects extensively use parking_lot,
+// such as servo and the Rust compiler itself.
+// - It is the underlying API used by Windows SRW locks and Windows critical
+// sections. [3] [4]
+// - The source code of the implementations of Wine, ReactOS, and Windows XP
+// is available and matches the expected behaviour.
+// - The main risk with an undocumented API is that it might change in the
+// future. But since we only use it for older versions of Windows, that's not
+// a problem.
+// - Even if these functions do not block or wake as we expect (which is
+// unlikely, see all previous points), this implementation would still be
+// memory safe. The NT Keyed Events API is only used to sleep/block in the
+// right place.
+//
+// [1]: http://www.locklessinc.com/articles/keyed_events/
+// [2]: https://github.com/Amanieu/parking_lot/commit/43abbc964e
+// [3]: https://docs.microsoft.com/en-us/archive/msdn-magazine/2012/november/windows-with-c-the-evolution-of-synchronization-in-windows-and-c
+// [4]: Windows Internals, Part 1, ISBN 9780735671300
+
+use crate::convert::TryFrom;
+use crate::ptr;
+use crate::sync::atomic::{
+ AtomicI8, AtomicUsize,
+ Ordering::{Acquire, Relaxed, Release},
+};
+use crate::sys::{c, dur2timeout};
+use crate::time::Duration;
+
+pub struct Parker {
+ state: AtomicI8,
+}
+
+const PARKED: i8 = -1;
+const EMPTY: i8 = 0;
+const NOTIFIED: i8 = 1;
+
+// Notes about memory ordering:
+//
+// Memory ordering is only relevant for the relative ordering of operations
+// between different variables. Even Ordering::Relaxed guarantees a
+// monotonic/consistent order when looking at just a single atomic variable.
+//
+// So, since this parker is just a single atomic variable, we only need to look
+// at the ordering guarantees we need to provide to the 'outside world'.
+//
+// The only memory ordering guarantee that parking and unparking provide, is
+// that things which happened before unpark() are visible on the thread
+// returning from park() afterwards. Otherwise, it was effectively unparked
+// before unpark() was called while still consuming the 'token'.
+//
+// In other words, unpark() needs to synchronize with the part of park() that
+// consumes the token and returns.
+//
+// This is done with a release-acquire synchronization, by using
+// Ordering::Release when writing NOTIFIED (the 'token') in unpark(), and using
+// Ordering::Acquire when reading this state in park() after waking up.
+impl Parker {
+ pub fn new() -> Self {
+ Self { state: AtomicI8::new(EMPTY) }
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`.
+ pub unsafe fn park(&self) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if c::WaitOnAddress::is_available() {
+ loop {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ c::WaitOnAddress(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, c::INFINITE);
+ // Change NOTIFIED=>EMPTY but leave PARKED alone.
+ if self.state.compare_and_swap(NOTIFIED, EMPTY, Acquire) == NOTIFIED {
+ // Actually woken up by unpark().
+ return;
+ } else {
+ // Spurious wake up. We loop to try again.
+ }
+ }
+ } else {
+ // Wait for unpark() to produce this event.
+ c::NtWaitForKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ self.state.swap(EMPTY, Acquire);
+ }
+ }
+
+ // Assumes this is only called by the thread that owns the Parker,
+ // which means that `self.state != PARKED`.
+ pub unsafe fn park_timeout(&self, timeout: Duration) {
+ // Change NOTIFIED=>EMPTY or EMPTY=>PARKED, and directly return in the
+ // first case.
+ if self.state.fetch_sub(1, Acquire) == NOTIFIED {
+ return;
+ }
+
+ if c::WaitOnAddress::is_available() {
+ // Wait for something to happen, assuming it's still set to PARKED.
+ c::WaitOnAddress(self.ptr(), &PARKED as *const _ as c::LPVOID, 1, dur2timeout(timeout));
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ // Note that we don't just write EMPTY, but use swap() to also
+ // include an acquire-ordered read to synchronize with unpark()'s
+ // release-ordered write.
+ if self.state.swap(EMPTY, Acquire) == NOTIFIED {
+ // Actually woken up by unpark().
+ } else {
+ // Timeout or spurious wake up.
+ // We return either way, because we can't easily tell if it was the
+ // timeout or not.
+ }
+ } else {
+ // Need to wait for unpark() using NtWaitForKeyedEvent.
+ let handle = keyed_event_handle();
+
+ // NtWaitForKeyedEvent uses a unit of 100ns, and uses negative
+ // values to indicate a relative time on the monotonic clock.
+ // This is documented here for the underlying KeWaitForSingleObject function:
+ // https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/wdm/nf-wdm-kewaitforsingleobject
+ let mut timeout = match i64::try_from((timeout.as_nanos() + 99) / 100) {
+ Ok(t) => -t,
+ Err(_) => i64::MIN,
+ };
+
+ // Wait for unpark() to produce this event.
+ let unparked =
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, &mut timeout) == c::STATUS_SUCCESS;
+
+ // Set the state back to EMPTY (from either PARKED or NOTIFIED).
+ let prev_state = self.state.swap(EMPTY, Acquire);
+
+ if !unparked && prev_state == NOTIFIED {
+ // We were awoken by a timeout, not by unpark(), but the state
+ // was set to NOTIFIED, which means we *just* missed an
+ // unpark(), which is now blocked on us to wait for it.
+ // Wait for it to consume the event and unblock that thread.
+ c::NtWaitForKeyedEvent(handle, self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+
+ pub fn unpark(&self) {
+ // Change PARKED=>NOTIFIED, EMPTY=>NOTIFIED, or NOTIFIED=>NOTIFIED, and
+ // wake the thread in the first case.
+ //
+ // Note that even NOTIFIED=>NOTIFIED results in a write. This is on
+ // purpose, to make sure every unpark() has a release-acquire ordering
+ // with park().
+ if self.state.swap(NOTIFIED, Release) == PARKED {
+ if c::WakeByAddressSingle::is_available() {
+ unsafe {
+ c::WakeByAddressSingle(self.ptr());
+ }
+ } else {
+ // If we run NtReleaseKeyedEvent before the waiting thread runs
+ // NtWaitForKeyedEvent, this (shortly) blocks until we can wake it up.
+ // If the waiting thread wakes up before we run NtReleaseKeyedEvent
+ // (e.g. due to a timeout), this blocks until we do wake up a thread.
+ // To prevent this thread from blocking indefinitely in that case,
+ // park_impl() will, after seeing the state set to NOTIFIED after
+ // waking up, call NtWaitForKeyedEvent again to unblock us.
+ unsafe {
+ c::NtReleaseKeyedEvent(keyed_event_handle(), self.ptr(), 0, ptr::null_mut());
+ }
+ }
+ }
+ }
+
+ fn ptr(&self) -> c::LPVOID {
+ &self.state as *const _ as c::LPVOID
+ }
+}
+
+fn keyed_event_handle() -> c::HANDLE {
+ const INVALID: usize = !0;
+ static HANDLE: AtomicUsize = AtomicUsize::new(INVALID);
+ match HANDLE.load(Relaxed) {
+ INVALID => {
+ let mut handle = c::INVALID_HANDLE_VALUE;
+ unsafe {
+ match c::NtCreateKeyedEvent(
+ &mut handle,
+ c::GENERIC_READ | c::GENERIC_WRITE,
+ ptr::null_mut(),
+ 0,
+ ) {
+ c::STATUS_SUCCESS => {}
+ r => panic!("Unable to create keyed event handle: error {}", r),
+ }
+ }
+ match HANDLE.compare_exchange(INVALID, handle as usize, Relaxed, Relaxed) {
+ Ok(_) => handle,
+ Err(h) => {
+ // Lost the race to another thread initializing HANDLE before we did.
+ // Closing our handle and using theirs instead.
+ unsafe {
+ c::CloseHandle(handle);
+ }
+ h as c::HANDLE
+ }
+ }
+ }
+ handle => handle as c::HANDLE,
+ }
+}
}
// split the string by ':' and convert the second part to u16
- let mut parts_iter = s.rsplitn(2, ':');
- let port_str = try_opt!(parts_iter.next(), "invalid socket address");
- let host = try_opt!(parts_iter.next(), "invalid socket address");
+ let (host, port_str) = try_opt!(s.rsplit_once(':'), "invalid socket address");
let port: u16 = try_opt!(port_str.parse().ok(), "invalid port value");
-
(host, port).try_into()
}
}
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;
-use crate::fmt;
-use crate::marker;
+use crate::marker::PhantomPinned;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
+use crate::pin::Pin;
use crate::sys::mutex as sys;
/// A re-entrant mutual exclusion
pub struct ReentrantMutex<T> {
inner: sys::ReentrantMutex,
data: T,
+ _pinned: PhantomPinned,
}
unsafe impl<T: Send> Send for ReentrantMutex<T> {}
/// guarded data.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
- lock: &'a ReentrantMutex<T>,
+ lock: Pin<&'a ReentrantMutex<T>>,
}
-impl<T> !marker::Send for ReentrantMutexGuard<'_, T> {}
+impl<T> !Send for ReentrantMutexGuard<'_, T> {}
impl<T> ReentrantMutex<T> {
/// Creates a new reentrant mutex in an unlocked state.
/// once this mutex is in its final resting place, and only then are the
/// lock/unlock methods safe.
pub const unsafe fn new(t: T) -> ReentrantMutex<T> {
- ReentrantMutex { inner: sys::ReentrantMutex::uninitialized(), data: t }
+ ReentrantMutex {
+ inner: sys::ReentrantMutex::uninitialized(),
+ data: t,
+ _pinned: PhantomPinned,
+ }
}
/// Initializes this mutex so it's ready for use.
///
/// Unsafe to call more than once, and must be called after this will no
/// longer move in memory.
- pub unsafe fn init(&self) {
- self.inner.init();
+ pub unsafe fn init(self: Pin<&mut Self>) {
+ self.get_unchecked_mut().inner.init()
}
/// Acquires a mutex, blocking the current thread until it is able to do so.
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
+ pub fn lock(self: Pin<&Self>) -> ReentrantMutexGuard<'_, T> {
unsafe { self.inner.lock() }
- ReentrantMutexGuard::new(&self)
+ ReentrantMutexGuard { lock: self }
}
/// Attempts to acquire this lock.
/// If another user of this mutex panicked while holding the mutex, then
/// this call will return failure if the mutex would otherwise be
/// acquired.
- pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
- if unsafe { self.inner.try_lock() } { Some(ReentrantMutexGuard::new(&self)) } else { None }
+ pub fn try_lock(self: Pin<&Self>) -> Option<ReentrantMutexGuard<'_, T>> {
+ if unsafe { self.inner.try_lock() } {
+ Some(ReentrantMutexGuard { lock: self })
+ } else {
+ None
+ }
}
}
}
}
-impl<T: fmt::Debug + 'static> fmt::Debug for ReentrantMutex<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self.try_lock() {
- Some(guard) => f.debug_struct("ReentrantMutex").field("data", &*guard).finish(),
- None => {
- struct LockedPlaceholder;
- impl fmt::Debug for LockedPlaceholder {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.write_str("<locked>")
- }
- }
-
- f.debug_struct("ReentrantMutex").field("data", &LockedPlaceholder).finish()
- }
- }
- }
-}
-
-impl<'mutex, T> ReentrantMutexGuard<'mutex, T> {
- fn new(lock: &'mutex ReentrantMutex<T>) -> ReentrantMutexGuard<'mutex, T> {
- ReentrantMutexGuard { lock }
- }
-}
-
impl<T> Deref for ReentrantMutexGuard<'_, T> {
type Target = T;
+use crate::boxed::Box;
use crate::cell::RefCell;
+use crate::pin::Pin;
use crate::sync::Arc;
use crate::sys_common::remutex::{ReentrantMutex, ReentrantMutexGuard};
use crate::thread;
#[test]
fn smoke() {
let m = unsafe {
- let m = ReentrantMutex::new(());
- m.init();
+ let mut m = Box::pin(ReentrantMutex::new(()));
+ m.as_mut().init();
m
};
+ let m = m.as_ref();
{
let a = m.lock();
{
#[test]
fn is_mutex() {
let m = unsafe {
- let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
- m.init();
- m
+ // FIXME: Simplify this if Arc gets a Arc::get_pin_mut.
+ let mut m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
+ Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+ Pin::new_unchecked(m)
};
let m2 = m.clone();
- let lock = m.lock();
+ let lock = m.as_ref().lock();
let child = thread::spawn(move || {
- let lock = m2.lock();
+ let lock = m2.as_ref().lock();
assert_eq!(*lock.borrow(), 4950);
});
for i in 0..100 {
- let lock = m.lock();
+ let lock = m.as_ref().lock();
*lock.borrow_mut() += i;
}
drop(lock);
#[test]
fn trylock_works() {
let m = unsafe {
- let m = Arc::new(ReentrantMutex::new(()));
- m.init();
- m
+ // FIXME: Simplify this if Arc gets a Arc::get_pin_mut.
+ let mut m = Arc::new(ReentrantMutex::new(()));
+ Pin::new_unchecked(Arc::get_mut_unchecked(&mut m)).init();
+ Pin::new_unchecked(m)
};
let m2 = m.clone();
- let _lock = m.try_lock();
- let _lock2 = m.try_lock();
+ let _lock = m.as_ref().try_lock();
+ let _lock2 = m.as_ref().try_lock();
thread::spawn(move || {
- let lock = m2.try_lock();
+ let lock = m2.as_ref().try_lock();
assert!(lock.is_none());
})
.join()
.unwrap();
- let _lock3 = m.try_lock();
+ let _lock3 = m.as_ref().try_lock();
}
pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
))] {
mod futex;
pub use futex::Parker;
+ } else if #[cfg(windows)] {
+ pub use crate::sys::thread_parker::Parker;
} else {
mod generic;
pub use generic::Parker;
-Subproject commit 777efaf5644706b36706a7a5c51edb63835e05ca
+Subproject commit f1ed22803f09e3b2c2b86d773db07ce70804987a
#![feature(termination_trait_lib)]
#![feature(test)]
#![feature(total_cmp)]
+#![feature(str_split_once)]
// Public reexports
pub use self::bench::{black_box, Bencher};
/// value.
pub fn from_env_var(env_var_name: &str) -> Option<Self> {
let durations_str = env::var(env_var_name).ok()?;
+ let (warn_str, critical_str) = durations_str.split_once(',').unwrap_or_else(|| {
+ panic!(
+ "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
+ env_var_name, durations_str
+ )
+ });
- // Split string into 2 substrings by comma and try to parse numbers.
- let mut durations = durations_str.splitn(2, ',').map(|v| {
+ let parse_u64 = |v| {
u64::from_str(v).unwrap_or_else(|_| {
panic!(
"Duration value in variable {} is expected to be a number, but got {}",
env_var_name, v
)
})
- });
-
- // Callback to be called if the environment variable has unexpected structure.
- let panic_on_incorrect_value = || {
- panic!(
- "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
- env_var_name, durations_str
- );
};
- let (warn, critical) = (
- durations.next().unwrap_or_else(panic_on_incorrect_value),
- durations.next().unwrap_or_else(panic_on_incorrect_value),
- );
-
+ let warn = parse_u64(warn_str);
+ let critical = parse_u64(critical_str);
if warn > critical {
panic!("Test execution warn time should be less or equal to the critical time");
}
# If the user already has a host build triple with an existing `rustc`
# install, use their preference. This fixes most issues with Windows builds
# being detected as GNU instead of MSVC.
+ default_encoding = sys.getdefaultencoding()
try:
version = subprocess.check_output(["rustc", "--version", "--verbose"])
+ version = version.decode(default_encoding)
host = next(x for x in version.split('\n') if x.startswith("host: "))
triple = host.split("host: ")[1]
if verbose:
print("rustup not detected: {}".format(e))
print("falling back to auto-detect")
- default_encoding = sys.getdefaultencoding()
required = sys.platform != 'win32'
ostype = require(["uname", "-s"], exit=required)
cputype = require(['uname', '-m'], exit=required)
env.setdefault("RUSTFLAGS", "")
env["RUSTFLAGS"] += " -Cdebuginfo=2"
- build_section = "target.{}".format(self.build_triple())
+ build_section = "target.{}".format(self.build)
target_features = []
if self.get_toml("crt-static", build_section) == "true":
target_features += ["+crt-static"]
run(args, env=env, verbose=self.verbose)
def build_triple(self):
- """Build triple as in LLVM"""
+ """Build triple as in LLVM
+
+ Note that `default_build_triple` is moderately expensive,
+ so use `self.build` where possible.
+ """
config = self.get_toml('build')
if config:
return config
filtered_submodules = []
submodules_names = []
llvm_checked_out = os.path.exists(os.path.join(self.rust_root, "src/llvm-project/.git"))
+ external_llvm_provided = self.get_toml('llvm-config') or self.downloading_llvm()
+ llvm_needed = not self.get_toml('codegen-backends', 'rust') \
+ or "llvm" in self.get_toml('codegen-backends', 'rust')
for module in submodules:
if module.endswith("llvm-project"):
- # Don't sync the llvm-project submodule either if an external LLVM
- # was provided, or if we are downloading LLVM. Also, if the
- # submodule has been initialized already, sync it anyways so that
- # it doesn't mess up contributor pull requests.
- if self.get_toml('llvm-config') or self.downloading_llvm():
+ # Don't sync the llvm-project submodule if an external LLVM was
+ # provided, if we are downloading LLVM or if the LLVM backend is
+ # not being built. Also, if the submodule has been initialized
+ # already, sync it anyways so that it doesn't mess up contributor
+ # pull requests.
+ if external_llvm_provided or not llvm_needed:
if self.get_toml('lld') != 'true' and not llvm_checked_out:
continue
check = self.check_submodule(module, slow_submodules)
.env("CFG_RELEASE_CHANNEL", &self.config.channel)
.env("RUSTDOC_REAL", self.rustdoc(compiler))
.env("RUSTC_BOOTSTRAP", "1")
- .arg("-Znormalize_docs")
.arg("-Winvalid_codeblock_attributes");
if self.config.deny_warnings {
cmd.arg("-Dwarnings");
}
+ // cfg(not(bootstrap)), can be removed on the next beta bump
+ if compiler.stage != 0 {
+ cmd.arg("-Znormalize-docs");
+ }
// Remove make-related flags that can cause jobserver problems.
cmd.env_remove("MAKEFLAGS");
copy_codegen_backends_to_sysroot(builder, build_compiler, target_compiler);
+ // We prepend this bin directory to the user PATH when linking Rust binaries. To
+ // avoid shadowing the system LLD we rename the LLD we provide to `rust-lld`.
let libdir = builder.sysroot_libdir(target_compiler, target_compiler.host);
+ let libdir_bin = libdir.parent().unwrap().join("bin");
+ t!(fs::create_dir_all(&libdir_bin));
+
if let Some(lld_install) = lld_install {
let src_exe = exe("lld", target_compiler.host);
let dst_exe = exe("rust-lld", target_compiler.host);
- // we prepend this bin directory to the user PATH when linking Rust binaries. To
- // avoid shadowing the system LLD we rename the LLD we provide to `rust-lld`.
- let dst = libdir.parent().unwrap().join("bin");
- t!(fs::create_dir_all(&dst));
- builder.copy(&lld_install.join("bin").join(&src_exe), &dst.join(&dst_exe));
+ builder.copy(&lld_install.join("bin").join(&src_exe), &libdir_bin.join(&dst_exe));
+ }
+
+ // Similarly, copy `llvm-dwp` into libdir for Split DWARF.
+ {
+ let src_exe = exe("llvm-dwp", target_compiler.host);
+ let dst_exe = exe("rust-llvm-dwp", target_compiler.host);
+ let llvm_config_bin = builder.ensure(native::Llvm { target: target_compiler.host });
+ let llvm_bin_dir = llvm_config_bin.parent().unwrap();
+ builder.copy(&llvm_bin_dir.join(&src_exe), &libdir_bin.join(&dst_exe));
}
// Ensure that `libLLVM.so` ends up in the newly build compiler directory,
// component for now.
maybe_install_llvm_runtime(builder, host, image);
+ let src_dir = builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin");
+ let dst_dir = image.join("lib/rustlib").join(&*host.triple).join("bin");
+ t!(fs::create_dir_all(&dst_dir));
+
// Copy over lld if it's there
if builder.config.lld_enabled {
let exe = exe("rust-lld", compiler.host);
- let src =
- builder.sysroot_libdir(compiler, host).parent().unwrap().join("bin").join(&exe);
- // for the rationale about this rename check `compile::copy_lld_to_sysroot`
- let dst = image.join("lib/rustlib").join(&*host.triple).join("bin").join(&exe);
- t!(fs::create_dir_all(&dst.parent().unwrap()));
- builder.copy(&src, &dst);
+ builder.copy(&src_dir.join(&exe), &dst_dir.join(&exe));
}
+ // Copy over llvm-dwp if it's there
+ let exe = exe("rust-llvm-dwp", compiler.host);
+ builder.copy(&src_dir.join(&exe), &dst_dir.join(&exe));
+
// Man pages
t!(fs::create_dir_all(image.join("share/man/man1")));
let man_src = builder.src.join("src/doc/man");
builder.copy(&builder.src.join(file), &dst_src.join(file));
}
- // libtest includes std and everything else, so vendoring it
- // creates exactly what's needed for `cargo -Zbuild-std` or any
- // other analysis of the stdlib's source. Cargo also needs help
- // finding the lock, so we copy it to libtest temporarily.
- //
- // Note that this requires std to only have one version of each
- // crate. e.g. two versions of getopts won't be patchable.
- let dst_libtest = dst_src.join("library/test");
- let dst_vendor = dst_src.join("vendor");
- let root_lock = dst_src.join("Cargo.lock");
- let temp_lock = dst_libtest.join("Cargo.lock");
-
- // `cargo vendor` will delete everything from the lockfile that
- // isn't used by libtest, so we need to not use any links!
- builder.really_copy(&root_lock, &temp_lock);
-
- let mut cmd = Command::new(&builder.initial_cargo);
- cmd.arg("vendor").arg(dst_vendor).current_dir(&dst_libtest);
- builder.info("Dist src");
- let _time = timeit(builder);
- builder.run(&mut cmd);
-
- builder.remove(&temp_lock);
-
// Create source tarball in rust-installer format
let mut cmd = rust_installer(builder);
cmd.arg("generate")
.arg("--component-name=rust-src")
.arg("--legacy-manifest-dirs=rustlib,cargo");
+ builder.info("Dist src");
+ let _time = timeit(builder);
builder.run(&mut cmd);
builder.remove_dir(&image);
return change_drive(unc_to_lfs(&path)).unwrap_or(path);
fn unc_to_lfs(s: &str) -> &str {
- if s.starts_with("//?/") { &s[4..] } else { s }
+ s.strip_prefix("//?/").unwrap_or(s)
}
fn change_drive(s: &str) -> Option<String> {
install_bin("llvm-profdata");
install_bin("llvm-bcanalyzer");
install_bin("llvm-cov");
+ install_bin("llvm-dwp");
builder.install(&builder.llvm_filecheck(target), &dst_bindir, 0o755);
// Copy the include directory as well; needed mostly to build
cargo.rustdocflag("--document-private-items");
cargo.rustdocflag("--enable-index-page");
cargo.rustdocflag("-Zunstable-options");
- cargo.rustdocflag("-Znormalize-docs");
+ // cfg(not(bootstrap)), can be removed on the next beta bump
+ if stage != 0 {
+ cargo.rustdocflag("-Znormalize-docs");
+ }
compile::rustc_cargo(builder, &mut cargo, target);
// Only include compiler crates, no dependencies of those, such as `libc`.
Change this file to make users of the `download-ci-llvm` configuration download
a new version of LLVM from CI, even if the LLVM submodule hasn’t changed.
-Last change is for: https://github.com/rust-lang/rust/pull/78131
+Last change is for: https://github.com/rust-lang/rust/pull/80087
paths
}
- /// Copies a file from `src` to `dst` and doesn't use links, so
- /// that the copy can be modified without affecting the original.
- pub fn really_copy(&self, src: &Path, dst: &Path) {
- if self.config.dry_run {
- return;
- }
- self.verbose_than(1, &format!("Copy {:?} to {:?}", src, dst));
- if src == dst {
- return;
- }
- let _ = fs::remove_file(&dst);
- let metadata = t!(src.symlink_metadata());
- if let Err(e) = fs::copy(src, dst) {
- panic!("failed to copy `{}` to `{}`: {}", src.display(), dst.display(), e)
- }
- t!(fs::set_permissions(dst, metadata.permissions()));
- let atime = FileTime::from_last_access_time(&metadata);
- let mtime = FileTime::from_last_modification_time(&metadata);
- t!(filetime::set_file_times(dst, atime, mtime));
- }
-
/// Copies a file from `src` to `dst`
pub fn copy(&self, src: &Path, dst: &Path) {
if self.config.dry_run {
use build_helper::{output, t};
+use crate::cache::INTERNER;
use crate::config::Target;
use crate::Build;
}
// We need cmake, but only if we're actually building LLVM or sanitizers.
- let building_llvm = build
- .hosts
- .iter()
- .map(|host| {
- build
- .config
- .target_config
- .get(host)
- .map(|config| config.llvm_config.is_none())
- .unwrap_or(true)
- })
- .any(|build_llvm_ourselves| build_llvm_ourselves);
+ let building_llvm = build.config.rust_codegen_backends.contains(&INTERNER.intern_str("llvm"))
+ && build
+ .hosts
+ .iter()
+ .map(|host| {
+ build
+ .config
+ .target_config
+ .get(host)
+ .map(|config| config.llvm_config.is_none())
+ .unwrap_or(true)
+ })
+ .any(|build_llvm_ourselves| build_llvm_ourselves);
if building_llvm || build.config.any_sanitizers_enabled() {
cmd_finder.must_have("cmake");
}
}
}
- // Externally configured LLVM requires FileCheck to exist
- let filecheck = build.llvm_filecheck(build.build);
- if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests {
- panic!("FileCheck executable {:?} does not exist", filecheck);
+ if build.config.rust_codegen_backends.contains(&INTERNER.intern_str("llvm")) {
+ // Externally configured LLVM requires FileCheck to exist
+ let filecheck = build.llvm_filecheck(build.build);
+ if !filecheck.starts_with(&build.out) && !filecheck.exists() && build.config.codegen_tests {
+ panic!("FileCheck executable {:?} does not exist", filecheck);
+ }
}
for target in &build.targets {
panic!("the iOS target is only supported on macOS");
}
- build
- .config
- .target_config
- .entry(target.clone())
- .or_insert(Target::from_triple(&target.triple));
+ build.config.target_config.entry(*target).or_insert(Target::from_triple(&target.triple));
if target.contains("-none-") || target.contains("nvptx") {
if build.no_std(*target) == Some(false) {
// If this is a native target (host is also musl) and no musl-root is given,
// fall back to the system toolchain in /usr before giving up
if build.musl_root(*target).is_none() && build.config.build == *target {
- let target = build.config.target_config.entry(target.clone()).or_default();
+ let target = build.config.target_config.entry(*target).or_default();
target.musl_root = Some("/usr".into());
}
match build.musl_libdir(*target) {
suite: "incremental"
});
-default_test!(Debuginfo { path: "src/test/debuginfo", mode: "debuginfo", suite: "debuginfo" });
+default_test_with_compare_mode!(Debuginfo {
+ path: "src/test/debuginfo",
+ mode: "debuginfo",
+ suite: "debuginfo",
+ compare_mode: "split-dwarf"
+});
host_test!(UiFullDeps { path: "src/test/ui-fulldeps", mode: "ui", suite: "ui-fulldeps" });
-FROM ubuntu:16.04
+FROM ubuntu:20.04
-RUN apt-get update && apt-get install -y --no-install-recommends \
+# Avoid interactive prompts while installing `tzdata` dependency with `DEBIAN_FRONTEND`.
+RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
g++ \
make \
ninja-build \
* If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`.
* You can install compatible versions of these tools via `rustup`.
-The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-bintools`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
+The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-binutils`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
```shell
$ rustup component add llvm-tools-preview
[rustc-dev-guide-how-to-build-and-run]: https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html
[`rustfilt`]: https://crates.io/crates/rustfilt
[`json5format`]: https://crates.io/crates/json5format
-[`cargo-bintools`]: https://crates.io/crates/cargo-bintools
+[`cargo-binutils`]: https://crates.io/crates/cargo-binutils
[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show
-[source-based code coverage in Clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
\ No newline at end of file
+[source-based code coverage in Clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
------------------------
-The `const_fn` feature allows marking free functions and inherent methods as
-`const`, enabling them to be called in constants contexts, with constant
-arguments.
-
-## Examples
-
-```rust
-#![feature(const_fn)]
-
-const fn double(x: i32) -> i32 {
- x * 2
-}
-
-const FIVE: i32 = 5;
-const TEN: i32 = double(FIVE);
-
-fn main() {
- assert_eq!(5, FIVE);
- assert_eq!(10, TEN);
-}
-```
+The `const_fn` feature enables additional functionality not stabilized in the
+[minimal subset of `const_fn`](https://github.com/rust-lang/rust/issues/53555)
};
Some(Item {
- source: Span::empty(),
+ source: Span::dummy(),
name: None,
attrs: Default::default(),
visibility: Inherited,
match br {
// We only care about named late bound regions, as we need to add them
// to the 'for<>' section
- ty::BrNamed(_, name) => Some(GenericParamDef {
- name: name.to_string(),
- kind: GenericParamDefKind::Lifetime,
- }),
+ ty::BrNamed(_, name) => {
+ Some(GenericParamDef { name, kind: GenericParamDefKind::Lifetime })
+ }
_ => None,
}
})
}
WherePredicate::EqPredicate { lhs, rhs } => {
match lhs {
- Type::QPath { name: ref left_name, ref self_type, ref trait_ } => {
+ Type::QPath { name: left_name, ref self_type, ref trait_ } => {
let ty = &*self_type;
match **trait_ {
Type::ResolvedPath {
} => {
let mut new_trait_path = trait_path.clone();
- if self.is_fn_ty(tcx, trait_) && left_name == FN_OUTPUT_NAME {
+ if self.is_fn_ty(tcx, trait_) && left_name == sym::Output {
ty_to_fn
.entry(*ty.clone())
.and_modify(|e| *e = (e.0.clone(), Some(rhs.clone())))
use rustc_middle::ty;
use rustc_mir::const_eval::is_min_const_fn;
use rustc_span::hygiene::MacroKind;
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::{kw, sym, Symbol};
use rustc_span::Span;
use crate::clean::{self, Attributes, GetDefId, ToSource, TypeKind};
let attrs = merge_attrs(cx, Some(parent_module), target_attrs, attrs_clone);
cx.renderinfo.borrow_mut().inlined.insert(did);
- let what_rustc_thinks = clean::Item::from_def_id_and_parts(did, Some(name.clean(cx)), kind, cx);
+ let what_rustc_thinks = clean::Item::from_def_id_and_parts(did, Some(name), kind, cx);
ret.push(clean::Item { attrs, ..what_rustc_thinks });
Some(ret)
}
return;
}
- let attrs = merge_attrs(cx, parent_module.into(), load_attrs(cx, did), attrs);
- debug!("merged_attrs={:?}", attrs);
-
let tcx = cx.tcx;
let associated_trait = tcx.impl_trait_ref(did);
debug!("build_impl: impl {:?} for {:?}", trait_.def_id(), for_.def_id());
- ret.push(clean::Item::from_def_id_and_parts(
+ let mut item = clean::Item::from_def_id_and_parts(
did,
None,
clean::ImplItem(clean::Impl {
blanket_impl: None,
}),
cx,
- ));
+ );
+ item.attrs = merge_attrs(cx, parent_module.into(), load_attrs(cx, did), attrs);
+ debug!("merged_attrs={:?}", item.attrs);
+ ret.push(item);
}
fn build_module(cx: &DocContext<'_>, did: DefId, visited: &mut FxHashSet<DefId>) -> clean::Module {
items.push(clean::Item {
name: None,
attrs: clean::Attributes::default(),
- source: clean::Span::empty(),
+ source: clean::Span::dummy(),
def_id: DefId::local(CRATE_DEF_INDEX),
visibility: clean::Public,
stability: None,
for pred in &mut g.where_predicates {
match *pred {
clean::WherePredicate::BoundPredicate { ty: clean::Generic(ref s), ref mut bounds }
- if *s == "Self" =>
+ if *s == kw::SelfUpper =>
{
bounds.retain(|bound| match *bound {
clean::GenericBound::TraitBound(
name: ref _name,
},
ref bounds,
- } => !(bounds.is_empty() || *s == "Self" && did == trait_did),
+ } => !(bounds.is_empty() || *s == kw::SelfUpper && did == trait_did),
_ => true,
});
g
let mut ty_bounds = Vec::new();
g.where_predicates.retain(|pred| match *pred {
clean::WherePredicate::BoundPredicate { ty: clean::Generic(ref s), ref bounds }
- if *s == "Self" =>
+ if *s == kw::SelfUpper =>
{
ty_bounds.extend(bounds.iter().cloned());
false
use rustc_mir::const_eval::{is_const_fn, is_min_const_fn, is_unstable_const_fn};
use rustc_span::hygiene::{AstPass, MacroKind};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{self, ExpnKind, Pos};
+use rustc_span::{self, ExpnKind};
use rustc_typeck::hir_ty_to_ty;
use std::collections::hash_map::Entry;
crate use self::types::Visibility::{Inherited, Public};
crate use self::types::*;
-const FN_OUTPUT_NAME: &str = "Output";
-
crate trait Clean<T> {
fn clean(&self, cx: &DocContext<'_>) -> T;
}
for attr in attrs.lists(sym::doc) {
if attr.has_name(sym::keyword) {
if let Some(v) = attr.value_str() {
- keyword = Some(v.to_string());
+ keyword = Some(v);
break;
}
}
};
ExternalCrate {
- name: cx.tcx.crate_name(*self).to_string(),
+ name: cx.tcx.crate_name(*self),
src: krate_src,
attrs: cx.tcx.get_attrs(root).clean(cx),
primitives,
ModuleItem(Module { is_crate: self.is_crate, items }),
cx,
);
- Item {
- name: Some(what_rustc_thinks.name.unwrap_or_default()),
- attrs,
- source: span.clean(cx),
- ..what_rustc_thinks
- }
+ Item { attrs, source: span.clean(cx), ..what_rustc_thinks }
}
}
.collect_referenced_late_bound_regions(&poly_trait_ref)
.into_iter()
.filter_map(|br| match br {
- ty::BrNamed(_, name) => Some(GenericParamDef {
- name: name.to_string(),
- kind: GenericParamDefKind::Lifetime,
- }),
+ ty::BrNamed(_, name) => {
+ Some(GenericParamDef { name, kind: GenericParamDefKind::Lifetime })
+ }
_ => None,
})
.collect();
GenericBound::Outlives(_) => panic!("cleaning a trait got a lifetime"),
};
Type::QPath {
- name: cx.tcx.associated_item(self.item_def_id).ident.name.clean(cx),
+ name: cx.tcx.associated_item(self.item_def_id).ident.name,
self_type: box self.self_ty().clean(cx),
trait_: box trait_,
}
impl Clean<GenericParamDef> for ty::GenericParamDef {
fn clean(&self, cx: &DocContext<'_>) -> GenericParamDef {
let (name, kind) = match self.kind {
- ty::GenericParamDefKind::Lifetime => {
- (self.name.to_string(), GenericParamDefKind::Lifetime)
- }
+ ty::GenericParamDefKind::Lifetime => (self.name, GenericParamDefKind::Lifetime),
ty::GenericParamDefKind::Type { has_default, synthetic, .. } => {
let default =
if has_default { Some(cx.tcx.type_of(self.def_id).clean(cx)) } else { None };
(
- self.name.clean(cx),
+ self.name,
GenericParamDefKind::Type {
did: self.def_id,
bounds: vec![], // These are filled in from the where-clauses.
)
}
ty::GenericParamDefKind::Const { .. } => (
- self.name.clean(cx),
+ self.name,
GenericParamDefKind::Const {
did: self.def_id,
ty: cx.tcx.type_of(self.def_id).clean(cx),
for bound in bounds {
s.push_str(&format!(" + {}", bound.name.ident()));
}
- s
+ Symbol::intern(&s)
} else {
- self.name.ident().to_string()
+ self.name.ident().name
};
(name, GenericParamDefKind::Lifetime)
}
hir::GenericParamKind::Type { ref default, synthetic } => (
- self.name.ident().name.clean(cx),
+ self.name.ident().name,
GenericParamDefKind::Type {
did: cx.tcx.hir().local_def_id(self.hir_id).to_def_id(),
bounds: self.bounds.clean(cx),
},
),
hir::GenericParamKind::Const { ref ty } => (
- self.name.ident().name.clean(cx),
+ self.name.ident().name,
GenericParamDefKind::Const {
did: cx.tcx.hir().local_def_id(self.hir_id).to_def_id(),
ty: ty.clean(cx),
.collect::<Vec<GenericParamDef>>();
// param index -> [(DefId of trait, associated type name, type)]
- let mut impl_trait_proj = FxHashMap::<u32, Vec<(DefId, String, Ty<'tcx>)>>::default();
+ let mut impl_trait_proj = FxHashMap::<u32, Vec<(DefId, Symbol, Ty<'tcx>)>>::default();
let where_predicates = preds
.predicates
if let Some(((_, trait_did, name), rhs)) =
proj.as_ref().and_then(|(lhs, rhs)| Some((lhs.projection()?, rhs)))
{
- impl_trait_proj.entry(param_idx).or_default().push((
- trait_did,
- name.to_string(),
- rhs,
- ));
+ impl_trait_proj
+ .entry(param_idx)
+ .or_default()
+ .push((trait_did, name, rhs));
}
return None;
if let crate::core::ImplTraitParam::ParamIndex(idx) = param {
if let Some(proj) = impl_trait_proj.remove(&idx) {
for (trait_did, name, rhs) in proj {
- simplify::merge_bounds(cx, &mut bounds, trait_did, &name, &rhs.clean(cx));
+ simplify::merge_bounds(cx, &mut bounds, trait_did, name, &rhs.clean(cx));
}
}
} else {
.iter()
.enumerate()
.map(|(i, ty)| {
- let mut name = self.1.get(i).map(|ident| ident.to_string()).unwrap_or_default();
+ let mut name = self.1.get(i).map(|ident| ident.name).unwrap_or(kw::Invalid);
if name.is_empty() {
- name = "_".to_string();
+ name = kw::Underscore;
}
Argument { name, type_: ty.clean(cx) }
})
.iter()
.map(|t| Argument {
type_: t.clean(cx),
- name: names.next().map_or_else(|| String::new(), |name| name.to_string()),
+ name: names.next().map(|i| i.name).unwrap_or(kw::Invalid),
})
.collect(),
},
AssocTypeItem(bounds.clean(cx), default.clean(cx))
}
};
- Item::from_def_id_and_parts(local_did, Some(self.ident.name.clean(cx)), inner, cx)
+ Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx)
})
}
}
TypedefItem(Typedef { type_, generics: Generics::default(), item_type }, true)
}
};
- Item::from_def_id_and_parts(local_did, Some(self.ident.name.clean(cx)), inner, cx)
+ Item::from_def_id_and_parts(local_did, Some(self.ident.name), inner, cx)
})
}
}
};
let self_arg_ty = sig.input(0).skip_binder();
if self_arg_ty == self_ty {
- decl.inputs.values[0].type_ = Generic(String::from("Self"));
+ decl.inputs.values[0].type_ = Generic(kw::SelfUpper);
} else if let ty::Ref(_, ty, _) = *self_arg_ty.kind() {
if ty == self_ty {
match decl.inputs.values[0].type_ {
BorrowedRef { ref mut type_, .. } => {
- **type_ = Generic(String::from("Self"))
+ **type_ = Generic(kw::SelfUpper)
}
_ => unreachable!(),
}
}
}
ty::AssocKind::Type => {
- let my_name = self.ident.name.clean(cx);
+ let my_name = self.ident.name;
if let ty::TraitContainer(_) = self.container {
let bounds = cx.tcx.explicit_item_bounds(self.def_id);
_ => return None,
}
match **self_type {
- Generic(ref s) if *s == "Self" => {}
+ Generic(ref s) if *s == kw::SelfUpper => {}
_ => return None,
}
Some(bounds)
}
};
- Item::from_def_id_and_parts(self.def_id, Some(self.ident.name.clean(cx)), kind, cx)
+ Item::from_def_id_and_parts(self.def_id, Some(self.ident.name), kind, cx)
}
}
segments: trait_segments.clean(cx),
};
Type::QPath {
- name: p.segments.last().expect("segments were empty").ident.name.clean(cx),
+ name: p.segments.last().expect("segments were empty").ident.name,
self_type: box qself.clean(cx),
trait_: box resolve_type(cx, trait_path, hir_id),
}
};
let trait_path = hir::Path { span, res, segments: &[] };
Type::QPath {
- name: segment.ident.name.clean(cx),
+ name: segment.ident.name,
self_type: box qself.clean(cx),
trait_: box resolve_type(cx, trait_path.clean(cx), hir_id),
}
let mut bindings = vec![];
for pb in obj.projection_bounds() {
bindings.push(TypeBinding {
- name: cx.tcx.associated_item(pb.item_def_id()).ident.name.clean(cx),
+ name: cx.tcx.associated_item(pb.item_def_id()).ident.name,
kind: TypeBindingKind::Equality { ty: pb.skip_binder().ty.clean(cx) },
});
}
if let Some(bounds) = cx.impl_trait_bounds.borrow_mut().remove(&p.index.into()) {
ImplTrait(bounds)
} else {
- Generic(p.name.to_string())
+ Generic(p.name)
}
}
.tcx
.associated_item(proj.projection_ty.item_def_id)
.ident
- .name
- .clean(cx),
+ .name,
kind: TypeBindingKind::Equality {
ty: proj.ty.clean(cx),
},
fn clean(&self, cx: &DocContext<'_>) -> Item {
let what_rustc_thinks = Item::from_def_id_and_parts(
self.did,
- Some(self.ident.name.clean(cx)),
+ Some(self.ident.name),
StructFieldItem(cx.tcx.type_of(self.did).clean(cx)),
cx,
);
.fields
.iter()
.map(|field| {
- let name = Some(field.ident.name.clean(cx));
+ let name = Some(field.ident.name);
let kind = StructFieldItem(cx.tcx.type_of(field.did).clean(cx));
let what_rustc_thinks =
Item::from_def_id_and_parts(field.did, name, kind, cx);
};
let what_rustc_thinks = Item::from_def_id_and_parts(
self.def_id,
- Some(self.ident.name.clean(cx)),
+ Some(self.ident.name),
VariantItem(Variant { kind }),
cx,
);
}
impl Clean<Span> for rustc_span::Span {
- fn clean(&self, cx: &DocContext<'_>) -> Span {
- if self.is_dummy() {
- return Span::empty();
- }
-
- // Get the macro invocation instead of the definition,
- // in case the span is result of a macro expansion.
- // (See rust-lang/rust#39726)
- let span = self.source_callsite();
-
- let sm = cx.sess().source_map();
- let filename = sm.span_to_filename(span);
- let lo = sm.lookup_char_pos(span.lo());
- let hi = sm.lookup_char_pos(span.hi());
- Span {
- filename,
- cnum: lo.file.cnum,
- loline: lo.line,
- locol: lo.col.to_usize(),
- hiline: hi.line,
- hicol: hi.col.to_usize(),
- original: span,
- }
+ fn clean(&self, _cx: &DocContext<'_>) -> Span {
+ Span::from_rustc_span(*self)
}
}
_ => unreachable!("not yet converted"),
};
- vec![Item::from_def_id_and_parts(def_id, Some(name.clean(cx)), kind, cx)]
+ vec![Item::from_def_id_and_parts(def_id, Some(name), kind, cx)]
})
}
}
}
}
-impl Clean<Deprecation> for attr::Deprecation {
- fn clean(&self, _: &DocContext<'_>) -> Deprecation {
- Deprecation {
- since: self.since.map(|s| s.to_string()).filter(|s| !s.is_empty()),
- note: self.note.map(|n| n.to_string()).filter(|n| !n.is_empty()),
- is_since_rustc_version: self.is_since_rustc_version,
- }
- }
-}
-
impl Clean<TypeBinding> for hir::TypeBinding<'_> {
fn clean(&self, cx: &DocContext<'_>) -> TypeBinding {
- TypeBinding { name: self.ident.name.clean(cx), kind: self.kind.clean(cx) }
+ TypeBinding { name: self.ident.name, kind: self.kind.clean(cx) }
}
}
use rustc_hir::def_id::DefId;
use rustc_middle::ty;
+use rustc_span::Symbol;
use crate::clean;
use crate::clean::GenericArgs as PP;
cx: &clean::DocContext<'_>,
bounds: &mut Vec<clean::GenericBound>,
trait_did: DefId,
- name: &str,
+ name: Symbol,
rhs: &clean::Type,
) -> bool {
!bounds.iter_mut().any(|b| {
match last.args {
PP::AngleBracketed { ref mut bindings, .. } => {
bindings.push(clean::TypeBinding {
- name: name.to_string(),
+ name,
kind: clean::TypeBindingKind::Equality { ty: rhs.clone() },
});
}
use rustc_ast::util::comments::beautify_doc_string;
use rustc_ast::{self as ast, AttrStyle};
use rustc_ast::{FloatTy, IntTy, UintTy};
-use rustc_attr::{ConstStability, Stability, StabilityLevel};
+use rustc_attr::{ConstStability, Deprecation, Stability, StabilityLevel};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_feature::UnstableFeatures;
use rustc_hir as hir;
use rustc_hir::def::Res;
-use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, DefId};
use rustc_hir::lang_items::LangItem;
use rustc_hir::Mutability;
use rustc_index::vec::IndexVec;
-use rustc_middle::ty::{AssocKind, TyCtxt};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::DUMMY_SP;
use rustc_span::symbol::{kw, sym, Ident, Symbol, SymbolStr};
-use rustc_span::{self, FileName};
+use rustc_span::{self, FileName, Loc};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;
use smallvec::{smallvec, SmallVec};
#[derive(Clone, Debug)]
crate struct Crate {
- crate name: String,
+ crate name: Symbol,
crate version: Option<String>,
crate src: FileName,
crate module: Option<Item>,
#[derive(Clone, Debug)]
crate struct ExternalCrate {
- crate name: String,
+ crate name: Symbol,
crate src: FileName,
crate attrs: Attributes,
crate primitives: Vec<(DefId, PrimitiveType)>,
- crate keywords: Vec<(DefId, String)>,
+ crate keywords: Vec<(DefId, Symbol)>,
}
/// Anything with a source location and set of attributes and, optionally, a
/// Stringified span
crate source: Span,
/// Not everything has a name. E.g., impls
- crate name: Option<String>,
+ crate name: Option<Symbol>,
crate attrs: Attributes,
crate visibility: Visibility,
crate kind: ItemKind,
kind: ItemKind,
cx: &DocContext<'_>,
) -> Item {
- Item::from_def_id_and_parts(
- cx.tcx.hir().local_def_id(hir_id).to_def_id(),
- name.clean(cx),
- kind,
- cx,
- )
+ Item::from_def_id_and_parts(cx.tcx.hir().local_def_id(hir_id).to_def_id(), name, kind, cx)
}
pub fn from_def_id_and_parts(
def_id: DefId,
- name: Option<String>,
+ name: Option<Symbol>,
kind: ItemKind,
cx: &DocContext<'_>,
) -> Item {
attrs: cx.tcx.get_attrs(def_id).clean(cx),
visibility: cx.tcx.visibility(def_id).clean(cx),
stability: cx.tcx.lookup_stability(def_id).cloned(),
- deprecation: cx.tcx.lookup_deprecation(def_id).clean(cx),
+ deprecation: cx.tcx.lookup_deprecation(def_id),
const_stability: cx.tcx.lookup_const_stability(def_id).cloned(),
}
}
AssocTypeItem(Vec<GenericBound>, Option<Type>),
/// An item that has been stripped by a rustdoc pass
StrippedItem(Box<ItemKind>),
- KeywordItem(String),
+ KeywordItem(Symbol),
}
impl ItemKind {
_ => false,
}
}
-
- crate fn as_assoc_kind(&self) -> Option<AssocKind> {
- match *self {
- ItemKind::AssocConstItem(..) => Some(AssocKind::Const),
- ItemKind::AssocTypeItem(..) => Some(AssocKind::Type),
- ItemKind::TyMethodItem(..) | ItemKind::MethodItem(..) => Some(AssocKind::Fn),
- _ => None,
- }
- }
}
#[derive(Clone, Debug)]
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct GenericParamDef {
- crate name: String,
+ crate name: Symbol,
crate kind: GenericParamDefKind,
}
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct Argument {
crate type_: Type,
- crate name: String,
+ crate name: Symbol,
}
#[derive(Clone, PartialEq, Debug)]
impl Argument {
crate fn to_self(&self) -> Option<SelfTy> {
- if self.name != "self" {
+ if self.name != kw::SelfLower {
return None;
}
if self.type_.is_self_type() {
},
/// For parameterized types, so the consumer of the JSON don't go
/// looking for types which don't exist anywhere.
- Generic(String),
+ Generic(Symbol),
/// Primitives are the fixed-size numeric types (plus int/usize/float), char,
/// arrays, slices, and tuples.
Primitive(PrimitiveType),
// `<Type as Trait>::Name`
QPath {
- name: String,
+ name: Symbol,
self_type: Box<Type>,
trait_: Box<Type>,
},
}
#[derive(Clone, PartialEq, Eq, Hash, Copy, Debug)]
+/// N.B. this has to be different from `hir::PrimTy` because it also includes types that aren't
+/// paths, like `Unit`.
crate enum PrimitiveType {
Isize,
I8,
crate fn is_self_type(&self) -> bool {
match *self {
- Generic(ref name) => name == "Self",
+ Generic(name) => name == kw::SelfUpper,
_ => false,
}
}
}
}
- crate fn projection(&self) -> Option<(&Type, DefId, &str)> {
+ crate fn projection(&self) -> Option<(&Type, DefId, Symbol)> {
let (self_, trait_, name) = match self {
- QPath { ref self_type, ref trait_, ref name } => (self_type, trait_, name),
+ QPath { ref self_type, ref trait_, name } => (self_type, trait_, name),
_ => return None,
};
let trait_did = match **trait_ {
ResolvedPath { did, .. } => did,
_ => return None,
};
- Some((&self_, trait_did, name))
+ Some((&self_, trait_did, *name))
}
}
crate fn to_url_str(&self) -> &'static str {
self.as_str()
}
+
+ crate fn as_sym(&self) -> Symbol {
+ use PrimitiveType::*;
+ match self {
+ Isize => sym::isize,
+ I8 => sym::i8,
+ I16 => sym::i16,
+ I32 => sym::i32,
+ I64 => sym::i64,
+ I128 => sym::i128,
+ Usize => sym::usize,
+ U8 => sym::u8,
+ U16 => sym::u16,
+ U32 => sym::u32,
+ U64 => sym::u64,
+ U128 => sym::u128,
+ F32 => sym::f32,
+ F64 => sym::f64,
+ Str => sym::str,
+ Bool => sym::bool,
+ Char => sym::char,
+ Array => sym::array,
+ Slice => sym::slice,
+ Tuple => sym::tuple,
+ Unit => sym::unit,
+ RawPointer => sym::pointer,
+ Reference => sym::reference,
+ Fn => kw::Fn,
+ Never => sym::never,
+ }
+ }
}
impl From<ast::IntTy> for PrimitiveType {
Struct(VariantStruct),
}
+/// Small wrapper around `rustc_span::Span` that adds helper methods and enforces calling `source_callsite`.
#[derive(Clone, Debug)]
-crate struct Span {
- crate filename: FileName,
- crate cnum: CrateNum,
- crate loline: usize,
- crate locol: usize,
- crate hiline: usize,
- crate hicol: usize,
- crate original: rustc_span::Span,
-}
+crate struct Span(rustc_span::Span);
impl Span {
- crate fn empty() -> Span {
- Span {
- filename: FileName::Anon(0),
- cnum: LOCAL_CRATE,
- loline: 0,
- locol: 0,
- hiline: 0,
- hicol: 0,
- original: rustc_span::DUMMY_SP,
- }
+ crate fn from_rustc_span(sp: rustc_span::Span) -> Self {
+ // Get the macro invocation instead of the definition,
+ // in case the span is result of a macro expansion.
+ // (See rust-lang/rust#39726)
+ Self(sp.source_callsite())
+ }
+
+ crate fn dummy() -> Self {
+ Self(rustc_span::DUMMY_SP)
}
crate fn span(&self) -> rustc_span::Span {
- self.original
+ self.0
+ }
+
+ crate fn filename(&self, sess: &Session) -> FileName {
+ sess.source_map().span_to_filename(self.0)
+ }
+
+ crate fn lo(&self, sess: &Session) -> Loc {
+ sess.source_map().lookup_char_pos(self.0.lo())
+ }
+
+ crate fn hi(&self, sess: &Session) -> Loc {
+ sess.source_map().lookup_char_pos(self.0.hi())
+ }
+
+ crate fn cnum(&self, sess: &Session) -> CrateNum {
+ // FIXME: is there a time when the lo and hi crate would be different?
+ self.lo(sess).file.cnum
}
}
crate helpers: Vec<String>,
}
-#[derive(Clone, Debug)]
-crate struct Deprecation {
- crate since: Option<String>,
- crate note: Option<String>,
- crate is_since_rustc_version: bool,
-}
-
/// An type binding on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
/// `A: Send + Sync` in `Foo<A: Send + Sync>`).
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
crate struct TypeBinding {
- crate name: String,
+ crate name: Symbol,
crate kind: TypeBindingKind,
}
m.items.extend(primitives.iter().map(|&(def_id, prim)| {
Item::from_def_id_and_parts(
def_id,
- Some(prim.to_url_str().to_owned()),
+ Some(prim.as_sym()),
ItemKind::PrimitiveItem(prim),
cx,
)
cx: &DocContext<'_>,
recurse: i32,
) -> FxHashSet<(Type, TypeKind)> {
- let arg_s = arg.print().to_string();
let mut res = FxHashSet::default();
if recurse >= 10 {
// FIXME: remove this whole recurse thing when the recursion bug is fixed
return res;
}
if arg.is_full_generic() {
+ let arg_s = Symbol::intern(&arg.print().to_string());
if let Some(where_pred) = generics.where_predicates.iter().find(|g| match g {
&WherePredicate::BoundPredicate { ref ty, .. } => ty.def_id() == arg.def_id(),
_ => false,
}
}
-crate fn name_from_pat(p: &hir::Pat<'_>) -> String {
+crate fn name_from_pat(p: &hir::Pat<'_>) -> Symbol {
use rustc_hir::*;
debug!("trying to get a name from pattern: {:?}", p);
- match p.kind {
- PatKind::Wild => "_".to_string(),
- PatKind::Binding(_, _, ident, _) => ident.to_string(),
+ Symbol::intern(&match p.kind {
+ PatKind::Wild => return kw::Underscore,
+ PatKind::Binding(_, _, ident, _) => return ident.name,
PatKind::TupleStruct(ref p, ..) | PatKind::Path(ref p) => qpath_to_string(p),
PatKind::Struct(ref name, ref fields, etc) => format!(
"{} {{ {}{} }}",
.join(", "),
if etc { ", .." } else { "" }
),
- PatKind::Or(ref pats) => {
- pats.iter().map(|p| name_from_pat(&**p)).collect::<Vec<String>>().join(" | ")
- }
+ PatKind::Or(ref pats) => pats
+ .iter()
+ .map(|p| name_from_pat(&**p).to_string())
+ .collect::<Vec<String>>()
+ .join(" | "),
PatKind::Tuple(ref elts, _) => format!(
"({})",
- elts.iter().map(|p| name_from_pat(&**p)).collect::<Vec<String>>().join(", ")
+ elts.iter()
+ .map(|p| name_from_pat(&**p).to_string())
+ .collect::<Vec<String>>()
+ .join(", ")
),
- PatKind::Box(ref p) => name_from_pat(&**p),
- PatKind::Ref(ref p, _) => name_from_pat(&**p),
+ PatKind::Box(ref p) => return name_from_pat(&**p),
+ PatKind::Ref(ref p, _) => return name_from_pat(&**p),
PatKind::Lit(..) => {
warn!(
"tried to get argument name from PatKind::Lit, which is silly in function arguments"
);
- "()".to_string()
+ return Symbol::intern("()");
}
PatKind::Range(..) => panic!(
"tried to get argument name from PatKind::Range, \
which is not allowed in function arguments"
),
PatKind::Slice(ref begin, ref mid, ref end) => {
- let begin = begin.iter().map(|p| name_from_pat(&**p));
+ let begin = begin.iter().map(|p| name_from_pat(&**p).to_string());
let mid = mid.as_ref().map(|p| format!("..{}", name_from_pat(&**p))).into_iter();
- let end = end.iter().map(|p| name_from_pat(&**p));
+ let end = end.iter().map(|p| name_from_pat(&**p).to_string());
format!("[{}]", begin.chain(mid).chain(end).collect::<Vec<_>>().join(", "))
}
- }
+ })
}
crate fn print_const(cx: &DocContext<'_>, n: &'tcx ty::Const<'_>) -> String {
let is_generic = match path.res {
Res::PrimTy(p) => return Primitive(PrimitiveType::from(p)),
Res::SelfTy(..) if path.segments.len() == 1 => {
- return Generic(kw::SelfUpper.to_string());
+ return Generic(kw::SelfUpper);
}
Res::Def(DefKind::TyParam, _) if path.segments.len() == 1 => {
- return Generic(format!("{:#}", path.print()));
+ return Generic(Symbol::intern(&format!("{:#}", path.print())));
}
Res::SelfTy(..) | Res::Def(DefKind::TyParam | DefKind::AssocTy, _) => true,
_ => false,
matches
.opt_strs("default-setting")
.iter()
- .map(|s| {
- let mut kv = s.splitn(2, '=');
- // never panics because `splitn` always returns at least one element
- let k = kv.next().unwrap().to_string();
- let v = kv.next().unwrap_or("true").to_string();
- (k, v)
+ .map(|s| match s.split_once('=') {
+ None => (s.clone(), "true".to_string()),
+ Some((k, v)) => (k.to_string(), v.to_string()),
})
.collect(),
];
) -> Result<BTreeMap<String, String>, &'static str> {
let mut externs = BTreeMap::new();
for arg in &matches.opt_strs("extern-html-root-url") {
- let mut parts = arg.splitn(2, '=');
- let name = parts.next().ok_or("--extern-html-root-url must not be empty")?;
- let url = parts.next().ok_or("--extern-html-root-url must be of the form name=url")?;
+ let (name, url) =
+ arg.split_once('=').ok_or("--extern-html-root-url must be of the form name=url")?;
externs.insert(name.to_string(), url.to_string());
}
-
Ok(externs)
}
use rustc_hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX};
use rustc_middle::middle::privacy::AccessLevels;
use rustc_span::source_map::FileName;
+use rustc_span::Symbol;
use crate::clean::{self, GetDefId};
use crate::config::RenderInfo;
crate implementors: FxHashMap<DefId, Vec<Impl>>,
/// Cache of where external crate documentation can be found.
- crate extern_locations: FxHashMap<CrateNum, (String, PathBuf, ExternalLocation)>,
+ crate extern_locations: FxHashMap<CrateNum, (Symbol, PathBuf, ExternalLocation)>,
/// Cache of where documentation for primitives can be found.
crate primitive_locations: FxHashMap<clean::PrimitiveType, DefId>,
},
_ => PathBuf::new(),
};
- let extern_url = extern_html_root_urls.get(&e.name).map(|u| &**u);
+ let extern_url = extern_html_root_urls.get(&*e.name.as_str()).map(|u| &**u);
cache
.extern_locations
- .insert(n, (e.name.clone(), src_root, extern_location(e, extern_url, &dst)));
+ .insert(n, (e.name, src_root, extern_location(e, extern_url, &dst)));
let did = DefId { krate: n, index: CRATE_DEF_INDEX };
cache.external_paths.insert(did, (vec![e.name.to_string()], ItemType::Module));
cache.primitive_locations.insert(prim, def_id);
}
- cache.stack.push(krate.name.clone());
+ cache.stack.push(krate.name.to_string());
krate = cache.fold_crate(krate);
for (trait_did, dids, impl_) in cache.orphan_trait_impls.drain(..) {
// Keep track of the fully qualified path for this item.
let pushed = match item.name {
- Some(ref n) if !n.is_empty() => {
+ Some(n) if !n.is_empty() => {
self.stack.push(n.to_string());
true
}
use std::sync::Arc;
+use rustc_data_structures::sync::Lrc;
+use rustc_session::Session;
use rustc_span::edition::Edition;
use crate::clean;
render_info: RenderInfo,
edition: Edition,
cache: &mut Cache,
+ sess: Lrc<Session>,
) -> Result<(Self, clean::Crate), Error>;
/// Renders a single non-module item. This means no recursive sub-item rendering is required.
render_info: RenderInfo,
diag: &rustc_errors::Handler,
edition: Edition,
+ sess: Lrc<Session>,
) -> Result<(), Error> {
let (krate, mut cache) = Cache::from_krate(
render_info.clone(),
);
let (mut format_renderer, mut krate) =
- T::init(krate, options, render_info, edition, &mut cache)?;
+ T::init(krate, options, render_info, edition, &mut cache, sess)?;
let cache = Arc::new(cache);
// Freeze the cache now that the index has been built. Put an Arc into TLS for future
None => return Ok(()),
};
- item.name = Some(krate.name.clone());
+ item.name = Some(krate.name);
// Render the crate documentation
let mut work = vec![(format_renderer.clone(), item)];
display_fn(move |f| match self.kind {
clean::GenericParamDefKind::Lifetime => write!(f, "{}", self.name),
clean::GenericParamDefKind::Type { ref bounds, ref default, .. } => {
- f.write_str(&self.name)?;
+ f.write_str(&*self.name.as_str())?;
if !bounds.is_empty() {
if f.alternate() {
Ok(())
}
clean::GenericParamDefKind::Const { ref ty, .. } => {
- f.write_str("const ")?;
- f.write_str(&self.name)?;
-
if f.alternate() {
- write!(f, ": {:#}", ty.print())
+ write!(f, "const {}: {:#}", self.name, ty.print())
} else {
- write!(f, ": {}", ty.print())
+ write!(f, "const {}: {}", self.name, ty.print())
}
}
})
fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter<'_>, use_absolute: bool) -> fmt::Result {
match *t {
- clean::Generic(ref name) => f.write_str(name),
+ clean::Generic(name) => write!(f, "{}", name),
clean::ResolvedPath { did, ref param_names, ref path, is_generic } => {
if param_names.is_some() {
f.write_str("dyn ")?;
impl clean::TypeBinding {
crate fn print(&self) -> impl fmt::Display + '_ {
display_fn(move |f| {
- f.write_str(&self.name)?;
+ f.write_str(&*self.name.as_str())?;
match self.kind {
clean::TypeBindingKind::Equality { ref ty } => {
if f.alternate() {
placeholder=\"Click or press ‘S’ to search, ‘?’ for more options…\" \
type=\"search\">\
</div>\
- <span class=\"help-button\">?</span>
+ <button type=\"button\" class=\"help-button\">?</button>
<a id=\"settings-menu\" href=\"{root_path}settings.html\">\
<img src=\"{static_root_path}wheel{suffix}.svg\" \
width=\"18\" \
let trimmed = s.trim();
if trimmed.starts_with("##") {
Line::Shown(Cow::Owned(s.replacen("##", "#", 1)))
- } else if trimmed.starts_with("# ") {
+ } else if let Some(stripped) = trimmed.strip_prefix("# ") {
// # text
- Line::Hidden(&trimmed[2..])
+ Line::Hidden(&stripped)
} else if trimmed == "#" {
// We cannot handle '#text' because it could be #[attr].
Line::Hidden("")
#[test]
fn test_lang_string_parse() {
- fn t(
- s: &str,
- should_panic: bool,
- no_run: bool,
- ignore: Ignore,
- rust: bool,
- test_harness: bool,
- compile_fail: bool,
- allow_fail: bool,
- error_codes: Vec<String>,
- edition: Option<Edition>,
- ) {
- assert_eq!(
- LangString::parse(s, ErrorCodes::Yes, true, None),
- LangString {
- should_panic,
- no_run,
- ignore,
- rust,
- test_harness,
- compile_fail,
- error_codes,
- original: s.to_owned(),
- allow_fail,
- edition,
- }
- )
+ fn t(lg: LangString) {
+ let s = &lg.original;
+ assert_eq!(LangString::parse(s, ErrorCodes::Yes, true, None), lg)
}
- let ignore_foo = Ignore::Some(vec!["foo".to_string()]);
- fn v() -> Vec<String> {
- Vec::new()
- }
-
- // marker | should_panic | no_run | ignore | rust | test_harness
- // | compile_fail | allow_fail | error_codes | edition
- t("", false, false, Ignore::None, true, false, false, false, v(), None);
- t("rust", false, false, Ignore::None, true, false, false, false, v(), None);
- t("sh", false, false, Ignore::None, false, false, false, false, v(), None);
- t("ignore", false, false, Ignore::All, true, false, false, false, v(), None);
- t("ignore-foo", false, false, ignore_foo, true, false, false, false, v(), None);
- t("should_panic", true, false, Ignore::None, true, false, false, false, v(), None);
- t("no_run", false, true, Ignore::None, true, false, false, false, v(), None);
- t("test_harness", false, false, Ignore::None, true, true, false, false, v(), None);
- t("compile_fail", false, true, Ignore::None, true, false, true, false, v(), None);
- t("allow_fail", false, false, Ignore::None, true, false, false, true, v(), None);
- t("{.no_run .example}", false, true, Ignore::None, true, false, false, false, v(), None);
- t("{.sh .should_panic}", true, false, Ignore::None, false, false, false, false, v(), None);
- t("{.example .rust}", false, false, Ignore::None, true, false, false, false, v(), None);
- t("{.test_harness .rust}", false, false, Ignore::None, true, true, false, false, v(), None);
- t("text, no_run", false, true, Ignore::None, false, false, false, false, v(), None);
- t("text,no_run", false, true, Ignore::None, false, false, false, false, v(), None);
- t(
- "edition2015",
- false,
- false,
- Ignore::None,
- true,
- false,
- false,
- false,
- v(),
- Some(Edition::Edition2015),
- );
- t(
- "edition2018",
- false,
- false,
- Ignore::None,
- true,
- false,
- false,
- false,
- v(),
- Some(Edition::Edition2018),
- );
+ t(LangString::all_false());
+ t(LangString { original: "rust".into(), ..LangString::all_false() });
+ t(LangString { original: "sh".into(), rust: false, ..LangString::all_false() });
+ t(LangString { original: "ignore".into(), ignore: Ignore::All, ..LangString::all_false() });
+ t(LangString {
+ original: "ignore-foo".into(),
+ ignore: Ignore::Some(vec!["foo".to_string()]),
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "should_panic".into(),
+ should_panic: true,
+ ..LangString::all_false()
+ });
+ t(LangString { original: "no_run".into(), no_run: true, ..LangString::all_false() });
+ t(LangString {
+ original: "test_harness".into(),
+ test_harness: true,
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "compile_fail".into(),
+ no_run: true,
+ compile_fail: true,
+ ..LangString::all_false()
+ });
+ t(LangString { original: "allow_fail".into(), allow_fail: true, ..LangString::all_false() });
+ t(LangString {
+ original: "{.no_run .example}".into(),
+ no_run: true,
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "{.sh .should_panic}".into(),
+ should_panic: true,
+ rust: false,
+ ..LangString::all_false()
+ });
+ t(LangString { original: "{.example .rust}".into(), ..LangString::all_false() });
+ t(LangString {
+ original: "{.test_harness .rust}".into(),
+ test_harness: true,
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "text, no_run".into(),
+ no_run: true,
+ rust: false,
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "text,no_run".into(),
+ no_run: true,
+ rust: false,
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "edition2015".into(),
+ edition: Some(Edition::Edition2015),
+ ..LangString::all_false()
+ });
+ t(LangString {
+ original: "edition2018".into(),
+ edition: Some(Edition::Edition2018),
+ ..LangString::all_false()
+ });
}
#[test]
) -> ExternalLocation {
use ExternalLocation::*;
// See if there's documentation generated into the local directory
- let local_location = dst.join(&e.name);
+ let local_location = dst.join(&*e.name.as_str());
if local_location.is_dir() {
return Local;
}
if let Some(&(ref fqp, _)) = paths.get(&did) {
search_index.push(IndexItem {
ty: item.type_(),
- name: item.name.clone().unwrap(),
+ name: item.name.unwrap().to_string(),
path: fqp[..fqp.len() - 1].join("::"),
desc: item.doc_value().map_or_else(|| String::new(), short_markdown_summary),
parent: Some(did),
});
Some(path_segment.name.clone())
}
- clean::Generic(ref s) if accept_generic => Some(s.clone()),
+ clean::Generic(s) if accept_generic => Some(s.to_string()),
clean::Primitive(ref p) => Some(format!("{:?}", p)),
clean::BorrowedRef { ref type_, .. } => get_index_type_name(type_, accept_generic),
// FIXME: add all from clean::Type.
use itertools::Itertools;
use rustc_ast_pretty::pprust;
-use rustc_attr::StabilityLevel;
+use rustc_attr::{Deprecation, StabilityLevel};
use rustc_data_structures::flock;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::Mutability;
use rustc_middle::middle::stability;
+use rustc_session::Session;
use rustc_span::edition::Edition;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::FileName;
-use rustc_span::symbol::{sym, Symbol};
+use rustc_span::symbol::{kw, sym, Symbol};
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
-use crate::clean::{self, AttributesExt, Deprecation, GetDefId, RenderedLink, SelfTy, TypeKind};
+use crate::clean::{self, AttributesExt, GetDefId, RenderedLink, SelfTy, TypeKind};
use crate::config::{RenderInfo, RenderOptions};
use crate::docfs::{DocFS, PathError};
use crate::doctree;
}
crate struct SharedContext {
+ crate sess: Lrc<Session>,
/// The path to the crate root source minus the file name.
/// Used for simplifying paths to the highlighted source code files.
crate src_root: PathBuf,
// `style-suffix.min.css`. Path::extension would just return `css`
// which would result in `style.min-suffix.css` which isn't what we
// want.
- let mut iter = filename.splitn(2, '.');
- let base = iter.next().unwrap();
- let ext = iter.next().unwrap();
- let filename = format!("{}{}.{}", base, self.shared.resource_suffix, ext,);
+ let (base, ext) = filename.split_once('.').unwrap();
+ let filename = format!("{}{}.{}", base, self.shared.resource_suffix, ext);
self.dst.join(&filename)
}
+
+ fn sess(&self) -> &Session {
+ &self.shared.sess
+ }
}
impl SharedContext {
_render_info: RenderInfo,
edition: Edition,
cache: &mut Cache,
+ sess: Lrc<Session>,
) -> Result<(Context, clean::Crate), Error> {
// need to save a copy of the options for rendering the index page
let md_opts = options.clone();
// If user passed in `--playground-url` arg, we fill in crate name here
let mut playground = None;
if let Some(url) = playground_url {
- playground = Some(markdown::Playground { crate_name: Some(krate.name.clone()), url });
+ playground =
+ Some(markdown::Playground { crate_name: Some(krate.name.to_string()), url });
}
let mut layout = layout::Layout {
logo: String::new(),
favicon: String::new(),
external_html,
default_settings,
- krate: krate.name.clone(),
+ krate: krate.name.to_string(),
css_file_extension: extension_css,
generate_search_filter,
};
}
(sym::html_playground_url, Some(s)) => {
playground = Some(markdown::Playground {
- crate_name: Some(krate.name.clone()),
+ crate_name: Some(krate.name.to_string()),
url: s.to_string(),
});
}
}
let (sender, receiver) = channel();
let mut scx = SharedContext {
+ sess,
collapsed: krate.collapsed,
src_root,
include_sources,
}
fn after_krate(&mut self, krate: &clean::Crate, cache: &Cache) -> Result<(), Error> {
- let final_file = self.dst.join(&krate.name).join("all.html");
+ let final_file = self.dst.join(&*krate.name.as_str()).join("all.html");
let settings_file = self.dst.join("settings.html");
let crate_name = krate.name.clone();
if !buf.is_empty() {
let name = item.name.as_ref().unwrap();
let item_type = item.type_();
- let file_name = &item_path(item_type, name);
+ let file_name = &item_path(item_type, &name.as_str());
self.shared.ensure_dir(&self.dst)?;
let joint_dst = self.dst.join(file_name);
self.shared.fs.write(&joint_dst, buf.as_bytes())?;
}
let dst = cx.dst.join(&format!("source-files{}.js", cx.shared.resource_suffix));
- let (mut all_sources, _krates) = try_err!(collect(&dst, &krate.name, "sourcesIndex"), &dst);
+ let (mut all_sources, _krates) =
+ try_err!(collect(&dst, &krate.name.as_str(), "sourcesIndex"), &dst);
all_sources.push(format!(
"sourcesIndex[\"{}\"] = {};",
&krate.name,
// Update the search index
let dst = cx.dst.join(&format!("search-index{}.js", cx.shared.resource_suffix));
- let (mut all_indexes, mut krates) = try_err!(collect_json(&dst, &krate.name), &dst);
+ let (mut all_indexes, mut krates) = try_err!(collect_json(&dst, &krate.name.as_str()), &dst);
all_indexes.push(search_index);
// Sort the indexes by crate so the file will be generated identically even
extra_scripts: &[],
static_extra_scripts: &[],
};
- krates.push(krate.name.clone());
+ krates.push(krate.name.to_string());
krates.sort();
krates.dedup();
mydst.push(&format!("{}.{}.js", remote_item_type, remote_path[remote_path.len() - 1]));
let (mut all_implementors, _) =
- try_err!(collect(&mydst, &krate.name, "implementors"), &mydst);
+ try_err!(collect(&mydst, &krate.name.as_str(), "implementors"), &mydst);
all_implementors.push(implementors);
// Sort the implementors by crate so the file will be generated
// identically even with rustdoc running in parallel.
if !title.is_empty() {
title.push_str("::");
}
- title.push_str(it.name.as_ref().unwrap());
+ title.push_str(&it.name.unwrap().as_str());
}
title.push_str(" - Rust");
let tyname = it.type_();
/// of their crate documentation isn't known.
fn src_href(&self, item: &clean::Item, cache: &Cache) -> Option<String> {
let mut root = self.root_path();
-
let mut path = String::new();
+ let cnum = item.source.cnum(self.sess());
// We can safely ignore synthetic `SourceFile`s.
- let file = match item.source.filename {
+ let file = match item.source.filename(self.sess()) {
FileName::Real(ref path) => path.local_path().to_path_buf(),
_ => return None,
};
let file = &file;
- let (krate, path) = if item.source.cnum == LOCAL_CRATE {
+ let symbol;
+ let (krate, path) = if cnum == LOCAL_CRATE {
if let Some(path) = self.shared.local_sources.get(file) {
- (&self.shared.layout.krate, path)
+ (self.shared.layout.krate.as_str(), path)
} else {
return None;
}
} else {
- let (krate, src_root) = match *cache.extern_locations.get(&item.source.cnum)? {
- (ref name, ref src, ExternalLocation::Local) => (name, src),
- (ref name, ref src, ExternalLocation::Remote(ref s)) => {
+ let (krate, src_root) = match *cache.extern_locations.get(&cnum)? {
+ (name, ref src, ExternalLocation::Local) => (name, src),
+ (name, ref src, ExternalLocation::Remote(ref s)) => {
root = s.to_string();
(name, src)
}
let mut fname = file.file_name().expect("source has no filename").to_os_string();
fname.push(".html");
path.push_str(&fname.to_string_lossy());
- (krate, &path)
+ symbol = krate.as_str();
+ (&*symbol, &path)
};
- let lines = if item.source.loline == item.source.hiline {
- item.source.loline.to_string()
- } else {
- format!("{}-{}", item.source.loline, item.source.hiline)
- };
+ let loline = item.source.lo(self.sess()).line;
+ let hiline = item.source.hi(self.sess()).line;
+ let lines =
+ if loline == hiline { loline.to_string() } else { format!("{}-{}", loline, hiline) };
Some(format!(
"{root}src/{krate}/{path}#{lines}",
root = Escape(&root),
fn full_path(cx: &Context, item: &clean::Item) -> String {
let mut s = cx.current.join("::");
s.push_str("::");
- s.push_str(item.name.as_ref().unwrap());
+ s.push_str(&item.name.unwrap().as_str());
s
}
(true, false) => return Ordering::Greater,
}
}
- let lhs = i1.name.as_ref().map_or("", |s| &**s);
- let rhs = i2.name.as_ref().map_or("", |s| &**s);
- compare_names(lhs, rhs)
+ let lhs = i1.name.unwrap_or(kw::Invalid).as_str();
+ let rhs = i2.name.unwrap_or(kw::Invalid).as_str();
+ compare_names(&lhs, &rhs)
}
if cx.shared.sort_modules_alphabetically {
add = add,
stab = stab.unwrap_or_else(String::new),
unsafety_flag = unsafety_flag,
- href = item_path(myitem.type_(), myitem.name.as_ref().unwrap()),
+ href = item_path(myitem.type_(), &myitem.name.unwrap().as_str()),
title = [full_path(cx, myitem), myitem.type_().to_string()]
.iter()
.filter_map(|s| if !s.is_empty() { Some(s.as_str()) } else { None })
// The trailing space after each tag is to space it properly against the rest of the docs.
if let Some(depr) = &item.deprecation {
let mut message = "Deprecated";
- if !stability::deprecation_in_effect(depr.is_since_rustc_version, depr.since.as_deref()) {
+ if !stability::deprecation_in_effect(
+ depr.is_since_rustc_version,
+ depr.since.map(|s| s.as_str()).as_deref(),
+ ) {
message = "Deprecation planned";
}
tags += &tag_html("deprecated", "", message);
let mut extra_info = vec![];
let error_codes = cx.shared.codes;
- if let Some(Deprecation { ref note, ref since, is_since_rustc_version }) = item.deprecation {
+ if let Some(Deprecation { note, since, is_since_rustc_version, suggestion: _ }) =
+ item.deprecation
+ {
// We display deprecation messages for #[deprecated] and #[rustc_deprecated]
// but only display the future-deprecation messages for #[rustc_deprecated].
let mut message = if let Some(since) = since {
+ let since = &since.as_str();
if !stability::deprecation_in_effect(is_since_rustc_version, Some(since)) {
- format!("Deprecating in {}", Escape(&since))
+ if *since == "TBD" {
+ format!("Deprecating in a future Rust version")
+ } else {
+ format!("Deprecating in {}", Escape(since))
+ }
} else {
- format!("Deprecated since {}", Escape(&since))
+ format!("Deprecated since {}", Escape(since))
}
} else {
String::from("Deprecated")
};
if let Some(note) = note {
+ let note = note.as_str();
let mut ids = cx.id_map.borrow_mut();
let html = MarkdownHtml(
¬e,
fn trait_item(w: &mut Buffer, cx: &Context, m: &clean::Item, t: &clean::Item, cache: &Cache) {
let name = m.name.as_ref().unwrap();
- info!("Documenting {} on {}", name, t.name.as_deref().unwrap_or_default());
+ info!("Documenting {} on {:?}", name, t.name);
let item_type = m.type_();
let id = cx.derive_id(format!("{}.{}", item_type, name));
write!(w, "<h3 id=\"{id}\" class=\"method\"><code>", id = id,);
AssocItemLink::GotoSource(did, provided_methods) => {
// We're creating a link from an impl-item to the corresponding
// trait-item and need to map the anchored type accordingly.
- let ty = if provided_methods.contains(name) {
+ let ty = if provided_methods.contains(&*name.as_str()) {
ItemType::Method
} else {
ItemType::TyMethod
what: AssocItemRender<'_>,
cache: &Cache,
) {
- info!(
- "Documenting associated items of {}",
- containing_item.name.as_deref().unwrap_or_default()
- );
+ info!("Documenting associated items of {:?}", containing_item.name);
let v = match cache.impls.get(&it) {
Some(v) => v,
None => return,
ty: \"{ty}\", \
relpath: \"{path}\"\
}};</script>",
- name = it.name.as_ref().map(|x| &x[..]).unwrap_or(""),
+ name = it.name.unwrap_or(kw::Invalid),
ty = it.type_(),
path = relpath
);
use crate::html::layout;
use crate::html::render::{SharedContext, BASIC_KEYWORDS};
use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_session::Session;
use rustc_span::source_map::FileName;
use std::ffi::OsStr;
use std::fs;
krate: clean::Crate,
) -> Result<clean::Crate, Error> {
info!("emitting source files");
- let dst = dst.join("src").join(&krate.name);
+ let dst = dst.join("src").join(&*krate.name.as_str());
scx.ensure_dir(&dst)?;
let mut folder = SourceCollector { dst, scx };
Ok(folder.fold_crate(krate))
impl<'a> DocFolder for SourceCollector<'a> {
fn fold_item(&mut self, item: clean::Item) -> Option<clean::Item> {
+ // If we're not rendering sources, there's nothing to do.
// If we're including source files, and we haven't seen this file yet,
// then we need to render it out to the filesystem.
if self.scx.include_sources
// skip all synthetic "files"
- && item.source.filename.is_real()
+ && item.source.filename(self.sess()).is_real()
// skip non-local files
- && item.source.cnum == LOCAL_CRATE
+ && item.source.cnum(self.sess()) == LOCAL_CRATE
{
+ let filename = item.source.filename(self.sess());
// If it turns out that we couldn't read this file, then we probably
// can't read any of the files (generating html output from json or
// something like that), so just don't include sources for the
// entire crate. The other option is maintaining this mapping on a
// per-file basis, but that's probably not worth it...
- self.scx.include_sources = match self.emit_source(&item.source.filename) {
+ self.scx.include_sources = match self.emit_source(&filename) {
Ok(()) => true,
Err(e) => {
println!(
"warning: source code was requested to be rendered, \
but processing `{}` had an error: {}",
- item.source.filename, e
+ filename, e
);
println!(" skipping rendering of source code");
false
}
};
}
+ // FIXME: if `include_sources` isn't set and `DocFolder` didn't require consuming the crate
+ // by value, we could return `None` here without having to walk the rest of the crate.
Some(self.fold_item_recur(item))
}
}
impl<'a> SourceCollector<'a> {
+ fn sess(&self) -> &Session {
+ &self.scx.sess
+ }
+
/// Renders the given filename into its corresponding HTML source file.
fn emit_source(&mut self, filename: &FileName) -> Result<(), Error> {
let p = match *filename {
function makeTabHeader(tabNb, text, nbElems) {
if (currentTab === tabNb) {
- return "<div class=\"selected\">" + text +
- " <div class=\"count\">(" + nbElems + ")</div></div>";
+ return "<button class=\"selected\">" + text +
+ " <div class=\"count\">(" + nbElems + ")</div></button>";
}
- return "<div>" + text + " <div class=\"count\">(" + nbElems + ")</div></div>";
+ return "<button>" + text + " <div class=\"count\">(" + nbElems + ")</div></button>";
}
function showResults(results) {
function implHider(addOrRemove, fullHide) {
return function(n) {
- var is_method = hasClass(n, "method") || fullHide;
- if (is_method || hasClass(n, "type")) {
- if (is_method === true) {
+ var shouldHide =
+ fullHide === true ||
+ hasClass(n, "method") === true ||
+ hasClass(n, "associatedconstant") === true;
+ if (shouldHide === true || hasClass(n, "type") === true) {
+ if (shouldHide === true) {
if (addOrRemove) {
addClass(n, "hidden-by-impl-hider");
} else {
+/*
+This whole CSS file is only used when rustdoc is rendered with JavaScript disabled. Since a lot
+of content is hidden by default (depending on the settings too), we have to override some of the
+rules.
+*/
+
#main > h2 + div, #main > h2 + h3, #main > h3 + div {
display: block;
}
#main > h2 + h3 {
display: flex;
}
+
+#main .impl-items .hidden {
+ display: block !important;
+}
height: 35px;
}
-#titles > div {
+#titles > button {
float: left;
width: 33.3%;
text-align: center;
font-size: 18px;
cursor: pointer;
+ border: 0;
border-top: 2px solid;
}
-#titles > div:not(:last-child) {
+#titles > button:not(:last-child) {
margin-right: 1px;
width: calc(33.3% - 1px);
}
-#titles > div > div.count {
+#titles > button > div.count {
display: inline-block;
font-size: 16px;
}
top: 24px;
}
- #titles > div > div.count {
+ #titles > button > div.count {
float: left;
width: 100%;
}
}
@media (max-width: 416px) {
- #titles, #titles > div {
+ #titles, #titles > button {
height: 73px;
}
+ #main {
+ margin-top: 100px;
+ }
+
#main > table:not(.table-display) td {
word-break: break-word;
- min-width: 10%;
+ width: 50%;
}
.search-container > div {
border-color: #5c6773;
}
-#titles > div.selected {
+#titles > button.selected {
background-color: #141920 !important;
border-bottom: 1px solid #ffb44c !important;
border-top: none;
}
-#titles > div:not(.selected) {
+#titles > button:not(.selected) {
background-color: transparent !important;
border: none;
}
-#titles > div:hover {
+#titles > button:hover {
border-bottom: 1px solid rgba(242, 151, 24, 0.3);
}
-#titles > div > div.count {
+#titles > button > div.count {
color: #888;
}
.block a.current.derive,.content span.macro,.content a.macro,.block a.current.macro {}
.content .highlighted.trait {}
.content span.struct,.content a.struct,.block a.current.struct {}
-#titles>div:hover,#titles>div.selected {}
+#titles>button:hover,#titles>button.selected {}
.content .highlighted.traitalias {}
.content span.type,.content a.type,.block a.current.type {}
.content span.union,.content a.union,.block a.current.union {}
border-color: #777;
}
-#titles > div:not(.selected) {
+#titles > button:not(.selected) {
background-color: #252525;
border-top-color: #252525;
}
-#titles > div:hover, #titles > div.selected {
+#titles > button:hover, #titles > button.selected {
border-top-color: #0089ff;
+ background-color: #353535;
}
-#titles > div > div.count {
+#titles > button > div.count {
color: #888;
}
border-color: #999;
}
-#titles > div:not(.selected) {
+#titles > button:not(.selected) {
background-color: #e6e6e6;
border-top-color: #e6e6e6;
}
-#titles > div:hover, #titles > div.selected {
+#titles > button:hover, #titles > button.selected {
+ background-color: #ffffff;
border-top-color: #0089ff;
}
-#titles > div > div.count {
+#titles > button > div.count {
color: #888;
}
use rustc_ast::ast;
use rustc_span::def_id::{DefId, CRATE_DEF_INDEX};
+use rustc_span::Pos;
use crate::clean;
use crate::doctree;
use crate::formats::item_type::ItemType;
use crate::json::types::*;
+use crate::json::JsonRenderer;
-impl From<clean::Item> for Option<Item> {
- fn from(item: clean::Item) -> Self {
+impl JsonRenderer {
+ pub(super) fn convert_item(&self, item: clean::Item) -> Option<Item> {
let item_type = ItemType::from(&item);
let clean::Item {
source,
_ => Some(Item {
id: def_id.into(),
crate_id: def_id.krate.as_u32(),
- name,
- source: source.into(),
+ name: name.map(|sym| sym.to_string()),
+ source: self.convert_span(source),
visibility: visibility.into(),
docs: attrs.collapsed_doc_value().unwrap_or_default(),
links: attrs
}),
}
}
-}
-impl From<clean::Span> for Option<Span> {
- fn from(span: clean::Span) -> Self {
- let clean::Span { loline, locol, hiline, hicol, .. } = span;
- match span.filename {
- rustc_span::FileName::Real(name) => Some(Span {
- filename: match name {
- rustc_span::RealFileName::Named(path) => path,
- rustc_span::RealFileName::Devirtualized { local_path, virtual_name: _ } => {
- local_path
- }
- },
- begin: (loline, locol),
- end: (hiline, hicol),
- }),
+ fn convert_span(&self, span: clean::Span) -> Option<Span> {
+ match span.filename(&self.sess) {
+ rustc_span::FileName::Real(name) => {
+ let hi = span.hi(&self.sess);
+ let lo = span.lo(&self.sess);
+ Some(Span {
+ filename: match name {
+ rustc_span::RealFileName::Named(path) => path,
+ rustc_span::RealFileName::Devirtualized { local_path, virtual_name: _ } => {
+ local_path
+ }
+ },
+ begin: (lo.line, lo.col.to_usize()),
+ end: (hi.line, hi.col.to_usize()),
+ })
+ }
_ => None,
}
}
}
-impl From<clean::Deprecation> for Deprecation {
- fn from(deprecation: clean::Deprecation) -> Self {
- let clean::Deprecation { since, note, is_since_rustc_version: _ } = deprecation;
- Deprecation { since, note }
+impl From<rustc_attr::Deprecation> for Deprecation {
+ fn from(deprecation: rustc_attr::Deprecation) -> Self {
+ #[rustfmt::skip]
+ let rustc_attr::Deprecation { since, note, is_since_rustc_version: _, suggestion: _ } = deprecation;
+ Deprecation { since: since.map(|s| s.to_string()), note: note.map(|s| s.to_string()) }
}
}
impl From<clean::TypeBinding> for TypeBinding {
fn from(binding: clean::TypeBinding) -> Self {
- TypeBinding { name: binding.name, binding: binding.kind.into() }
+ TypeBinding { name: binding.name.to_string(), binding: binding.kind.into() }
}
}
impl From<clean::GenericParamDef> for GenericParamDef {
fn from(generic_param: clean::GenericParamDef) -> Self {
- GenericParamDef { name: generic_param.name, kind: generic_param.kind.into() }
+ GenericParamDef { name: generic_param.name.to_string(), kind: generic_param.kind.into() }
}
}
.map(|v| v.into_iter().map(Into::into).collect())
.unwrap_or_default(),
},
- Generic(s) => Type::Generic(s),
+ Generic(s) => Type::Generic(s.to_string()),
Primitive(p) => Type::Primitive(p.as_str().to_string()),
BareFunction(f) => Type::FunctionPointer(Box::new((*f).into())),
Tuple(t) => Type::Tuple(t.into_iter().map(Into::into).collect()),
type_: Box::new((*type_).into()),
},
QPath { name, self_type, trait_ } => Type::QualifiedPath {
- name,
+ name: name.to_string(),
self_type: Box::new((*self_type).into()),
trait_: Box::new((*trait_).into()),
},
fn from(decl: clean::FnDecl) -> Self {
let clean::FnDecl { inputs, output, c_variadic, attrs: _ } = decl;
FnDecl {
- inputs: inputs.values.into_iter().map(|arg| (arg.name, arg.type_.into())).collect(),
+ inputs: inputs
+ .values
+ .into_iter()
+ .map(|arg| (arg.name.to_string(), arg.type_.into()))
+ .collect(),
output: match output {
clean::FnRetTy::Return(t) => Some(t.into()),
clean::FnRetTy::DefaultReturn => None,
use std::rc::Rc;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_session::Session;
use rustc_span::edition::Edition;
use crate::clean;
#[derive(Clone)]
crate struct JsonRenderer {
+ sess: Lrc<Session>,
/// A mapping of IDs that contains all local items for this crate which gets output as a top
/// level field of the JSON blob.
index: Rc<RefCell<FxHashMap<types::Id, types::Item>>>,
_render_info: RenderInfo,
_edition: Edition,
_cache: &mut Cache,
+ sess: Lrc<Session>,
) -> Result<(Self, clean::Crate), Error> {
debug!("Initializing json renderer");
Ok((
JsonRenderer {
+ sess,
index: Rc::new(RefCell::new(FxHashMap::default())),
out_path: options.output,
},
item.kind.inner_items().for_each(|i| self.item(i.clone(), cache).unwrap());
let id = item.def_id;
- if let Some(mut new_item) = item.into(): Option<types::Item> {
+ if let Some(mut new_item) = self.convert_item(item) {
if let types::ItemEnum::TraitItem(ref mut t) = new_item.inner {
t.implementors = self.get_trait_implementors(id, cache)
} else if let types::ItemEnum::StructItem(ref mut s) = new_item.inner {
(
k.as_u32(),
types::ExternalCrate {
- name: v.0.clone(),
+ name: v.0.to_string(),
html_root_url: match &v.2 {
ExternalLocation::Remote(s) => Some(s.clone()),
_ => None,
#![feature(once_cell)]
#![feature(type_ascription)]
#![feature(split_inclusive)]
+#![feature(str_split_once)]
#![recursion_limit = "256"]
#[macro_use]
use std::env;
use std::process;
+use rustc_data_structures::sync::Lrc;
use rustc_errors::ErrorReported;
use rustc_session::config::{make_crate_type_option, ErrorOutputType, RustcOptGroup};
use rustc_session::getopts;
+use rustc_session::Session;
use rustc_session::{early_error, early_warn};
#[macro_use]
.collect()
}
-fn stable<F>(name: &'static str, f: F) -> RustcOptGroup
-where
- F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
-{
- RustcOptGroup::stable(name, f)
-}
-
-fn unstable<F>(name: &'static str, f: F) -> RustcOptGroup
-where
- F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
-{
- RustcOptGroup::unstable(name, f)
-}
-
fn opts() -> Vec<RustcOptGroup> {
+ let stable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::stable;
+ let unstable: fn(_, fn(&mut getopts::Options) -> &mut _) -> _ = RustcOptGroup::unstable;
vec![
stable("h", |o| o.optflag("h", "help", "show this help message")),
stable("V", |o| o.optflag("V", "version", "print rustdoc's version")),
render_info: config::RenderInfo,
diag: &rustc_errors::Handler,
edition: rustc_span::edition::Edition,
+ sess: Lrc<Session>,
) -> MainResult {
- match formats::run_format::<T>(krate, renderopts, render_info, &diag, edition) {
+ match formats::run_format::<T>(krate, renderopts, render_info, &diag, edition, sess) {
Ok(_) => Ok(()),
Err(e) => {
let mut msg = diag.struct_err(&format!("couldn't generate documentation: {}", e.error));
// compiler all the way through the analysis passes. The rustdoc output is
// then generated from the cleaned AST of the crate. This runs all the
// plug/cleaning passes.
- let crate_name = options.crate_name.clone();
let crate_version = options.crate_version.clone();
let output_format = options.output_format;
let (mut krate, renderinfo, renderopts, sess) = core::run_core(options);
info!("finished with rustc");
- if let Some(name) = crate_name {
- krate.name = name
- }
-
krate.version = crate_version;
if show_coverage {
info!("going to format");
let (error_format, edition, debugging_options) = diag_opts;
let diag = core::new_handler(error_format, None, &debugging_options);
+ let sess_time = sess.clone();
match output_format {
- None | Some(config::OutputFormat::Html) => sess.time("render_html", || {
- run_renderer::<html::render::Context>(krate, renderopts, renderinfo, &diag, edition)
+ None | Some(config::OutputFormat::Html) => sess_time.time("render_html", || {
+ run_renderer::<html::render::Context>(
+ krate, renderopts, renderinfo, &diag, edition, sess,
+ )
}),
- Some(config::OutputFormat::Json) => sess.time("render_json", || {
- run_renderer::<json::JsonRenderer>(krate, renderopts, renderinfo, &diag, edition)
+ Some(config::OutputFormat::Json) => sess_time.time("render_json", || {
+ run_renderer::<json::JsonRenderer>(krate, renderopts, renderinfo, &diag, edition, sess)
}),
}
}
return Some(i);
}
clean::ImplItem(ref impl_) => {
+ let filename = i.source.filename(self.ctx.sess());
if let Some(ref tr) = impl_.trait_ {
- debug!(
- "impl {:#} for {:#} in {}",
- tr.print(),
- impl_.for_.print(),
- i.source.filename
- );
+ debug!("impl {:#} for {:#} in {}", tr.print(), impl_.for_.print(), filename,);
// don't count trait impls, the missing-docs lint doesn't so we shouldn't
// either
// inherent impls *can* be documented, and those docs show up, but in most
// cases it doesn't make sense, as all methods on a type are in one single
// impl block
- debug!("impl {:#} in {}", impl_.for_.print(), i.source.filename);
+ debug!("impl {:#} in {}", impl_.for_.print(), filename);
}
}
_ => {
None,
);
+ let filename = i.source.filename(self.ctx.sess());
let has_doc_example = tests.found_tests != 0;
let hir_id = self.ctx.tcx.hir().local_def_id_to_hir_id(i.def_id.expect_local());
let (level, source) = self.ctx.tcx.lint_level_at_node(MISSING_DOCS, hir_id);
// unless the user had an explicit `allow`
let should_have_docs =
level != lint::Level::Allow || matches!(source, LintSource::Default);
- debug!("counting {:?} {:?} in {}", i.type_(), i.name, i.source.filename);
- self.items.entry(i.source.filename.clone()).or_default().count_item(
+ debug!("counting {:?} {:?} in {}", i.type_(), i.name, filename);
+ self.items.entry(filename).or_default().count_item(
has_docs,
has_doc_example,
should_have_doc_example(self.ctx, &i),
//! [RFC 1946]: https://github.com/rust-lang/rfcs/blob/master/text/1946-intra-rustdoc-links.md
use rustc_ast as ast;
-use rustc_data_structures::stable_set::FxHashSet;
+use rustc_data_structures::{fx::FxHashMap, stable_set::FxHashSet};
use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_expand::base::SyntaxExtensionKind;
use rustc_hir as hir;
use std::mem;
use std::ops::Range;
-use crate::clean::{self, Crate, GetDefId, Item, ItemLink, PrimitiveType};
+use crate::clean::{self, Crate, Item, ItemLink, PrimitiveType};
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::markdown_links;
RustdocAnchorConflict(Res),
}
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+struct ResolutionInfo {
+ module_id: DefId,
+ dis: Option<Disambiguator>,
+ path_str: String,
+ extra_fragment: Option<String>,
+}
+
+struct DiagnosticInfo<'a> {
+ item: &'a Item,
+ dox: &'a str,
+ ori_link: &'a str,
+ link_range: Option<Range<usize>>,
+}
+
+#[derive(Clone, Debug, Hash)]
+struct CachedLink {
+ pub res: (Res, Option<String>),
+ pub side_channel: Option<(DefKind, DefId)>,
+}
+
struct LinkCollector<'a, 'tcx> {
cx: &'a DocContext<'tcx>,
/// A stack of modules used to decide what scope to resolve in.
/// because `clean` and the disambiguator code expect them to be different.
/// See the code for associated items on inherent impls for details.
kind_side_channel: Cell<Option<(DefKind, DefId)>>,
+ /// Cache the resolved links so we can avoid resolving (and emitting errors for) the same link
+ visited_links: FxHashMap<ResolutionInfo, CachedLink>,
}
impl<'a, 'tcx> LinkCollector<'a, 'tcx> {
fn new(cx: &'a DocContext<'tcx>) -> Self {
- LinkCollector { cx, mod_ids: Vec::new(), kind_side_channel: Cell::new(None) }
+ LinkCollector {
+ cx,
+ mod_ids: Vec::new(),
+ kind_side_channel: Cell::new(None),
+ visited_links: FxHashMap::default(),
+ }
}
/// Given a full link, parse it as an [enum struct variant].
// Try looking for methods and associated items.
let mut split = path_str.rsplitn(2, "::");
- // this can be an `unwrap()` because we ensure the link is never empty
- let (item_str, item_name) = split.next().map(|i| (i, Symbol::intern(i))).unwrap();
+ // NB: `split`'s first element is always defined, even if the delimiter was not present.
+ // NB: `item_str` could be empty when resolving in the root namespace (e.g. `::std`).
+ let item_str = split.next().unwrap();
+ let item_name = Symbol::intern(item_str);
let path_root = split
.next()
.map(|f| f.to_owned())
ns: Namespace,
cx: &DocContext<'_>,
) -> Option<(ty::AssocKind, DefId)> {
- let ty = cx.tcx.type_of(did);
- // First consider blanket impls: `impl From<T> for T`
- let implicit_impls = crate::clean::get_auto_trait_and_blanket_impls(cx, ty, did);
- let mut candidates: Vec<_> = implicit_impls
- .flat_map(|impl_outer| {
- match impl_outer.kind {
- clean::ImplItem(impl_) => {
- debug!("considering auto or blanket impl for trait {:?}", impl_.trait_);
- // Give precedence to methods that were overridden
- if !impl_.provided_trait_methods.contains(&*item_name.as_str()) {
- let mut items = impl_.items.into_iter().filter_map(|assoc| {
- if assoc.name.as_deref() != Some(&*item_name.as_str()) {
- return None;
- }
- let kind = assoc
- .kind
- .as_assoc_kind()
- .expect("inner items for a trait should be associated items");
- if kind.namespace() != ns {
- return None;
- }
-
- trace!("considering associated item {:?}", assoc.kind);
- // We have a slight issue: normal methods come from `clean` types,
- // but provided methods come directly from `tcx`.
- // Fortunately, we don't need the whole method, we just need to know
- // what kind of associated item it is.
- Some((kind, assoc.def_id))
- });
- let assoc = items.next();
- debug_assert_eq!(items.count(), 0);
- assoc
- } else {
- // These are provided methods or default types:
- // ```
- // trait T {
- // type A = usize;
- // fn has_default() -> A { 0 }
- // }
- // ```
- let trait_ = impl_.trait_.unwrap().def_id().unwrap();
- cx.tcx
- .associated_items(trait_)
- .find_by_name_and_namespace(
- cx.tcx,
- Ident::with_dummy_span(item_name),
- ns,
- trait_,
- )
- .map(|assoc| (assoc.kind, assoc.def_id))
- }
- }
- _ => panic!("get_impls returned something that wasn't an impl"),
- }
- })
- .collect();
+ // FIXME: this should also consider blanket impls (`impl<T> X for T`). Unfortunately
+ // `get_auto_trait_and_blanket_impls` is broken because the caching behavior is wrong. In the
+ // meantime, just don't look for these blanket impls.
// Next consider explicit impls: `impl MyTrait for MyType`
// Give precedence to inherent impls.
- if candidates.is_empty() {
- let traits = traits_implemented_by(cx, did, module);
- debug!("considering traits {:?}", traits);
- candidates.extend(traits.iter().filter_map(|&trait_| {
- cx.tcx
- .associated_items(trait_)
- .find_by_name_and_namespace(cx.tcx, Ident::with_dummy_span(item_name), ns, trait_)
- .map(|assoc| (assoc.kind, assoc.def_id))
- }));
- }
+ let traits = traits_implemented_by(cx, did, module);
+ debug!("considering traits {:?}", traits);
+ let mut candidates = traits.iter().filter_map(|&trait_| {
+ cx.tcx
+ .associated_items(trait_)
+ .find_by_name_and_namespace(cx.tcx, Ident::with_dummy_span(item_name), ns, trait_)
+ .map(|assoc| (assoc.kind, assoc.def_id))
+ });
// FIXME(#74563): warn about ambiguity
- debug!("the candidates were {:?}", candidates);
- candidates.pop()
+ debug!("the candidates were {:?}", candidates.clone().collect::<Vec<_>>());
+ candidates.next()
}
/// Given a type, return all traits in scope in `module` implemented by that type.
///
/// These are common and we should just resolve to the trait in that case.
fn is_derive_trait_collision<T>(ns: &PerNS<Result<(Res, T), ResolutionFailure<'_>>>) -> bool {
- matches!(*ns, PerNS {
- type_ns: Ok((Res::Def(DefKind::Trait, _), _)),
- macro_ns: Ok((Res::Def(DefKind::Macro(MacroKind::Derive), _), _)),
- ..
- })
+ matches!(
+ *ns,
+ PerNS {
+ type_ns: Ok((Res::Def(DefKind::Trait, _), _)),
+ macro_ns: Ok((Res::Def(DefKind::Macro(MacroKind::Derive), _), _)),
+ ..
+ }
+ )
}
impl<'a, 'tcx> DocFolder for LinkCollector<'a, 'tcx> {
///
/// FIXME(jynelson): this is way too many arguments
fn resolve_link(
- &self,
+ &mut self,
item: &Item,
dox: &str,
self_name: &Option<String>,
let link = ori_link.replace("`", "");
let parts = link.split('#').collect::<Vec<_>>();
let (link, extra_fragment) = if parts.len() > 2 {
+ // A valid link can't have multiple #'s
anchor_failure(cx, &item, &link, dox, link_range, AnchorFailure::MultipleAnchors);
return None;
} else if parts.len() == 2 {
} else {
// This is a bug.
debug!("attempting to resolve item without parent module: {}", path_str);
- let err_kind = ResolutionFailure::NoParentItem.into();
resolution_failure(
self,
&item,
disambiguator,
dox,
link_range,
- smallvec![err_kind],
+ smallvec![ResolutionFailure::NoParentItem],
);
return None;
};
return None;
}
- let (mut res, mut fragment) = self.resolve_with_disambiguator(
- disambiguator,
- item,
- dox,
- path_str,
+ let key = ResolutionInfo {
module_id,
+ dis: disambiguator,
+ path_str: path_str.to_owned(),
extra_fragment,
- &ori_link,
- link_range.clone(),
- )?;
+ };
+ let diag =
+ DiagnosticInfo { item, dox, ori_link: &ori_link, link_range: link_range.clone() };
+ let (mut res, mut fragment) = self.resolve_with_disambiguator_cached(key, diag)?;
// Check for a primitive which might conflict with a module
// Report the ambiguity and require that the user specify which one they meant.
}
}
+ fn resolve_with_disambiguator_cached(
+ &mut self,
+ key: ResolutionInfo,
+ diag: DiagnosticInfo<'_>,
+ ) -> Option<(Res, Option<String>)> {
+ // Try to look up both the result and the corresponding side channel value
+ if let Some(ref cached) = self.visited_links.get(&key) {
+ self.kind_side_channel.set(cached.side_channel.clone());
+ return Some(cached.res.clone());
+ }
+
+ let res = self.resolve_with_disambiguator(&key, diag);
+
+ // Cache only if resolved successfully - don't silence duplicate errors
+ if let Some(res) = &res {
+ // Store result for the actual namespace
+ self.visited_links.insert(
+ key,
+ CachedLink {
+ res: res.clone(),
+ side_channel: self.kind_side_channel.clone().into_inner(),
+ },
+ );
+ }
+
+ res
+ }
+
/// After parsing the disambiguator, resolve the main part of the link.
// FIXME(jynelson): wow this is just so much
fn resolve_with_disambiguator(
&self,
- disambiguator: Option<Disambiguator>,
- item: &Item,
- dox: &str,
- path_str: &str,
- base_node: DefId,
- extra_fragment: Option<String>,
- ori_link: &str,
- link_range: Option<Range<usize>>,
+ key: &ResolutionInfo,
+ diag: DiagnosticInfo<'_>,
) -> Option<(Res, Option<String>)> {
+ let disambiguator = key.dis;
+ let path_str = &key.path_str;
+ let base_node = key.module_id;
+ let extra_fragment = &key.extra_fragment;
+
match disambiguator.map(Disambiguator::ns) {
Some(ns @ (ValueNS | TypeNS)) => {
- match self.resolve(path_str, ns, base_node, &extra_fragment) {
+ match self.resolve(path_str, ns, base_node, extra_fragment) {
Ok(res) => Some(res),
Err(ErrorKind::Resolve(box mut kind)) => {
// We only looked in one namespace. Try to give a better error if possible.
// FIXME: really it should be `resolution_failure` that does this, not `resolve_with_disambiguator`
// See https://github.com/rust-lang/rust/pull/76955#discussion_r493953382 for a good approach
for &new_ns in &[other_ns, MacroNS] {
- if let Some(res) = self.check_full_res(
- new_ns,
- path_str,
- base_node,
- &extra_fragment,
- ) {
+ if let Some(res) =
+ self.check_full_res(new_ns, path_str, base_node, extra_fragment)
+ {
kind = ResolutionFailure::WrongNamespace(res, ns);
break;
}
}
resolution_failure(
self,
- &item,
+ diag.item,
path_str,
disambiguator,
- dox,
- link_range,
+ diag.dox,
+ diag.link_range,
smallvec![kind],
);
// This could just be a normal link or a broken link
return None;
}
Err(ErrorKind::AnchorFailure(msg)) => {
- anchor_failure(self.cx, &item, &ori_link, dox, link_range, msg);
+ anchor_failure(
+ self.cx,
+ diag.item,
+ diag.ori_link,
+ diag.dox,
+ diag.link_range,
+ msg,
+ );
return None;
}
}
macro_ns: self
.resolve_macro(path_str, base_node)
.map(|res| (res, extra_fragment.clone())),
- type_ns: match self.resolve(path_str, TypeNS, base_node, &extra_fragment) {
+ type_ns: match self.resolve(path_str, TypeNS, base_node, extra_fragment) {
Ok(res) => {
debug!("got res in TypeNS: {:?}", res);
Ok(res)
}
Err(ErrorKind::AnchorFailure(msg)) => {
- anchor_failure(self.cx, &item, ori_link, dox, link_range, msg);
+ anchor_failure(
+ self.cx,
+ diag.item,
+ diag.ori_link,
+ diag.dox,
+ diag.link_range,
+ msg,
+ );
return None;
}
Err(ErrorKind::Resolve(box kind)) => Err(kind),
},
- value_ns: match self.resolve(path_str, ValueNS, base_node, &extra_fragment) {
+ value_ns: match self.resolve(path_str, ValueNS, base_node, extra_fragment) {
Ok(res) => Ok(res),
Err(ErrorKind::AnchorFailure(msg)) => {
- anchor_failure(self.cx, &item, ori_link, dox, link_range, msg);
+ anchor_failure(
+ self.cx,
+ diag.item,
+ diag.ori_link,
+ diag.dox,
+ diag.link_range,
+ msg,
+ );
return None;
}
Err(ErrorKind::Resolve(box kind)) => Err(kind),
Res::Def(DefKind::Ctor(..), _) | Res::SelfCtor(..) => {
Err(ResolutionFailure::WrongNamespace(res, TypeNS))
}
- _ => match (fragment, extra_fragment) {
+ _ => match (fragment, extra_fragment.clone()) {
(Some(fragment), Some(_)) => {
// Shouldn't happen but who knows?
Ok((res, Some(fragment)))
if len == 0 {
resolution_failure(
self,
- &item,
+ diag.item,
path_str,
disambiguator,
- dox,
- link_range,
+ diag.dox,
+ diag.link_range,
candidates.into_iter().filter_map(|res| res.err()).collect(),
);
// this could just be a normal link
let candidates = candidates.map(|candidate| candidate.ok().map(|(res, _)| res));
ambiguity_error(
self.cx,
- &item,
+ diag.item,
path_str,
- dox,
- link_range,
+ diag.dox,
+ diag.link_range,
candidates.present_items().collect(),
);
return None;
}
Some(MacroNS) => {
match self.resolve_macro(path_str, base_node) {
- Ok(res) => Some((res, extra_fragment)),
+ Ok(res) => Some((res, extra_fragment.clone())),
Err(mut kind) => {
// `resolve_macro` only looks in the macro namespace. Try to give a better error if possible.
for &ns in &[TypeNS, ValueNS] {
if let Some(res) =
- self.check_full_res(ns, path_str, base_node, &extra_fragment)
+ self.check_full_res(ns, path_str, base_node, extra_fragment)
{
kind = ResolutionFailure::WrongNamespace(res, MacroNS);
break;
}
resolution_failure(
self,
- &item,
+ diag.item,
path_str,
disambiguator,
- dox,
- link_range,
+ diag.dox,
+ diag.link_range,
smallvec![kind],
);
return None;
}
}
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
/// Disambiguators for a link.
enum Disambiguator {
/// `prim@`
dox: &str,
link_range: Option<Range<usize>>,
) {
- let item_name = item.name.as_deref().unwrap_or("<unknown>");
+ let sym;
+ let item_name = match item.name {
+ Some(name) => {
+ sym = name.as_str();
+ &*sym
+ }
+ None => "<unknown>",
+ };
let msg =
format!("public documentation for `{}` links to private item `{}`", item_name, path_str);
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::ty::DefIdTree;
use rustc_span::symbol::sym;
crate const COLLECT_TRAIT_IMPLS: Pass = Pass {
for &impl_node in cx.tcx.hir().trait_impls(trait_did) {
let impl_did = cx.tcx.hir().local_def_id(impl_node);
cx.tcx.sess.time("build_local_trait_impl", || {
- inline::build_impl(cx, None, impl_did.to_def_id(), None, &mut new_items);
+ let mut extra_attrs = Vec::new();
+ let mut parent = cx.tcx.parent(impl_did.to_def_id());
+ while let Some(did) = parent {
+ extra_attrs.extend(
+ cx.tcx
+ .get_attrs(did)
+ .iter()
+ .filter(|attr| attr.has_name(sym::doc))
+ .filter(|attr| {
+ if let Some([attr]) = attr.meta_item_list().as_deref() {
+ attr.has_name(sym::cfg)
+ } else {
+ false
+ }
+ })
+ .cloned(),
+ );
+ parent = cx.tcx.parent(did);
+ }
+ inline::build_impl(
+ cx,
+ None,
+ impl_did.to_def_id(),
+ Some(&extra_attrs),
+ &mut new_items,
+ );
});
}
}
}
crate fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) -> bool {
- if matches!(item.kind,
+ if matches!(
+ item.kind,
clean::StructFieldItem(_)
- | clean::VariantItem(_)
- | clean::AssocConstItem(_, _)
- | clean::AssocTypeItem(_, _)
- | clean::TypedefItem(_, _)
- | clean::StaticItem(_)
- | clean::ConstantItem(_)
- | clean::ExternCrateItem(_, _)
- | clean::ImportItem(_)
- | clean::PrimitiveItem(_)
- | clean::KeywordItem(_)
+ | clean::VariantItem(_)
+ | clean::AssocConstItem(_, _)
+ | clean::AssocTypeItem(_, _)
+ | clean::TypedefItem(_, _)
+ | clean::StaticItem(_)
+ | clean::ConstantItem(_)
+ | clean::ExternCrateItem(_, _)
+ | clean::ImportItem(_)
+ | clean::PrimitiveItem(_)
+ | clean::KeywordItem(_)
) {
return false;
}
--- /dev/null
+// compile-flags: -O -C no-prepopulate-passes
+
+#![crate_type = "lib"]
+
+// FIXME(eddyb) all of these tests show memory stores and loads, even after a
+// scalar `bitcast`, more special-casing is required to remove `alloca` usage.
+
+// CHECK: define i32 @f32_to_bits(float %x)
+// CHECK: %2 = bitcast float %x to i32
+// CHECK-NEXT: store i32 %2, i32* %0
+// CHECK-NEXT: %3 = load i32, i32* %0
+// CHECK: ret i32 %3
+#[no_mangle]
+pub fn f32_to_bits(x: f32) -> u32 {
+ unsafe { std::mem::transmute(x) }
+}
+
+// CHECK: define i8 @bool_to_byte(i1 zeroext %b)
+// CHECK: %1 = zext i1 %b to i8
+// CHECK-NEXT: store i8 %1, i8* %0
+// CHECK-NEXT: %2 = load i8, i8* %0
+// CHECK: ret i8 %2
+#[no_mangle]
+pub fn bool_to_byte(b: bool) -> u8 {
+ unsafe { std::mem::transmute(b) }
+}
+
+// CHECK: define zeroext i1 @byte_to_bool(i8 %byte)
+// CHECK: %1 = trunc i8 %byte to i1
+// CHECK-NEXT: %2 = zext i1 %1 to i8
+// CHECK-NEXT: store i8 %2, i8* %0
+// CHECK-NEXT: %3 = load i8, i8* %0
+// CHECK-NEXT: %4 = trunc i8 %3 to i1
+// CHECK: ret i1 %4
+#[no_mangle]
+pub unsafe fn byte_to_bool(byte: u8) -> bool {
+ std::mem::transmute(byte)
+}
+
+// CHECK: define i8* @ptr_to_ptr(i16* %p)
+// CHECK: %2 = bitcast i16* %p to i8*
+// CHECK-NEXT: store i8* %2, i8** %0
+// CHECK-NEXT: %3 = load i8*, i8** %0
+// CHECK: ret i8* %3
+#[no_mangle]
+pub fn ptr_to_ptr(p: *mut u16) -> *mut u8 {
+ unsafe { std::mem::transmute(p) }
+}
+
+// HACK(eddyb) scalar `transmute`s between pointers and non-pointers are
+// currently not special-cased like other scalar `transmute`s, because
+// LLVM requires specifically `ptrtoint`/`inttoptr` instead of `bitcast`.
+//
+// Tests below show the non-special-cased behavior (with the possible
+// future special-cased instructions in the "NOTE(eddyb)" comments).
+
+// CHECK: define [[USIZE:i[0-9]+]] @ptr_to_int(i16* %p)
+
+// NOTE(eddyb) see above, the following two CHECK lines should ideally be this:
+// %2 = ptrtoint i16* %p to [[USIZE]]
+// store [[USIZE]] %2, [[USIZE]]* %0
+// CHECK: %2 = bitcast [[USIZE]]* %0 to i16**
+// CHECK-NEXT: store i16* %p, i16** %2
+
+// CHECK-NEXT: %3 = load [[USIZE]], [[USIZE]]* %0
+// CHECK: ret [[USIZE]] %3
+#[no_mangle]
+pub fn ptr_to_int(p: *mut u16) -> usize {
+ unsafe { std::mem::transmute(p) }
+}
+
+// CHECK: define i16* @int_to_ptr([[USIZE]] %i)
+
+// NOTE(eddyb) see above, the following two CHECK lines should ideally be this:
+// %2 = inttoptr [[USIZE]] %i to i16*
+// store i16* %2, i16** %0
+// CHECK: %2 = bitcast i16** %0 to [[USIZE]]*
+// CHECK-NEXT: store [[USIZE]] %i, [[USIZE]]* %2
+
+// CHECK-NEXT: %3 = load i16*, i16** %0
+// CHECK: ret i16* %3
+#[no_mangle]
+pub fn int_to_ptr(i: usize) -> *mut u16 {
+ unsafe { std::mem::transmute(i) }
+}
--- /dev/null
+pub trait MyTrait {}
--- /dev/null
+// aux-build:issue-79890.rs
+// revisions:rpass1 rpass2 rpass3
+// compile-flags:--extern issue_79890 --test
+// edition:2018
+
+// Tests that we don't ICE when the set of imported crates changes
+#[cfg(rpass2)] use issue_79890::MyTrait;
--- /dev/null
+- // MIR for `main` before ConstDebugInfo
++ // MIR for `main` after ConstDebugInfo
+
+ fn main() -> () {
+ let mut _0: (); // return place in scope 0 at $DIR/const_debuginfo.rs:8:11: 8:11
+ let _1: u8; // in scope 0 at $DIR/const_debuginfo.rs:9:9: 9:10
+ let mut _5: u8; // in scope 0 at $DIR/const_debuginfo.rs:12:15: 12:20
+ let mut _6: u8; // in scope 0 at $DIR/const_debuginfo.rs:12:15: 12:16
+ let mut _7: u8; // in scope 0 at $DIR/const_debuginfo.rs:12:19: 12:20
+ let mut _8: u8; // in scope 0 at $DIR/const_debuginfo.rs:12:23: 12:24
+ let mut _14: u32; // in scope 0 at $DIR/const_debuginfo.rs:21:13: 21:16
+ let mut _15: u32; // in scope 0 at $DIR/const_debuginfo.rs:21:19: 21:22
+ scope 1 {
+- debug x => _1; // in scope 1 at $DIR/const_debuginfo.rs:9:9: 9:10
++ debug x => const 1_u8; // in scope 1 at $DIR/const_debuginfo.rs:9:9: 9:10
+ let _2: u8; // in scope 1 at $DIR/const_debuginfo.rs:10:9: 10:10
+ scope 2 {
+- debug y => _2; // in scope 2 at $DIR/const_debuginfo.rs:10:9: 10:10
++ debug y => const 2_u8; // in scope 2 at $DIR/const_debuginfo.rs:10:9: 10:10
+ let _3: u8; // in scope 2 at $DIR/const_debuginfo.rs:11:9: 11:10
+ scope 3 {
+- debug z => _3; // in scope 3 at $DIR/const_debuginfo.rs:11:9: 11:10
++ debug z => const 3_u8; // in scope 3 at $DIR/const_debuginfo.rs:11:9: 11:10
+ let _4: u8; // in scope 3 at $DIR/const_debuginfo.rs:12:9: 12:12
+ scope 4 {
+- debug sum => _4; // in scope 4 at $DIR/const_debuginfo.rs:12:9: 12:12
++ debug sum => const 6_u8; // in scope 4 at $DIR/const_debuginfo.rs:12:9: 12:12
+ let _9: &str; // in scope 4 at $DIR/const_debuginfo.rs:14:9: 14:10
+ scope 5 {
+- debug s => _9; // in scope 5 at $DIR/const_debuginfo.rs:14:9: 14:10
++ debug s => const "hello, world!"; // in scope 5 at $DIR/const_debuginfo.rs:14:9: 14:10
+ let _10: (bool, bool, u32); // in scope 5 at $DIR/const_debuginfo.rs:16:9: 16:10
+ scope 6 {
+ debug f => _10; // in scope 6 at $DIR/const_debuginfo.rs:16:9: 16:10
+ let _11: std::option::Option<u16>; // in scope 6 at $DIR/const_debuginfo.rs:18:9: 18:10
+ scope 7 {
+ debug o => _11; // in scope 7 at $DIR/const_debuginfo.rs:18:9: 18:10
+ let _12: Point; // in scope 7 at $DIR/const_debuginfo.rs:20:9: 20:10
+ scope 8 {
+ debug p => _12; // in scope 8 at $DIR/const_debuginfo.rs:20:9: 20:10
+ let _13: u32; // in scope 8 at $DIR/const_debuginfo.rs:21:9: 21:10
+ scope 9 {
+- debug a => _13; // in scope 9 at $DIR/const_debuginfo.rs:21:9: 21:10
++ debug a => const 64_u32; // in scope 9 at $DIR/const_debuginfo.rs:21:9: 21:10
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ bb0: {
+ StorageLive(_1); // scope 0 at $DIR/const_debuginfo.rs:9:9: 9:10
+ _1 = const 1_u8; // scope 0 at $DIR/const_debuginfo.rs:9:13: 9:16
+ StorageLive(_2); // scope 1 at $DIR/const_debuginfo.rs:10:9: 10:10
+ _2 = const 2_u8; // scope 1 at $DIR/const_debuginfo.rs:10:13: 10:16
+ StorageLive(_3); // scope 2 at $DIR/const_debuginfo.rs:11:9: 11:10
+ _3 = const 3_u8; // scope 2 at $DIR/const_debuginfo.rs:11:13: 11:16
+ StorageLive(_4); // scope 3 at $DIR/const_debuginfo.rs:12:9: 12:12
+ StorageLive(_5); // scope 3 at $DIR/const_debuginfo.rs:12:15: 12:20
+ StorageLive(_6); // scope 3 at $DIR/const_debuginfo.rs:12:15: 12:16
+ _6 = const 1_u8; // scope 3 at $DIR/const_debuginfo.rs:12:15: 12:16
+ StorageLive(_7); // scope 3 at $DIR/const_debuginfo.rs:12:19: 12:20
+ _7 = const 2_u8; // scope 3 at $DIR/const_debuginfo.rs:12:19: 12:20
+ _5 = const 3_u8; // scope 3 at $DIR/const_debuginfo.rs:12:15: 12:20
+ StorageDead(_7); // scope 3 at $DIR/const_debuginfo.rs:12:19: 12:20
+ StorageDead(_6); // scope 3 at $DIR/const_debuginfo.rs:12:19: 12:20
+ StorageLive(_8); // scope 3 at $DIR/const_debuginfo.rs:12:23: 12:24
+ _8 = const 3_u8; // scope 3 at $DIR/const_debuginfo.rs:12:23: 12:24
+ _4 = const 6_u8; // scope 3 at $DIR/const_debuginfo.rs:12:15: 12:24
+ StorageDead(_8); // scope 3 at $DIR/const_debuginfo.rs:12:23: 12:24
+ StorageDead(_5); // scope 3 at $DIR/const_debuginfo.rs:12:23: 12:24
+ StorageLive(_9); // scope 4 at $DIR/const_debuginfo.rs:14:9: 14:10
+ _9 = const "hello, world!"; // scope 4 at $DIR/const_debuginfo.rs:14:13: 14:28
+ // ty::Const
+ // + ty: &str
+ // + val: Value(Slice { data: Allocation { bytes: [104, 101, 108, 108, 111, 44, 32, 119, 111, 114, 108, 100, 33], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [8191], len: Size { raw: 13 } }, size: Size { raw: 13 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, start: 0, end: 13 })
+ // mir::Constant
+ // + span: $DIR/const_debuginfo.rs:14:13: 14:28
+ // + literal: Const { ty: &str, val: Value(Slice { data: Allocation { bytes: [104, 101, 108, 108, 111, 44, 32, 119, 111, 114, 108, 100, 33], relocations: Relocations(SortedMap { data: [] }), init_mask: InitMask { blocks: [8191], len: Size { raw: 13 } }, size: Size { raw: 13 }, align: Align { pow2: 0 }, mutability: Not, extra: () }, start: 0, end: 13 }) }
+ StorageLive(_10); // scope 5 at $DIR/const_debuginfo.rs:16:9: 16:10
+ (_10.0: bool) = const true; // scope 5 at $DIR/const_debuginfo.rs:16:13: 16:34
+ (_10.1: bool) = const false; // scope 5 at $DIR/const_debuginfo.rs:16:13: 16:34
+ (_10.2: u32) = const 123_u32; // scope 5 at $DIR/const_debuginfo.rs:16:13: 16:34
+ StorageLive(_11); // scope 6 at $DIR/const_debuginfo.rs:18:9: 18:10
+ ((_11 as Some).0: u16) = const 99_u16; // scope 6 at $DIR/const_debuginfo.rs:18:13: 18:24
+ discriminant(_11) = 1; // scope 6 at $DIR/const_debuginfo.rs:18:13: 18:24
+ StorageLive(_12); // scope 7 at $DIR/const_debuginfo.rs:20:9: 20:10
+ (_12.0: u32) = const 32_u32; // scope 7 at $DIR/const_debuginfo.rs:20:13: 20:35
+ (_12.1: u32) = const 32_u32; // scope 7 at $DIR/const_debuginfo.rs:20:13: 20:35
+ StorageLive(_13); // scope 8 at $DIR/const_debuginfo.rs:21:9: 21:10
+ StorageLive(_14); // scope 8 at $DIR/const_debuginfo.rs:21:13: 21:16
+ _14 = const 32_u32; // scope 8 at $DIR/const_debuginfo.rs:21:13: 21:16
+ StorageLive(_15); // scope 8 at $DIR/const_debuginfo.rs:21:19: 21:22
+ _15 = const 32_u32; // scope 8 at $DIR/const_debuginfo.rs:21:19: 21:22
+ _13 = const 64_u32; // scope 8 at $DIR/const_debuginfo.rs:21:13: 21:22
+ StorageDead(_15); // scope 8 at $DIR/const_debuginfo.rs:21:21: 21:22
+ StorageDead(_14); // scope 8 at $DIR/const_debuginfo.rs:21:21: 21:22
+ _0 = const (); // scope 0 at $DIR/const_debuginfo.rs:8:11: 22:2
+ StorageDead(_13); // scope 8 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_12); // scope 7 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_11); // scope 6 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_10); // scope 5 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_9); // scope 4 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_4); // scope 3 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_3); // scope 2 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_2); // scope 1 at $DIR/const_debuginfo.rs:22:1: 22:2
+ StorageDead(_1); // scope 0 at $DIR/const_debuginfo.rs:22:1: 22:2
+ return; // scope 0 at $DIR/const_debuginfo.rs:22:2: 22:2
+ }
+ }
+
--- /dev/null
+// compile-flags: -C overflow-checks=no -Zunsound-mir-opts
+
+struct Point {
+ x: u32,
+ y: u32,
+}
+
+fn main() {
+ let x = 1u8;
+ let y = 2u8;
+ let z = 3u8;
+ let sum = x + y + z;
+
+ let s = "hello, world!";
+
+ let f = (true, false, 123u32);
+
+ let o = Some(99u16);
+
+ let p = Point { x: 32, y: 32 };
+ let a = p.x + p.y;
+}
+
+// EMIT_MIR const_debuginfo.main.ConstDebugInfo.diff
graph [fontname="Courier, monospace"];
node [fontname="Courier, monospace"];
edge [fontname="Courier, monospace"];
- bcb0__Cov_0_4 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb0</td></tr><tr><td align="left" balign="left"></td></tr><tr><td align="left" balign="left">Counter(bcb0) at 18:1-20:2<br/> 19:5-19:9: @0[0]: _0 = const true<br/> 20:2-20:2: @0.Return: return</td></tr><tr><td align="left" balign="left">bb0: Return</td></tr></table>>];
+ bcb0__Cov_0_4 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb0</td></tr><tr><td align="left" balign="left"></td></tr><tr><td align="left" balign="left">Counter(bcb0) at 18:1-20:2<br/> 19:5-19:9: @0[0]: Coverage::Counter(1) for $DIR/coverage_graphviz.rs:18:1 - 20:2<br/> 20:2-20:2: @0.Return: return</td></tr><tr><td align="left" balign="left">bb0: Return</td></tr></table>>];
}
graph [fontname="Courier, monospace"];
node [fontname="Courier, monospace"];
edge [fontname="Courier, monospace"];
- bcb2__Cov_0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb2</td></tr><tr><td align="left" balign="left">Expression(bcb0 - bcb1) at 13:10-13:10<br/> 13:10-13:10: @4[0]: _1 = const ()</td></tr><tr><td align="left" balign="left">bb4: Goto</td></tr></table>>];
+ bcb2__Cov_0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb2</td></tr><tr><td align="left" balign="left">Expression(bcb0 - bcb1) at 13:10-13:10<br/> 13:10-13:10: @4[0]: Coverage::Expression(4294967295) = 1 - 2 for $DIR/coverage_graphviz.rs:13:10 - 13:11</td></tr><tr><td align="left" balign="left">bb4: Goto</td></tr></table>>];
bcb1__Cov_0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb1</td></tr><tr><td align="left" balign="left">Counter(bcb1) at 12:13-12:18<br/> 12:13-12:18: @5[0]: _0 = const ()<br/>Expression(bcb1 + 0) at 15:2-15:2<br/> 15:2-15:2: @5.Return: return</td></tr><tr><td align="left" balign="left">bb3: FalseEdge</td></tr><tr><td align="left" balign="left">bb5: Return</td></tr></table>>];
bcb0__Cov_0_3 [shape="none", label=<<table border="0" cellborder="1" cellspacing="0"><tr><td bgcolor="gray" align="center" colspan="1">bcb0</td></tr><tr><td align="left" balign="left"></td></tr><tr><td align="left" balign="left">Counter(bcb0) at 9:1-11:17<br/> 11:12-11:17: @1.Call: _2 = bar() -> [return: bb2, unwind: bb6]<br/> 11:12-11:17: @2[0]: FakeRead(ForMatchedPlace, _2)</td></tr><tr><td align="left" balign="left">bb0: FalseUnwind<br/>bb1: Call</td></tr><tr><td align="left" balign="left">bb2: SwitchInt</td></tr></table>>];
bcb2__Cov_0_3 -> bcb0__Cov_0_3 [label=<>];
let mut _0: bool; // return place in scope 0 at /the/src/instrument_coverage.rs:19:13: 19:17
bb0: {
- _0 = const true; // scope 0 at /the/src/instrument_coverage.rs:20:5: 20:9
+ Coverage::Counter(1) for /the/src/instrument_coverage.rs:19:1 - 21:2; // scope 0 at /the/src/instrument_coverage.rs:21:2: 21:2
+ _0 = const true; // scope 0 at /the/src/instrument_coverage.rs:20:5: 20:9
return; // scope 0 at /the/src/instrument_coverage.rs:21:2: 21:2
}
}
let mut _3: !; // in scope 0 at /the/src/instrument_coverage.rs:12:18: 14:10
bb0: {
++ Coverage::Counter(1) for /the/src/instrument_coverage.rs:10:1 - 12:17; // scope 0 at /the/src/instrument_coverage.rs:11:5: 15:6
falseUnwind -> [real: bb1, cleanup: bb6]; // scope 0 at /the/src/instrument_coverage.rs:11:5: 15:6
}
bb2: {
FakeRead(ForMatchedPlace, _2); // scope 0 at /the/src/instrument_coverage.rs:12:12: 12:17
-+ Coverage::Counter(1) for /the/src/instrument_coverage.rs:10:1 - 12:17; // scope 0 at /the/src/instrument_coverage.rs:12:9: 14:10
switchInt(_2) -> [false: bb4, otherwise: bb3]; // scope 0 at /the/src/instrument_coverage.rs:12:9: 14:10
}
bb3: {
++ Coverage::Expression(4294967294) = 2 + 0 for /the/src/instrument_coverage.rs:16:1 - 16:2; // scope 0 at /the/src/instrument_coverage.rs:12:9: 14:10
++ Coverage::Counter(2) for /the/src/instrument_coverage.rs:13:13 - 13:18; // scope 0 at /the/src/instrument_coverage.rs:12:9: 14:10
falseEdge -> [real: bb5, imaginary: bb4]; // scope 0 at /the/src/instrument_coverage.rs:12:9: 14:10
}
bb4: {
++ Coverage::Expression(4294967295) = 1 - 2 for /the/src/instrument_coverage.rs:14:10 - 14:11; // scope 0 at /the/src/instrument_coverage.rs:11:5: 15:6
_1 = const (); // scope 0 at /the/src/instrument_coverage.rs:14:10: 14:10
StorageDead(_2); // scope 0 at /the/src/instrument_coverage.rs:15:5: 15:6
-+ Coverage::Expression(4294967295) = 1 - 2 for /the/src/instrument_coverage.rs:14:10 - 14:11; // scope 0 at /the/src/instrument_coverage.rs:11:5: 15:6
goto -> bb0; // scope 0 at /the/src/instrument_coverage.rs:11:5: 15:6
}
bb5: {
_0 = const (); // scope 0 at /the/src/instrument_coverage.rs:13:13: 13:18
StorageDead(_2); // scope 0 at /the/src/instrument_coverage.rs:15:5: 15:6
-+ Coverage::Counter(2) for /the/src/instrument_coverage.rs:13:13 - 13:18; // scope 0 at /the/src/instrument_coverage.rs:16:2: 16:2
-+ Coverage::Expression(4294967294) = 2 + 0 for /the/src/instrument_coverage.rs:16:1 - 16:2; // scope 0 at /the/src/instrument_coverage.rs:16:2: 16:2
return; // scope 0 at /the/src/instrument_coverage.rs:16:2: 16:2
}
--- /dev/null
+- // MIR for `discriminant` before LowerIntrinsics
++ // MIR for `discriminant` after LowerIntrinsics
+
+ fn discriminant(_1: T) -> () {
+ debug t => _1; // in scope 0 at $DIR/lower_intrinsics.rs:68:24: 68:25
+ let mut _0: (); // return place in scope 0 at $DIR/lower_intrinsics.rs:68:30: 68:30
+ let _2: <T as std::marker::DiscriminantKind>::Discriminant; // in scope 0 at $DIR/lower_intrinsics.rs:69:5: 69:45
+ let mut _3: &T; // in scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+ let _4: &T; // in scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+ let _5: u8; // in scope 0 at $DIR/lower_intrinsics.rs:70:5: 70:45
+ let mut _6: &i32; // in scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ let _7: &i32; // in scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ let _8: i32; // in scope 0 at $DIR/lower_intrinsics.rs:70:43: 70:44
+ let _9: u8; // in scope 0 at $DIR/lower_intrinsics.rs:71:5: 71:46
+ let mut _10: &(); // in scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ let _11: &(); // in scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ let _12: (); // in scope 0 at $DIR/lower_intrinsics.rs:71:43: 71:45
+ let _13: isize; // in scope 0 at $DIR/lower_intrinsics.rs:72:5: 72:48
+ let mut _14: &E; // in scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ let _15: &E; // in scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ let _16: E; // in scope 0 at $DIR/lower_intrinsics.rs:72:43: 72:47
+ let mut _17: &E; // in scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ let mut _18: &(); // in scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ let mut _19: &i32; // in scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+
+ bb0: {
+ StorageLive(_2); // scope 0 at $DIR/lower_intrinsics.rs:69:5: 69:45
+ StorageLive(_3); // scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+ StorageLive(_4); // scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+ _4 = &_1; // scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+ _3 = &(*_4); // scope 0 at $DIR/lower_intrinsics.rs:69:42: 69:44
+- _2 = discriminant_value::<T>(move _3) -> bb1; // scope 0 at $DIR/lower_intrinsics.rs:69:5: 69:45
+- // mir::Constant
+- // + span: $DIR/lower_intrinsics.rs:69:5: 69:41
+- // + literal: Const { ty: for<'r> extern "rust-intrinsic" fn(&'r T) -> <T as std::marker::DiscriminantKind>::Discriminant {std::intrinsics::discriminant_value::<T>}, val: Value(Scalar(<ZST>)) }
++ _2 = discriminant((*_3)); // scope 0 at $DIR/lower_intrinsics.rs:69:5: 69:45
++ goto -> bb1; // scope 0 at $DIR/lower_intrinsics.rs:69:5: 69:45
+ }
+
+ bb1: {
+ StorageDead(_3); // scope 0 at $DIR/lower_intrinsics.rs:69:44: 69:45
+ StorageDead(_4); // scope 0 at $DIR/lower_intrinsics.rs:69:45: 69:46
+ StorageDead(_2); // scope 0 at $DIR/lower_intrinsics.rs:69:45: 69:46
+ StorageLive(_5); // scope 0 at $DIR/lower_intrinsics.rs:70:5: 70:45
+ StorageLive(_6); // scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ StorageLive(_7); // scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ _19 = const discriminant::<T>::promoted[2]; // scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ // ty::Const
+ // + ty: &i32
+ // + val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[2]))
+ // mir::Constant
+ // + span: $DIR/lower_intrinsics.rs:70:42: 70:44
+ // + literal: Const { ty: &i32, val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[2])) }
+ _7 = &(*_19); // scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+ _6 = &(*_7); // scope 0 at $DIR/lower_intrinsics.rs:70:42: 70:44
+- _5 = discriminant_value::<i32>(move _6) -> bb2; // scope 0 at $DIR/lower_intrinsics.rs:70:5: 70:45
+- // mir::Constant
+- // + span: $DIR/lower_intrinsics.rs:70:5: 70:41
+- // + literal: Const { ty: for<'r> extern "rust-intrinsic" fn(&'r i32) -> <i32 as std::marker::DiscriminantKind>::Discriminant {std::intrinsics::discriminant_value::<i32>}, val: Value(Scalar(<ZST>)) }
++ _5 = discriminant((*_6)); // scope 0 at $DIR/lower_intrinsics.rs:70:5: 70:45
++ goto -> bb2; // scope 0 at $DIR/lower_intrinsics.rs:70:5: 70:45
+ }
+
+ bb2: {
+ StorageDead(_6); // scope 0 at $DIR/lower_intrinsics.rs:70:44: 70:45
+ StorageDead(_7); // scope 0 at $DIR/lower_intrinsics.rs:70:45: 70:46
+ StorageDead(_5); // scope 0 at $DIR/lower_intrinsics.rs:70:45: 70:46
+ StorageLive(_9); // scope 0 at $DIR/lower_intrinsics.rs:71:5: 71:46
+ StorageLive(_10); // scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ StorageLive(_11); // scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ _18 = const discriminant::<T>::promoted[1]; // scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ // ty::Const
+ // + ty: &()
+ // + val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[1]))
+ // mir::Constant
+ // + span: $DIR/lower_intrinsics.rs:71:42: 71:45
+ // + literal: Const { ty: &(), val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[1])) }
+ _11 = &(*_18); // scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+ _10 = &(*_11); // scope 0 at $DIR/lower_intrinsics.rs:71:42: 71:45
+- _9 = discriminant_value::<()>(move _10) -> bb3; // scope 0 at $DIR/lower_intrinsics.rs:71:5: 71:46
+- // mir::Constant
+- // + span: $DIR/lower_intrinsics.rs:71:5: 71:41
+- // + literal: Const { ty: for<'r> extern "rust-intrinsic" fn(&'r ()) -> <() as std::marker::DiscriminantKind>::Discriminant {std::intrinsics::discriminant_value::<()>}, val: Value(Scalar(<ZST>)) }
++ _9 = discriminant((*_10)); // scope 0 at $DIR/lower_intrinsics.rs:71:5: 71:46
++ goto -> bb3; // scope 0 at $DIR/lower_intrinsics.rs:71:5: 71:46
+ }
+
+ bb3: {
+ StorageDead(_10); // scope 0 at $DIR/lower_intrinsics.rs:71:45: 71:46
+ StorageDead(_11); // scope 0 at $DIR/lower_intrinsics.rs:71:46: 71:47
+ StorageDead(_9); // scope 0 at $DIR/lower_intrinsics.rs:71:46: 71:47
+ StorageLive(_13); // scope 0 at $DIR/lower_intrinsics.rs:72:5: 72:48
+ StorageLive(_14); // scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ StorageLive(_15); // scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ _17 = const discriminant::<T>::promoted[0]; // scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ // ty::Const
+ // + ty: &E
+ // + val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[0]))
+ // mir::Constant
+ // + span: $DIR/lower_intrinsics.rs:72:42: 72:47
+ // + literal: Const { ty: &E, val: Unevaluated(WithOptConstParam { did: DefId(0:27 ~ lower_intrinsics[8787]::discriminant), const_param_did: None }, [T], Some(promoted[0])) }
+ _15 = &(*_17); // scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+ _14 = &(*_15); // scope 0 at $DIR/lower_intrinsics.rs:72:42: 72:47
+- _13 = discriminant_value::<E>(move _14) -> bb4; // scope 0 at $DIR/lower_intrinsics.rs:72:5: 72:48
+- // mir::Constant
+- // + span: $DIR/lower_intrinsics.rs:72:5: 72:41
+- // + literal: Const { ty: for<'r> extern "rust-intrinsic" fn(&'r E) -> <E as std::marker::DiscriminantKind>::Discriminant {std::intrinsics::discriminant_value::<E>}, val: Value(Scalar(<ZST>)) }
++ _13 = discriminant((*_14)); // scope 0 at $DIR/lower_intrinsics.rs:72:5: 72:48
++ goto -> bb4; // scope 0 at $DIR/lower_intrinsics.rs:72:5: 72:48
+ }
+
+ bb4: {
+ StorageDead(_14); // scope 0 at $DIR/lower_intrinsics.rs:72:47: 72:48
+ StorageDead(_15); // scope 0 at $DIR/lower_intrinsics.rs:72:48: 72:49
+ StorageDead(_13); // scope 0 at $DIR/lower_intrinsics.rs:72:48: 72:49
+ _0 = const (); // scope 0 at $DIR/lower_intrinsics.rs:68:30: 73:2
+ drop(_1) -> bb5; // scope 0 at $DIR/lower_intrinsics.rs:73:1: 73:2
+ }
+
+ bb5: {
+ return; // scope 0 at $DIR/lower_intrinsics.rs:73:2: 73:2
+ }
+
+ bb6 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:68:1: 73:2
+ }
+ }
+
StorageDead(_3); // scope 1 at $DIR/lower_intrinsics.rs:19:40: 19:41
StorageDead(_2); // scope 0 at $DIR/lower_intrinsics.rs:19:43: 19:44
_0 = const (); // scope 0 at $DIR/lower_intrinsics.rs:18:24: 20:2
+ goto -> bb2; // scope 0 at $DIR/lower_intrinsics.rs:20:1: 20:2
+ }
+
+ bb2: {
return; // scope 0 at $DIR/lower_intrinsics.rs:20:2: 20:2
}
+
+ bb3 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:18:1: 20:2
+ }
}
StorageDead(_1); // scope 0 at $DIR/lower_intrinsics.rs:59:1: 59:2
return; // scope 0 at $DIR/lower_intrinsics.rs:59:2: 59:2
}
+
+ bb2 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:55:1: 59:2
+ }
}
}
#[inline(never)]
-pub fn f_zst<T>(t: T) {
+pub fn f_zst<T>(_t: T) {
}
#[inline(never)]
-pub fn f_non_zst<T>(t: T) {}
+pub fn f_non_zst<T>(_t: T) {}
// EMIT_MIR lower_intrinsics.non_const.LowerIntrinsics.diff
pub fn non_const<T>() -> usize {
let size_of_t = core::intrinsics::size_of::<T>;
size_of_t()
}
+
+pub enum E {
+ A,
+ B,
+ C,
+}
+
+// EMIT_MIR lower_intrinsics.discriminant.LowerIntrinsics.diff
+pub fn discriminant<T>(t: T) {
+ core::intrinsics::discriminant_value(&t);
+ core::intrinsics::discriminant_value(&0);
+ core::intrinsics::discriminant_value(&());
+ core::intrinsics::discriminant_value(&E::B);
+}
bb1: {
return; // scope 0 at $DIR/lower_intrinsics.rs:15:2: 15:2
}
+
+ bb2 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:13:1: 15:2
+ }
}
- // + literal: Const { ty: unsafe extern "rust-intrinsic" fn() -> ! {std::intrinsics::unreachable}, val: Value(Scalar(<ZST>)) }
+ unreachable; // scope 1 at $DIR/lower_intrinsics.rs:24:14: 24:45
}
+
+ bb1 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:23:1: 25:2
+ }
}
StorageDead(_3); // scope 0 at $DIR/lower_intrinsics.rs:10:1: 10:2
return; // scope 0 at $DIR/lower_intrinsics.rs:10:2: 10:2
}
+
+ bb4 (cleanup): {
+ resume; // scope 0 at $DIR/lower_intrinsics.rs:6:1: 10:2
+ }
}
},
"lines": {
"count": 15,
- "covered": 12,
- "percent": 80
+ "covered": 13,
+ "percent": 86.66666666666667
},
"regions": {
"count": 14,
- "covered": 12,
- "notcovered": 2,
- "percent": 85.71428571428571
+ "covered": 13,
+ "notcovered": 1,
+ "percent": 92.85714285714286
}
}
}
},
"lines": {
"count": 15,
- "covered": 12,
- "percent": 80
+ "covered": 13,
+ "percent": 86.66666666666667
},
"regions": {
"count": 14,
- "covered": 12,
- "notcovered": 2,
- "percent": 85.71428571428571
+ "covered": 13,
+ "notcovered": 1,
+ "percent": 92.85714285714286
}
}
}
--- /dev/null
+{
+ "data": [
+ {
+ "files": [
+ {
+ "filename": "../coverage/match_or_pattern.rs",
+ "summary": {
+ "functions": {
+ "count": 1,
+ "covered": 1,
+ "percent": 100
+ },
+ "instantiations": {
+ "count": 1,
+ "covered": 1,
+ "percent": 100
+ },
+ "lines": {
+ "count": 37,
+ "covered": 33,
+ "percent": 89.1891891891892
+ },
+ "regions": {
+ "count": 25,
+ "covered": 17,
+ "notcovered": 8,
+ "percent": 68
+ }
+ }
+ }
+ ],
+ "totals": {
+ "functions": {
+ "count": 1,
+ "covered": 1,
+ "percent": 100
+ },
+ "instantiations": {
+ "count": 1,
+ "covered": 1,
+ "percent": 100
+ },
+ "lines": {
+ "count": 37,
+ "covered": 33,
+ "percent": 89.1891891891892
+ },
+ "regions": {
+ "count": 25,
+ "covered": 17,
+ "notcovered": 8,
+ "percent": 68
+ }
+ }
+ }
+ ],
+ "type": "llvm.coverage.json.export",
+ "version": "2.0.1"
+}
},
"lines": {
"count": 23,
- "covered": 19,
- "percent": 82.6086956521739
+ "covered": 21,
+ "percent": 91.30434782608695
},
"regions": {
"count": 13,
- "covered": 11,
- "notcovered": 2,
- "percent": 84.61538461538461
+ "covered": 12,
+ "notcovered": 1,
+ "percent": 92.3076923076923
}
}
}
},
"lines": {
"count": 23,
- "covered": 19,
- "percent": 82.6086956521739
+ "covered": 21,
+ "percent": 91.30434782608695
},
"regions": {
"count": 13,
- "covered": 11,
- "notcovered": 2,
- "percent": 84.61538461538461
+ "covered": 12,
+ "notcovered": 1,
+ "percent": 92.3076923076923
}
}
}
},
"lines": {
"count": 19,
- "covered": 16,
- "percent": 84.21052631578947
+ "covered": 17,
+ "percent": 89.47368421052632
},
"regions": {
"count": 13,
- "covered": 11,
- "notcovered": 2,
- "percent": 84.61538461538461
+ "covered": 12,
+ "notcovered": 1,
+ "percent": 92.3076923076923
}
}
}
},
"lines": {
"count": 19,
- "covered": 16,
- "percent": 84.21052631578947
+ "covered": 17,
+ "percent": 89.47368421052632
},
"regions": {
"count": 13,
- "covered": 11,
- "notcovered": 2,
- "percent": 84.61538461538461
+ "covered": 12,
+ "notcovered": 1,
+ "percent": 92.3076923076923
}
}
}
8| |
9| 1|fn main() -> Result<(),u8> {
10| 1| let mut countdown = 10;
- 11| 10| while countdown > 0 {
- 12| 10| if countdown == 1 {
- 13| 0| might_fail_assert(3);
+ 11| 11| while countdown > 0 {
+ 12| 11| if countdown == 1 {
+ 13| 1| might_fail_assert(3);
14| 10| } else if countdown < 5 {
15| 3| might_fail_assert(2);
16| 6| }
- 17| 9| countdown -= 1;
+ 17| 10| countdown -= 1;
18| | }
19| 0| Ok(())
20| 0|}
--- /dev/null
+ 1| |#![feature(or_patterns)]
+ 2| |
+ 3| 1|fn main() {
+ 4| 1| // Initialize test constants in a way that cannot be determined at compile time, to ensure
+ 5| 1| // rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
+ 6| 1| // dependent conditions.
+ 7| 1| let is_true = std::env::args().len() == 1;
+ 8| 1|
+ 9| 1| let mut a: u8 = 0;
+ 10| 1| let mut b: u8 = 0;
+ 11| 1| if is_true {
+ 12| 1| a = 2;
+ 13| 1| b = 0;
+ 14| 1| }
+ ^0
+ 15| 1| match (a, b) {
+ 16| | // Or patterns generate MIR `SwitchInt` with multiple targets to the same `BasicBlock`.
+ 17| | // This test confirms a fix for Issue #79569.
+ 18| 0| (0 | 1, 2 | 3) => {}
+ 19| 1| _ => {}
+ 20| | }
+ 21| 1| if is_true {
+ 22| 1| a = 0;
+ 23| 1| b = 0;
+ 24| 1| }
+ ^0
+ 25| 1| match (a, b) {
+ 26| 0| (0 | 1, 2 | 3) => {}
+ 27| 1| _ => {}
+ 28| | }
+ 29| 1| if is_true {
+ 30| 1| a = 2;
+ 31| 1| b = 2;
+ 32| 1| }
+ ^0
+ 33| 1| match (a, b) {
+ 34| 0| (0 | 1, 2 | 3) => {}
+ 35| 1| _ => {}
+ 36| | }
+ 37| 1| if is_true {
+ 38| 1| a = 0;
+ 39| 1| b = 2;
+ 40| 1| }
+ ^0
+ 41| 1| match (a, b) {
+ 42| 1| (0 | 1, 2 | 3) => {}
+ 43| 0| _ => {}
+ 44| | }
+ 45| 1|}
+
14| |
15| 1|fn main() -> Result<(),u8> {
16| 1| let mut countdown = 10;
- 17| 10| while countdown > 0 {
- 18| 10| if countdown == 1 {
- 19| 0| let result = might_overflow(10);
- 20| 0| println!("Result: {}", result);
+ 17| 11| while countdown > 0 {
+ 18| 11| if countdown == 1 {
+ 19| 1| let result = might_overflow(10);
+ 20| 1| println!("Result: {}", result);
21| 10| } else if countdown < 5 {
22| 3| let result = might_overflow(1);
23| 3| println!("Result: {}", result);
24| 6| }
- 25| 9| countdown -= 1;
+ 25| 10| countdown -= 1;
26| | }
27| 0| Ok(())
28| 0|}
12| |
13| 1|fn main() -> Result<(), u8> {
14| 1| let mut countdown = 10;
- 15| 10| while countdown > 0 {
- 16| 10| if countdown == 1 {
- 17| 0| might_panic(true);
+ 15| 11| while countdown > 0 {
+ 16| 11| if countdown == 1 {
+ 17| 1| might_panic(true);
18| 10| } else if countdown < 5 {
19| 3| might_panic(false);
20| 6| }
- 21| 9| countdown -= 1;
+ 21| 10| countdown -= 1;
22| | }
23| 0| Ok(())
24| 0|}
6:37 -> 6:61 (count=1)
7:1 -> 7:2 (count=3)
9:1 -> 10:27 (count=1)
- 11:11 -> 11:24 (count=10)
- 12:12 -> 12:26 (count=10)
- 12:27 -> 14:10 (count=0)
+ 11:11 -> 11:24 (count=11)
+ 12:12 -> 12:26 (count=11)
+ 12:27 -> 14:10 (count=1)
14:19 -> 14:32 (count=10)
14:33 -> 16:10 (count=3)
16:10 -> 16:11 (count=6)
- 17:9 -> 17:23 (count=9)
+ 17:9 -> 17:23 (count=10)
19:5 -> 20:2 (count=0)
Segment at 4:1 (count = 4), RegionEntry
Segment at 4:41 (count = 0), Skipped
Segment at 7:2 (count = 0), Skipped
Segment at 9:1 (count = 1), RegionEntry
Segment at 10:27 (count = 0), Skipped
-Segment at 11:11 (count = 10), RegionEntry
+Segment at 11:11 (count = 11), RegionEntry
Segment at 11:24 (count = 0), Skipped
-Segment at 12:12 (count = 10), RegionEntry
+Segment at 12:12 (count = 11), RegionEntry
Segment at 12:26 (count = 0), Skipped
-Segment at 12:27 (count = 0), RegionEntry
+Segment at 12:27 (count = 1), RegionEntry
Segment at 14:10 (count = 0), Skipped
Segment at 14:19 (count = 10), RegionEntry
Segment at 14:32 (count = 0), Skipped
Segment at 14:33 (count = 3), RegionEntry
Segment at 16:10 (count = 6), RegionEntry
Segment at 16:11 (count = 0), Skipped
-Segment at 17:9 (count = 9), RegionEntry
+Segment at 17:9 (count = 10), RegionEntry
Segment at 17:23 (count = 0), Skipped
Segment at 19:5 (count = 0), RegionEntry
Segment at 20:2 (count = 0), Skipped
Counter in file 0 81:1 -> 81:2, 0
Counter in file 0 91:25 -> 91:34, 0
Counter in file 0 5:1 -> 5:25, #1
-Counter in file 0 5:25 -> 6:14, #1
-Counter in file 0 7:9 -> 7:10, #2
-Counter in file 0 9:9 -> 9:10, (#1 - #2)
-Counter in file 0 11:1 -> 11:2, (#2 + (#1 - #2))
Counter in file 0 21:1 -> 21:23, #1
+Counter in file 0 17:20 -> 17:21, #1
Counter in file 0 67:5 -> 67:23, #1
Counter in file 0 38:1 -> 38:19, #1
Counter in file 0 38:19 -> 42:12, #1
Counter in file 0 44:36 -> 44:38, (#6 + 0)
Counter in file 0 45:14 -> 45:16, #7
Counter in file 0 47:1 -> 47:2, (#5 + (#6 + #7))
+Counter in file 0 13:20 -> 13:21, #1
Counter in file 0 29:1 -> 29:22, #1
Counter in file 0 93:1 -> 101:2, #1
Counter in file 0 91:1 -> 91:25, #1
+Counter in file 0 5:25 -> 6:14, #1
+Counter in file 0 7:9 -> 7:10, #2
+Counter in file 0 9:9 -> 9:10, (#1 - #2)
+Counter in file 0 11:1 -> 11:2, (#2 + (#1 - #2))
Counter in file 0 51:5 -> 52:18, #1
Counter in file 0 53:13 -> 53:14, #2
Counter in file 0 63:13 -> 63:14, (#1 - #2)
Counter in file 0 65:5 -> 65:6, (#2 + (#1 - #2))
-Counter in file 0 17:20 -> 17:21, #1
Counter in file 0 49:1 -> 68:12, #1
Counter in file 0 69:9 -> 69:10, #2
Counter in file 0 69:14 -> 69:27, (#1 + 0)
Counter in file 0 89:1 -> 89:2, (#3 + (#2 + (#1 - (#3 + #2))))
Counter in file 0 17:1 -> 17:20, #1
Counter in file 0 66:5 -> 66:23, #1
-Counter in file 0 13:20 -> 13:21, #1
Counter in file 0 17:9 -> 17:10, #1
Counter in file 0 17:9 -> 17:10, #1
Counter in file 0 117:17 -> 117:19, #1
--- /dev/null
+Counter in file 0 3:1 -> 11:15, #1
+Counter in file 0 11:16 -> 14:6, #2
+Counter in file 0 14:6 -> 14:7, (#1 - #2)
+Counter in file 0 15:11 -> 15:17, (#2 + (#1 - #2))
+Counter in file 0 18:27 -> 18:29, #5
+Counter in file 0 19:14 -> 19:16, (#3 + #4)
+Counter in file 0 21:8 -> 21:15, ((#3 + #4) + #5)
+Counter in file 0 21:16 -> 24:6, #6
+Counter in file 0 24:6 -> 24:7, (((#3 + #4) + #5) - #6)
+Counter in file 0 25:11 -> 25:17, (#6 + (((#3 + #4) + #5) - #6))
+Counter in file 0 26:27 -> 26:29, #9
+Counter in file 0 27:14 -> 27:16, (#7 + #8)
+Counter in file 0 29:8 -> 29:15, ((#7 + #8) + #9)
+Counter in file 0 29:16 -> 32:6, #10
+Counter in file 0 32:6 -> 32:7, (((#7 + #8) + #9) - #10)
+Counter in file 0 33:11 -> 33:17, (#10 + (((#7 + #8) + #9) - #10))
+Counter in file 0 34:27 -> 34:29, #13
+Counter in file 0 35:14 -> 35:16, (#11 + #12)
+Counter in file 0 37:8 -> 37:15, ((#11 + #12) + #13)
+Counter in file 0 37:16 -> 40:6, #14
+Counter in file 0 40:6 -> 40:7, (((#11 + #12) + #13) - #14)
+Counter in file 0 41:11 -> 41:17, (#14 + (((#11 + #12) + #13) - #14))
+Counter in file 0 42:27 -> 42:29, #17
+Counter in file 0 43:14 -> 43:16, (#15 + #16)
+Counter in file 0 45:1 -> 45:2, ((#15 + #16) + #17)
+Emitting segments for file: ../coverage/match_or_pattern.rs
+Combined regions:
+ 3:1 -> 11:15 (count=1)
+ 11:16 -> 14:6 (count=1)
+ 14:6 -> 14:7 (count=0)
+ 15:11 -> 15:17 (count=1)
+ 18:27 -> 18:29 (count=0)
+ 19:14 -> 19:16 (count=1)
+ 21:8 -> 21:15 (count=1)
+ 21:16 -> 24:6 (count=1)
+ 24:6 -> 24:7 (count=0)
+ 25:11 -> 25:17 (count=1)
+ 26:27 -> 26:29 (count=0)
+ 27:14 -> 27:16 (count=1)
+ 29:8 -> 29:15 (count=1)
+ 29:16 -> 32:6 (count=1)
+ 32:6 -> 32:7 (count=0)
+ 33:11 -> 33:17 (count=1)
+ 34:27 -> 34:29 (count=0)
+ 35:14 -> 35:16 (count=1)
+ 37:8 -> 37:15 (count=1)
+ 37:16 -> 40:6 (count=1)
+ 40:6 -> 40:7 (count=0)
+ 41:11 -> 41:17 (count=1)
+ 42:27 -> 42:29 (count=1)
+ 43:14 -> 43:16 (count=0)
+ 45:1 -> 45:2 (count=1)
+Segment at 3:1 (count = 1), RegionEntry
+Segment at 11:15 (count = 0), Skipped
+Segment at 11:16 (count = 1), RegionEntry
+Segment at 14:6 (count = 0), RegionEntry
+Segment at 14:7 (count = 0), Skipped
+Segment at 15:11 (count = 1), RegionEntry
+Segment at 15:17 (count = 0), Skipped
+Segment at 18:27 (count = 0), RegionEntry
+Segment at 18:29 (count = 0), Skipped
+Segment at 19:14 (count = 1), RegionEntry
+Segment at 19:16 (count = 0), Skipped
+Segment at 21:8 (count = 1), RegionEntry
+Segment at 21:15 (count = 0), Skipped
+Segment at 21:16 (count = 1), RegionEntry
+Segment at 24:6 (count = 0), RegionEntry
+Segment at 24:7 (count = 0), Skipped
+Segment at 25:11 (count = 1), RegionEntry
+Segment at 25:17 (count = 0), Skipped
+Segment at 26:27 (count = 0), RegionEntry
+Segment at 26:29 (count = 0), Skipped
+Segment at 27:14 (count = 1), RegionEntry
+Segment at 27:16 (count = 0), Skipped
+Segment at 29:8 (count = 1), RegionEntry
+Segment at 29:15 (count = 0), Skipped
+Segment at 29:16 (count = 1), RegionEntry
+Segment at 32:6 (count = 0), RegionEntry
+Segment at 32:7 (count = 0), Skipped
+Segment at 33:11 (count = 1), RegionEntry
+Segment at 33:17 (count = 0), Skipped
+Segment at 34:27 (count = 0), RegionEntry
+Segment at 34:29 (count = 0), Skipped
+Segment at 35:14 (count = 1), RegionEntry
+Segment at 35:16 (count = 0), Skipped
+Segment at 37:8 (count = 1), RegionEntry
+Segment at 37:15 (count = 0), Skipped
+Segment at 37:16 (count = 1), RegionEntry
+Segment at 40:6 (count = 0), RegionEntry
+Segment at 40:7 (count = 0), Skipped
+Segment at 41:11 (count = 1), RegionEntry
+Segment at 41:17 (count = 0), Skipped
+Segment at 42:27 (count = 1), RegionEntry
+Segment at 42:29 (count = 0), Skipped
+Segment at 43:14 (count = 0), RegionEntry
+Segment at 43:16 (count = 0), Skipped
+Segment at 45:1 (count = 1), RegionEntry
+Segment at 45:2 (count = 0), Skipped
7:6 -> 7:7 (count=3)
8:9 -> 13:2 (count=4)
15:1 -> 16:27 (count=1)
- 17:11 -> 17:24 (count=10)
- 18:12 -> 18:26 (count=10)
- 18:27 -> 21:10 (count=0)
+ 17:11 -> 17:24 (count=11)
+ 18:12 -> 18:26 (count=11)
+ 18:27 -> 21:10 (count=1)
21:19 -> 21:32 (count=10)
21:33 -> 24:10 (count=3)
24:10 -> 24:11 (count=6)
- 25:9 -> 25:23 (count=9)
+ 25:9 -> 25:23 (count=10)
27:5 -> 28:2 (count=0)
Segment at 4:1 (count = 4), RegionEntry
Segment at 5:18 (count = 0), Skipped
Segment at 13:2 (count = 0), Skipped
Segment at 15:1 (count = 1), RegionEntry
Segment at 16:27 (count = 0), Skipped
-Segment at 17:11 (count = 10), RegionEntry
+Segment at 17:11 (count = 11), RegionEntry
Segment at 17:24 (count = 0), Skipped
-Segment at 18:12 (count = 10), RegionEntry
+Segment at 18:12 (count = 11), RegionEntry
Segment at 18:26 (count = 0), Skipped
-Segment at 18:27 (count = 0), RegionEntry
+Segment at 18:27 (count = 1), RegionEntry
Segment at 21:10 (count = 0), Skipped
Segment at 21:19 (count = 10), RegionEntry
Segment at 21:32 (count = 0), Skipped
Segment at 21:33 (count = 3), RegionEntry
Segment at 24:10 (count = 6), RegionEntry
Segment at 24:11 (count = 0), Skipped
-Segment at 25:9 (count = 9), RegionEntry
+Segment at 25:9 (count = 10), RegionEntry
Segment at 25:23 (count = 0), Skipped
Segment at 27:5 (count = 0), RegionEntry
Segment at 28:2 (count = 0), Skipped
6:9 -> 7:26 (count=1)
8:12 -> 11:2 (count=3)
13:1 -> 14:27 (count=1)
- 15:11 -> 15:24 (count=10)
- 16:12 -> 16:26 (count=10)
- 16:27 -> 18:10 (count=0)
+ 15:11 -> 15:24 (count=11)
+ 16:12 -> 16:26 (count=11)
+ 16:27 -> 18:10 (count=1)
18:19 -> 18:32 (count=10)
18:33 -> 20:10 (count=3)
20:10 -> 20:11 (count=6)
- 21:9 -> 21:23 (count=9)
+ 21:9 -> 21:23 (count=10)
23:5 -> 24:2 (count=0)
Segment at 4:1 (count = 4), RegionEntry
Segment at 4:36 (count = 0), Skipped
Segment at 11:2 (count = 0), Skipped
Segment at 13:1 (count = 1), RegionEntry
Segment at 14:27 (count = 0), Skipped
-Segment at 15:11 (count = 10), RegionEntry
+Segment at 15:11 (count = 11), RegionEntry
Segment at 15:24 (count = 0), Skipped
-Segment at 16:12 (count = 10), RegionEntry
+Segment at 16:12 (count = 11), RegionEntry
Segment at 16:26 (count = 0), Skipped
-Segment at 16:27 (count = 0), RegionEntry
+Segment at 16:27 (count = 1), RegionEntry
Segment at 18:10 (count = 0), Skipped
Segment at 18:19 (count = 10), RegionEntry
Segment at 18:32 (count = 0), Skipped
Segment at 18:33 (count = 3), RegionEntry
Segment at 20:10 (count = 6), RegionEntry
Segment at 20:11 (count = 0), Skipped
-Segment at 21:9 (count = 9), RegionEntry
+Segment at 21:9 (count = 10), RegionEntry
Segment at 21:23 (count = 0), Skipped
Segment at 23:5 (count = 0), RegionEntry
Segment at 24:2 (count = 0), Skipped
--- /dev/null
+<!DOCTYPE html>
+<!--
+
+Preview this file as rendered HTML from the github source at:
+https://htmlpreview.github.io/?https://github.com/rust-lang/rust/blob/master/src/test/run-make-fulldeps/coverage-spanview/expected_mir_dump.match_or_pattern/match_or_pattern.main.-------.InstrumentCoverage.0.html
+
+For revisions in Pull Requests (PR):
+ * Replace "rust-lang" with the github PR author
+ * Replace "master" with the PR branch name
+
+-->
+<html>
+<head>
+<title>match_or_pattern.main - Coverage Spans</title>
+<style>
+ .line {
+ counter-increment: line;
+ }
+ .line:before {
+ content: counter(line) ": ";
+ font-family: Menlo, Monaco, monospace;
+ font-style: italic;
+ width: 3.8em;
+ display: inline-block;
+ text-align: right;
+ filter: opacity(50%);
+ -webkit-user-select: none;
+ }
+ .code {
+ color: #dddddd;
+ background-color: #222222;
+ font-family: Menlo, Monaco, monospace;
+ line-height: 1.4em;
+ border-bottom: 2px solid #222222;
+ white-space: pre;
+ display: inline-block;
+ }
+ .odd {
+ background-color: #55bbff;
+ color: #223311;
+ }
+ .even {
+ background-color: #ee7756;
+ color: #551133;
+ }
+ .code {
+ --index: calc(var(--layer) - 1);
+ padding-top: calc(var(--index) * 0.15em);
+ filter:
+ hue-rotate(calc(var(--index) * 25deg))
+ saturate(calc(100% - (var(--index) * 2%)))
+ brightness(calc(100% - (var(--index) * 1.5%)));
+ }
+ .annotation {
+ color: #4444ff;
+ font-family: monospace;
+ font-style: italic;
+ display: none;
+ -webkit-user-select: none;
+ }
+ body:active .annotation {
+ /* requires holding mouse down anywhere on the page */
+ display: inline-block;
+ }
+ span:hover .annotation {
+ /* requires hover over a span ONLY on its first line */
+ display: inline-block;
+ }
+</style>
+</head>
+<body>
+<div class="code" style="counter-reset: line 2"><span class="line"><span><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"><span class="annotation">@0,1,2,3⦊</span>fn main() {</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> // Initialize test constants in a way that cannot be determined at compile time, to ensure</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> // rustc and LLVM cannot optimize out statements (or coverage counters) downstream from</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> // dependent conditions.</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> let is_true = std::env::args().len() == 1;</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"></span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> let mut a: u8 = 0;</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> let mut b: u8 = 0;</span></span>
+<span class="line"><span class="code even" style="--layer: 1" title="7:19-7:35: @0.Call: _4 = args() -> [return: bb1, unwind: bb41]
+7:19-7:35: @1[0]: _3 = &_4
+7:19-7:41: @1.Call: _2 = <Args as ExactSizeIterator>::len(move _3) -> [return: bb2, unwind: bb40]
+7:19-7:46: @2[1]: _1 = Eq(move _2, const 1_usize)
+7:9-7:16: @2[3]: FakeRead(ForLet, _1)
+9:21-9:22: @3[2]: _5 = const 0_u8
+9:9-9:14: @3[3]: FakeRead(ForLet, _5)
+9:16-9:18: @3[4]: AscribeUserType(_5, o, UserTypeProjection { base: UserType(1), projs: [] })
+10:21-10:22: @3[6]: _6 = const 0_u8
+10:9-10:14: @3[7]: FakeRead(ForLet, _6)
+10:16-10:18: @3[8]: AscribeUserType(_6, o, UserTypeProjection { base: UserType(3), projs: [] })
+11:8-11:15: @3[11]: _8 = _1
+11:8-11:15: @3[12]: FakeRead(ForMatchedPlace, _8)"> if is_true<span class="annotation">⦉@0,1,2,3</span></span></span><span class="code" style="--layer: 0"> </span><span><span class="code odd" style="--layer: 1" title="12:9-12:14: @6[0]: _5 = const 2_u8
+13:9-13:14: @6[1]: _6 = const 0_u8
+11:16-14:6: @6[2]: _7 = const ()"><span class="annotation">@4,6⦊</span>{</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="12:9-12:14: @6[0]: _5 = const 2_u8
+13:9-13:14: @6[1]: _6 = const 0_u8
+11:16-14:6: @6[2]: _7 = const ()"> a = 2;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="12:9-12:14: @6[0]: _5 = const 2_u8
+13:9-13:14: @6[1]: _6 = const 0_u8
+11:16-14:6: @6[2]: _7 = const ()"> b = 0;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="12:9-12:14: @6[0]: _5 = const 2_u8
+13:9-13:14: @6[1]: _6 = const 0_u8
+11:16-14:6: @6[2]: _7 = const ()"> }<span class="annotation">⦉@4,6</span></span></span><span><span class="code even" style="--layer: 1" title="14:6-14:6: @5[0]: _7 = const ()"><span class="annotation">@5⦊</span>‸<span class="annotation">⦉@5</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> match </span><span><span class="code odd" style="--layer: 1" title="15:12-15:13: @7[5]: _11 = _5
+15:15-15:16: @7[7]: _12 = _6
+15:11-15:17: @7[8]: _10 = (move _11, move _12)
+15:11-15:17: @7[11]: FakeRead(ForMatchedPlace, _10)"><span class="annotation">@7⦊</span>(a, b)<span class="annotation">⦉@7</span></span></span><span class="code" style="--layer: 0"> {</span></span>
+<span class="line"><span class="code" style="--layer: 0"> // Or patterns generate MIR `SwitchInt` with multiple targets to the same `BasicBlock`.</span></span>
+<span class="line"><span class="code" style="--layer: 0"> // This test confirms a fix for Issue #79569.</span></span>
+<span class="line"><span class="code" style="--layer: 0"> (0 | 1, 2 | 3) => </span><span><span class="code even" style="--layer: 1" title="18:27-18:29: @11[0]: _9 = const ()"><span class="annotation">@10,11⦊</span>{}<span class="annotation">⦉@10,11</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> _ => </span><span><span class="code odd" style="--layer: 1" title="19:14-19:16: @8[0]: _9 = const ()"><span class="annotation">@8⦊</span>{}<span class="annotation">⦉@8</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> }</span></span>
+<span class="line"><span class="code" style="--layer: 0"> if </span><span><span class="code even" style="--layer: 1" title="21:8-21:15: @12[4]: _14 = _1
+21:8-21:15: @12[5]: FakeRead(ForMatchedPlace, _14)"><span class="annotation">@12⦊</span>is_true<span class="annotation">⦉@12</span></span></span><span class="code" style="--layer: 0"> </span><span><span class="code odd" style="--layer: 1" title="22:9-22:14: @15[0]: _5 = const 0_u8
+23:9-23:14: @15[1]: _6 = const 0_u8
+21:16-24:6: @15[2]: _13 = const ()"><span class="annotation">@13,15⦊</span>{</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="22:9-22:14: @15[0]: _5 = const 0_u8
+23:9-23:14: @15[1]: _6 = const 0_u8
+21:16-24:6: @15[2]: _13 = const ()"> a = 0;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="22:9-22:14: @15[0]: _5 = const 0_u8
+23:9-23:14: @15[1]: _6 = const 0_u8
+21:16-24:6: @15[2]: _13 = const ()"> b = 0;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="22:9-22:14: @15[0]: _5 = const 0_u8
+23:9-23:14: @15[1]: _6 = const 0_u8
+21:16-24:6: @15[2]: _13 = const ()"> }<span class="annotation">⦉@13,15</span></span></span><span><span class="code even" style="--layer: 1" title="24:6-24:6: @14[0]: _13 = const ()"><span class="annotation">@14⦊</span>‸<span class="annotation">⦉@14</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> match </span><span><span class="code odd" style="--layer: 1" title="25:12-25:13: @16[5]: _17 = _5
+25:15-25:16: @16[7]: _18 = _6
+25:11-25:17: @16[8]: _16 = (move _17, move _18)
+25:11-25:17: @16[11]: FakeRead(ForMatchedPlace, _16)"><span class="annotation">@16⦊</span>(a, b)<span class="annotation">⦉@16</span></span></span><span class="code" style="--layer: 0"> {</span></span>
+<span class="line"><span class="code" style="--layer: 0"> (0 | 1, 2 | 3) => </span><span><span class="code even" style="--layer: 1" title="26:27-26:29: @20[0]: _15 = const ()"><span class="annotation">@19,20⦊</span>{}<span class="annotation">⦉@19,20</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> _ => </span><span><span class="code odd" style="--layer: 1" title="27:14-27:16: @17[0]: _15 = const ()"><span class="annotation">@17⦊</span>{}<span class="annotation">⦉@17</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> }</span></span>
+<span class="line"><span class="code" style="--layer: 0"> if </span><span><span class="code even" style="--layer: 1" title="29:8-29:15: @21[4]: _20 = _1
+29:8-29:15: @21[5]: FakeRead(ForMatchedPlace, _20)"><span class="annotation">@21⦊</span>is_true<span class="annotation">⦉@21</span></span></span><span class="code" style="--layer: 0"> </span><span><span class="code odd" style="--layer: 1" title="30:9-30:14: @24[0]: _5 = const 2_u8
+31:9-31:14: @24[1]: _6 = const 2_u8
+29:16-32:6: @24[2]: _19 = const ()"><span class="annotation">@22,24⦊</span>{</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="30:9-30:14: @24[0]: _5 = const 2_u8
+31:9-31:14: @24[1]: _6 = const 2_u8
+29:16-32:6: @24[2]: _19 = const ()"> a = 2;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="30:9-30:14: @24[0]: _5 = const 2_u8
+31:9-31:14: @24[1]: _6 = const 2_u8
+29:16-32:6: @24[2]: _19 = const ()"> b = 2;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="30:9-30:14: @24[0]: _5 = const 2_u8
+31:9-31:14: @24[1]: _6 = const 2_u8
+29:16-32:6: @24[2]: _19 = const ()"> }<span class="annotation">⦉@22,24</span></span></span><span><span class="code even" style="--layer: 1" title="32:6-32:6: @23[0]: _19 = const ()"><span class="annotation">@23⦊</span>‸<span class="annotation">⦉@23</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> match </span><span><span class="code odd" style="--layer: 1" title="33:12-33:13: @25[5]: _23 = _5
+33:15-33:16: @25[7]: _24 = _6
+33:11-33:17: @25[8]: _22 = (move _23, move _24)
+33:11-33:17: @25[11]: FakeRead(ForMatchedPlace, _22)"><span class="annotation">@25⦊</span>(a, b)<span class="annotation">⦉@25</span></span></span><span class="code" style="--layer: 0"> {</span></span>
+<span class="line"><span class="code" style="--layer: 0"> (0 | 1, 2 | 3) => </span><span><span class="code even" style="--layer: 1" title="34:27-34:29: @29[0]: _21 = const ()"><span class="annotation">@28,29⦊</span>{}<span class="annotation">⦉@28,29</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> _ => </span><span><span class="code odd" style="--layer: 1" title="35:14-35:16: @26[0]: _21 = const ()"><span class="annotation">@26⦊</span>{}<span class="annotation">⦉@26</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> }</span></span>
+<span class="line"><span class="code" style="--layer: 0"> if </span><span><span class="code even" style="--layer: 1" title="37:8-37:15: @30[4]: _26 = _1
+37:8-37:15: @30[5]: FakeRead(ForMatchedPlace, _26)"><span class="annotation">@30⦊</span>is_true<span class="annotation">⦉@30</span></span></span><span class="code" style="--layer: 0"> </span><span><span class="code odd" style="--layer: 1" title="38:9-38:14: @33[0]: _5 = const 0_u8
+39:9-39:14: @33[1]: _6 = const 2_u8
+37:16-40:6: @33[2]: _25 = const ()"><span class="annotation">@31,33⦊</span>{</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="38:9-38:14: @33[0]: _5 = const 0_u8
+39:9-39:14: @33[1]: _6 = const 2_u8
+37:16-40:6: @33[2]: _25 = const ()"> a = 0;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="38:9-38:14: @33[0]: _5 = const 0_u8
+39:9-39:14: @33[1]: _6 = const 2_u8
+37:16-40:6: @33[2]: _25 = const ()"> b = 2;</span></span>
+<span class="line"><span class="code odd" style="--layer: 1" title="38:9-38:14: @33[0]: _5 = const 0_u8
+39:9-39:14: @33[1]: _6 = const 2_u8
+37:16-40:6: @33[2]: _25 = const ()"> }<span class="annotation">⦉@31,33</span></span></span><span><span class="code even" style="--layer: 1" title="40:6-40:6: @32[0]: _25 = const ()"><span class="annotation">@32⦊</span>‸<span class="annotation">⦉@32</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> match </span><span><span class="code odd" style="--layer: 1" title="41:12-41:13: @34[4]: _28 = _5
+41:15-41:16: @34[6]: _29 = _6
+41:11-41:17: @34[7]: _27 = (move _28, move _29)
+41:11-41:17: @34[10]: FakeRead(ForMatchedPlace, _27)"><span class="annotation">@34⦊</span>(a, b)<span class="annotation">⦉@34</span></span></span><span class="code" style="--layer: 0"> {</span></span>
+<span class="line"><span class="code" style="--layer: 0"> (0 | 1, 2 | 3) => </span><span><span class="code even" style="--layer: 1" title="42:27-42:29: @38[0]: _0 = const ()"><span class="annotation">@37,38⦊</span>{}<span class="annotation">⦉@37,38</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> _ => </span><span><span class="code odd" style="--layer: 1" title="43:14-43:16: @35[0]: _0 = const ()"><span class="annotation">@35⦊</span>{}<span class="annotation">⦉@35</span></span></span><span class="code" style="--layer: 0"></span></span>
+<span class="line"><span class="code" style="--layer: 0"> }</span></span>
+<span class="line"><span class="code" style="--layer: 0">}</span><span><span class="code even" style="--layer: 1" title="45:2-45:2: @39.Return: return"><span class="annotation">@39⦊</span>‸<span class="annotation">⦉@39</span></span></span></span></div>
+</body>
+</html>
--- /dev/null
+#![feature(or_patterns)]
+
+fn main() {
+ // Initialize test constants in a way that cannot be determined at compile time, to ensure
+ // rustc and LLVM cannot optimize out statements (or coverage counters) downstream from
+ // dependent conditions.
+ let is_true = std::env::args().len() == 1;
+
+ let mut a: u8 = 0;
+ let mut b: u8 = 0;
+ if is_true {
+ a = 2;
+ b = 0;
+ }
+ match (a, b) {
+ // Or patterns generate MIR `SwitchInt` with multiple targets to the same `BasicBlock`.
+ // This test confirms a fix for Issue #79569.
+ (0 | 1, 2 | 3) => {}
+ _ => {}
+ }
+ if is_true {
+ a = 0;
+ b = 0;
+ }
+ match (a, b) {
+ (0 | 1, 2 | 3) => {}
+ _ => {}
+ }
+ if is_true {
+ a = 2;
+ b = 2;
+ }
+ match (a, b) {
+ (0 | 1, 2 | 3) => {}
+ _ => {}
+ }
+ if is_true {
+ a = 0;
+ b = 2;
+ }
+ match (a, b) {
+ (0 | 1, 2 | 3) => {}
+ _ => {}
+ }
+}
--- /dev/null
+-include ../tools.mk
+
+# only-linux
+
+all:
+ $(RUSTC) -Z split-dwarf=split foo.rs
+ rm $(TMPDIR)/foo.dwp
+ rm $(TMPDIR)/$(call BIN,foo)
--- /dev/null
+fn main() {}
// @has doc_cfg/unix_only/trait.ArmOnly.html \
// '//*[@id="main"]/*[@class="item-info"]/*[@class="stab portability"]' \
// 'This is supported on Unix and ARM only.'
- // @count - '//*[@class="stab portability"]' 2
+ // @count - '//*[@class="stab portability"]' 1
#[doc(cfg(target_arch = "arm"))]
pub trait ArmOnly {
fn unix_and_arm_only_function();
}
+ #[doc(cfg(target_arch = "arm"))]
impl ArmOnly for super::Portable {
fn unix_and_arm_only_function() {}
}
--- /dev/null
+#![crate_name = "foo"]
+// @has foo/enum.E1.html '//a/@href' '../foo/enum.E1.html#variant.A'
+
+/// [Self::A::b]
+pub enum E1 {
+ A { b: usize }
+}
+
+// @has foo/enum.E2.html '//a/@href' '../foo/enum.E2.html#variant.A'
+
+/// [Self::A::b]
+pub enum E2 {
+ A { b: usize }
+}
--- /dev/null
+#![feature(doc_cfg)]
+
+// @has 'issue_79201/trait.Foo.html'
+// @count - '//*[@class="stab portability"]' 6
+// @matches - '//*[@class="stab portability"]' 'crate feature foo-root'
+// @matches - '//*[@class="stab portability"]' 'crate feature foo-public-mod'
+// @matches - '//*[@class="stab portability"]' 'crate feature foo-private-mod'
+// @matches - '//*[@class="stab portability"]' 'crate feature foo-fn'
+// @matches - '//*[@class="stab portability"]' 'crate feature foo-method'
+
+pub trait Foo {}
+
+#[doc(cfg(feature = "foo-root"))]
+impl crate::Foo for usize {}
+
+#[doc(cfg(feature = "foo-public-mod"))]
+pub mod public {
+ impl crate::Foo for u8 {}
+}
+
+#[doc(cfg(feature = "foo-private-mod"))]
+mod private {
+ impl crate::Foo for u16 {}
+}
+
+#[doc(cfg(feature = "foo-const"))]
+const _: () = {
+ impl crate::Foo for u32 {}
+};
+
+#[doc(cfg(feature = "foo-fn"))]
+fn __() {
+ impl crate::Foo for u64 {}
+}
+
+#[doc(cfg(feature = "foo-method"))]
+impl dyn Foo {
+ fn __() {
+ impl crate::Foo for u128 {}
+ }
+}
// @has rustc_deprecated_future/index.html '//*[@class="stab deprecated"]' \
// 'Deprecation planned'
-// @has rustc_deprecated_future/struct.S.html '//*[@class="stab deprecated"]' \
+// @has rustc_deprecated_future/struct.S1.html '//*[@class="stab deprecated"]' \
// 'Deprecating in 99.99.99: effectively never'
#[rustc_deprecated(since = "99.99.99", reason = "effectively never")]
#[stable(feature = "rustc_deprecated-future-test", since = "1.0.0")]
-pub struct S;
+pub struct S1;
+
+// @has rustc_deprecated_future/index.html '//*[@class="stab deprecated"]' \
+// 'Deprecation planned'
+// @has rustc_deprecated_future/struct.S2.html '//*[@class="stab deprecated"]' \
+// 'Deprecating in a future Rust version: literally never'
+#[rustc_deprecated(since = "TBD", reason = "literally never")]
+#[stable(feature = "rustc_deprecated-future-test", since = "1.0.0")]
+pub struct S2;
fn foo2<I : for<'x> Foo<&'x isize>>(
x: <I as Foo<&isize>>::A)
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
{
// This case is illegal because we have to instantiate `'x`, and
// we don't know what region to instantiate it with.
fn foo2<I : for<'x> Foo<&'x isize>>(
x: I::A)
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
{
// This case is illegal because we have to instantiate `'x`, and
// we don't know what region to instantiate it with.
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-fn.rs:13:8
|
LL | x: I::A)
error: aborting due to previous error
+For more information about this error, try `rustc --explain E0212`.
struct SomeStruct<I: for<'x> Foo<&'x isize>> {
field: I::A
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
}
enum SomeEnum<'b, I: for<'a> Foo<&'a isize>> {
TupleVariant(I::A),
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
StructVariant { field: I::A },
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
OkVariant(&'b usize),
}
struct Why<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k, 'n, 'o, 'p, 'q, 'r, 's, 't, 'u, 'v, 'w, 'x,
'y, 'z, 'aa, I: for<'l, 'm> Foo<&'l &'m isize>> {
field: I::A,
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
}
pub fn main() {}
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-struct.rs:11:12
|
LL | field: I::A
LL | field: <I as Foo<&'a isize>>::A
|
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-struct.rs:16:18
|
LL | TupleVariant(I::A),
LL | TupleVariant(<I as Foo<&'c isize>>::A),
|
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-struct.rs:18:28
|
LL | StructVariant { field: I::A },
LL | StructVariant { field: <I as Foo<&'c isize>>::A },
|
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-struct.rs:35:12
|
LL | field: I::A,
error: aborting due to 4 previous errors
+For more information about this error, try `rustc --explain E0212`.
trait SomeTrait<I : for<'x> Foo<&'x isize>> {
fn some_method(&self, arg: <I as Foo<&isize>>::A);
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
}
trait AnotherTrait<I : for<'x> Foo<&'x isize>> {
impl<X: for<'a> Banana<'a>> Peach<X> {
fn mango(&self) -> <X as Banana<'_>>::Assoc {
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
Default::default()
}
}
trait SomeTrait<I : for<'x> Foo<&'x isize>> {
fn some_method(&self, arg: I::A);
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
}
trait AnotherTrait<I : for<'x> Foo<&'x isize>> {
impl<X: for<'a> Banana<'a>> Peach<X> {
fn mango(&self) -> X::Assoc {
- //~^ ERROR cannot extract an associated type from a higher-ranked trait bound in this context
+ //~^ ERROR cannot use the associated type of a trait with uninferred generic parameters
Default::default()
}
}
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-trait-method.rs:13:32
|
LL | fn some_method(&self, arg: I::A);
| ^^^^ help: use a fully qualified path with inferred lifetimes: `<I as Foo<&isize>>::A`
-error[E0212]: cannot extract an associated type from a higher-ranked trait bound in this context
+error[E0212]: cannot use the associated type of a trait with uninferred generic parameters
--> $DIR/associated-types-project-from-hrtb-in-trait-method.rs:32:24
|
LL | fn mango(&self) -> X::Assoc {
error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0212`.
--- /dev/null
+// Test that arrays are completely captured by closures by relying on the borrow check diagnostics
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+
+fn arrays_1() {
+ let mut arr = [1, 2, 3, 4, 5];
+
+ let mut c = || {
+ arr[0] += 10;
+ };
+
+ // c will capture `arr` completely, therefore another index into the
+ // array can't be modified here
+ arr[1] += 10;
+ //~^ ERROR: cannot use `arr` because it was mutably borrowed
+ //~| ERROR: cannot use `arr[_]` because it was mutably borrowed
+ c();
+}
+
+fn arrays_2() {
+ let mut arr = [1, 2, 3, 4, 5];
+
+ let c = || {
+ println!("{:#?}", &arr[3..4]);
+ };
+
+ // c will capture `arr` completely, therefore another index into the
+ // array can't be modified here
+ arr[1] += 10;
+ //~^ ERROR: cannot assign to `arr[_]` because it is borrowed
+ c();
+}
+
+fn arrays_3() {
+ let mut arr = [1, 2, 3, 4, 5];
+
+ let c = || {
+ println!("{}", arr[3]);
+ };
+
+ // c will capture `arr` completely, therefore another index into the
+ // array can't be modified here
+ arr[1] += 10;
+ //~^ ERROR: cannot assign to `arr[_]` because it is borrowed
+ c();
+}
+
+fn arrays_4() {
+ let mut arr = [1, 2, 3, 4, 5];
+
+ let mut c = || {
+ arr[1] += 10;
+ };
+
+ // c will capture `arr` completely, therefore we cannot borrow another index
+ // into the array.
+ println!("{}", arr[3]);
+ //~^ ERROR: cannot use `arr` because it was mutably borrowed
+ //~| ERROR: cannot borrow `arr[_]` as immutable because it is also borrowed as mutable
+
+ c();
+}
+
+fn arrays_5() {
+ let mut arr = [1, 2, 3, 4, 5];
+
+ let mut c = || {
+ arr[1] += 10;
+ };
+
+    // c will capture `arr` completely, therefore we cannot borrow other indices
+ // into the array.
+ println!("{:#?}", &arr[3..2]);
+ //~^ ERROR: cannot borrow `arr` as immutable because it is also borrowed as mutable
+
+ c();
+}
+
+fn main() {
+ arrays_1();
+ arrays_2();
+ arrays_3();
+ arrays_4();
+ arrays_5();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/arrays.rs:3:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+error[E0503]: cannot use `arr` because it was mutably borrowed
+ --> $DIR/arrays.rs:15:5
+ |
+LL | let mut c = || {
+ | -- borrow of `arr` occurs here
+LL | arr[0] += 10;
+ | --- borrow occurs due to use of `arr` in closure
+...
+LL | arr[1] += 10;
+ | ^^^^^^ use of borrowed `arr`
+...
+LL | c();
+ | - borrow later used here
+
+error[E0503]: cannot use `arr[_]` because it was mutably borrowed
+ --> $DIR/arrays.rs:15:5
+ |
+LL | let mut c = || {
+ | -- borrow of `arr` occurs here
+LL | arr[0] += 10;
+ | --- borrow occurs due to use of `arr` in closure
+...
+LL | arr[1] += 10;
+ | ^^^^^^^^^^^^ use of borrowed `arr`
+...
+LL | c();
+ | - borrow later used here
+
+error[E0506]: cannot assign to `arr[_]` because it is borrowed
+ --> $DIR/arrays.rs:30:5
+ |
+LL | let c = || {
+ | -- borrow of `arr[_]` occurs here
+LL | println!("{:#?}", &arr[3..4]);
+ | --- borrow occurs due to use in closure
+...
+LL | arr[1] += 10;
+ | ^^^^^^^^^^^^ assignment to borrowed `arr[_]` occurs here
+LL |
+LL | c();
+ | - borrow later used here
+
+error[E0506]: cannot assign to `arr[_]` because it is borrowed
+ --> $DIR/arrays.rs:44:5
+ |
+LL | let c = || {
+ | -- borrow of `arr[_]` occurs here
+LL | println!("{}", arr[3]);
+ | --- borrow occurs due to use in closure
+...
+LL | arr[1] += 10;
+ | ^^^^^^^^^^^^ assignment to borrowed `arr[_]` occurs here
+LL |
+LL | c();
+ | - borrow later used here
+
+error[E0503]: cannot use `arr` because it was mutably borrowed
+ --> $DIR/arrays.rs:58:20
+ |
+LL | let mut c = || {
+ | -- borrow of `arr` occurs here
+LL | arr[1] += 10;
+ | --- borrow occurs due to use of `arr` in closure
+...
+LL | println!("{}", arr[3]);
+ | ^^^^^^ use of borrowed `arr`
+...
+LL | c();
+ | - borrow later used here
+
+error[E0502]: cannot borrow `arr[_]` as immutable because it is also borrowed as mutable
+ --> $DIR/arrays.rs:58:20
+ |
+LL | let mut c = || {
+ | -- mutable borrow occurs here
+LL | arr[1] += 10;
+ | --- first borrow occurs due to use of `arr` in closure
+...
+LL | println!("{}", arr[3]);
+ | ^^^^^^ immutable borrow occurs here
+...
+LL | c();
+ | - mutable borrow later used here
+
+error[E0502]: cannot borrow `arr` as immutable because it is also borrowed as mutable
+ --> $DIR/arrays.rs:74:24
+ |
+LL | let mut c = || {
+ | -- mutable borrow occurs here
+LL | arr[1] += 10;
+ | --- first borrow occurs due to use of `arr` in closure
+...
+LL | println!("{:#?}", &arr[3..2]);
+ | ^^^ immutable borrow occurs here
+...
+LL | c();
+ | - mutable borrow later used here
+
+error: aborting due to 7 previous errors; 1 warning emitted
+
+Some errors have detailed explanations: E0502, E0503, E0506.
+For more information about an error, try `rustc --explain E0502`.
--- /dev/null
+// Test the borrow checker with precise captures when using boxes
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+
+struct MetaData { x: String, name: String }
+struct Data { m: MetaData }
+struct BoxedData(Box<Data>);
+struct EvenMoreBoxedData(Box<BoxedData>);
+
+// Check diagnostics when the same path is mutated both inside and outside the closure
+fn box_1() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let mut c = || {
+ e.0.0.m.x = format!("not-x");
+ };
+
+ e.0.0.m.x = format!("not-x");
+ //~^ ERROR: cannot assign to `e.0.0.m.x` because it is borrowed
+ c();
+}
+
+// Check diagnostics when a path is mutated inside a closure while attempting to read it outside
+// the closure.
+fn box_2() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let mut c = || {
+ e.0.0.m.x = format!("not-x");
+ };
+
+ println!("{}", e.0.0.m.x);
+ //~^ ERROR: cannot borrow `e.0.0.m.x` as immutable because it is also borrowed as mutable
+ c();
+}
+
+// Check diagnostics when a path is read inside a closure while attempting to mutate it outside
+// the closure.
+fn box_3() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let c = || {
+ println!("{}", e.0.0.m.x);
+ };
+
+ e.0.0.m.x = format!("not-x");
+ //~^ ERROR: cannot assign to `e.0.0.m.x` because it is borrowed
+ c();
+}
+
+fn main() {
+ box_1();
+ box_2();
+ box_3();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/box.rs:3:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+error[E0506]: cannot assign to `e.0.0.m.x` because it is borrowed
+ --> $DIR/box.rs:22:5
+ |
+LL | let mut c = || {
+ | -- borrow of `e.0.0.m.x` occurs here
+LL | e.0.0.m.x = format!("not-x");
+ | - borrow occurs due to use in closure
+...
+LL | e.0.0.m.x = format!("not-x");
+ | ^^^^^^^^^ assignment to borrowed `e.0.0.m.x` occurs here
+LL |
+LL | c();
+ | - borrow later used here
+
+error[E0502]: cannot borrow `e.0.0.m.x` as immutable because it is also borrowed as mutable
+ --> $DIR/box.rs:39:20
+ |
+LL | let mut c = || {
+ | -- mutable borrow occurs here
+LL | e.0.0.m.x = format!("not-x");
+ | - first borrow occurs due to use of `e.0.0.m.x` in closure
+...
+LL | println!("{}", e.0.0.m.x);
+ | ^^^^^^^^^ immutable borrow occurs here
+LL |
+LL | c();
+ | - mutable borrow later used here
+
+error[E0506]: cannot assign to `e.0.0.m.x` because it is borrowed
+ --> $DIR/box.rs:56:5
+ |
+LL | let c = || {
+ | -- borrow of `e.0.0.m.x` occurs here
+LL | println!("{}", e.0.0.m.x);
+ | - borrow occurs due to use in closure
+...
+LL | e.0.0.m.x = format!("not-x");
+ | ^^^^^^^^^ assignment to borrowed `e.0.0.m.x` occurs here
+LL |
+LL | c();
+ | - borrow later used here
+
+error: aborting due to 3 previous errors; 1 warning emitted
+
+Some errors have detailed explanations: E0502, E0506.
+For more information about an error, try `rustc --explain E0502`.
--- /dev/null
+// Test that when borrow checker diagnostics are emitted, they are as precise
+// as the capture by the closure.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+#![allow(unused)]
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+struct Wrapper {
+ p: Point,
+}
+
+fn main() {
+ let mut w = Wrapper { p: Point { x: 10, y: 10 } };
+
+ let mut c = || {
+ w.p.x += 20;
+ };
+
+ let py = &mut w.p.x;
+ //~^ ERROR: cannot borrow `w.p.x` as mutable more than once at a time
+ c();
+
+ *py = 20
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/multilevel-path.rs:4:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+error[E0499]: cannot borrow `w.p.x` as mutable more than once at a time
+ --> $DIR/multilevel-path.rs:23:14
+ |
+LL | let mut c = || {
+ | -- first mutable borrow occurs here
+LL | w.p.x += 20;
+ | - first borrow occurs due to use of `w.p.x` in closure
+...
+LL | let py = &mut w.p.x;
+ | ^^^^^^^^^^ second mutable borrow occurs here
+LL |
+LL | c();
+ | - first borrow later used here
+
+error: aborting due to previous error; 1 warning emitted
+
+For more information about this error, try `rustc --explain E0499`.
--- /dev/null
+// Test that the borrow checker error is accurate and that the min capture pass of the
+// closure analysis is working as expected.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+
+#[derive(Debug)]
+struct Point {
+ x: i32,
+ y: i32,
+}
+
+fn main() {
+ let mut p = Point { x: 10, y: 20 };
+
+ // `p` is captured via mutable borrow.
+ let mut c = || {
+ p.x += 10;
+ println!("{:?}", p);
+ };
+
+
+ println!("{:?}", p);
+ //~^ ERROR: cannot borrow `p` as immutable because it is also borrowed as mutable
+ c();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/simple-struct-min-capture.rs:4:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+error[E0502]: cannot borrow `p` as immutable because it is also borrowed as mutable
+ --> $DIR/simple-struct-min-capture.rs:23:22
+ |
+LL | let mut c = || {
+ | -- mutable borrow occurs here
+LL | p.x += 10;
+ | - first borrow occurs due to use of `p` in closure
+...
+LL | println!("{:?}", p);
+ | ^ immutable borrow occurs here
+LL |
+LL | c();
+ | - mutable borrow later used here
+
+error: aborting due to previous error; 1 warning emitted
+
+For more information about this error, try `rustc --explain E0502`.
--- /dev/null
+// run-pass
+
+// Test precise capture when using boxes
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+
+
+struct MetaData { x: String, name: String }
+struct Data { m: MetaData }
+struct BoxedData(Box<Data>);
+struct EvenMoreBoxedData(Box<BoxedData>);
+
+// Mutate disjoint paths, one inside one outside the closure
+fn box_1() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let mut c = || {
+ e.0.0.m.x = format!("not-x");
+ };
+
+ e.0.0.m.name = format!("not-name");
+ c();
+}
+
+// Mutate a path inside the closure and read a disjoint path outside the closure
+fn box_2() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let mut c = || {
+ e.0.0.m.x = format!("not-x");
+ };
+
+ println!("{}", e.0.0.m.name);
+ c();
+}
+
+// Read a path inside the closure and mutate a disjoint path outside the closure
+fn box_3() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let mut e = EvenMoreBoxedData(Box::new(b));
+
+ let c = || {
+ println!("{}", e.0.0.m.name);
+ };
+
+ e.0.0.m.x = format!("not-x");
+ c();
+}
+
+// Read disjoint paths, one inside the closure and one outside the closure.
+fn box_4() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let e = EvenMoreBoxedData(Box::new(b));
+
+ let c = || {
+ println!("{}", e.0.0.m.name);
+ };
+
+ println!("{}", e.0.0.m.x);
+ c();
+}
+
+// Read the same path, once inside the closure and once outside the closure.
+fn box_5() {
+ let m = MetaData { x: format!("x"), name: format!("name") };
+ let d = Data { m };
+ let b = BoxedData(Box::new(d));
+ let e = EvenMoreBoxedData(Box::new(b));
+
+ let c = || {
+ println!("{}", e.0.0.m.name);
+ };
+
+ println!("{}", e.0.0.m.name);
+ c();
+}
+
+fn main() {
+ box_1();
+ box_2();
+ box_3();
+ box_4();
+ box_5();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/box.rs:5:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test that we can immutably borrow a field of an instance of a structure from within a closure,
+// while having a mutable borrow to another field of the same instance outside the closure.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+
+fn main() {
+ let mut p = Point { x: 10, y: 10 };
+
+ let c = || {
+ println!("{}", p.x);
+ };
+
+ // `c` should only capture `p.x`, therefore mutating `p.y` is allowed.
+ let py = &mut p.y;
+
+ c();
+ *py = 20;
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/capture-disjoint-field-struct.rs:6:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test that we can mutate an element of a tuple from within a closure
+// while immutably borrowing another element of the same tuple outside the closure.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+#![feature(rustc_attrs)]
+
+fn main() {
+ let mut t = (10, 10);
+
+ let mut c = || {
+ let t1 = &mut t.1;
+ *t1 = 20;
+ };
+
+ // Test that `c` only captures t.1, therefore reading t.0 is allowed.
+ println!("{}", t.0);
+ c();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/capture-disjoint-field-tuple-mut.rs:6:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test that we can immutably borrow an element of a tuple from within a closure,
+// while having a mutable borrow to another element of the same tuple outside the closure.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+#![feature(rustc_attrs)]
+
+fn main() {
+ let mut t = (10, 10);
+
+ let c = || {
+ println!("{}", t.0);
+ };
+
+ // `c` only captures t.0, therefore mutating t.1 is allowed.
+ let t1 = &mut t.1;
+
+ c();
+ *t1 = 20;
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/capture-disjoint-field-tuple.rs:6:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+
+// Tests that if a closure uses individual fields of the same object
+// then that case is handled properly.
+
+#![allow(unused)]
+
+struct Struct {
+ x: i32,
+ y: i32,
+ s: String,
+}
+
+fn main() {
+ let mut s = Struct { x: 10, y: 10, s: String::new() };
+
+ let mut c = {
+ s.x += 10;
+ s.y += 42;
+ s.s = String::from("new");
+ };
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/disjoint-capture-in-same-closure.rs:3:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test disjoint capture within an impl block
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+
+struct Filter {
+ div: i32,
+}
+impl Filter {
+ fn allowed(&self, x: i32) -> bool {
+ x % self.div == 1
+ }
+}
+
+struct Data {
+ filter: Filter,
+ list: Vec<i32>,
+}
+impl Data {
+ fn update(&mut self) {
+ // The closure passed to filter only captures self.filter,
+ // therefore mutating self.list is allowed.
+ self.list.retain(
+ |v| self.filter.allowed(*v),
+ );
+ }
+}
+
+fn main() {
+ let mut d = Data { filter: Filter { div: 3 }, list: Vec::new() };
+
+ for i in 1..10 {
+ d.list.push(i);
+ }
+
+ d.update();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/filter-on-struct-member.rs:5:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test that closures can capture paths that are more precise than just one level
+// from the root variable.
+//
+// If the closures can handle such precision we should be able to mutate one path in the closure
+// while being able to mutate another path outside the closure, where the two paths are disjoint
+// after applying two projections on the root variable.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+#![allow(unused)]
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+struct Wrapper {
+ p: Point,
+}
+
+fn main() {
+ let mut w = Wrapper { p: Point { x: 10, y: 10 } };
+
+ let mut c = || {
+ w.p.x += 20;
+ };
+
+ // `c` only captures `w.p.x`, therefore it's safe to mutate `w.p.y`.
+ let py = &mut w.p.y;
+ c();
+
+ *py = 20
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/multilevel-path-1.rs:10:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+#![allow(unused)]
+
+// If the closures can handle such precision we should be able to read one path in the closure
+// while being able to mutate another path outside the closure, where the two paths are disjoint
+// after applying two projections on the root variable.
+
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+struct Wrapper {
+ p: Point,
+}
+
+fn main() {
+ let mut w = Wrapper { p: Point { x: 10, y: 10 } };
+
+ let c = || {
+ println!("{}", w.p.x);
+ };
+
+ // `c` only captures `w.p.x`, therefore it's safe to mutate `w.p.y`.
+ let py = &mut w.p.y;
+ c();
+
+ *py = 20
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/multilevel-path-2.rs:3:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+#![allow(unused)]
+
+// Test that when `capture_disjoint_fields` is enabled we can read a path
+// both inside and outside the closure at the same time.
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+struct Wrapper {
+ p: Point,
+}
+
+fn main() {
+ let mut w = Wrapper { p: Point { x: 10, y: 10 } };
+
+ let c = || {
+ println!("{}", w.p.x);
+ };
+
+ let px = &w.p.x;
+ c();
+
+ println!("{}", px);
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/multilevel-path-3.rs:3:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// run-pass
+
+// Test whether we can do precise capture when using nested closures.
+
+#![feature(capture_disjoint_fields)]
+//~^ WARNING: the feature `capture_disjoint_fields` is incomplete
+//~| NOTE: `#[warn(incomplete_features)]` on by default
+//~| NOTE: see issue #53488 <https://github.com/rust-lang/rust/issues/53488>
+
+struct Point {
+ x: i32,
+ y: i32,
+}
+
+fn main() {
+ let mut p = Point { x: 5, y: 20 };
+
+ // c1 should capture `p.x` via immutable borrow and
+ // `p.y` via mutable borrow.
+ let mut c1 = || {
+ println!("{}", p.x);
+
+ let incr = 10;
+
+ let mut c2 = || p.y += incr;
+ c2();
+
+ println!("{}", p.y);
+ };
+
+ c1();
+
+ // This should not throw an error because `p.x` is borrowed via Immutable borrow,
+ // and multiple immutable borrow of the same place are allowed.
+ let px = &p.x;
+
+ println!("{}", px);
+
+ c1();
+}
--- /dev/null
+warning: the feature `capture_disjoint_fields` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/nested-closure.rs:5:12
+ |
+LL | #![feature(capture_disjoint_fields)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #53488 <https://github.com/rust-lang/rust/issues/53488> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// error-pattern: any use of this value will cause an error
+
+#![feature(never_type)]
+#![feature(const_maybe_uninit_assume_init)]
+
+#[allow(invalid_value)]
+fn main() {
+ use std::mem::MaybeUninit;
+
+ const _BAD: () = unsafe {
+ MaybeUninit::<!>::uninit().assume_init();
+ };
+}
--- /dev/null
+error: any use of this value will cause an error
+ --> $SRC_DIR/core/src/mem/maybe_uninit.rs:LL:COL
+ |
+LL | intrinsics::assert_inhabited::<T>();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | aborted execution: attempted to instantiate uninhabited type `!`
+ | inside `MaybeUninit::<!>::assume_init` at $SRC_DIR/core/src/mem/maybe_uninit.rs:LL:COL
+ | inside `_BAD` at $DIR/assume-type-intrinsics.rs:11:9
+ |
+ ::: $DIR/assume-type-intrinsics.rs:10:5
+ |
+LL | / const _BAD: () = unsafe {
+LL | | MaybeUninit::<!>::uninit().assume_init();
+LL | | };
+ | |______-
+ |
+ = note: `#[deny(const_err)]` on by default
+
+error: aborting due to previous error
+
#[rustc_deprecated(since = "99.99.99", reason = "effectively never")]
#[stable(feature = "rustc_deprecation-in-future-test", since = "1.0.0")]
-pub struct S;
+pub struct S1;
+
+#[rustc_deprecated(since = "TBD", reason = "literally never")]
+#[stable(feature = "rustc_deprecation-in-future-test", since = "1.0.0")]
+pub struct S2;
fn main() {
- let _ = S; //~ ERROR use of unit struct `S` that will be deprecated in future version 99.99.99: effectively never
+ let _ = S1; //~ ERROR use of unit struct `S1` that will be deprecated in future version 99.99.99: effectively never
+ let _ = S2; //~ ERROR use of unit struct `S2` that will be deprecated in a future Rust version: literally never
}
-error: use of unit struct `S` that will be deprecated in future version 99.99.99: effectively never
- --> $DIR/rustc_deprecation-in-future.rs:14:13
+error: use of unit struct `S1` that will be deprecated in future version 99.99.99: effectively never
+ --> $DIR/rustc_deprecation-in-future.rs:18:13
|
-LL | let _ = S;
- | ^
+LL | let _ = S1;
+ | ^^
|
note: the lint level is defined here
--> $DIR/rustc_deprecation-in-future.rs:3:9
LL | #![deny(deprecated_in_future)]
| ^^^^^^^^^^^^^^^^^^^^
-error: aborting due to previous error
+error: use of unit struct `S2` that will be deprecated in a future Rust version: literally never
+ --> $DIR/rustc_deprecation-in-future.rs:19:13
+ |
+LL | let _ = S2;
+ | ^^
+
+error: aborting due to 2 previous errors
// Thus, `&'_ u8` should be included in type signature
// of the underlying generator.
+#![feature(if_let_guard)]
+#![allow(incomplete_features)]
+
async fn f() -> u8 { 1 }
async fn foo() -> [bool; 10] { [false; 10] }
}
}
+async fn j(x: u8) {
+ match x {
+ y if let (1, 42) = (f().await, y) => (),
+ _ => (),
+ }
+}
+
fn main() {
let _ = g(10);
let _ = h(9);
let _ = i(8);
+ let _ = j(7);
}
--- /dev/null
+mod foo {
+ pub struct Pub { private: () }
+
+ pub enum Enum {
+ Variant { x: (), y: () },
+ Other
+ }
+
+ fn correct() {
+ Pub {};
+ //~^ ERROR missing field `private` in initializer of `Pub`
+ Enum::Variant { x: () };
+ //~^ ERROR missing field `y` in initializer of `Enum`
+ }
+}
+
+fn correct() {
+ foo::Pub {};
+ //~^ ERROR cannot construct `Pub` with struct literal syntax due to inaccessible fields
+}
+
+fn wrong() {
+ foo::Enum::Variant { x: () };
+ //~^ ERROR missing field `y` in initializer of `Enum`
+ foo::Enum::Variant { };
+ //~^ ERROR missing fields `x`, `y` in initializer of `Enum`
+}
+
+fn main() {}
--- /dev/null
+error[E0063]: missing field `private` in initializer of `Pub`
+ --> $DIR/issue-79593.rs:10:9
+ |
+LL | Pub {};
+ | ^^^ missing `private`
+
+error[E0063]: missing field `y` in initializer of `Enum`
+ --> $DIR/issue-79593.rs:12:9
+ |
+LL | Enum::Variant { x: () };
+ | ^^^^^^^^^^^^^ missing `y`
+
+error: cannot construct `Pub` with struct literal syntax due to inaccessible fields
+ --> $DIR/issue-79593.rs:18:5
+ |
+LL | foo::Pub {};
+ | ^^^^^^^^
+
+error[E0063]: missing field `y` in initializer of `Enum`
+ --> $DIR/issue-79593.rs:23:5
+ |
+LL | foo::Enum::Variant { x: () };
+ | ^^^^^^^^^^^^^^^^^^ missing `y`
+
+error[E0063]: missing fields `x`, `y` in initializer of `Enum`
+ --> $DIR/issue-79593.rs:25:5
+ |
+LL | foo::Enum::Variant { };
+ | ^^^^^^^^^^^^^^^^^^ missing `x`, `y`
+
+error: aborting due to 5 previous errors
+
+For more information about this error, try `rustc --explain E0063`.
--- /dev/null
+// run-pass
+// compile-flags: -Z mir-opt-level=2 -C opt-level=0
+
+// example from #78496
+pub enum E<'a> {
+ Empty,
+ Some(&'a E<'a>),
+}
+
+fn f(e: &E) -> u32 {
+ if let E::Some(E::Some(_)) = e { 1 } else { 2 }
+}
+
+fn main() {
+ assert_eq!(f(&E::Empty), 2);
+}
--- /dev/null
+// compile-flags: --crate-type=lib
+// check-pass
+// issue #55482
+#![no_std]
+
+macro_rules! foo {
+ ($e:expr) => {
+ $crate::core::assert!($e);
+ $crate::core::assert_eq!($e, true);
+ };
+}
+
+pub fn foo() { foo!(true); }
fn main() {}
trait X {
- default const A: u8; //~ ERROR `default` is only allowed on items in `impl` definitions
- default const B: u8 = 0; //~ ERROR `default` is only allowed on items in `impl` definitions
- default type D; //~ ERROR `default` is only allowed on items in `impl` definitions
- default type C: Ord; //~ ERROR `default` is only allowed on items in `impl` definitions
- default fn f1(); //~ ERROR `default` is only allowed on items in `impl` definitions
- default fn f2() {} //~ ERROR `default` is only allowed on items in `impl` definitions
+ default const A: u8; //~ ERROR `default` is only allowed on items in trait impls
+ default const B: u8 = 0; //~ ERROR `default` is only allowed on items in trait impls
+ default type D; //~ ERROR `default` is only allowed on items in trait impls
+ default type C: Ord; //~ ERROR `default` is only allowed on items in trait impls
+ default fn f1(); //~ ERROR `default` is only allowed on items in trait impls
+ default fn f2() {} //~ ERROR `default` is only allowed on items in trait impls
}
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:6:5
|
LL | default const A: u8;
| |
| `default` because of this
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:7:5
|
LL | default const B: u8 = 0;
| |
| `default` because of this
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:8:5
|
LL | default type D;
| |
| `default` because of this
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:9:5
|
LL | default type C: Ord;
| |
| `default` because of this
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:10:5
|
LL | default fn f1();
| |
| `default` because of this
-error: `default` is only allowed on items in `impl` definitions
+error: `default` is only allowed on items in trait impls
--> $DIR/trait-item-with-defaultness-fail-semantic.rs:11:5
|
LL | default fn f2() {}
--- /dev/null
+// aux-build:test-macros.rs
+// compile-flags: -Z span-debug
+// check-pass
+
+// Tests that we properly handle parsing a nonterminal
+// where we have two consecutive angle brackets (one inside
+// the nonterminal, and one outside)
+
+#![no_std] // Don't load unnecessary hygiene information from std
+extern crate std;
+extern crate test_macros;
+
+macro_rules! trailing_angle {
+ (Option<$field:ty>) => {
+ test_macros::print_bang_consume!($field);
+ }
+}
+
+trailing_angle!(Option<Vec<u8>>);
+fn main() {}
--- /dev/null
+PRINT-BANG INPUT (DISPLAY): Vec<u8>
+PRINT-BANG RE-COLLECTED (DISPLAY): Vec < u8 >
+PRINT-BANG INPUT (DEBUG): TokenStream [
+ Group {
+ delimiter: None,
+ stream: TokenStream [
+ Ident {
+ ident: "Vec",
+ span: $DIR/capture-unglued-token.rs:19:24: 19:27 (#0),
+ },
+ Punct {
+ ch: '<',
+ spacing: Alone,
+ span: $DIR/capture-unglued-token.rs:19:27: 19:28 (#0),
+ },
+ Ident {
+ ident: "u8",
+ span: $DIR/capture-unglued-token.rs:19:28: 19:30 (#0),
+ },
+ Punct {
+ ch: '>',
+ spacing: Alone,
+ span: $DIR/capture-unglued-token.rs:19:30: 19:31 (#0),
+ },
+ ],
+ span: $DIR/capture-unglued-token.rs:15:42: 15:48 (#4),
+ },
+]
--- /dev/null
+#![feature(if_let_guard)]
+#![allow(incomplete_features)]
+
+fn main() {
+ match Some(None) {
+ Some(x) if let Some(y) = x => (x, y),
+ _ => y, //~ ERROR cannot find value `y`
+ }
+ y //~ ERROR cannot find value `y`
+}
--- /dev/null
+error[E0425]: cannot find value `y` in this scope
+ --> $DIR/bindings.rs:7:14
+ |
+LL | _ => y,
+ | ^ not found in this scope
+
+error[E0425]: cannot find value `y` in this scope
+ --> $DIR/bindings.rs:9:5
+ |
+LL | y
+ | ^ not found in this scope
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0425`.
match () {
() if let 0 = 1 => {}
//~^ ERROR `if let` guard is not implemented
- //~| ERROR `let` expressions are not supported here
() if (let 0 = 1) => {}
//~^ ERROR `let` expressions in this position are experimental
error: no rules expected the token `let`
- --> $DIR/feature-gate.rs:81:15
+ --> $DIR/feature-gate.rs:80:15
|
LL | macro_rules! use_expr {
| --------------------- when calling this macro
= help: add `#![feature(if_let_guard)]` to the crate attributes to enable
error[E0658]: `if let` guard is not implemented
- --> $DIR/feature-gate.rs:77:12
+ --> $DIR/feature-gate.rs:76:12
|
LL | () if let 0 = 1 => {}
| ^^^^^^^^^^^^
= help: add `#![feature(if_let_guard)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:11:16
+ --> $DIR/feature-gate.rs:10:16
|
LL | () if (let 0 = 1) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:15:18
+ --> $DIR/feature-gate.rs:14:18
|
LL | () if (((let 0 = 1))) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:19:23
+ --> $DIR/feature-gate.rs:18:23
|
LL | () if true && let 0 = 1 => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:23:15
+ --> $DIR/feature-gate.rs:22:15
|
LL | () if let 0 = 1 && true => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:27:16
+ --> $DIR/feature-gate.rs:26:16
|
LL | () if (let 0 = 1) && true => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:31:24
+ --> $DIR/feature-gate.rs:30:24
|
LL | () if true && (let 0 = 1) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:35:16
+ --> $DIR/feature-gate.rs:34:16
|
LL | () if (let 0 = 1) && (let 0 = 1) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:35:31
+ --> $DIR/feature-gate.rs:34:31
|
LL | () if (let 0 = 1) && (let 0 = 1) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:41:15
+ --> $DIR/feature-gate.rs:40:15
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:41:28
+ --> $DIR/feature-gate.rs:40:28
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:41:42
+ --> $DIR/feature-gate.rs:40:42
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:41:55
+ --> $DIR/feature-gate.rs:40:55
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:41:68
+ --> $DIR/feature-gate.rs:40:68
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:53:15
+ --> $DIR/feature-gate.rs:52:15
|
LL | () if let Range { start: _, end: _ } = (true..true) && false => {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:69:16
+ --> $DIR/feature-gate.rs:68:16
|
LL | use_expr!((let 0 = 1 && 0 == 0));
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error[E0658]: `let` expressions in this position are experimental
- --> $DIR/feature-gate.rs:72:16
+ --> $DIR/feature-gate.rs:71:16
|
LL | use_expr!((let 0 = 1));
| ^^^^^^^^^
= help: add `#![feature(let_chains)]` to the crate attributes to enable
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:7:15
- |
-LL | () if let 0 = 1 => {}
- | ^^^^^^^^^
- |
- = note: only supported directly in conditions of `if`- and `while`-expressions
- = note: as well as when nested within `&&` and parenthesis in those conditions
-
-error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:11:16
+ --> $DIR/feature-gate.rs:10:16
|
LL | () if (let 0 = 1) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:15:18
+ --> $DIR/feature-gate.rs:14:18
|
LL | () if (((let 0 = 1))) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:19:23
+ --> $DIR/feature-gate.rs:18:23
|
LL | () if true && let 0 = 1 => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:23:15
+ --> $DIR/feature-gate.rs:22:15
|
LL | () if let 0 = 1 && true => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:27:16
+ --> $DIR/feature-gate.rs:26:16
|
LL | () if (let 0 = 1) && true => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:31:24
+ --> $DIR/feature-gate.rs:30:24
|
LL | () if true && (let 0 = 1) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:35:16
+ --> $DIR/feature-gate.rs:34:16
|
LL | () if (let 0 = 1) && (let 0 = 1) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:35:31
+ --> $DIR/feature-gate.rs:34:31
|
LL | () if (let 0 = 1) && (let 0 = 1) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:41:15
+ --> $DIR/feature-gate.rs:40:15
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:41:28
+ --> $DIR/feature-gate.rs:40:28
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:41:42
+ --> $DIR/feature-gate.rs:40:42
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:41:55
+ --> $DIR/feature-gate.rs:40:55
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:41:68
+ --> $DIR/feature-gate.rs:40:68
|
LL | () if let 0 = 1 && let 1 = 2 && (let 2 = 3 && let 3 = 4 && let 4 = 5) => {}
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:53:15
+ --> $DIR/feature-gate.rs:52:15
|
LL | () if let Range { start: _, end: _ } = (true..true) && false => {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:69:16
+ --> $DIR/feature-gate.rs:68:16
|
LL | use_expr!((let 0 = 1 && 0 == 0));
| ^^^^^^^^^
= note: as well as when nested within `&&` and parenthesis in those conditions
error: `let` expressions are not supported here
- --> $DIR/feature-gate.rs:72:16
+ --> $DIR/feature-gate.rs:71:16
|
LL | use_expr!((let 0 = 1));
| ^^^^^^^^^
= note: only supported directly in conditions of `if`- and `while`-expressions
= note: as well as when nested within `&&` and parenthesis in those conditions
-error: aborting due to 36 previous errors
+error: aborting due to 35 previous errors
For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// run-pass
+
+#![feature(if_let_guard)]
+#![allow(incomplete_features)]
+
+enum Foo {
+ Bar,
+ Baz,
+ Qux(u8),
+}
+
+fn bar(x: bool) -> Foo {
+ if x { Foo::Baz } else { Foo::Bar }
+}
+
+fn baz(x: u8) -> Foo {
+ if x % 2 == 0 { Foo::Bar } else { Foo::Baz }
+}
+
+fn qux(x: u8) -> Foo {
+ Foo::Qux(x.rotate_left(1))
+}
+
+fn main() {
+ match Some((true, 3)) {
+ Some((x, _)) if let Foo::Bar = bar(x) => panic!(),
+ Some((_, x)) if let Foo::Baz = baz(x) => {},
+ _ => panic!(),
+ }
+ match Some(42) {
+ Some(x) if let Foo::Qux(y) = qux(x) => assert_eq!(y, 84),
+ _ => panic!(),
+ }
+}
--- /dev/null
+#![feature(if_let_guard)]
+#![allow(incomplete_features)]
+
+fn ok() -> Result<Option<bool>, ()> {
+ Ok(Some(true))
+}
+
+fn main() {
+ match ok() {
+ Ok(x) if let Err(_) = x => {},
+ //~^ ERROR mismatched types
+ Ok(x) if let 0 = x => {},
+ //~^ ERROR mismatched types
+ _ => {}
+ }
+}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/typeck.rs:10:22
+ |
+LL | Ok(x) if let Err(_) = x => {},
+ | ^^^^^^ expected enum `Option`, found enum `std::result::Result`
+ |
+ = note: expected enum `Option<bool>`
+ found enum `std::result::Result<_, _>`
+
+error[E0308]: mismatched types
+ --> $DIR/typeck.rs:12:22
+ |
+LL | Ok(x) if let 0 = x => {},
+ | ^ expected enum `Option`, found integer
+ |
+ = note: expected enum `Option<bool>`
+ found type `{integer}`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0308`.
--- /dev/null
+#![feature(if_let_guard)]
+#![allow(incomplete_features)]
+
+#[deny(irrefutable_let_patterns)]
+fn irrefutable_let_guard() {
+ match Some(()) {
+ Some(x) if let () = x => {}
+ //~^ ERROR irrefutable if-let guard
+ _ => {}
+ }
+}
+
+#[deny(unreachable_patterns)]
+fn unreachable_pattern() {
+ match Some(()) {
+ x if let None | None = x => {}
+ //~^ ERROR unreachable pattern
+ _ => {}
+ }
+}
+
+fn main() {}
--- /dev/null
+error: irrefutable if-let guard
+ --> $DIR/warns.rs:7:24
+ |
+LL | Some(x) if let () = x => {}
+ | ^^
+ |
+note: the lint level is defined here
+ --> $DIR/warns.rs:4:8
+ |
+LL | #[deny(irrefutable_let_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: unreachable pattern
+ --> $DIR/warns.rs:16:25
+ |
+LL | x if let None | None = x => {}
+ | ^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/warns.rs:13:8
+ |
+LL | #[deny(unreachable_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+#![feature(const_trait_impl)]
+#![allow(incomplete_features)]
+
+struct Foo;
+
+const impl Foo { //~ ERROR: expected identifier, found keyword
+ fn bar() {}
+}
+
+fn main() {
+ // shouldn't error here because we shouldn't have been able to recover above
+ Foo::bar();
+}
--- /dev/null
+error: expected identifier, found keyword `impl`
+ --> $DIR/const-impl-norecover.rs:6:7
+ |
+LL | const impl Foo {
+ | ^^^^ expected identifier, found keyword
+
+error: aborting due to previous error
+
--- /dev/null
+#![feature(const_trait_impl)]
+#![allow(incomplete_features)]
+
+trait Foo {}
+
+const impl Foo for i32 {} //~ ERROR: expected identifier, found keyword
+
+trait Bar {}
+
+const impl<T: Foo> Bar for T {} //~ ERROR: expected identifier, found keyword
+
+const fn still_implements<T: Bar>() {}
+
+const _: () = still_implements::<i32>();
+
+fn main() {}
--- /dev/null
+error: expected identifier, found keyword `impl`
+ --> $DIR/const-impl-recovery.rs:6:7
+ |
+LL | const impl Foo for i32 {}
+ | ^^^^ expected identifier, found keyword
+ |
+help: you might have meant to write a const trait impl
+ |
+LL | impl const Foo for i32 {}
+ |-- ^^^^^
+
+error: expected identifier, found keyword `impl`
+ --> $DIR/const-impl-recovery.rs:10:7
+ |
+LL | const impl<T: Foo> Bar for T {}
+ | ^^^^ expected identifier, found keyword
+ |
+help: you might have meant to write a const trait impl
+ |
+LL | impl<T: Foo> const Bar for T {}
+ |-- ^^^^^
+
+error: aborting due to 2 previous errors
+
#[rustc_const_unstable(feature = "c", issue = "none")]
#[rustc_const_unstable(feature = "d", issue = "none")] //~ ERROR multiple stability levels
pub const fn multiple4() { }
-//~^ ERROR Invalid stability or deprecation version found
+//~^ ERROR Invalid stability version found
+
+#[stable(feature = "a", since = "1.0.0")]
+#[rustc_deprecated(since = "invalid", reason = "text")]
+fn invalid_deprecation_version() {} //~ ERROR Invalid deprecation version found
#[rustc_deprecated(since = "a", reason = "text")]
fn deprecated_without_unstable_or_stable() { }
LL | #[rustc_const_unstable(feature = "d", issue = "none")]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: Invalid stability or deprecation version found
+error: Invalid stability version found
--> $DIR/stability-attribute-sanity.rs:65:1
|
LL | pub const fn multiple4() { }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+error: Invalid deprecation version found
+ --> $DIR/stability-attribute-sanity.rs:70:1
+ |
+LL | fn invalid_deprecation_version() {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
error[E0549]: rustc_deprecated attribute must be paired with either stable or unstable attribute
- --> $DIR/stability-attribute-sanity.rs:68:1
+ --> $DIR/stability-attribute-sanity.rs:72:1
|
LL | #[rustc_deprecated(since = "a", reason = "text")]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: aborting due to 18 previous errors
+error: aborting due to 19 previous errors
Some errors have detailed explanations: E0539, E0541, E0546, E0550.
For more information about an error, try `rustc --explain E0539`.
--- /dev/null
+// This tests issue #79683: note in the error message that the trait is
+// explicitly unimplemented instead of suggesting to implement it.
+
+#![feature(negative_impls)]
+
+struct Qux;
+//~^ NOTE method `clone` not found for this
+//~^^ NOTE method `foo` not found for this
+
+impl !Clone for Qux {}
+
+trait Bar {
+ fn bar(&self);
+}
+
+impl !Bar for u32 {}
+
+trait Foo {
+ fn foo(&self);
+}
+//~^^^ NOTE `Foo` defines an item `foo`, perhaps you need to implement it
+
+trait FooBar {
+ fn foo(&self);
+}
+
+impl !Foo for Qux {}
+
+impl !FooBar for Qux {}
+
+impl !FooBar for u32 {}
+
+fn main() {
+ Qux.clone();
+ //~^ ERROR no method named `clone` found for struct `Qux`
+ //~| NOTE method not found in `Qux`
+ //~| NOTE `Clone` defines an item `clone`, but is explicitely unimplemented
+
+ 0_u32.bar();
+ //~^ ERROR no method named `bar` found for type `u32`
+ //~| NOTE method not found in `u32`
+ //~| NOTE `Bar` defines an item `bar`, but is explicitely unimplemented
+
+ Qux.foo();
+ //~^ ERROR no method named `foo` found for struct `Qux`
+ //~| NOTE method not found in `Qux`
+ //~| NOTE the following traits define an item `foo`, but are explicitely unimplemented
+
+ 0_u32.foo();
+ //~^ ERROR no method named `foo` found for type `u32`
+ //~| NOTE method not found in `u32`
+ //~| NOTE `FooBar` defines an item `foo`, but is explicitely unimplemented
+}
--- /dev/null
+error[E0599]: no method named `clone` found for struct `Qux` in the current scope
+ --> $DIR/explicitly-unimplemented-error-message.rs:34:9
+ |
+LL | struct Qux;
+ | ----------- method `clone` not found for this
+...
+LL | Qux.clone();
+ | ^^^^^ method not found in `Qux`
+ |
+ ::: $SRC_DIR/core/src/clone.rs:LL:COL
+ |
+LL | fn clone(&self) -> Self;
+ | -----
+ | |
+ | the method is available for `Arc<Qux>` here
+ | the method is available for `Rc<Qux>` here
+ |
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the trait `Clone` defines an item `clone`, but is explicitely unimplemented
+
+error[E0599]: no method named `bar` found for type `u32` in the current scope
+ --> $DIR/explicitly-unimplemented-error-message.rs:39:11
+ |
+LL | 0_u32.bar();
+ | ^^^ method not found in `u32`
+ |
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the trait `Bar` defines an item `bar`, but is explicitely unimplemented
+
+error[E0599]: no method named `foo` found for struct `Qux` in the current scope
+ --> $DIR/explicitly-unimplemented-error-message.rs:44:9
+ |
+LL | struct Qux;
+ | ----------- method `foo` not found for this
+...
+LL | Qux.foo();
+ | ^^^ method not found in `Qux`
+ |
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following traits define an item `foo`, but are explicitely unimplemented:
+ Foo
+ FooBar
+
+error[E0599]: no method named `foo` found for type `u32` in the current scope
+ --> $DIR/explicitly-unimplemented-error-message.rs:49:11
+ |
+LL | 0_u32.foo();
+ | ^^^ method not found in `u32`
+ |
+ = help: items from traits can only be used if the trait is implemented and in scope
+note: `Foo` defines an item `foo`, perhaps you need to implement it
+ --> $DIR/explicitly-unimplemented-error-message.rs:18:1
+ |
+LL | trait Foo {
+ | ^^^^^^^^^
+ = note: the trait `FooBar` defines an item `foo`, but is explicitely unimplemented
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0599`.
+#![feature(untagged_unions)]
use std::mem::ManuallyDrop;
+use std::cell::RefCell;
union U1 {
a: u8
a: T
}
+union URef {
+ p: &'static mut i32,
+}
+
+union URefCell { // field that does not drop but is not `Copy`, either
+ a: (RefCell<i32>, i32),
+}
+
+fn deref_union_field(mut u: URef) {
+ // Not an assignment but an access to the union field!
+ *(u.p) = 13; //~ ERROR access to union field is unsafe
+}
+
+fn assign_noncopy_union_field(mut u: URefCell) {
+ u.a = (RefCell::new(0), 1); //~ ERROR assignment to union field that might need dropping
+ u.a.0 = RefCell::new(0); //~ ERROR assignment to union field that might need dropping
+ u.a.1 = 1; // OK
+}
+
fn generic_noncopy<T: Default>() {
let mut u3 = U3 { a: ManuallyDrop::new(T::default()) };
- u3.a = ManuallyDrop::new(T::default()); //~ ERROR assignment to non-`Copy` union field is unsafe
+ u3.a = ManuallyDrop::new(T::default()); // OK (assignment does not drop)
*u3.a = T::default(); //~ ERROR access to union field is unsafe
}
// let U1 { .. } = u1; // OK
let mut u2 = U2 { a: ManuallyDrop::new(String::from("old")) }; // OK
- u2.a = ManuallyDrop::new(String::from("new")); //~ ERROR assignment to non-`Copy` union
+ u2.a = ManuallyDrop::new(String::from("new")); // OK (assignment does not drop)
*u2.a = String::from("new"); //~ ERROR access to union field is unsafe
let mut u3 = U3 { a: ManuallyDrop::new(0) }; // OK
*u3.a = 1; //~ ERROR access to union field is unsafe
let mut u3 = U3 { a: ManuallyDrop::new(String::from("old")) }; // OK
- u3.a = ManuallyDrop::new(String::from("new")); //~ ERROR assignment to non-`Copy` union
+ u3.a = ManuallyDrop::new(String::from("new")); // OK (assignment does not drop)
*u3.a = String::from("new"); //~ ERROR access to union field is unsafe
}
-error[E0133]: assignment to non-`Copy` union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:21:5
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union-unsafe.rs:31:5
+ |
+LL | *(u.p) = 13;
+ | ^^^^^^^^^^^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
+
+error[E0133]: assignment to union field that might need dropping is unsafe and requires unsafe function or block
+ --> $DIR/union-unsafe.rs:35:5
|
-LL | u3.a = ManuallyDrop::new(T::default());
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ assignment to non-`Copy` union field
+LL | u.a = (RefCell::new(0), 1);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^ assignment to union field that might need dropping
+ |
+ = note: the previous content of the field will be dropped, which causes undefined behavior if the field was not properly initialized
+
+error[E0133]: assignment to union field that might need dropping is unsafe and requires unsafe function or block
+ --> $DIR/union-unsafe.rs:36:5
+ |
+LL | u.a.0 = RefCell::new(0);
+ | ^^^^^^^^^^^^^^^^^^^^^^^ assignment to union field that might need dropping
|
= note: the previous content of the field will be dropped, which causes undefined behavior if the field was not properly initialized
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:22:6
+ --> $DIR/union-unsafe.rs:43:6
|
LL | *u3.a = T::default();
| ^^^^ access to union field
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:28:6
+ --> $DIR/union-unsafe.rs:49:6
|
LL | *u3.a = T::default();
| ^^^^ access to union field
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:36:13
+ --> $DIR/union-unsafe.rs:57:13
|
LL | let a = u1.a;
| ^^^^ access to union field
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:39:14
+ --> $DIR/union-unsafe.rs:60:14
|
LL | let U1 { a } = u1;
| ^ access to union field
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:40:20
+ --> $DIR/union-unsafe.rs:61:20
|
LL | if let U1 { a: 12 } = u1 {}
| ^^ access to union field
|
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
-error[E0133]: assignment to non-`Copy` union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:44:5
- |
-LL | u2.a = ManuallyDrop::new(String::from("new"));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ assignment to non-`Copy` union field
- |
- = note: the previous content of the field will be dropped, which causes undefined behavior if the field was not properly initialized
-
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:45:6
+ --> $DIR/union-unsafe.rs:66:6
|
LL | *u2.a = String::from("new");
| ^^^^ access to union field
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:49:6
+ --> $DIR/union-unsafe.rs:70:6
|
LL | *u3.a = 1;
| ^^^^ access to union field
|
= note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
-error[E0133]: assignment to non-`Copy` union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:52:5
- |
-LL | u3.a = ManuallyDrop::new(String::from("new"));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ assignment to non-`Copy` union field
- |
- = note: the previous content of the field will be dropped, which causes undefined behavior if the field was not properly initialized
-
error[E0133]: access to union field is unsafe and requires unsafe function or block
- --> $DIR/union-unsafe.rs:53:6
+ --> $DIR/union-unsafe.rs:74:6
|
LL | *u3.a = String::from("new");
| ^^^^ access to union field
if let Some(ref guard) = arm.guard {
match guard {
Guard::If(if_expr) => check_expr(cx, if_expr, bindings),
+ Guard::IfLet(guard_pat, guard_expr) => {
+ check_pat(cx, guard_pat, Some(*guard_expr), guard_pat.span, bindings);
+ check_expr(cx, guard_expr, bindings);
+ },
}
}
check_expr(cx, &arm.body, bindings);
self.current = if_expr_pat;
self.visit_expr(if_expr);
},
+ hir::Guard::IfLet(ref if_let_pat, ref if_let_expr) => {
+ let if_let_pat_pat = self.next("pat");
+ let if_let_expr_pat = self.next("expr");
+ println!(
+ " if let Guard::IfLet(ref {}, ref {}) = {};",
+ if_let_pat_pat, if_let_expr_pat, guard_pat
+ );
+ self.current = if_let_expr_pat;
+ self.visit_expr(if_let_expr);
+ self.current = if_let_pat_pat;
+ self.visit_pat(if_let_pat);
+ },
}
}
self.current = format!("{}[{}].pat", arms_pat, i);
"MatchSource::IfLetDesugar {{ contains_else_clause: {} }}",
contains_else_clause
),
+ hir::MatchSource::IfLetGuardDesugar => "MatchSource::IfLetGuardDesugar".to_string(),
hir::MatchSource::IfDesugar { contains_else_clause } => format!(
"MatchSource::IfDesugar {{ contains_else_clause: {} }}",
contains_else_clause
fn eq_guard(&mut self, left: &Guard<'_>, right: &Guard<'_>) -> bool {
match (left, right) {
(Guard::If(l), Guard::If(r)) => self.eq_expr(l, r),
+ (Guard::IfLet(lp, le), Guard::IfLet(rp, re)) => self.eq_pat(lp, rp) && self.eq_expr(le, re),
+ _ => false,
}
}
pub fn hash_guard(&mut self, g: &Guard<'_>) {
match g {
- Guard::If(ref expr) => {
+ Guard::If(ref expr) | Guard::IfLet(_, ref expr) => {
self.hash_expr(expr);
},
}
println!("{}If", ind);
print_expr(cx, expr, indent + 1);
},
+ hir::Guard::IfLet(pat, expr) => {
+ println!("{}IfLet", ind);
+ print_pat(cx, pat, indent + 1);
+ print_expr(cx, expr, indent + 1);
+ },
}
}
match arg.next().or_else(|| args.next()) {
Some(v) if pred(v) => return Some(v),
- _ => {},
+ _ => {}
}
}
None
const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust-clippy/issues/new";
-static ICE_HOOK: SyncLazy<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> = SyncLazy::new(|| {
- let hook = panic::take_hook();
- panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL)));
- hook
-});
+static ICE_HOOK: SyncLazy<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
+ SyncLazy::new(|| {
+ let hook = panic::take_hook();
+ panic::set_hook(Box::new(|info| report_clippy_ice(info, BUG_REPORT_URL)));
+ hook
+ });
fn report_clippy_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {
// Invoke our ICE handler, which prints the actual panic message and optionally a backtrace
// Setting RUSTC_WRAPPER causes Cargo to pass 'rustc' as the first argument.
// We're invoking the compiler programmatically, so we ignore this/
- let wrapper_mode = orig_args.get(1).map(Path::new).and_then(Path::file_stem) == Some("rustc".as_ref());
+ let wrapper_mode =
+ orig_args.get(1).map(Path::new).and_then(Path::file_stem) == Some("rustc".as_ref());
if wrapper_mode {
// we still want to be able to invoke it normally though
orig_args.remove(1);
}
- if !wrapper_mode && (orig_args.iter().any(|a| a == "--help" || a == "-h") || orig_args.len() == 1) {
+ if !wrapper_mode
+ && (orig_args.iter().any(|a| a == "--help" || a == "-h") || orig_args.len() == 1)
+ {
display_help();
exit(0);
}
if clippy_enabled {
args.extend(vec!["--cfg".into(), r#"feature="cargo-clippy""#.into()]);
if let Ok(extra_args) = env::var("CLIPPY_ARGS") {
- args.extend(extra_args.split("__CLIPPY_HACKERY__").filter_map(|s| {
- if s.is_empty() {
- None
- } else {
- Some(s.to_string())
- }
- }));
+ args.extend(
+ extra_args
+ .split("__CLIPPY_HACKERY__")
+ .filter_map(|s| if s.is_empty() { None } else { Some(s.to_string()) }),
+ );
}
}
let mut clippy = ClippyCallbacks;
-#![allow(clippy::useless_attribute)] //issue #2910
+// edition:2018
-#[macro_use]
-extern crate serde_derive;
+use serde::Deserialize;
/// Tests that we do not lint for unused underscores in a `MacroAttribute`
/// expansion
Nll,
Polonius,
Chalk,
+ SplitDwarf,
+ SplitDwarfSingle,
}
impl CompareMode {
CompareMode::Nll => "nll",
CompareMode::Polonius => "polonius",
CompareMode::Chalk => "chalk",
+ CompareMode::SplitDwarf => "split-dwarf",
+ CompareMode::SplitDwarfSingle => "split-dwarf-single",
}
}
"nll" => CompareMode::Nll,
"polonius" => CompareMode::Polonius,
"chalk" => CompareMode::Chalk,
+ "split-dwarf" => CompareMode::SplitDwarf,
+ "split-dwarf-single" => CompareMode::SplitDwarfSingle,
x => panic!("unknown --compare-mode option: {}", x),
}
}
/// created in `/<build_base>/rustfix_missing_coverage.txt`
pub rustfix_coverage: bool,
+    /// whether the external `tidy` HTML formatter is installed; used to normalize rustdoc output when a test fails
+ pub has_tidy: bool,
+
// Configuration for various run-make tests frobbing things like C compilers
// or querying about various LLVM component information.
pub cc: String,
Some(CompareMode::Nll) => name == "compare-mode-nll",
Some(CompareMode::Polonius) => name == "compare-mode-polonius",
Some(CompareMode::Chalk) => name == "compare-mode-chalk",
+ Some(CompareMode::SplitDwarf) => name == "compare-mode-split-dwarf",
+ Some(CompareMode::SplitDwarfSingle) => name == "compare-mode-split-dwarf-single",
None => false,
} ||
(cfg!(debug_assertions) && name == "debug") ||
use std::fs;
use std::io::{self, ErrorKind};
use std::path::{Path, PathBuf};
-use std::process::Command;
+use std::process::{Command, Stdio};
use std::time::SystemTime;
use test::ColorConfig;
use tracing::*;
panic!("Can't find Valgrind to run Valgrind tests");
}
+ if !config.has_tidy && config.mode == Mode::Rustdoc {
+ eprintln!("warning: `tidy` is not installed; generated diffs will be harder to read");
+ }
+
log_config(&config);
run_tests(config);
}
let src_base = opt_path(matches, "src-base");
let run_ignored = matches.opt_present("ignored");
+ let has_tidy = Command::new("tidy")
+ .arg("--version")
+ .stdout(Stdio::null())
+ .status()
+ .map_or(false, |status| status.success());
Config {
bless: matches.opt_present("bless"),
compile_lib_path: make_absolute(opt_path(matches, "compile-lib-path")),
remote_test_client: matches.opt_str("remote-test-client").map(PathBuf::from),
compare_mode: matches.opt_str("compare-mode").map(CompareMode::parse),
rustfix_coverage: matches.opt_present("rustfix-coverage"),
+ has_tidy,
cc: matches.opt_str("cc").unwrap(),
cxx: matches.opt_str("cxx").unwrap(),
Some(CompareMode::Chalk) => {
rustc.args(&["-Zchalk"]);
}
+ Some(CompareMode::SplitDwarf) => {
+ rustc.args(&["-Zsplit-dwarf=split"]);
+ }
+ Some(CompareMode::SplitDwarfSingle) => {
+ rustc.args(&["-Zsplit-dwarf=single"]);
+ }
None => {}
}
let proc_res = new_rustdoc.document(&compare_dir);
if !proc_res.status.success() {
- proc_res.fatal(Some("failed to run nightly rustdoc"), || ());
+ eprintln!("failed to run nightly rustdoc");
+ return;
}
#[rustfmt::skip]
"-modify",
];
let tidy_dir = |dir| {
- let tidy = |file: &_| {
- Command::new("tidy")
- .args(&tidy_args)
- .arg(file)
- .spawn()
- .unwrap_or_else(|err| {
- self.fatal(&format!("failed to run tidy - is it installed? - {}", err))
- })
- .wait()
- .unwrap()
- };
for entry in walkdir::WalkDir::new(dir) {
let entry = entry.expect("failed to read file");
if entry.file_type().is_file()
&& entry.path().extension().and_then(|p| p.to_str()) == Some("html".into())
{
- tidy(entry.path());
+ let status =
+ Command::new("tidy").args(&tidy_args).arg(entry.path()).status().unwrap();
+ // `tidy` returns 1 if it modified the file.
+ assert!(status.success() || status.code() == Some(1));
}
}
};
- tidy_dir(out_dir);
- tidy_dir(&compare_dir);
+ if self.config.has_tidy {
+ tidy_dir(out_dir);
+ tidy_dir(&compare_dir);
+ }
let pager = {
let output = Command::new("git").args(&["config", "--get", "core.pager"]).output().ok();
})
};
let mut diff = Command::new("diff");
- diff.args(&["-u", "-r"]).args(&[out_dir, &compare_dir]);
+ // diff recursively, showing context, and excluding .css files
+ diff.args(&["-u", "-r", "-x", "*.css"]).args(&[&compare_dir, out_dir]);
let output = if let Some(pager) = pager {
let diff_pid = diff.stdout(Stdio::piped()).spawn().expect("failed to run `diff`");
let content = std::fs::read_to_string(source)
.with_context(|| format!("failed to read {}", self.path(source)))?;
- let mut buf = HEADER_MESSAGE.replace("{source}", &self.path(source).to_string());
+ let mut buf =
+ HEADER_MESSAGE.replace("{source}", &self.path(source).to_string().replace("\\", "/"));
let documents = YamlLoader::load_from_str(&content)
.with_context(|| format!("failed to parse {}", self.path(source)))?;
//! A few exceptions are allowed as there's known bugs in rustdoc, but this
//! should catch the majority of "broken link" cases.
+#![feature(str_split_once)]
+
use std::collections::hash_map::Entry;
use std::collections::{HashMap, HashSet};
use std::env;
{
return;
}
- let mut parts = url.splitn(2, '#');
- let url = parts.next().unwrap();
- let fragment = parts.next();
- let mut parts = url.splitn(2, '?');
- let url = parts.next().unwrap();
+ let (url, fragment) = match url.split_once('#') {
+ None => (url, None),
+ Some((url, fragment)) => (url, Some(fragment)),
+ };
+ // NB: the `splitn` always succeeds, even if the delimiter is not present.
+ let url = url.splitn(2, '?').next().unwrap();
// Once we've plucked out the URL, parse it using our base url and
// then try to extract a file path.
-Subproject commit e54c5db4f0edbe51db42d2c3e63e9821537ed4f4
+Subproject commit 2065b52dfef3cd5a5216e65c21a056a69574bddc
break;
}
- let mut parts = line.splitn(2, '=');
- let krate = parts.next().unwrap().trim();
- if parts.next().is_none() {
- continue;
- }
+ let krate = match line.split_once('=') {
+ None => continue,
+ Some((krate, _)) => krate.trim(),
+ };
// Don't worry about depending on core/std while not writing `extern crate
// core/std` -- that's intentional.
"getopts",
"getrandom",
"gimli",
+ "gsgdt",
"hashbrown",
"hermit-abi",
"humantime",
for (name, license) in EXCEPTIONS {
// Check that the package actually exists.
if !metadata.packages.iter().any(|p| p.name == *name) {
- println!(
+ tidy_error!(
+ bad,
"could not find exception package `{}`\n\
Remove from EXCEPTIONS list if it is no longer used.",
name
);
- *bad = true;
}
// Check that the license hasn't changed.
for pkg in metadata.packages.iter().filter(|p| p.name == *name) {
}
match &pkg.license {
None => {
- println!(
+ tidy_error!(
+ bad,
"dependency exception `{}` does not declare a license expression",
pkg.id
);
- *bad = true;
}
Some(pkg_license) => {
if pkg_license.as_str() != *license {
let license = match &pkg.license {
Some(license) => license,
None => {
- println!("dependency `{}` does not define a license expression", pkg.id,);
- *bad = true;
+ tidy_error!(bad, "dependency `{}` does not define a license expression", pkg.id);
continue;
}
};
// general, these should never be added.
continue;
}
- println!("invalid license `{}` in `{}`", license, pkg.id);
- *bad = true;
+ tidy_error!(bad, "invalid license `{}` in `{}`", license, pkg.id);
}
}
}
// Check that the PERMITTED_DEPENDENCIES does not have unused entries.
for name in PERMITTED_DEPENDENCIES {
if !metadata.packages.iter().any(|p| p.name == *name) {
- println!(
+ tidy_error!(
+ bad,
"could not find allowed package `{}`\n\
Remove from PERMITTED_DEPENDENCIES list if it is no longer used.",
name
);
- *bad = true;
}
}
// Get the list in a convenient form.
}
if !unapproved.is_empty() {
- println!("Dependencies not explicitly permitted:");
+ tidy_error!(bad, "Dependencies not explicitly permitted:");
for dep in unapproved {
println!("* {}", dep);
}
- *bad = true;
}
}
let matches: Vec<_> = metadata.packages.iter().filter(|pkg| pkg.name == name).collect();
match matches.len() {
0 => {
- println!(
+ tidy_error!(
+ bad,
"crate `{}` is missing, update `check_crate_duplicate` \
if it is no longer used",
name
);
- *bad = true;
}
1 => {}
_ => {
- println!(
+ tidy_error!(
+ bad,
"crate `{}` is duplicated in `Cargo.lock`, \
it is too expensive to build multiple times, \
so make sure only one version appears across all dependencies",
for pkg in matches {
println!(" * {}", pkg.id);
}
- *bad = true;
}
}
}
for line in f.lines() {
let s = line.trim();
if !reached_no_explanation && s.starts_with('E') && s.contains("include_str!(\"") {
- if let Some(err_code) = s.splitn(2, ':').next() {
- let err_code = err_code.to_owned();
- if !error_codes.contains_key(&err_code) {
- error_codes.insert(err_code.clone(), false);
- }
- // Now we extract the tests from the markdown file!
- let md = some_or_continue!(s.splitn(2, "include_str!(\"").nth(1));
- let md_file_name = some_or_continue!(md.splitn(2, "\")").next());
- let path = some_or_continue!(path.parent())
- .join(md_file_name)
- .canonicalize()
- .expect("failed to canonicalize error explanation file path");
- match read_to_string(&path) {
- Ok(content) => {
- if !IGNORE_EXPLANATION_CHECK.contains(&err_code.as_str())
- && !check_if_error_code_is_test_in_explanation(&content, &err_code)
- {
- errors.push(format!(
- "`{}` doesn't use its own error code in compile_fail example",
- path.display(),
- ));
- }
- if check_error_code_explanation(&content, error_codes, err_code) {
- errors.push(format!(
- "`{}` uses invalid tag `compile-fail` instead of `compile_fail`",
- path.display(),
- ));
- }
+ let err_code = s
+ .split_once(':')
+ .expect(
+ format!(
+ "Expected a line with the format `E0xxx: include_str!(\"..\")`, but got {} without a `:` delimiter",
+ s,
+ ).as_str()
+ )
+ .0
+ .to_owned();
+ if !error_codes.contains_key(&err_code) {
+ error_codes.insert(err_code.clone(), false);
+ }
+ // Now we extract the tests from the markdown file!
+ let md_file_name = match s.split_once("include_str!(\"") {
+ None => continue,
+ Some((_, md)) => match md.split_once("\")") {
+ None => continue,
+ Some((file_name, _)) => file_name,
+ },
+ };
+ let path = some_or_continue!(path.parent())
+ .join(md_file_name)
+ .canonicalize()
+ .expect("failed to canonicalize error explanation file path");
+ match read_to_string(&path) {
+ Ok(content) => {
+ if !IGNORE_EXPLANATION_CHECK.contains(&err_code.as_str())
+ && !check_if_error_code_is_test_in_explanation(&content, &err_code)
+ {
+ errors.push(format!(
+ "`{}` doesn't use its own error code in compile_fail example",
+ path.display(),
+ ));
}
- Err(e) => {
- eprintln!("Couldn't read `{}`: {}", path.display(), e);
+ if check_error_code_explanation(&content, error_codes, err_code) {
+ errors.push(format!(
+ "`{}` uses invalid tag `compile-fail` instead of `compile_fail`",
+ path.display(),
+ ));
}
}
+ Err(e) => {
+ eprintln!("Couldn't read `{}`: {}", path.display(), e);
+ }
}
} else if reached_no_explanation && s.starts_with('E') {
- if let Some(err_code) = s.splitn(2, ',').next() {
- let err_code = err_code.to_owned();
- if !error_codes.contains_key(&err_code) {
- // this check should *never* fail!
- error_codes.insert(err_code, false);
- }
+ let err_code = match s.split_once(',') {
+ None => s,
+ Some((err_code, _)) => err_code,
+ }
+ .to_string();
+ if !error_codes.contains_key(&err_code) {
+ // this check should *never* fail!
+ error_codes.insert(err_code, false);
}
} else if s == ";" {
reached_no_explanation = true;
for line in f.lines() {
let s = line.trim();
if s.starts_with("error[E") || s.starts_with("warning[E") {
- if let Some(err_code) = s.splitn(2, ']').next() {
- if let Some(err_code) = err_code.splitn(2, '[').nth(1) {
- let nb = error_codes.entry(err_code.to_owned()).or_insert(false);
- *nb = true;
- }
- }
+ let err_code = match s.split_once(']') {
+ None => continue,
+ Some((err_code, _)) => match err_code.split_once('[') {
+ None => continue,
+ Some((_, err_code)) => err_code,
+ },
+ };
+ let nb = error_codes.entry(err_code.to_owned()).or_insert(false);
+ *nb = true;
}
}
}
}
// Extract source value.
- let source = line.splitn(2, '=').nth(1).unwrap().trim();
+ let source = line.split_once('=').unwrap().1.trim();
// Ensure source is allowed.
if !ALLOWED_SOURCES.contains(&&*source) {
- println!("invalid source: {}", source);
- *bad = true;
+ tidy_error!(bad, "invalid source: {}", source);
}
}
}
let gate_test_str = "gate-test-";
let feature_name = match line.find(gate_test_str) {
+ // NB: the `splitn` always succeeds, even if the delimiter is not present.
Some(i) => line[i + gate_test_str.len()..].splitn(2, ' ').next().unwrap(),
None => continue,
};
let issue_str = parts.next().unwrap().trim();
let tracking_issue = if issue_str.starts_with("None") {
if level == Status::Unstable && !next_feature_omits_tracking_issue {
- *bad = true;
tidy_error!(
bad,
"{}:{}: no tracking issue for feature {}",
//! This library contains the tidy lints and exposes it
//! to be used by tools.
+#![feature(str_split_once)]
+
use std::fs::File;
use std::io::Read;
use walkdir::{DirEntry, WalkDir};
}
macro_rules! tidy_error {
+ ($bad:expr, $fmt:expr) => ({
+ *$bad = true;
+ eprintln!("tidy error: {}", $fmt);
+ });
($bad:expr, $fmt:expr, $($arg:tt)*) => ({
*$bad = true;
eprint!("tidy error: ");
// Checks that only make sense for the std libs.
pal::check(&library_path, &mut bad);
- unit_tests::check(&library_path, &mut bad);
// Checks that need to be done for both the compiler and std libraries.
+ unit_tests::check(&src_path, &mut bad);
+ unit_tests::check(&compiler_path, &mut bad);
+ unit_tests::check(&library_path, &mut bad);
+
bins::check(&src_path, &output_directory, &mut bad);
bins::check(&compiler_path, &output_directory, &mut bad);
bins::check(&library_path, &output_directory, &mut bad);
-//! Tidy check to ensure that there are no stray `.stderr` files in UI test directories.
+//! Tidy check to ensure below in UI test directories:
+//! - the number of entries in each directory must be less than `ENTRY_LIMIT`
+//! - there are no stray `.stderr` files
use std::fs;
use std::path::Path;
+const ENTRY_LIMIT: usize = 1000;
+// FIXME: The following limits should be reduced eventually.
+const ROOT_ENTRY_LIMIT: usize = 1580;
+const ISSUES_ENTRY_LIMIT: usize = 2830;
+
+fn check_entries(path: &Path, bad: &mut bool) {
+ let dirs = walkdir::WalkDir::new(&path.join("test/ui"))
+ .into_iter()
+ .filter_entry(|e| e.file_type().is_dir());
+ for dir in dirs {
+ if let Ok(dir) = dir {
+ let dir_path = dir.path();
+
+ // Use special values for these dirs.
+ let is_root = path.join("test/ui") == dir_path;
+ let is_issues_dir = path.join("test/ui/issues") == dir_path;
+ let limit = if is_root {
+ ROOT_ENTRY_LIMIT
+ } else if is_issues_dir {
+ ISSUES_ENTRY_LIMIT
+ } else {
+ ENTRY_LIMIT
+ };
+
+ let count = std::fs::read_dir(dir_path).unwrap().count();
+ if count >= limit {
+ tidy_error!(
+ bad,
+ "following path contains more than {} entries, \
+ you should move the test to some relevant subdirectory (current: {}): {}",
+ limit,
+ count,
+ dir_path.display()
+ );
+ }
+ }
+ }
+}
+
pub fn check(path: &Path, bad: &mut bool) {
+ check_entries(&path, bad);
for path in &[&path.join("test/ui"), &path.join("test/ui-fulldeps")] {
super::walk_no_read(path, &mut |_| false, &mut |entry| {
let file_path = entry.path();
//
// For now, just make sure that there is a corresponding
// `$testname.rs` file.
- let testname = file_path
- .file_name()
- .unwrap()
- .to_str()
- .unwrap()
- .splitn(2, '.')
- .next()
- .unwrap();
+ //
+ // NB: We do not use file_stem() as some file names have multiple `.`s and we
+ // must strip all of them.
+ let testname =
+ file_path.file_name().unwrap().to_str().unwrap().split_once('.').unwrap().0;
if !file_path.with_file_name(testname).with_extension("rs").exists() {
- println!("Stray file with UI testing output: {:?}", file_path);
- *bad = true;
+ tidy_error!(bad, "Stray file with UI testing output: {:?}", file_path);
}
if let Ok(metadata) = fs::metadata(file_path) {
if metadata.len() == 0 {
- println!("Empty file with UI testing output: {:?}", file_path);
- *bad = true;
+ tidy_error!(bad, "Empty file with UI testing output: {:?}", file_path);
}
}
}