[[package]]
name = "cargo"
-version = "0.54.0"
+version = "0.55.0"
dependencies = [
"anyhow",
"atty",
[[package]]
name = "clippy"
-version = "0.1.53"
+version = "0.1.54"
dependencies = [
"cargo_metadata 0.12.0",
"clippy-mini-macro-test",
[[package]]
name = "clippy_lints"
-version = "0.1.53"
+version = "0.1.54"
dependencies = [
"cargo_metadata 0.12.0",
"clippy_utils",
"rustc-semver",
"semver 0.11.0",
"serde",
+ "serde_json",
"toml",
"unicode-normalization",
"url 2.1.1",
[[package]]
name = "clippy_utils"
-version = "0.1.53"
+version = "0.1.54"
dependencies = [
"if_chain",
"itertools 0.9.0",
[[package]]
name = "curl"
-version = "0.4.34"
+version = "0.4.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e268162af1a5fe89917ae25ba3b0a77c8da752bdc58e7dbb4f15b91fbd33756e"
+checksum = "d0bac9f84ca0977c4d9b8db998689de55b9e976656a6bc87fada2ca710d504c7"
dependencies = [
"curl-sys",
"libc",
"openssl-probe",
"openssl-sys",
"schannel",
- "socket2",
+ "socket2 0.4.0",
"winapi 0.3.9",
]
[[package]]
name = "curl-sys"
-version = "0.4.39+curl-7.74.0"
+version = "0.4.42+curl-7.76.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07a8ce861e7b68a0b394e814d7ee9f1b2750ff8bd10372c6ad3bacc10e86f874"
+checksum = "4636d8d6109c842707018a104051436bffb8991ea20b2d1293db70b6e0ee4c7c"
dependencies = [
"cc",
"libc",
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897"
dependencies = [
- "socket2",
+ "socket2 0.3.16",
"winapi 0.3.9",
]
[[package]]
name = "openssl"
-version = "0.10.30"
+version = "0.10.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4"
+checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577"
dependencies = [
"bitflags",
- "cfg-if 0.1.10",
+ "cfg-if 1.0.0",
"foreign-types",
- "lazy_static",
"libc",
+ "once_cell",
"openssl-sys",
]
[[package]]
name = "openssl-src"
-version = "111.12.0+1.1.1h"
+version = "111.15.0+1.1.1k"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "858a4132194f8570a7ee9eb8629e85b23cbc4565f2d4a162e87556e5956abf61"
+checksum = "b1a5f6ae2ac04393b217ea9f700cd04fa9bf3d93fae2872069f3d15d908af70a"
dependencies = [
"cc",
]
[[package]]
name = "openssl-sys"
-version = "0.9.58"
+version = "0.9.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de"
+checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f"
dependencies = [
"autocfg",
"cc",
"winapi 0.3.9",
]
+[[package]]
+name = "socket2"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2"
+dependencies = [
+ "libc",
+ "winapi 0.3.9",
+]
+
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
Rustdoc
-------
- [Rustdoc lints are now treated as a tool lint, meaning that
- lints are now prefixed with `rustdoc::` (e.g. `#[warn(rustdoc::non_autolinks)]`).][80527]
+ lints are now prefixed with `rustdoc::` (e.g. `#[warn(rustdoc::broken_intra_doc_links)]`).][80527]
Using the old style is still allowed, and will become a warning in
a future release.
- [Rustdoc now supports argument files.][82261]
(Some(..), Some(..), HalfOpen) => hir::LangItem::Range,
(None, Some(..), Closed) => hir::LangItem::RangeToInclusive,
(Some(..), Some(..), Closed) => unreachable!(),
- (_, None, Closed) => {
- self.diagnostic().span_fatal(span, "inclusive range with no end").raise()
- }
+ (_, None, Closed) => self.diagnostic().span_fatal(span, "inclusive range with no end"),
};
let fields = self.arena.alloc_from_iter(
);
}
}
+
+ // Check for unstable modifiers on `#[link(..)]` attribute
+ if self.sess.check_name(attr, sym::link) {
+ for nested_meta in attr.meta_item_list().unwrap_or_default() {
+ if nested_meta.has_name(sym::modifiers) {
+ gate_feature_post!(
+ self,
+ native_link_modifiers,
+ nested_meta.span(),
+ "native link modifiers are experimental"
+ );
+
+ if let Some(modifiers) = nested_meta.value_str() {
+ for modifier in modifiers.as_str().split(',') {
+ if let Some(modifier) = modifier.strip_prefix(&['+', '-'][..]) {
+ macro_rules! gate_modifier { ($($name:literal => $feature:ident)*) => {
+ $(if modifier == $name {
+ let msg = concat!("`#[link(modifiers=\"", $name, "\")]` is unstable");
+ gate_feature_post!(
+ self,
+ $feature,
+ nested_meta.name_value_literal_span().unwrap(),
+ msg
+ );
+ })*
+ }}
+
+ gate_modifier!(
+ "bundle" => native_link_modifiers_bundle
+ "verbatim" => native_link_modifiers_verbatim
+ "whole-archive" => native_link_modifiers_whole_archive
+ "as-needed" => native_link_modifiers_as_needed
+ );
+ }
+ }
+ }
+ }
+ }
+ }
}
fn visit_item(&mut self, i: &'a ast::Item) {
item: &Annotatable,
push: &mut dyn FnMut(Annotatable),
) {
+ let span = cx.with_def_site_ctxt(span);
let inline = cx.meta_word(span, sym::inline);
- let no_coverage_ident =
- rustc_ast::attr::mk_nested_word_item(Ident::new(sym::no_coverage, span));
- let no_coverage_feature =
- rustc_ast::attr::mk_list_item(Ident::new(sym::feature, span), vec![no_coverage_ident]);
- let no_coverage = cx.meta_word(span, sym::no_coverage);
let hidden = rustc_ast::attr::mk_nested_word_item(Ident::new(sym::hidden, span));
let doc = rustc_ast::attr::mk_list_item(Ident::new(sym::doc, span), vec![hidden]);
- let attrs = vec![
- cx.attribute(inline),
- cx.attribute(no_coverage_feature),
- cx.attribute(no_coverage),
- cx.attribute(doc),
- ];
+ let no_coverage = cx.meta_word(span, sym::no_coverage);
+ let attrs = vec![cx.attribute(inline), cx.attribute(doc), cx.attribute(no_coverage)];
let trait_def = TraitDef {
span,
attributes: Vec::new(),
));
}
- fn add_native_library(&mut self, name: rustc_span::symbol::Symbol) {
- let location = find_library(name, &self.lib_search_paths, self.sess);
+ fn add_native_library(&mut self, name: rustc_span::symbol::Symbol, verbatim: bool) {
+ let location = find_library(name, verbatim, &self.lib_search_paths, self.sess);
self.add_archive(location.clone(), |_| false).unwrap_or_else(|e| {
panic!("failed to add native library {}: {}", location.to_string_lossy(), e);
});
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
- fn add_native_library(&mut self, name: Symbol) {
- let location = find_library(name, &self.config.lib_search_paths, self.config.sess);
+ fn add_native_library(&mut self, name: Symbol, verbatim: bool) {
+ let location =
+ find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess);
self.add_archive(&location, |_| false).unwrap_or_else(|e| {
self.config.sess.fatal(&format!(
"failed to add native library {}: {}",
pub(crate) fn run_pass_manager(
cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
thin: bool,
-) {
+) -> Result<(), FatalError> {
let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &module.name[..]);
// Now we have one massive module inside of llmod. Time to run the
if write::should_use_new_llvm_pass_manager(config) {
let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
- // See comment below for why this is necessary.
- let opt_level = if let config::OptLevel::No = opt_level {
- config::OptLevel::Less
- } else {
- opt_level
- };
- write::optimize_with_new_llvm_pass_manager(cgcx, module, config, opt_level, opt_stage);
+ write::optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ )?;
debug!("lto done");
- return;
+ return Ok(());
}
let pm = llvm::LLVMCreatePassManager();
llvm::LLVMRustAddPass(pm, pass.unwrap());
}
- // When optimizing for LTO we don't actually pass in `-O0`, but we force
- // it to always happen at least with `-O1`.
- //
- // With ThinLTO we mess around a lot with symbol visibility in a way
- // that will actually cause linking failures if we optimize at O0 which
- // notable is lacking in dead code elimination. To ensure we at least
- // get some optimizations and correctly link we forcibly switch to `-O1`
- // to get dead code elimination.
- //
- // Note that in general this shouldn't matter too much as you typically
- // only turn on ThinLTO when you're compiling with optimizations
- // otherwise.
let opt_level = config
.opt_level
.map(|x| to_llvm_opt_settings(x).0)
.unwrap_or(llvm::CodeGenOptLevel::None);
- let opt_level = match opt_level {
- llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less,
- level => level,
- };
with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
if thin {
llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
llvm::LLVMDisposePassManager(pm);
}
debug!("lto done");
+ Ok(())
}
pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
{
info!("running thin lto passes over {}", module.name);
let config = cgcx.config(module.kind);
- run_pass_manager(cgcx, &module, config, true);
+ run_pass_manager(cgcx, &diag_handler, &module, config, true)?;
save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
}
}
pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool {
// The new pass manager is disabled by default.
- config.new_llvm_pass_manager
+ config.new_llvm_pass_manager.unwrap_or(false)
}
pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
opt_level: config::OptLevel,
opt_stage: llvm::OptStage,
-) {
+) -> Result<(), FatalError> {
let unroll_loops =
opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
std::ptr::null_mut()
};
+ let extra_passes = config.passes.join(",");
+
// FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
// We would have to add upstream support for this first, before we can support
// config.inline_threshold and our more aggressive default thresholds.
- // FIXME: NewPM uses an different and more explicit way to textually represent
- // pass pipelines. It would probably make sense to expose this, but it would
- // require a different format than the current -C passes.
- llvm::LLVMRustOptimizeWithNewPassManager(
+ let result = llvm::LLVMRustOptimizeWithNewPassManager(
module.module_llvm.llmod(),
&*module.module_llvm.tm,
to_pass_builder_opt_level(opt_level),
sanitizer_options.as_ref(),
pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.instrument_coverage,
+ config.instrument_gcov,
llvm_selfprofiler,
selfprofile_before_pass_callback,
selfprofile_after_pass_callback,
+ extra_passes.as_ptr().cast(),
+ extra_passes.len(),
);
+ result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
}
// Unsafe due to LLVM calls.
diag_handler: &Handler,
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
-) {
+) -> Result<(), FatalError> {
let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &module.name[..]);
let llmod = module.module_llvm.llmod();
_ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
_ => llvm::OptStage::PreLinkNoLTO,
};
- optimize_with_new_llvm_pass_manager(cgcx, module, config, opt_level, opt_stage);
- return;
+ return optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ );
}
if cgcx.prof.llvm_recording_enabled() {
llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
continue;
}
- if pass_name == "insert-gcov-profiling" || pass_name == "instrprof" {
- // Instrumentation must be inserted before optimization,
- // otherwise LLVM may optimize some functions away which
- // breaks llvm-cov.
- //
- // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
- llvm::LLVMRustAddPass(mpm, find_pass(pass_name).unwrap());
- continue;
- }
if let Some(pass) = find_pass(pass_name) {
extra_passes.push(pass);
}
}
+ // Instrumentation must be inserted before optimization,
+ // otherwise LLVM may optimize some functions away which
+ // breaks llvm-cov.
+ //
+ // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
+ if config.instrument_gcov {
+ llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
+ }
+ if config.instrument_coverage {
+ llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
+ }
+
add_sanitizer_passes(config, &mut extra_passes);
// Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
llvm::LLVMDisposePassManager(fpm);
llvm::LLVMDisposePassManager(mpm);
}
+ Ok(())
}
unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
use rustc_hir::def_id::{DefId, DefIdSet, LOCAL_CRATE};
use rustc_llvm::RustString;
-use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::coverage::CodeRegion;
use rustc_span::Symbol;
///
/// We can find the unused functions (including generic functions) by the set difference of all MIR
/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
-/// `collect_and_partition_mono_items`).
+/// `codegened_and_inlined_items`).
///
/// *HOWEVER* the codegenned `DefId`s are partitioned across multiple `CodegenUnit`s (CGUs), and
/// this function is processing a `function_coverage_map` for the functions (`Instance`/`DefId`)
let mut unused_def_ids_by_file: FxHashMap<Symbol, Vec<DefId>> = FxHashMap::default();
for &non_codegenned_def_id in all_def_ids.difference(codegenned_def_ids) {
- let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
- if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
- continue;
- }
- // Make sure the non-codegenned (unused) function has a file_name
+ // Make sure the non-codegenned (unused) function has at least one MIR
+ // `Coverage` statement with a code region, and return its file name.
if let Some(non_codegenned_file_name) = tcx.covered_file_name(non_codegenned_def_id) {
let def_ids =
unused_def_ids_by_file.entry(*non_codegenned_file_name).or_insert_with(Vec::new);
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
- Ok(back::write::optimize(cgcx, diag_handler, module, config))
+ back::write::optimize(cgcx, diag_handler, module, config)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool,
- ) {
- back::lto::run_pass_manager(cgcx, module, config, thin)
+ ) -> Result<(), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ back::lto::run_pass_manager(cgcx, &diag_handler, module, config, thin)
}
}
SanitizerOptions: Option<&SanitizerOptions>,
PGOGenPath: *const c_char,
PGOUsePath: *const c_char,
+ InstrumentCoverage: bool,
+ InstrumentGCOV: bool,
llvm_selfprofiler: *mut c_void,
begin_callback: SelfProfileBeforePassCallback,
end_callback: SelfProfileAfterPassCallback,
- );
+ ExtraPasses: *const c_char,
+ ExtraPassesLen: size_t,
+ ) -> LLVMRustResult;
pub fn LLVMRustPrintModule(
M: &'a Module,
Output: *const c_char,
Some(_) | None => {}
};
+ let filter = |s: &str| {
+ if s.is_empty() {
+ return None;
+ }
+ let feature = if s.starts_with("+") || s.starts_with("-") {
+ &s[1..]
+ } else {
+ return Some(s.to_string());
+ };
+ // Rustc-specific feature requests like `+crt-static` or `-crt-static`
+ // are not passed down to LLVM.
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ // ... otherwise though we run through `to_llvm_feature` feature when
+ // passing requests down to LLVM. This means that all in-language
+ // features also work on the command line instead of having two
+ // different names when the LLVM name and the Rust name differ.
+ Some(format!("{}{}", &s[..1], to_llvm_feature(sess, feature)))
+ };
+
// Features implied by an implicit or explicit `--target`.
- features.extend(
- sess.target
- .features
- .split(',')
- .filter(|f| !f.is_empty() && !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)))
- .map(String::from),
- );
+ features.extend(sess.target.features.split(',').filter_map(&filter));
// -Ctarget-features
- features.extend(
- sess.opts
- .cg
- .target_feature
- .split(',')
- .filter(|f| !f.is_empty() && !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)))
- .map(String::from),
- );
+ features.extend(sess.opts.cg.target_feature.split(',').filter_map(&filter));
features
}
use std::io;
use std::path::{Path, PathBuf};
-pub fn find_library(name: Symbol, search_paths: &[PathBuf], sess: &Session) -> PathBuf {
+pub fn find_library(
+ name: Symbol,
+ verbatim: bool,
+ search_paths: &[PathBuf],
+ sess: &Session,
+) -> PathBuf {
// On Windows, static libraries sometimes show up as libfoo.a and other
// times show up as foo.lib
- let oslibname =
- format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix);
+ let oslibname = if verbatim {
+ name.to_string()
+ } else {
+ format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix)
+ };
let unixlibname = format!("lib{}.a", name);
for path in search_paths {
lto: bool,
skip_objects: bool,
) -> io::Result<()>;
- fn add_native_library(&mut self, name: Symbol);
+ fn add_native_library(&mut self, name: Symbol, verbatim: bool);
fn update_symbols(&mut self);
fn build(self);
// metadata of the rlib we're generating somehow.
for lib in codegen_results.crate_info.used_libraries.iter() {
match lib.kind {
- NativeLibKind::StaticBundle => {}
- NativeLibKind::StaticNoBundle
- | NativeLibKind::Dylib
- | NativeLibKind::Framework
+ NativeLibKind::Static { bundle: None | Some(true), .. } => {}
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
+ | NativeLibKind::Framework { .. }
| NativeLibKind::RawDylib
| NativeLibKind::Unspecified => continue,
}
if let Some(name) = lib.name {
- ab.add_native_library(name);
+ ab.add_native_library(name, lib.verbatim.unwrap_or(false));
}
}
// Clearly this is not sufficient for a general purpose feature, and
// we'd want to read from the library's metadata to determine which
// object files come from where and selectively skip them.
- let skip_object_files = native_libs
- .iter()
- .any(|lib| lib.kind == NativeLibKind::StaticBundle && !relevant_lib(sess, lib));
+ let skip_object_files = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
ab.add_rlib(
path,
&name.as_str(),
let path = find_sanitizer_runtime(&sess, &filename);
let rpath = path.to_str().expect("non-utf8 component in path");
linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
- linker.link_dylib(Symbol::intern(&filename));
+ linker.link_dylib(Symbol::intern(&filename), false, true);
} else {
let filename = format!("librustc{}_rt.{}.a", channel, name);
let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
.filter_map(|lib| {
let name = lib.name?;
match lib.kind {
- NativeLibKind::StaticNoBundle
- | NativeLibKind::Dylib
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
| NativeLibKind::Unspecified => {
+ let verbatim = lib.verbatim.unwrap_or(false);
if sess.target.is_like_msvc {
- Some(format!("{}.lib", name))
+ Some(format!("{}{}", name, if verbatim { "" } else { ".lib" }))
+ } else if sess.target.linker_is_gnu {
+ Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
} else {
Some(format!("-l{}", name))
}
}
- NativeLibKind::Framework => {
+ NativeLibKind::Framework { .. } => {
// ld-only syntax, since there are no frameworks in MSVC
Some(format!("-framework {}", name))
}
// These are included, no need to print them
- NativeLibKind::StaticBundle | NativeLibKind::RawDylib => None,
+ NativeLibKind::Static { bundle: None | Some(true), .. }
+ | NativeLibKind::RawDylib => None,
}
})
.collect();
let target_triple = sess.opts.target_triple.triple();
let mut get_install_prefix_lib_path = || {
let install_prefix = option_env!("CFG_PREFIX").expect("CFG_PREFIX");
- let tlib = filesearch::relative_target_lib_path(&sess.sysroot, target_triple);
+ let tlib = rustc_target::target_rustlib_path(&sess.sysroot, target_triple).join("lib");
let mut path = PathBuf::from(install_prefix);
path.push(&tlib);
Some(l) => l,
None => continue,
};
+ let verbatim = lib.verbatim.unwrap_or(false);
match lib.kind {
- NativeLibKind::Dylib | NativeLibKind::Unspecified => cmd.link_dylib(name),
- NativeLibKind::Framework => cmd.link_framework(name),
- NativeLibKind::StaticNoBundle => cmd.link_staticlib(name),
- NativeLibKind::StaticBundle => cmd.link_whole_staticlib(name, &search_path),
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Static { bundle: None | Some(true), .. }
+ | NativeLibKind::Static { whole_archive: Some(true), .. } => {
+ cmd.link_whole_staticlib(name, verbatim, &search_path);
+ }
+ NativeLibKind::Static { .. } => cmd.link_staticlib(name, verbatim),
NativeLibKind::RawDylib => {
// FIXME(#58713): Proper handling for raw dylibs.
bug!("raw_dylib feature not yet implemented");
// there's a static library that's not relevant we skip all object
// files.
let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
- let skip_native = native_libs
- .iter()
- .any(|lib| lib.kind == NativeLibKind::StaticBundle && !relevant_lib(sess, lib));
+ let skip_native = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
if (!are_upstream_rust_objects_already_included(sess)
|| ignored_for_lto(sess, &codegen_results.crate_info, cnum))
if !relevant_lib(sess, &lib) {
continue;
}
+ let verbatim = lib.verbatim.unwrap_or(false);
match lib.kind {
- NativeLibKind::Dylib | NativeLibKind::Unspecified => cmd.link_dylib(name),
- NativeLibKind::Framework => cmd.link_framework(name),
- NativeLibKind::StaticNoBundle => {
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Static { bundle: Some(false), .. } => {
// Link "static-nobundle" native libs only if the crate they originate from
// is being linked statically to the current crate. If it's linked dynamically
// or is an rlib already included via some other dylib crate, the symbols from
// native libs will have already been included in that dylib.
if data[cnum.as_usize() - 1] == Linkage::Static {
- cmd.link_staticlib(name)
+ cmd.link_staticlib(name, verbatim)
}
}
// ignore statically included native libraries here as we've
// already included them when we included the rust library
// previously
- NativeLibKind::StaticBundle => {}
+ NativeLibKind::Static { bundle: None | Some(true), .. } => {}
NativeLibKind::RawDylib => {
// FIXME(#58713): Proper handling for raw dylibs.
bug!("raw_dylib feature not yet implemented");
pub trait Linker {
fn cmd(&mut self) -> &mut Command;
fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path);
- fn link_dylib(&mut self, lib: Symbol);
+ fn link_dylib(&mut self, lib: Symbol, verbatim: bool, as_needed: bool);
fn link_rust_dylib(&mut self, lib: Symbol, path: &Path);
- fn link_framework(&mut self, framework: Symbol);
- fn link_staticlib(&mut self, lib: Symbol);
+ fn link_framework(&mut self, framework: Symbol, as_needed: bool);
+ fn link_staticlib(&mut self, lib: Symbol, verbatim: bool);
fn link_rlib(&mut self, lib: &Path);
fn link_whole_rlib(&mut self, lib: &Path);
- fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]);
+ fn link_whole_staticlib(&mut self, lib: Symbol, verbatim: bool, search_path: &[PathBuf]);
fn include_path(&mut self, path: &Path);
fn framework_path(&mut self, path: &Path);
fn output_filename(&mut self, path: &Path);
fn add_object(&mut self, path: &Path);
fn gc_sections(&mut self, keep_metadata: bool);
+ fn no_gc_sections(&mut self);
fn full_relro(&mut self);
fn partial_relro(&mut self);
fn no_relro(&mut self);
}
}
- fn link_dylib(&mut self, lib: Symbol) {
+ fn link_dylib(&mut self, lib: Symbol, verbatim: bool, as_needed: bool) {
+ if self.sess.target.os == "illumos" && lib.as_str() == "c" {
+ // libc will be added via late_link_args on illumos so that it will
+ // appear last in the library search order.
+ // FIXME: This should be replaced by a more complete and generic
+ // mechanism for controlling the order of library arguments passed
+ // to the linker.
+ return;
+ }
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // FIXME(81490): ld64 doesn't support these flags but macOS 11
+ // has -needed-l{} / -needed_library {}
+ // but we have no way to detect that here.
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ } else if self.sess.target.linker_is_gnu {
+ self.linker_arg("--no-as-needed");
+ } else {
+ self.sess.warn("`as-needed` modifier not supported for current linker");
+ }
+ }
self.hint_dynamic();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // See above FIXME comment
+ } else if self.sess.target.linker_is_gnu {
+ self.linker_arg("--as-needed");
+ }
+ }
}
- fn link_staticlib(&mut self, lib: Symbol) {
+ fn link_staticlib(&mut self, lib: Symbol, verbatim: bool) {
self.hint_static();
- self.cmd.arg(format!("-l{}", lib));
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
}
fn link_rlib(&mut self, lib: &Path) {
self.hint_static();
self.cmd.arg(format!("-l{}", lib));
}
- fn link_framework(&mut self, framework: Symbol) {
+ fn link_framework(&mut self, framework: Symbol, as_needed: bool) {
self.hint_dynamic();
+ if !as_needed {
+ // FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
+ // flag but we have no way to detect that here.
+ // self.cmd.arg("-needed_framework").sym_arg(framework);
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ }
self.cmd.arg("-framework").sym_arg(framework);
}
// don't otherwise explicitly reference them. This can occur for
// libraries which are just providing bindings, libraries with generic
// functions, etc.
- fn link_whole_staticlib(&mut self, lib: Symbol, search_path: &[PathBuf]) {
+ fn link_whole_staticlib(&mut self, lib: Symbol, verbatim: bool, search_path: &[PathBuf]) {
self.hint_static();
let target = &self.sess.target;
if !target.is_like_osx {
- self.linker_arg("--whole-archive").cmd.arg(format!("-l{}", lib));
+ self.linker_arg("--whole-archive").cmd.arg(format!(
+ "-l{}{}",
+ if verbatim { ":" } else { "" },
+ lib
+ ));
self.linker_arg("--no-whole-archive");
} else {
// -force_load is the macOS equivalent of --whole-archive, but it
// involves passing the full path to the library to link.
self.linker_arg("-force_load");
- let lib = archive::find_library(lib, search_path, &self.sess);
+ let lib = archive::find_library(lib, verbatim, search_path, &self.sess);
self.linker_arg(&lib);
}
}
// insert it here.
if self.sess.target.is_like_osx {
self.linker_arg("-dead_strip");
- } else if self.sess.target.is_like_solaris {
- self.linker_arg("-zignore");
// If we're building a dylib, we don't use --gc-sections because LLVM
// has already done the best it can do, and we also don't want to
}
}
+ fn no_gc_sections(&mut self) {
+ if self.sess.target.is_like_osx {
+ self.linker_arg("-no_dead_strip");
+ } else if self.sess.target.is_like_solaris {
+ self.linker_arg("-zrecord");
+ } else {
+ self.linker_arg("--no-gc-sections");
+ }
+ }
+
fn optimize(&mut self) {
if !self.sess.target.linker_is_gnu {
return;
fn add_as_needed(&mut self) {
if self.sess.target.linker_is_gnu {
self.linker_arg("--as-needed");
+ } else if self.sess.target.is_like_solaris {
+ // -z ignore is the Solaris equivalent to the GNU ld --as-needed option
+ self.linker_arg("-z");
+ self.linker_arg("ignore");
}
}
}
}
}
- fn link_dylib(&mut self, lib: Symbol) {
- self.cmd.arg(&format!("{}.lib", lib));
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("/OPT:NOREF,NOICF");
+ }
+
+ fn link_dylib(&mut self, lib: Symbol, verbatim: bool, _as_needed: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
}
fn link_rust_dylib(&mut self, lib: Symbol, path: &Path) {
// check to see if the file is there and just omit linking to it if it's
// not present.
let name = format!("{}.dll.lib", lib);
- if fs::metadata(&path.join(&name)).is_ok() {
+ if path.join(&name).exists() {
self.cmd.arg(name);
}
}
- fn link_staticlib(&mut self, lib: Symbol) {
- self.cmd.arg(&format!("{}.lib", lib));
+ fn link_staticlib(&mut self, lib: Symbol, verbatim: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
}
fn full_relro(&mut self) {
fn framework_path(&mut self, _path: &Path) {
bug!("frameworks are not supported on windows")
}
- fn link_framework(&mut self, _framework: Symbol) {
+ fn link_framework(&mut self, _framework: Symbol, _as_needed: bool) {
bug!("frameworks are not supported on windows")
}
- fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
- self.link_staticlib(lib);
- self.cmd.arg(format!("/WHOLEARCHIVE:{}.lib", lib));
+ fn link_whole_staticlib(&mut self, lib: Symbol, verbatim: bool, _search_path: &[PathBuf]) {
+ self.cmd.arg(format!("/WHOLEARCHIVE:{}{}", lib, if verbatim { "" } else { ".lib" }));
}
fn link_whole_rlib(&mut self, path: &Path) {
- self.link_rlib(path);
let mut arg = OsString::from("/WHOLEARCHIVE:");
arg.push(path);
self.cmd.arg(arg);
self.cmd.arg("-L").arg(path);
}
- fn link_staticlib(&mut self, lib: Symbol) {
+ fn link_staticlib(&mut self, lib: Symbol, _verbatim: bool) {
self.cmd.arg("-l").sym_arg(lib);
}
self.cmd.arg(path);
}
- fn link_dylib(&mut self, lib: Symbol) {
+ fn link_dylib(&mut self, lib: Symbol, verbatim: bool, _as_needed: bool) {
// Emscripten always links statically
- self.link_staticlib(lib);
+ self.link_staticlib(lib, verbatim);
}
- fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
+ fn link_whole_staticlib(&mut self, lib: Symbol, verbatim: bool, _search_path: &[PathBuf]) {
// not supported?
- self.link_staticlib(lib);
+ self.link_staticlib(lib, verbatim);
}
fn link_whole_rlib(&mut self, lib: &Path) {
}
fn link_rust_dylib(&mut self, lib: Symbol, _path: &Path) {
- self.link_dylib(lib);
+ self.link_dylib(lib, false, true);
}
fn link_rlib(&mut self, lib: &Path) {
bug!("frameworks are not supported on Emscripten")
}
- fn link_framework(&mut self, _framework: Symbol) {
+ fn link_framework(&mut self, _framework: Symbol, _as_needed: bool) {
bug!("frameworks are not supported on Emscripten")
}
// noop
}
+ fn no_gc_sections(&mut self) {
+ // noop
+ }
+
fn optimize(&mut self) {
// Emscripten performs own optimizations
self.cmd.arg(match self.sess.opts.optimize {
}
}
- fn link_dylib(&mut self, lib: Symbol) {
+ fn link_dylib(&mut self, lib: Symbol, _verbatim: bool, _as_needed: bool) {
self.cmd.arg("-l").sym_arg(lib);
}
- fn link_staticlib(&mut self, lib: Symbol) {
+ fn link_staticlib(&mut self, lib: Symbol, _verbatim: bool) {
self.cmd.arg("-l").sym_arg(lib);
}
self.cmd.arg("-l").sym_arg(lib);
}
- fn link_framework(&mut self, _framework: Symbol) {
+ fn link_framework(&mut self, _framework: Symbol, _as_needed: bool) {
panic!("frameworks not supported")
}
- fn link_whole_staticlib(&mut self, lib: Symbol, _search_path: &[PathBuf]) {
+ fn link_whole_staticlib(&mut self, lib: Symbol, _verbatim: bool, _search_path: &[PathBuf]) {
self.cmd.arg("-l").sym_arg(lib);
}
self.cmd.arg("--gc-sections");
}
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("--no-gc-sections");
+ }
+
fn optimize(&mut self) {
self.cmd.arg(match self.sess.opts.optimize {
OptLevel::No => "-O0",
});
}
- fn link_dylib(&mut self, _lib: Symbol) {
+ fn link_dylib(&mut self, _lib: Symbol, _verbatim: bool, _as_needed: bool) {
panic!("external dylibs not supported")
}
panic!("external dylibs not supported")
}
- fn link_staticlib(&mut self, _lib: Symbol) {
+ fn link_staticlib(&mut self, _lib: Symbol, _verbatim: bool) {
panic!("staticlibs not supported")
}
- fn link_whole_staticlib(&mut self, _lib: Symbol, _search_path: &[PathBuf]) {
+ fn link_whole_staticlib(&mut self, _lib: Symbol, _verbatim: bool, _search_path: &[PathBuf]) {
panic!("staticlibs not supported")
}
panic!("frameworks not supported")
}
- fn link_framework(&mut self, _framework: Symbol) {
+ fn link_framework(&mut self, _framework: Symbol, _as_needed: bool) {
panic!("frameworks not supported")
}
fn gc_sections(&mut self, _keep_metadata: bool) {}
+ fn no_gc_sections(&mut self) {}
+
fn pgo_gen(&mut self) {}
fn no_crt_objects(&mut self) {}
let module = module.take().unwrap();
{
let config = cgcx.config(module.kind);
- B::run_lto_pass_manager(cgcx, &module, config, false);
+ B::run_lto_pass_manager(cgcx, &module, config, false)?;
}
Ok(module)
}
pub pgo_gen: SwitchWithOptPath,
pub pgo_use: Option<PathBuf>,
+ pub instrument_coverage: bool,
+ pub instrument_gcov: bool,
pub sanitizer: SanitizerSet,
pub sanitizer_recover: SanitizerSet,
pub vectorize_slp: bool,
pub merge_functions: bool,
pub inline_threshold: Option<u32>,
- pub new_llvm_pass_manager: bool,
+ pub new_llvm_pass_manager: Option<bool>,
pub emit_lifetime_markers: bool,
}
};
ModuleConfig {
- passes: if_regular!(
- {
- let mut passes = sess.opts.cg.passes.clone();
- // compiler_builtins overrides the codegen-units settings,
- // which is incompatible with -Zprofile which requires that
- // only a single codegen unit is used per crate.
- if sess.opts.debugging_opts.profile && !is_compiler_builtins {
- passes.push("insert-gcov-profiling".to_owned());
- }
-
- // The rustc option `-Zinstrument_coverage` injects intrinsic calls to
- // `llvm.instrprof.increment()`, which requires the LLVM `instrprof` pass.
- if sess.instrument_coverage() {
- passes.push("instrprof".to_owned());
- }
- passes
- },
- vec![]
- ),
+ passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),
opt_level: opt_level_and_size,
opt_size: opt_level_and_size,
SwitchWithOptPath::Disabled
),
pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
+ instrument_coverage: if_regular!(sess.instrument_coverage(), false),
+ instrument_gcov: if_regular!(
+ // compiler_builtins overrides the codegen-units settings,
+ // which is incompatible with -Zprofile which requires that
+ // only a single codegen unit is used per crate.
+ sess.opts.debugging_opts.profile && !is_compiler_builtins,
+ false
+ ),
sanitizer: if_regular!(sess.opts.debugging_opts.sanitizer, SanitizerSet::empty()),
sanitizer_recover: if_regular!(
if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
return None;
}
- } else {
- // FIXME: Add support for non-local main fn codegen
- let span = cx.tcx().main_def.unwrap().span;
- let n = 28937;
- cx.sess()
- .struct_span_err(span, "entry symbol `main` from foreign crate is not yet supported.")
- .note(&format!(
- "see issue #{} <https://github.com/rust-lang/rust/issues/{}> \
- for more information",
- n, n,
- ))
- .emit();
- cx.sess().abort_if_errors();
- bug!();
+ } else if !cx.codegen_unit().is_primary() {
+ // We want to create the wrapper only when the codegen unit is the primary one
+ return None;
}
let main_llfn = cx.get_fn_addr(instance);
pub kind: NativeLibKind,
pub name: Option<Symbol>,
pub cfg: Option<ast::MetaItem>,
+ pub verbatim: Option<bool>,
}
impl From<&cstore::NativeLib> for NativeLib {
fn from(lib: &cstore::NativeLib) -> Self {
- NativeLib { kind: lib.kind, name: lib.name, cfg: lib.cfg.clone() }
+ NativeLib { kind: lib.kind, name: lib.name, cfg: lib.cfg.clone(), verbatim: lib.verbatim }
}
}
llmod: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool,
- );
+ ) -> Result<(), FatalError>;
}
pub trait ThinBufferMethods: Send + Sync {
// Returns Err(()) if we already know this obligation failed.
fn register_obligation_at(&mut self, obligation: O, parent: Option<usize>) -> Result<(), ()> {
- if self.done_cache.contains(&obligation.as_cache_key()) {
+ let cache_key = obligation.as_cache_key();
+ if self.done_cache.contains(&cache_key) {
debug!("register_obligation_at: ignoring already done obligation: {:?}", obligation);
return Ok(());
}
- match self.active_cache.entry(obligation.as_cache_key()) {
+ match self.active_cache.entry(cache_key.clone()) {
Entry::Occupied(o) => {
let node = &mut self.nodes[*o.get()];
if let Some(parent_index) = parent {
&& self
.error_cache
.get(&obligation_tree_id)
- .map(|errors| errors.contains(&obligation.as_cache_key()))
+ .map(|errors| errors.contains(&cache_key))
.unwrap_or(false);
if already_failed {
msg: &str,
suggestion: Vec<(Span, String)>,
applicability: Applicability,
+ ) -> &mut Self {
+ self.multipart_suggestion_with_style(
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::ShowCode,
+ )
+ }
+
+ /// [`Diagnostic::multipart_suggestion()`] but you can set the [`SuggestionStyle`].
+ pub fn multipart_suggestion_with_style(
+ &mut self,
+ msg: &str,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ style: SuggestionStyle,
) -> &mut Self {
assert!(!suggestion.is_empty());
self.suggestions.push(CodeSuggestion {
.collect(),
}],
msg: msg.to_owned(),
- style: SuggestionStyle::ShowCode,
+ style,
applicability,
tool_metadata: Default::default(),
});
pub use diagnostic::{Diagnostic, DiagnosticId, DiagnosticStyledString, SubDiagnostic};
pub use diagnostic_builder::DiagnosticBuilder;
+use std::backtrace::Backtrace;
/// A handler deals with errors and other compiler output.
/// Certain errors (fatal, bug, unimpl) may cause immediate exit,
deduplicated_err_count: usize,
emitter: Box<dyn Emitter + sync::Send>,
delayed_span_bugs: Vec<Diagnostic>,
- delayed_good_path_bugs: Vec<Diagnostic>,
+ delayed_good_path_bugs: Vec<DelayedDiagnostic>,
/// This set contains the `DiagnosticId` of all emitted diagnostics to avoid
/// emitting the same diagnostic with extended help (`--teach`) twice, which
if !self.has_any_message() {
let bugs = std::mem::replace(&mut self.delayed_good_path_bugs, Vec::new());
self.flush_delayed(
- bugs,
+ bugs.into_iter().map(DelayedDiagnostic::decorate).collect(),
"no warnings or errors encountered even though `delayed_good_path_bugs` issued",
);
}
DiagnosticBuilder::new(self, Level::Note, msg)
}
- pub fn span_fatal(&self, span: impl Into<MultiSpan>, msg: &str) -> FatalError {
+ pub fn span_fatal(&self, span: impl Into<MultiSpan>, msg: &str) -> ! {
self.emit_diag_at_span(Diagnostic::new(Fatal, msg), span);
- FatalError
+ FatalError.raise()
}
pub fn span_fatal_with_code(
span: impl Into<MultiSpan>,
msg: &str,
code: DiagnosticId,
- ) -> FatalError {
+ ) -> ! {
self.emit_diag_at_span(Diagnostic::new_with_code(Fatal, Some(code), msg), span);
- FatalError
+ FatalError.raise()
}
pub fn span_err(&self, span: impl Into<MultiSpan>, msg: &str) {
db
}
+ // NOTE: intentionally doesn't raise an error so rustc_codegen_ssa only reports fatal errors in the main thread
pub fn fatal(&self, msg: &str) -> FatalError {
self.inner.borrow_mut().fatal(msg)
}
}
fn delay_good_path_bug(&mut self, msg: &str) {
- let mut diagnostic = Diagnostic::new(Level::Bug, msg);
+ let diagnostic = Diagnostic::new(Level::Bug, msg);
if self.flags.report_delayed_bugs {
self.emit_diagnostic(&diagnostic);
}
- diagnostic.note(&format!("delayed at {}", std::backtrace::Backtrace::force_capture()));
- self.delayed_good_path_bugs.push(diagnostic);
+ let backtrace = std::backtrace::Backtrace::force_capture();
+ self.delayed_good_path_bugs.push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace));
}
fn failure(&mut self, msg: &str) {
}
}
+struct DelayedDiagnostic {
+ inner: Diagnostic,
+ note: Backtrace,
+}
+
+impl DelayedDiagnostic {
+ fn with_backtrace(diagnostic: Diagnostic, backtrace: Backtrace) -> Self {
+ DelayedDiagnostic { inner: diagnostic, note: backtrace }
+ }
+
+ fn decorate(mut self) -> Diagnostic {
+ self.inner.note(&format!("delayed at {}", self.note));
+ self.inner
+ }
+}
+
#[derive(Copy, PartialEq, Clone, Hash, Debug, Encodable, Decodable)]
pub enum Level {
Bug,
return true;
}
};
- let error = |span, msg, suggestion: &str| {
- let mut err = self.sess.parse_sess.span_diagnostic.struct_span_err(span, msg);
- if !suggestion.is_empty() {
- err.span_suggestion(
- span,
- "expected syntax is",
- suggestion.into(),
- Applicability::MaybeIncorrect,
- );
- }
- err.emit();
- true
- };
- let span = meta_item.span;
- match meta_item.meta_item_list() {
- None => error(span, "`cfg` is not followed by parentheses", "cfg(/* predicate */)"),
- Some([]) => error(span, "`cfg` predicate is not specified", ""),
- Some([_, .., l]) => error(l.span(), "multiple `cfg` predicates are specified", ""),
- Some([single]) => match single.meta_item() {
- Some(meta_item) => {
- attr::cfg_matches(meta_item, &self.sess.parse_sess, self.features)
- }
- None => error(single.span(), "`cfg` predicate key cannot be a literal", ""),
- },
- }
+ parse_cfg(&meta_item, &self.sess).map_or(true, |meta_item| {
+ attr::cfg_matches(&meta_item, &self.sess.parse_sess, self.features)
+ })
})
}
}
}
+pub fn parse_cfg<'a>(meta_item: &'a MetaItem, sess: &Session) -> Option<&'a MetaItem> {
+ let error = |span, msg, suggestion: &str| {
+ let mut err = sess.parse_sess.span_diagnostic.struct_span_err(span, msg);
+ if !suggestion.is_empty() {
+ err.span_suggestion(
+ span,
+ "expected syntax is",
+ suggestion.into(),
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.emit();
+ None
+ };
+ let span = meta_item.span;
+ match meta_item.meta_item_list() {
+ None => error(span, "`cfg` is not followed by parentheses", "cfg(/* predicate */)"),
+ Some([]) => error(span, "`cfg` predicate is not specified", ""),
+ Some([_, .., l]) => error(l.span(), "multiple `cfg` predicates are specified", ""),
+ Some([single]) => match single.meta_item() {
+ Some(meta_item) => Some(meta_item),
+ None => error(single.span(), "`cfg` predicate key cannot be a literal", ""),
+ },
+ }
+}
+
fn is_cfg(sess: &Session, attr: &Attribute) -> bool {
sess.check_name(attr, sym::cfg)
}
pub enum ModError<'a> {
CircularInclusion(Vec<PathBuf>),
ModInBlock(Option<Ident>),
- FileNotFound(Ident, PathBuf),
+ FileNotFound(Ident, PathBuf, PathBuf),
MultipleCandidates(Ident, PathBuf, PathBuf),
ParserError(DiagnosticBuilder<'a>),
}
file_path: secondary_path,
dir_ownership: DirOwnership::Owned { relative: None },
}),
- (false, false) => Err(ModError::FileNotFound(ident, default_path)),
+ (false, false) => Err(ModError::FileNotFound(ident, default_path, secondary_path)),
(true, true) => Err(ModError::MultipleCandidates(ident, default_path, secondary_path)),
}
}
}
err
}
- ModError::FileNotFound(ident, default_path) => {
+ ModError::FileNotFound(ident, default_path, secondary_path) => {
let mut err = struct_span_err!(
diag,
span,
ident,
);
err.help(&format!(
- "to create the module `{}`, create file \"{}\"",
+ "to create the module `{}`, create file \"{}\" or \"{}\"",
ident,
default_path.display(),
+ secondary_path.display(),
));
err
}
/// Allows using non lexical lifetimes (RFC 2094).
(active, nll, "1.0.0", Some(43234), None),
- /// Allows the definition of `const` functions with some advanced features.
- (active, const_fn, "1.2.0", Some(57563), None),
-
/// Allows associated type defaults.
(active, associated_type_defaults, "1.2.0", Some(29661), None),
/// Allows using imported `main` function
(active, imported_main, "1.53.0", Some(28937), None),
+ /// Allows specifying modifiers in the link attribute: `#[link(modifiers = "...")]`
+ (active, native_link_modifiers, "1.53.0", Some(81490), None),
+
+ /// Allows specifying the bundle link modifier
+ (active, native_link_modifiers_bundle, "1.53.0", Some(81490), None),
+
+ /// Allows specifying the verbatim link modifier
+ (active, native_link_modifiers_verbatim, "1.53.0", Some(81490), None),
+
+ /// Allows specifying the whole-archive link modifier
+ (active, native_link_modifiers_whole_archive, "1.53.0", Some(81490), None),
+
+ /// Allows specifying the as-needed link modifier
+ (active, native_link_modifiers_as_needed, "1.53.0", Some(81490), None),
+
// -------------------------------------------------------------------------
// feature-group-end: actual feature gates
// -------------------------------------------------------------------------
sym::const_generics_defaults,
sym::inherent_associated_types,
sym::type_alias_impl_trait,
+ sym::native_link_modifiers,
+ sym::native_link_modifiers_bundle,
+ sym::native_link_modifiers_verbatim,
+ sym::native_link_modifiers_whole_archive,
+ sym::native_link_modifiers_as_needed,
];
/// Some features are not allowed to be used together at the same time, if
template!(List: "address, memory, thread"),
experimental!(no_sanitize)
),
- ungated!(
- // Not exclusively gated at the crate level (though crate-level is
- // supported). The feature can alternatively be enabled on individual
- // functions.
- no_coverage, AssumedUsed,
- template!(Word),
- ),
+ gated!(no_coverage, AssumedUsed, template!(Word), experimental!(no_coverage)),
// FIXME: #14408 assume docs are used since rustdoc looks at them.
ungated!(doc, AssumedUsed, template!(List: "hidden|inline|...", NameValueStr: "string")),
(removed, main, "1.53.0", Some(29634), None, None),
(removed, pub_macro_rules, "1.53.0", Some(78855), None,
Some("removed due to being incomplete, in particular it does not work across crates")),
+ /// Allows the definition of `const` functions with some advanced features.
+ (removed, const_fn, "1.54.0", Some(57563), None,
+ Some("split into finer-grained feature gates")),
// -------------------------------------------------------------------------
// feature-group-end: removed features
self.tcx.associated_item(def_id).ident
),
infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name),
- infer::BoundRegionInCoherence(name) => {
- format!(" for lifetime parameter `{}` in coherence check", name)
- }
infer::UpvarRegion(ref upvar_id, _) => {
let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
format!(" for capture of `{}` by closure", var_name)
UpvarRegion(ty::UpvarId, Span),
- BoundRegionInCoherence(Symbol),
-
/// This origin is used for the inference variables that we create
/// during NLL region processing.
Nll(NllRegionVariableOrigin),
| EarlyBoundRegion(a, ..)
| LateBoundRegion(a, ..)
| UpvarRegion(_, a) => a,
- BoundRegionInCoherence(_) => rustc_span::DUMMY_SP,
Nll(..) => bug!("NLL variable used with `span`"),
}
}
};
use rustc_session::lint::Level;
use rustc_session::search_paths::SearchPath;
-use rustc_session::utils::{CanonicalizedPath, NativeLibKind};
+use rustc_session::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
use rustc_session::{build_session, getopts, DiagnosticOutput, Session};
use rustc_span::edition::{Edition, DEFAULT_EDITION};
use rustc_span::symbol::sym;
let mut v2 = Options::default();
let mut v3 = Options::default();
let mut v4 = Options::default();
+ let mut v5 = Options::default();
// Reference
v1.libs = vec![
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("b"), None, NativeLibKind::Framework),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
// Change label
v2.libs = vec![
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("X"), None, NativeLibKind::Framework),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("X"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
// Change kind
v3.libs = vec![
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("b"), None, NativeLibKind::StaticBundle),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
// Change new-name
v4.libs = vec![
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("b"), Some(String::from("X")), NativeLibKind::Framework),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: Some(String::from("X")),
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ // Change verbatim
+ v5.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: Some(true),
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
assert_different_hash(&v1, &v2);
assert_different_hash(&v1, &v3);
assert_different_hash(&v1, &v4);
+ assert_different_hash(&v1, &v5);
}
#[test]
// Reference
v1.libs = vec![
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("b"), None, NativeLibKind::Framework),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
v2.libs = vec![
- (String::from("b"), None, NativeLibKind::Framework),
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("c"), None, NativeLibKind::Unspecified),
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
];
v3.libs = vec![
- (String::from("c"), None, NativeLibKind::Unspecified),
- (String::from("a"), None, NativeLibKind::StaticBundle),
- (String::from("b"), None, NativeLibKind::Framework),
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
];
assert_same_hash(&v1, &v2);
tracked!(mir_emit_retag, true);
tracked!(mir_opt_level, Some(4));
tracked!(mutable_noalias, Some(true));
- tracked!(new_llvm_pass_manager, true);
+ tracked!(new_llvm_pass_manager, Some(true));
tracked!(no_codegen, true);
tracked!(no_generate_arange_section, true);
tracked!(no_link, true);
.iter()
.chain(sysroot_candidates.iter())
.map(|sysroot| {
- let libdir = filesearch::relative_target_lib_path(&sysroot, &target);
- sysroot.join(libdir).with_file_name("codegen-backends")
+ filesearch::make_target_lib_path(&sysroot, &target).with_file_name("codegen-backends")
})
.find(|f| {
info!("codegen backend candidate: {}", f.display());
krate,
EarlyLintPassObjects { lints: &mut passes[..] },
buffered,
- pre_expansion,
+ false,
);
}
} else {
- for pass in &mut passes {
+ for (i, pass) in passes.iter_mut().enumerate() {
buffered =
sess.prof.extra_verbose_generic_activity("run_lint", pass.name()).run(|| {
early_lint_crate(
krate,
EarlyLintPassObjects { lints: slice::from_mut(pass) },
buffered,
- pre_expansion,
+ pre_expansion && i == 0,
)
});
}
use rustc_hir as hir;
use rustc_middle::ty;
use rustc_parse_format::{ParseMode, Parser, Piece};
-use rustc_span::{sym, symbol::kw, InnerSpan, Span, Symbol};
+use rustc_span::{hygiene, sym, symbol::kw, symbol::SymbolStr, InnerSpan, Span, Symbol};
declare_lint! {
/// The `non_fmt_panic` lint detects `panic!(..)` invocations where the first
// The argument is *not* a string literal.
- let (span, panic) = panic_call(cx, f);
+ let (span, panic, symbol_str) = panic_call(cx, f);
// Find the span of the argument to `panic!()`, before expansion in the
// case of `panic!(some_macro!())`.
}
if arg_macro.map_or(false, |id| cx.tcx.is_diagnostic_item(sym::format_macro, id)) {
// A case of `panic!(format!(..))`.
- l.note("the panic!() macro supports formatting, so there's no need for the format!() macro here");
+ l.note(format!("the {}!() macro supports formatting, so there's no need for the format!() macro here", symbol_str).as_str());
if let Some((open, close, _)) = find_delimiters(cx, arg_span) {
l.multipart_suggestion(
"remove the `format!(..)` macro call",
Parser::new(fmt.as_ref(), style, snippet.clone(), false, ParseMode::Format);
let n_arguments = (&mut fmt_parser).filter(|a| matches!(a, Piece::NextArgument(_))).count();
- let (span, _) = panic_call(cx, f);
+ let (span, _, _) = panic_call(cx, f);
if n_arguments > 0 && fmt_parser.errors.is_empty() {
let arg_spans: Vec<_> = match &fmt_parser.arg_places[..] {
))
}
-fn panic_call<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>) -> (Span, Symbol) {
+fn panic_call<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>) -> (Span, Symbol, SymbolStr) {
let mut expn = f.span.ctxt().outer_expn_data();
let mut panic_macro = kw::Empty;
}
}
- (expn.call_site, panic_macro)
+ let macro_symbol = if let hygiene::ExpnKind::Macro(_, symbol) = expn.kind {
+ symbol
+ } else {
+ Symbol::intern("panic")
+ };
+ (expn.call_site, panic_macro, macro_symbol.as_str())
}
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
+#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
bool SanitizeHWAddressRecover;
};
-extern "C" void
+extern "C" LLVMRustResult
LLVMRustOptimizeWithNewPassManager(
LLVMModuleRef ModuleRef,
LLVMTargetMachineRef TMRef,
bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
LLVMRustSanitizerOptions *SanitizerOptions,
const char *PGOGenPath, const char *PGOUsePath,
+ bool InstrumentCoverage, bool InstrumentGCOV,
void* LlvmSelfProfiler,
LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
- LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
+ LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
+ const char *ExtraPasses, size_t ExtraPassesLen) {
Module *TheModule = unwrap(ModuleRef);
TargetMachine *TM = unwrap(TMRef);
PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
);
}
+ if (InstrumentGCOV) {
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
+ }
+ );
+ }
+
+ if (InstrumentCoverage) {
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ InstrProfOptions Options;
+ MPM.addPass(InstrProfiling(Options, false));
+ }
+ );
+ }
+
if (SanitizerOptions) {
if (SanitizerOptions->SanitizeMemory) {
MemorySanitizerOptions Options(
}
}
+ if (ExtraPassesLen) {
+ if (auto Err = PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
+ std::string ErrMsg = toString(std::move(Err));
+ LLVMRustSetLastError(ErrMsg.c_str());
+ return LLVMRustResult::Failure;
+ }
+ }
+
if (NeedThinLTOBufferPasses) {
MPM.addPass(CanonicalizeAliasesPass());
MPM.addPass(NameAnonGlobalPass());
UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
MPM.run(*TheModule, MAM);
+ return LLVMRustResult::Success;
}
// Callback to demangle function name
use rustc_session::parse::feature_err;
use rustc_session::utils::NativeLibKind;
use rustc_session::Session;
-use rustc_span::source_map::Span;
use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::Span;
use rustc_target::spec::abi::Abi;
crate fn collect(tcx: TyCtxt<'_>) -> Vec<NativeLib> {
cfg: None,
foreign_module: Some(it.def_id.to_def_id()),
wasm_import_module: None,
+ verbatim: None,
};
let mut kind_specified = false;
None => continue, // skip like historical compilers
};
lib.kind = match &*kind.as_str() {
- "static" => NativeLibKind::StaticBundle,
- "static-nobundle" => NativeLibKind::StaticNoBundle,
- "dylib" => NativeLibKind::Dylib,
- "framework" => NativeLibKind::Framework,
+ "static" => NativeLibKind::Static { bundle: None, whole_archive: None },
+ "static-nobundle" => {
+ sess.struct_span_warn(
+ item.span(),
+ "library kind `static-nobundle` has been superseded by specifying \
+ modifier `-bundle` with library kind `static`",
+ )
+ .emit();
+ NativeLibKind::Static { bundle: Some(false), whole_archive: None }
+ }
+ "dylib" => NativeLibKind::Dylib { as_needed: None },
+ "framework" => NativeLibKind::Framework { as_needed: None },
"raw-dylib" => NativeLibKind::RawDylib,
k => {
struct_span_err!(sess, item.span(), E0458, "unknown kind: `{}`", k)
}
}
+ // Do this outside the above loop so we don't depend on modifiers coming
+ // after kinds
+ if let Some(item) = items.iter().find(|item| item.has_name(sym::modifiers)) {
+ if let Some(modifiers) = item.value_str() {
+ let span = item.name_value_literal_span().unwrap();
+ for modifier in modifiers.as_str().split(',') {
+ let (modifier, value) = match modifier.strip_prefix(&['+', '-'][..]) {
+ Some(m) => (m, modifier.starts_with('+')),
+ None => {
+ sess.span_err(
+ span,
+ "invalid linking modifier syntax, expected '+' or '-' prefix \
+ before one of: bundle, verbatim, whole-archive, as-needed",
+ );
+ continue;
+ }
+ };
+
+ match (modifier, &mut lib.kind) {
+ ("bundle", NativeLibKind::Static { bundle, .. }) => {
+ *bundle = Some(value);
+ }
+ ("bundle", _) => sess.span_err(
+ span,
+ "bundle linking modifier is only compatible with \
+ `static` linking kind",
+ ),
+
+ ("verbatim", _) => lib.verbatim = Some(value),
+
+ ("whole-archive", NativeLibKind::Static { whole_archive, .. }) => {
+ *whole_archive = Some(value);
+ }
+ ("whole-archive", _) => sess.span_err(
+ span,
+ "whole-archive linking modifier is only compatible with \
+ `static` linking kind",
+ ),
+
+ ("as-needed", NativeLibKind::Dylib { as_needed })
+ | ("as-needed", NativeLibKind::Framework { as_needed }) => {
+ *as_needed = Some(value);
+ }
+ ("as-needed", _) => sess.span_err(
+ span,
+ "as-needed linking modifier is only compatible with \
+ `dylib` and `framework` linking kinds",
+ ),
+
+ _ => sess.span_err(
+ span,
+ &format!(
+ "unrecognized linking modifier `{}`, expected one \
+ of: bundle, verbatim, whole-archive, as-needed",
+ modifier
+ ),
+ ),
+ }
+ }
+ } else {
+ let msg = "must be of the form `#[link(modifiers = \"...\")]`";
+ sess.span_err(item.span(), msg);
+ }
+ }
+
// In general we require #[link(name = "...")] but we allow
// #[link(wasm_import_module = "...")] without the `name`.
let requires_name = kind_specified || lib.wasm_import_module.is_none();
return;
}
let is_osx = self.tcx.sess.target.is_like_osx;
- if lib.kind == NativeLibKind::Framework && !is_osx {
+ if matches!(lib.kind, NativeLibKind::Framework { .. }) && !is_osx {
let msg = "native frameworks are only available on macOS targets";
match span {
Some(span) => struct_span_err!(self.tcx.sess, span, E0455, "{}", msg).emit(),
)
.emit();
}
- if lib.kind == NativeLibKind::StaticNoBundle && !self.tcx.features().static_nobundle {
+ if matches!(lib.kind, NativeLibKind::Static { bundle: Some(false), .. })
+ && !self.tcx.features().static_nobundle
+ {
feature_err(
&self.tcx.sess.parse_sess,
sym::static_nobundle,
fn process_command_line(&mut self) {
// First, check for errors
let mut renames = FxHashSet::default();
- for (name, new_name, _) in &self.tcx.sess.opts.libs {
- if let Some(ref new_name) = new_name {
+ for lib in &self.tcx.sess.opts.libs {
+ if let Some(ref new_name) = lib.new_name {
let any_duplicate = self
.libs
.iter()
.filter_map(|lib| lib.name.as_ref())
- .any(|n| &n.as_str() == name);
+ .any(|n| &n.as_str() == &lib.name);
if new_name.is_empty() {
self.tcx.sess.err(&format!(
"an empty renaming target was specified for library `{}`",
- name
+ lib.name
));
} else if !any_duplicate {
self.tcx.sess.err(&format!(
"renaming of the library `{}` was specified, \
however this crate contains no `#[link(...)]` \
attributes referencing this library.",
- name
+ lib.name
));
- } else if !renames.insert(name) {
+ } else if !renames.insert(&lib.name) {
self.tcx.sess.err(&format!(
"multiple renamings were \
specified for library `{}` .",
- name
+ lib.name
));
}
}
// it. (This ensures that the linker is able to see symbols from
// all possible dependent libraries before linking in the library
// in question.)
- for &(ref name, ref new_name, kind) in &self.tcx.sess.opts.libs {
+ for passed_lib in &self.tcx.sess.opts.libs {
// If we've already added any native libraries with the same
// name, they will be pulled out into `existing`, so that we
// can move them to the end of the list below.
.libs
.drain_filter(|lib| {
if let Some(lib_name) = lib.name {
- if lib_name.as_str() == *name {
- if kind != NativeLibKind::Unspecified {
- lib.kind = kind;
+ if lib_name.as_str() == passed_lib.name {
+ if passed_lib.kind != NativeLibKind::Unspecified {
+ lib.kind = passed_lib.kind;
}
- if let Some(new_name) = new_name {
+ if let Some(new_name) = &passed_lib.new_name {
lib.name = Some(Symbol::intern(new_name));
}
+ lib.verbatim = passed_lib.verbatim;
return true;
}
}
.collect::<Vec<_>>();
if existing.is_empty() {
// Add if not found
- let new_name = new_name.as_ref().map(|s| &**s); // &Option<String> -> Option<&str>
+ let new_name = passed_lib.new_name.as_ref().map(|s| &**s); // &Option<String> -> Option<&str>
let lib = NativeLib {
- name: Some(Symbol::intern(new_name.unwrap_or(name))),
- kind,
+ name: Some(Symbol::intern(new_name.unwrap_or(&passed_lib.name))),
+ kind: passed_lib.kind,
cfg: None,
foreign_module: None,
wasm_import_module: None,
+ verbatim: passed_lib.verbatim,
};
self.register_native_lib(None, lib);
} else {
// resolve! Does this work? Unsure! That's what the issue is about
*providers = Providers {
is_dllimport_foreign_item: |tcx, id| match tcx.native_library_kind(id) {
- Some(NativeLibKind::Dylib | NativeLibKind::RawDylib | NativeLibKind::Unspecified) => {
- true
- }
+ Some(
+ NativeLibKind::Dylib { .. } | NativeLibKind::RawDylib | NativeLibKind::Unspecified,
+ ) => true,
_ => false,
},
is_statically_included_foreign_item: |tcx, id| {
- matches!(
- tcx.native_library_kind(id),
- Some(NativeLibKind::StaticBundle | NativeLibKind::StaticNoBundle)
- )
+ matches!(tcx.native_library_kind(id), Some(NativeLibKind::Static { .. }))
},
native_library_kind: |tcx, id| {
tcx.native_libraries(id.krate)
use self::collector::NodeCollector;
-use crate::hir::{HirOwnerData, IndexedHir};
+use crate::hir::{AttributeMap, HirOwnerData, IndexedHir};
use crate::middle::cstore::CrateStore;
use crate::ty::TyCtxt;
use rustc_ast as ast;
}
pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh {
+ let mut hcx = tcx.create_stable_hashing_context();
+
let mut hir_body_nodes: Vec<_> = tcx
.index_hir(crate_num)
.map
.iter_enumerated()
.filter_map(|(def_id, hod)| {
let def_path_hash = tcx.definitions.def_path_hash(def_id);
- let hash = hod.with_bodies.as_ref()?.hash;
- Some((def_path_hash, hash))
+ let mut hasher = StableHasher::new();
+ hod.with_bodies.as_ref()?.hash_stable(&mut hcx, &mut hasher);
+ AttributeMap { map: &tcx.untracked_crate.attrs, prefix: def_id }
+ .hash_stable(&mut hcx, &mut hasher);
+ Some((def_path_hash, hasher.finish()))
})
.collect();
hir_body_nodes.sort_unstable_by_key(|bn| bn.0);
source_file_names.sort_unstable();
- let mut hcx = tcx.create_stable_hashing_context();
let mut stable_hasher = StableHasher::new();
node_hashes.hash_stable(&mut hcx, &mut stable_hasher);
upstream_crates.hash_stable(&mut hcx, &mut stable_hasher);
source_file_names.hash_stable(&mut hcx, &mut stable_hasher);
tcx.sess.opts.dep_tracking_hash(true).hash_stable(&mut hcx, &mut stable_hasher);
tcx.sess.local_crate_disambiguator().to_fingerprint().hash_stable(&mut hcx, &mut stable_hasher);
+ tcx.untracked_crate.non_exported_macro_attrs.hash_stable(&mut hcx, &mut stable_hasher);
let crate_hash: Fingerprint = stable_hasher.finish();
Svh::new(crate_hash.to_smaller_hash())
pub cfg: Option<ast::MetaItem>,
pub foreign_module: Option<DefId>,
pub wasm_import_module: Option<Symbol>,
+ pub verbatim: Option<bool>,
}
#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
/// Details of why a pointer had to be in-bounds.
#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
pub enum CheckInAllocMsg {
+ /// We are accessing memory.
MemoryAccessTest,
+ /// We are doing pointer arithmetic.
PointerArithmeticTest,
+ /// None of the above -- generic/unspecific inbounds test.
InboundsTest,
}
impl fmt::Display for CheckInAllocMsg {
/// When this is printed as an error the context looks like this
- /// "{test name} failed: pointer must be in-bounds at offset..."
+ /// "{msg}pointer must be in-bounds at offset..."
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match *self {
- CheckInAllocMsg::MemoryAccessTest => "memory access",
- CheckInAllocMsg::PointerArithmeticTest => "pointer arithmetic",
- CheckInAllocMsg::InboundsTest => "inbounds test",
+ CheckInAllocMsg::MemoryAccessTest => "memory access failed: ",
+ CheckInAllocMsg::PointerArithmeticTest => "pointer arithmetic failed: ",
+ CheckInAllocMsg::InboundsTest => "",
}
)
}
}
PointerOutOfBounds { ptr, msg, allocation_size } => write!(
f,
- "{} failed: pointer must be in-bounds at offset {}, \
+ "{}pointer must be in-bounds at offset {}, \
but is outside bounds of {} which has size {}",
msg,
ptr.offset.bytes(),
ptr.alloc_id,
allocation_size.bytes()
),
+ DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => {
+ write!(f, "null pointer is not a valid pointer for this operation")
+ }
DanglingIntPointer(i, msg) => {
- write!(f, "{} failed: 0x{:x} is not a valid pointer", msg, i)
+ write!(f, "{}0x{:x} is not a valid pointer", msg, i)
}
AlignmentCheckFailed { required, has } => write!(
f,
}
#[inline]
- pub fn predecessors(&self) -> impl std::ops::Deref<Target = Predecessors> + '_ {
+ pub fn predecessors(&self) -> &Predecessors {
self.predecessor_cache.compute(&self.basic_blocks)
}
impl graph::GraphPredecessors<'graph> for Body<'tcx> {
type Item = BasicBlock;
- type Iter = smallvec::IntoIter<[BasicBlock; 4]>;
+ type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicBlock>>;
}
impl graph::WithPredecessors for Body<'tcx> {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
- self.predecessors()[node].clone().into_iter()
+ self.predecessors()[node].iter().copied()
}
}
name: Symbol,
items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>,
size_estimate: Option<usize>,
+ primary: bool,
}
/// Specifies the linkage type for a `MonoItem`.
impl<'tcx> CodegenUnit<'tcx> {
pub fn new(name: Symbol) -> CodegenUnit<'tcx> {
- CodegenUnit { name, items: Default::default(), size_estimate: None }
+ CodegenUnit { name, items: Default::default(), size_estimate: None, primary: false }
}
pub fn name(&self) -> Symbol {
self.name = name;
}
+ pub fn is_primary(&self) -> bool {
+ self.primary
+ }
+
+ pub fn make_primary(&mut self) {
+ self.primary = true;
+ }
+
pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
&self.items
}
name,
// The size estimate is not relevant to the hash
size_estimate: _,
+ primary: _,
} = *self;
name.hash_stable(hcx, hasher);
}
pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
+ #[cfg(parallel_compiler)]
unsafe fn deadlock(&'tcx self, tcx: TyCtxt<'tcx>, registry: &rustc_rayon_core::Registry);
fn encode_query_results(
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{
hir::place::PlaceBase,
- mir::{self, ClearCrossCrate, Local, LocalDecl, LocalInfo, Location},
+ mir::{self, ClearCrossCrate, Local, LocalDecl, LocalInfo, LocalKind, Location},
};
use rustc_span::source_map::DesugaringKind;
use rustc_span::symbol::{kw, Symbol};
match label {
Some((true, err_help_span, suggested_code)) => {
- err.span_suggestion(
- err_help_span,
- &format!(
- "consider changing this to be a mutable {}",
- pointer_desc
- ),
- suggested_code,
- Applicability::MachineApplicable,
- );
+ let (is_trait_sig, local_trait) = self.is_error_in_trait(local);
+ if !is_trait_sig {
+ err.span_suggestion(
+ err_help_span,
+ &format!(
+ "consider changing this to be a mutable {}",
+ pointer_desc
+ ),
+ suggested_code,
+ Applicability::MachineApplicable,
+ );
+ } else if let Some(x) = local_trait {
+ err.span_suggestion(
+ x,
+ &format!(
+ "consider changing that to be a mutable {}",
+ pointer_desc
+ ),
+ suggested_code,
+ Applicability::MachineApplicable,
+ );
+ }
}
Some((false, err_label_span, message)) => {
err.span_label(err_label_span, &message);
err.buffer(&mut self.errors_buffer);
}
+ /// The user cannot make the signature of a trait function mutable without
+ /// changing the trait, so we find out whether this error belongs to a trait
+ /// and, if so, move the suggestion to the trait (or disable it if the trait is out of scope of this crate).
+ fn is_error_in_trait(&self, local: Local) -> (bool, Option<Span>) {
+ if self.body.local_kind(local) != LocalKind::Arg {
+ return (false, None);
+ }
+ let hir_map = self.infcx.tcx.hir();
+ let my_def = self.body.source.def_id();
+ let my_hir = hir_map.local_def_id_to_hir_id(my_def.as_local().unwrap());
+ let td = if let Some(a) =
+ self.infcx.tcx.impl_of_method(my_def).and_then(|x| self.infcx.tcx.trait_id_of_impl(x))
+ {
+ a
+ } else {
+ return (false, None);
+ };
+ (
+ true,
+ td.as_local().and_then(|tld| {
+ let h = hir_map.local_def_id_to_hir_id(tld);
+ match hir_map.find(h) {
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(_, _, _, _, items),
+ ..
+ })) => {
+ let mut f_in_trait_opt = None;
+ for hir::TraitItemRef { id: fi, kind: k, .. } in *items {
+ let hi = fi.hir_id();
+ if !matches!(k, hir::AssocItemKind::Fn { .. }) {
+ continue;
+ }
+ if hir_map.name(hi) != hir_map.name(my_hir) {
+ continue;
+ }
+ f_in_trait_opt = Some(hi);
+ break;
+ }
+ f_in_trait_opt.and_then(|f_in_trait| match hir_map.find(f_in_trait) {
+ Some(Node::TraitItem(hir::TraitItem {
+ kind:
+ hir::TraitItemKind::Fn(
+ hir::FnSig { decl: hir::FnDecl { inputs, .. }, .. },
+ _,
+ ),
+ ..
+ })) => {
+ let hir::Ty { span, .. } = inputs[local.index() - 1];
+ Some(span)
+ }
+ _ => None,
+ })
+ }
+ _ => None,
+ }
+ }),
+ )
+ }
+
// point to span of upvar making closure call require mutable borrow
fn show_mutating_upvar(
&self,
self.write_scalar(result, dest)?;
}
sym::copy => {
- self.copy(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
+ self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
}
sym::offset => {
let ptr = self.read_scalar(&args[0])?.check_init()?;
min_ptr,
Size::from_bytes(size),
None,
- CheckInAllocMsg::InboundsTest,
+ CheckInAllocMsg::PointerArithmeticTest,
)?;
Ok(offset_ptr)
}
+
+ /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
+ pub(crate) fn copy_intrinsic(
+ &mut self,
+ src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
+ nonoverlapping: bool,
+ ) -> InterpResult<'tcx> {
+ let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+ let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+ let (size, align) = (layout.size, layout.align.abi);
+ let size = size.checked_mul(count, self).ok_or_else(|| {
+ err_ub_format!(
+ "overflow computing total size of `{}`",
+ if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+ )
+ })?;
+
+ // Make sure we check both pointers for an access of the total size and alignment,
+ // *even if* the total size is 0.
+ let src =
+ self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;
+
+ let dst =
+ self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;
+
+ if let (Some(src), Some(dst)) = (src, dst) {
+ self.memory.copy(src, dst, size, nonoverlapping)?;
+ }
+ Ok(())
+ }
}
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
Err((if int == 0 {
// This is UB, seriously.
+ // (`DanglingIntPointer` with these exact arguments has special printing code.)
err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
} else {
// This is just something we cannot support during const-eval.
//!
//! The main entry point is the `step` method.
-use crate::interpret::OpTy;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_target::abi::LayoutOf;
let src = self.eval_operand(src, None)?;
let dst = self.eval_operand(dst, None)?;
let count = self.eval_operand(count, None)?;
- self.copy(&src, &dst, &count, /* nonoverlapping */ true)?;
+ self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
}
// Statements we do not track.
Ok(())
}
- pub(crate) fn copy(
- &mut self,
- src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
- dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
- count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
- nonoverlapping: bool,
- ) -> InterpResult<'tcx> {
- let count = self.read_scalar(&count)?.to_machine_usize(self)?;
- let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
- let (size, align) = (layout.size, layout.align.abi);
- let size = size.checked_mul(count, self).ok_or_else(|| {
- err_ub_format!(
- "overflow computing total size of `{}`",
- if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
- )
- })?;
-
- // Make sure we check both pointers for an access of the total size and aligment,
- // *even if* the total size is 0.
- let src =
- self.memory.check_ptr_access(self.read_scalar(&src)?.check_init()?, size, align)?;
-
- let dst =
- self.memory.check_ptr_access(self.read_scalar(&dst)?.check_init()?, size, align)?;
-
- if let (Some(src), Some(dst)) = (src, dst) {
- self.memory.copy(src, dst, size, nonoverlapping)?;
- }
- Ok(())
- }
-
/// Evaluate an assignment statement.
///
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
vtable,
3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
Some(self.ecx.tcx.data_layout.pointer_align.abi),
- CheckInAllocMsg::InboundsTest,
+ CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
err_ub!(DanglingIntPointer(..)) |
place.ptr,
size,
Some(align),
- CheckInAllocMsg::InboundsTest,
+ CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
),
self.path,
err_ub!(AlignmentCheckFailed { required, has }) =>
let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || {
sync::join(
|| {
- &*tcx.arena.alloc_from_iter(partition(
+ let mut codegen_units = partition(
tcx,
&mut items.iter().cloned(),
tcx.sess.codegen_units(),
&inlining_map,
- ))
+ );
+ codegen_units[0].make_primary();
+ &*tcx.arena.alloc_from_iter(codegen_units)
},
|| assert_symbols_are_distinct(tcx, items.iter()),
)
// if we applied optimizations, we potentially have some cfg to cleanup to
// make it easier for further passes
if should_simplify {
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
simplify_locals(body, tcx);
}
}
impl graph::GraphPredecessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
- type Iter = std::vec::IntoIter<BasicCoverageBlock>;
+ type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
- self.predecessors[node].clone().into_iter()
+ self.predecessors[node].iter().copied()
}
}
use rustc_middle::ty::TyCtxt;
use rustc_span::def_id::DefId;
use rustc_span::source_map::SourceMap;
-use rustc_span::{CharPos, Pos, SourceFile, Span, Symbol};
+use rustc_span::{CharPos, ExpnKind, Pos, SourceFile, Span, Symbol};
/// A simple error message wrapper for `coverage::Error`s.
#[derive(Debug)]
impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
let source_map = tcx.sess.source_map();
- let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, mir_body.source.def_id());
- let body_span = hir_body.value.span;
+ let def_id = mir_body.source.def_id();
+ let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
+
+ let mut body_span = hir_body.value.span;
+
+ if tcx.is_closure(def_id) {
+ // If the MIR function is a closure, and if the closure body span
+ // starts from a macro, but it's content is not in that macro, try
+ // to find a non-macro callsite, and instrument the spans there
+ // instead.
+ loop {
+ let expn_data = body_span.ctxt().outer_expn_data();
+ if expn_data.is_root() {
+ break;
+ }
+ if let ExpnKind::Macro(..) = expn_data.kind {
+ body_span = expn_data.call_site;
+ } else {
+ break;
+ }
+ }
+ }
+
let source_file = source_map.lookup_source_file(body_span.lo());
let fn_sig_span = match some_fn_sig.filter(|fn_sig| {
fn_sig.span.ctxt() == body_span.ctxt()
if has_opts_to_apply {
let mut opt_applier = OptApplier { tcx, duplicates };
opt_applier.visit_body(body);
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
}
}
}
// Since this optimization adds new basic blocks and invalidates others,
// clean up the cfg to make it nicer for other passes
if should_cleanup {
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
}
}
}
// Make sure we remove dead blocks to remove
// unrelated code from the resume part of the function
- simplify::remove_dead_blocks(&mut body);
+ simplify::remove_dead_blocks(tcx, &mut body);
dump_mir(tcx, None, "generator_drop", &0, &body, |_, _| Ok(()));
// Make sure we remove dead blocks to remove
// unrelated code from the drop part of the function
- simplify::remove_dead_blocks(body);
+ simplify::remove_dead_blocks(tcx, body);
dump_mir(tcx, None, "generator_resume", &0, body, |_, _| Ok(()));
}
if inline(tcx, body) {
debug!("running simplify cfg on {:?}", body.source);
CfgSimplifier::new(body).simplify();
- remove_dead_blocks(body);
+ remove_dead_blocks(tcx, body);
}
}
}
}
if should_cleanup {
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
}
}
}
}
}
- simplify::remove_dead_blocks(body)
+ simplify::remove_dead_blocks(tcx, body)
}
}
// if we applied optimizations, we potentially have some cfg to cleanup to
// make it easier for further passes
if should_simplify {
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
}
}
}
use crate::transform::MirPass;
use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::coverage::*;
use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;
}
}
-pub fn simplify_cfg(body: &mut Body<'_>) {
+pub fn simplify_cfg(tcx: TyCtxt<'tcx>, body: &mut Body<'_>) {
CfgSimplifier::new(body).simplify();
- remove_dead_blocks(body);
+ remove_dead_blocks(tcx, body);
// FIXME: Should probably be moved into some kind of pass manager
body.basic_blocks_mut().raw.shrink_to_fit();
Cow::Borrowed(&self.label)
}
- fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body.source);
- simplify_cfg(body);
+ simplify_cfg(tcx, body);
}
}
}
}
-pub fn remove_dead_blocks(body: &mut Body<'_>) {
+pub fn remove_dead_blocks(tcx: TyCtxt<'tcx>, body: &mut Body<'_>) {
let reachable = traversal::reachable_as_bitset(body);
let num_blocks = body.basic_blocks().len();
if num_blocks == reachable.count() {
}
used_blocks += 1;
}
+
+ if tcx.sess.instrument_coverage() {
+ save_unreachable_coverage(basic_blocks, used_blocks);
+ }
+
basic_blocks.raw.truncate(used_blocks);
for block in basic_blocks {
}
}
+fn save_unreachable_coverage(
+ basic_blocks: &mut IndexVec<BasicBlock, BasicBlockData<'_>>,
+ first_dead_block: usize,
+) {
+ // Retain coverage info for dead blocks, so coverage reports will still
+ // report `0` executions for the uncovered code regions.
+ let mut dropped_coverage = Vec::new();
+ for dead_block in first_dead_block..basic_blocks.len() {
+ for statement in basic_blocks[BasicBlock::new(dead_block)].statements.iter() {
+ if let StatementKind::Coverage(coverage) = &statement.kind {
+ if let Some(code_region) = &coverage.code_region {
+ dropped_coverage.push((statement.source_info, code_region.clone()));
+ }
+ }
+ }
+ }
+ for (source_info, code_region) in dropped_coverage {
+ basic_blocks[START_BLOCK].statements.push(Statement {
+ source_info,
+ kind: StatementKind::Coverage(box Coverage {
+ kind: CoverageKind::Unreachable,
+ code_region: Some(code_region),
+ }),
+ })
+ }
+}
pub struct SimplifyLocals;
impl<'tcx> MirPass<'tcx> for SimplifyLocals {
if did_remove_blocks {
// We have dead blocks now, so remove those.
- simplify::remove_dead_blocks(body);
+ simplify::remove_dead_blocks(tcx, body);
}
}
}
}
if replaced {
- simplify::remove_dead_blocks(body);
+ simplify::remove_dead_blocks(tcx, body);
}
}
}
None => "unterminated block comment",
};
let last_bpos = self.pos;
- self.sess
- .span_diagnostic
- .struct_span_fatal_with_code(
- self.mk_sp(start, last_bpos),
- msg,
- error_code!(E0758),
- )
- .emit();
- FatalError.raise();
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start, last_bpos),
+ msg,
+ error_code!(E0758),
+ );
}
// Skip non-doc comments
let (lit_kind, mode, prefix_len, postfix_len) = match kind {
rustc_lexer::LiteralKind::Char { terminated } => {
if !terminated {
- self.sess
- .span_diagnostic
- .struct_span_fatal_with_code(
- self.mk_sp(start, suffix_start),
- "unterminated character literal",
- error_code!(E0762),
- )
- .emit();
- FatalError.raise();
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start, suffix_start),
+ "unterminated character literal",
+ error_code!(E0762),
+ )
}
(token::Char, Mode::Char, 1, 1) // ' '
}
rustc_lexer::LiteralKind::Byte { terminated } => {
if !terminated {
- self.sess
- .span_diagnostic
- .struct_span_fatal_with_code(
- self.mk_sp(start + BytePos(1), suffix_start),
- "unterminated byte constant",
- error_code!(E0763),
- )
- .emit();
- FatalError.raise();
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start + BytePos(1), suffix_start),
+ "unterminated byte constant",
+ error_code!(E0763),
+ )
}
(token::Byte, Mode::Byte, 2, 1) // b' '
}
rustc_lexer::LiteralKind::Str { terminated } => {
if !terminated {
- self.sess
- .span_diagnostic
- .struct_span_fatal_with_code(
- self.mk_sp(start, suffix_start),
- "unterminated double quote string",
- error_code!(E0765),
- )
- .emit();
- FatalError.raise();
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start, suffix_start),
+ "unterminated double quote string",
+ error_code!(E0765),
+ )
}
(token::Str, Mode::Str, 1, 1) // " "
}
rustc_lexer::LiteralKind::ByteStr { terminated } => {
if !terminated {
- self.sess
- .span_diagnostic
- .struct_span_fatal_with_code(
- self.mk_sp(start + BytePos(1), suffix_start),
- "unterminated double quote byte string",
- error_code!(E0766),
- )
- .emit();
- FatalError.raise();
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start + BytePos(1), suffix_start),
+ "unterminated double quote byte string",
+ error_code!(E0766),
+ )
}
(token::ByteStr, Mode::ByteStr, 2, 1) // b" "
}
}
impl<'a> Parser<'a> {
- pub(super) fn span_fatal_err<S: Into<MultiSpan>>(
- &self,
- sp: S,
- err: Error,
- ) -> DiagnosticBuilder<'a> {
+ pub(super) fn span_err<S: Into<MultiSpan>>(&self, sp: S, err: Error) -> DiagnosticBuilder<'a> {
err.span_err(sp, self.diagnostic())
}
if !this.recover_nested_adt_item(kw::Enum)? {
return Ok((None, TrailingToken::None));
}
- let ident = this.parse_ident()?;
+ let ident = this.parse_field_ident("enum", vlo)?;
let struct_def = if this.check(&token::OpenDelim(token::Brace)) {
// Parse a struct variant.
- let (fields, recovered) = this.parse_record_struct_body()?;
+ let (fields, recovered) = this.parse_record_struct_body("struct")?;
VariantData::Struct(fields, recovered)
} else if this.check(&token::OpenDelim(token::Paren)) {
VariantData::Tuple(this.parse_tuple_struct_body()?, DUMMY_NODE_ID)
VariantData::Unit(DUMMY_NODE_ID)
} else {
// If we see: `struct Foo<T> where T: Copy { ... }`
- let (fields, recovered) = self.parse_record_struct_body()?;
+ let (fields, recovered) = self.parse_record_struct_body("struct")?;
VariantData::Struct(fields, recovered)
}
// No `where` so: `struct Foo<T>;`
VariantData::Unit(DUMMY_NODE_ID)
// Record-style struct definition
} else if self.token == token::OpenDelim(token::Brace) {
- let (fields, recovered) = self.parse_record_struct_body()?;
+ let (fields, recovered) = self.parse_record_struct_body("struct")?;
VariantData::Struct(fields, recovered)
// Tuple-style struct definition with optional where-clause.
} else if self.token == token::OpenDelim(token::Paren) {
let vdata = if self.token.is_keyword(kw::Where) {
generics.where_clause = self.parse_where_clause()?;
- let (fields, recovered) = self.parse_record_struct_body()?;
+ let (fields, recovered) = self.parse_record_struct_body("union")?;
VariantData::Struct(fields, recovered)
} else if self.token == token::OpenDelim(token::Brace) {
- let (fields, recovered) = self.parse_record_struct_body()?;
+ let (fields, recovered) = self.parse_record_struct_body("union")?;
VariantData::Struct(fields, recovered)
} else {
let token_str = super::token_descr(&self.token);
Ok((class_name, ItemKind::Union(vdata, generics)))
}
- fn parse_record_struct_body(&mut self) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> {
+ fn parse_record_struct_body(
+ &mut self,
+ adt_ty: &str,
+ ) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> {
let mut fields = Vec::new();
let mut recovered = false;
if self.eat(&token::OpenDelim(token::Brace)) {
while self.token != token::CloseDelim(token::Brace) {
- let field = self.parse_field_def().map_err(|e| {
+ let field = self.parse_field_def(adt_ty).map_err(|e| {
self.consume_block(token::Brace, ConsumeClosingDelim::No);
recovered = true;
e
}
/// Parses an element of a struct declaration.
- fn parse_field_def(&mut self) -> PResult<'a, FieldDef> {
+ fn parse_field_def(&mut self, adt_ty: &str) -> PResult<'a, FieldDef> {
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
let vis = this.parse_visibility(FollowedByType::No)?;
- Ok((this.parse_single_struct_field(lo, vis, attrs)?, TrailingToken::None))
+ Ok((this.parse_single_struct_field(adt_ty, lo, vis, attrs)?, TrailingToken::None))
})
}
/// Parses a structure field declaration.
fn parse_single_struct_field(
&mut self,
+ adt_ty: &str,
lo: Span,
vis: Visibility,
attrs: Vec<Attribute>,
) -> PResult<'a, FieldDef> {
let mut seen_comma: bool = false;
- let a_var = self.parse_name_and_ty(lo, vis, attrs)?;
+ let a_var = self.parse_name_and_ty(adt_ty, lo, vis, attrs)?;
if self.token == token::Comma {
seen_comma = true;
}
token::CloseDelim(token::Brace) => {}
token::DocComment(..) => {
let previous_span = self.prev_token.span;
- let mut err = self.span_fatal_err(self.token.span, Error::UselessDocComment);
+ let mut err = self.span_err(self.token.span, Error::UselessDocComment);
self.bump(); // consume the doc comment
let comma_after_doc_seen = self.eat(&token::Comma);
// `seen_comma` is always false, because we are inside doc block
/// Parses a structure field.
fn parse_name_and_ty(
&mut self,
+ adt_ty: &str,
lo: Span,
vis: Visibility,
attrs: Vec<Attribute>,
) -> PResult<'a, FieldDef> {
- let name = self.parse_ident_common(false)?;
+ let name = self.parse_field_ident(adt_ty, lo)?;
self.expect(&token::Colon)?;
let ty = self.parse_ty()?;
Ok(FieldDef {
})
}
+ /// Parses a field identifier. Specialized version of `parse_ident_common`
+ /// for better diagnostics and suggestions.
+ fn parse_field_ident(&mut self, adt_ty: &str, lo: Span) -> PResult<'a, Ident> {
+ let (ident, is_raw) = self.ident_or_err()?;
+ if !is_raw && ident.is_reserved() {
+ let err = if self.check_fn_front_matter(false) {
+ let _ = self.parse_fn(&mut Vec::new(), |_| true, lo);
+ let mut err = self.struct_span_err(
+ lo.to(self.prev_token.span),
+ &format!("functions are not allowed in {} definitions", adt_ty),
+ );
+ err.help("unlike in C++, Java, and C#, functions are declared in `impl` blocks");
+ err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information");
+ err
+ } else {
+ self.expected_ident_found()
+ };
+ return Err(err);
+ }
+ self.bump();
+ Ok(ident)
+ }
+
/// Parses a declarative macro 2.0 definition.
/// The `macro` keyword has already been parsed.
/// ```
self.parse_ident_common(true)
}
+ fn ident_or_err(&mut self) -> PResult<'a, (Ident, /* is_raw */ bool)> {
+ self.token.ident().ok_or_else(|| match self.prev_token.kind {
+ TokenKind::DocComment(..) => {
+ self.span_err(self.prev_token.span, Error::UselessDocComment)
+ }
+ _ => self.expected_ident_found(),
+ })
+ }
+
fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, Ident> {
- match self.token.ident() {
- Some((ident, is_raw)) => {
- if !is_raw && ident.is_reserved() {
- let mut err = self.expected_ident_found();
- if recover {
- err.emit();
- } else {
- return Err(err);
- }
- }
- self.bump();
- Ok(ident)
+ let (ident, is_raw) = self.ident_or_err()?;
+ if !is_raw && ident.is_reserved() {
+ let mut err = self.expected_ident_found();
+ if recover {
+ err.emit();
+ } else {
+ return Err(err);
}
- _ => Err(match self.prev_token.kind {
- TokenKind::DocComment(..) => {
- self.span_fatal_err(self.prev_token.span, Error::UselessDocComment)
- }
- _ => self.expected_ident_found(),
- }),
}
+ self.bump();
+ Ok(ident)
}
/// Checks if the next token is `tok`, and returns `true` if so.
let span = expr.span;
match &expr.kind {
- // Not gated to supporte things like `doc = $expr` that work on stable.
+ // Not gated to support things like `doc = $expr` that work on stable.
_ if is_interpolated_expr => {}
ExprKind::Lit(lit) if lit.kind.is_unsuffixed() => {}
_ => self.sess.gated_spans.gate(sym::extended_key_value_attributes, span),
fn error_outer_attrs(&self, attrs: &[Attribute]) {
if let [.., last] = attrs {
if last.is_doc_comment() {
- self.span_fatal_err(last.span, Error::UselessDocComment).emit();
+ self.span_err(last.span, Error::UselessDocComment).emit();
} else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
self.struct_span_err(last.span, "expected statement after outer attribute").emit();
}
/// Is a `dyn B0 + ... + Bn` type allowed here?
fn is_explicit_dyn_type(&mut self) -> bool {
self.check_keyword(kw::Dyn)
- && (self.token.uninterpolated_span().rust_2018()
+ && (!self.token.uninterpolated_span().rust_2015()
|| self.look_ahead(1, |t| {
t.can_begin_bound() && !can_continue_type_after_non_fn_ident(t)
}))
) -> PResult<'a, GenericBounds> {
let mut bounds = Vec::new();
let mut negative_bounds = Vec::new();
- while self.can_begin_bound() {
+
+ while self.can_begin_bound() || self.token.is_keyword(kw::Dyn) {
+ if self.token.is_keyword(kw::Dyn) {
+ // Account for `&dyn Trait + dyn Other`.
+ self.struct_span_err(self.token.span, "invalid `dyn` keyword")
+ .help("`dyn` is only needed at the start of a trait `+`-separated list")
+ .span_suggestion(
+ self.token.span,
+ "remove this keyword",
+ String::new(),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ self.bump();
+ }
match self.parse_generic_bound()? {
Ok(bound) => bounds.push(bound),
Err(neg_sp) => negative_bounds.push(neg_sp),
let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
let path = self.parse_path(PathStyle::Type)?;
if has_parens {
- self.expect(&token::CloseDelim(token::Paren))?;
+ if self.token.is_like_plus() {
+ // Someone has written something like `&dyn (Trait + Other)`. The correct code
+ // would be `&(dyn Trait + Other)`, but we don't have access to the appropriate
+ // span to suggest that. When written as `&dyn Trait + Other`, an appropriate
+ // suggestion is given.
+ let bounds = vec![];
+ self.parse_remaining_bounds(bounds, true)?;
+ self.expect(&token::CloseDelim(token::Paren))?;
+ let sp = vec![lo, self.prev_token.span];
+ let sugg: Vec<_> = sp.iter().map(|sp| (*sp, String::new())).collect();
+ self.struct_span_err(sp, "incorrect braces around trait bounds")
+ .multipart_suggestion(
+ "remove the parentheses",
+ sugg,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ } else {
+ self.expect(&token::CloseDelim(token::Paren))?;
+ }
}
let modifier = modifiers.to_trait_bound_modifier();
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
-use rustc_ast::{Attribute, Lit, LitKind, NestedMetaItem};
+use rustc_ast::{AttrStyle, Attribute, Lit, LitKind, NestedMetaItem};
use rustc_errors::{pluralize, struct_span_err, Applicability};
use rustc_hir as hir;
use rustc_hir::def_id::LocalDefId;
};
use rustc_session::parse::feature_err;
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::{Span, DUMMY_SP};
+use rustc_span::{MultiSpan, Span, DUMMY_SP};
pub(crate) fn target_from_impl_item<'tcx>(
tcx: TyCtxt<'tcx>,
item: Option<ItemLike<'_>>,
) {
let mut is_valid = true;
+ let mut specified_inline = None;
let attrs = self.tcx.hir().attrs(hir_id);
for attr in attrs {
is_valid &= match attr.name_or_empty() {
sym::track_caller => {
self.check_track_caller(hir_id, &attr.span, attrs, span, target)
}
- sym::doc => self.check_doc_attrs(attr, hir_id, target),
+ sym::doc => self.check_doc_attrs(attr, hir_id, target, &mut specified_inline),
sym::no_link => self.check_no_link(hir_id, &attr, span, target),
sym::export_name => self.check_export_name(hir_id, &attr, span, target),
sym::rustc_args_required_const => {
true
}
- fn check_attr_crate_level(
+ /// Checks `#[doc(inline)]`/`#[doc(no_inline)]` attributes. Returns `true` if valid.
+ ///
+ /// A doc inlining attribute is invalid if it is applied to a non-`use` item, or
+ /// if there are conflicting attributes for one item.
+ ///
+ /// `specified_inline` is used to keep track of whether we have
+ /// already seen an inlining attribute for this item.
+ /// If so, `specified_inline` holds the value and the span of
+ /// the first `inline`/`no_inline` attribute.
+ fn check_doc_inline(
+ &self,
+ attr: &Attribute,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ target: Target,
+ specified_inline: &mut Option<(bool, Span)>,
+ ) -> bool {
+ if target == Target::Use {
+ let do_inline = meta.name_or_empty() == sym::inline;
+ if let Some((prev_inline, prev_span)) = *specified_inline {
+ if do_inline != prev_inline {
+ let mut spans = MultiSpan::from_spans(vec![prev_span, meta.span()]);
+ spans.push_span_label(prev_span, String::from("this attribute..."));
+ spans.push_span_label(
+ meta.span(),
+ String::from("...conflicts with this attribute"),
+ );
+ self.tcx
+ .sess
+ .struct_span_err(spans, "conflicting doc inlining attributes")
+ .help("remove one of the conflicting attributes")
+ .emit();
+ return false;
+ }
+ true
+ } else {
+ *specified_inline = Some((do_inline, meta.span()));
+ true
+ }
+ } else {
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ |lint| {
+ let mut err = lint.build(
+ "this attribute can only be applied to a `use` item",
+ );
+ err.span_label(meta.span(), "only applicable on `use` items");
+ if attr.style == AttrStyle::Outer {
+ err.span_label(
+ self.tcx.hir().span(hir_id),
+ "not a `use` item",
+ );
+ }
+ err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information")
+ .emit();
+ },
+ );
+ false
+ }
+ }
+
+ /// Checks that an attribute is *not* used at the crate level. Returns `true` if valid.
+ fn check_attr_not_crate_level(
&self,
meta: &NestedMetaItem,
hir_id: HirId,
true
}
- fn check_doc_attrs(&self, attr: &Attribute, hir_id: HirId, target: Target) -> bool {
+ /// Checks that an attribute is used at the crate level. Returns `true` if valid.
+ fn check_attr_crate_level(
+ &self,
+ attr: &Attribute,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ ) -> bool {
+ if hir_id != CRATE_HIR_ID {
+ self.tcx.struct_span_lint_hir(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ |lint| {
+ let mut err = lint.build(
+ "this attribute can only be applied at the crate level",
+ );
+ if attr.style == AttrStyle::Outer && self.tcx.hir().get_parent_item(hir_id) == CRATE_HIR_ID {
+ if let Ok(mut src) =
+ self.tcx.sess.source_map().span_to_snippet(attr.span)
+ {
+ src.insert(1, '!');
+ err.span_suggestion_verbose(
+ attr.span,
+ "to apply to the crate, use an inner attribute",
+ src,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_help(
+ attr.span,
+ "to apply to the crate, use an inner attribute",
+ );
+ }
+ }
+ err.note("read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information")
+ .emit();
+ },
+ );
+ return false;
+ }
+ true
+ }
+
+ /// Runs various checks on `#[doc]` attributes. Returns `true` if valid.
+ ///
+ /// `specified_inline` should be initialized to `None` and kept for the scope
+ /// of one item. Read the documentation of [`check_doc_inline`] for more information.
+ ///
+ /// [`check_doc_inline`]: Self::check_doc_inline
+ fn check_doc_attrs(
+ &self,
+ attr: &Attribute,
+ hir_id: HirId,
+ target: Target,
+ specified_inline: &mut Option<(bool, Span)>,
+ ) -> bool {
let mut is_valid = true;
if let Some(list) = attr.meta().and_then(|mi| mi.meta_item_list().map(|l| l.to_vec())) {
if let Some(i_meta) = meta.meta_item() {
match i_meta.name_or_empty() {
sym::alias
- if !self.check_attr_crate_level(&meta, hir_id, "alias")
+ if !self.check_attr_not_crate_level(&meta, hir_id, "alias")
|| !self.check_doc_alias(&meta, hir_id, target) =>
{
is_valid = false
}
sym::keyword
- if !self.check_attr_crate_level(&meta, hir_id, "keyword")
+ if !self.check_attr_not_crate_level(&meta, hir_id, "keyword")
|| !self.check_doc_keyword(&meta, hir_id) =>
{
is_valid = false
}
- sym::test if CRATE_HIR_ID != hir_id => {
- self.tcx.struct_span_lint_hir(
- INVALID_DOC_ATTRIBUTES,
+ sym::html_favicon_url
+ | sym::html_logo_url
+ | sym::html_playground_url
+ | sym::issue_tracker_base_url
+ | sym::html_root_url
+ | sym::html_no_source
+ | sym::test
+ if !self.check_attr_crate_level(&attr, &meta, hir_id) =>
+ {
+ is_valid = false;
+ }
+
+ sym::inline | sym::no_inline
+ if !self.check_doc_inline(
+ &attr,
+ &meta,
hir_id,
- meta.span(),
- |lint| {
- lint.build(
- "`#![doc(test(...)]` is only allowed \
- as a crate-level attribute",
- )
- .emit();
- },
- );
+ target,
+ specified_inline,
+ ) =>
+ {
is_valid = false;
}
}
impl QueryEngine<'tcx> for Queries<'tcx> {
- unsafe fn deadlock(&'tcx self, _tcx: TyCtxt<'tcx>, _registry: &rustc_rayon_core::Registry) {
- #[cfg(parallel_compiler)]
- {
- let tcx = QueryCtxt { tcx: _tcx, queries: self };
- rustc_query_system::query::deadlock(tcx, _registry)
- }
+ #[cfg(parallel_compiler)]
+ unsafe fn deadlock(&'tcx self, tcx: TyCtxt<'tcx>, registry: &rustc_rayon_core::Registry) {
+ let tcx = QueryCtxt { tcx, queries: self };
+ rustc_query_system::query::deadlock(tcx, registry)
}
fn encode_query_results(
type Cache;
}
-pub trait QueryStorage: Default {
+pub trait QueryStorage {
type Value: Debug;
type Stored: Clone;
fn store_nocache(&self, value: Self::Value) -> Self::Stored;
}
-pub trait QueryCache: QueryStorage {
+pub trait QueryCache: QueryStorage + Sized {
type Key: Hash + Eq + Clone + Debug;
type Sharded: Default;
(self.hash_result)(hcx, value)
}
- pub(crate) fn handle_cycle_error(&self, tcx: CTX, diag: DiagnosticBuilder<'_>) -> V {
- (self.handle_cycle_error)(tcx, diag)
- }
-
pub(crate) fn cache_on_disk(&self, tcx: CTX, key: &K, value: Option<&V>) -> bool {
(self.cache_on_disk)(tcx, key, value)
}
use std::convert::TryFrom;
use std::hash::Hash;
-use std::marker::PhantomData;
use std::num::NonZeroU32;
#[cfg(parallel_compiler)]
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
latch: Option<QueryLatch<D>>,
-
- dummy: PhantomData<QueryLatch<D>>,
}
impl<D> QueryJob<D>
parent,
#[cfg(parallel_compiler)]
latch: None,
- dummy: PhantomData,
}
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self, _id: QueryJobId<D>) -> QueryLatch<D> {
+ pub(super) fn latch(&mut self) -> QueryLatch<D> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
self.latch.as_ref().unwrap().clone()
}
- #[cfg(not(parallel_compiler))]
- pub(super) fn latch(&mut self, id: QueryJobId<D>) -> QueryLatch<D> {
- QueryLatch { id }
- }
-
/// Signals to waiters that the query is complete.
///
/// This does nothing for single threaded rustc,
}
#[cfg(not(parallel_compiler))]
-#[derive(Clone)]
-pub(super) struct QueryLatch<D> {
- id: QueryJobId<D>,
-}
-
-#[cfg(not(parallel_compiler))]
-impl<D> QueryLatch<D>
+impl<D> QueryJobId<D>
where
D: Copy + Clone + Eq + Hash,
{
let info = query_map.get(&job).unwrap();
cycle.push(info.info.clone());
- if job == self.id {
+ if job == *self {
cycle.reverse();
// This is the end of the cycle
};
use crate::query::{QueryContext, QueryMap, QueryStackFrame};
-#[cfg(not(parallel_compiler))]
-use rustc_data_structures::cold_path;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHasher};
use rustc_data_structures::sharded::{get_shard_index_by_hash, Sharded};
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_data_structures::thin_vec::ThinVec;
+#[cfg(not(parallel_compiler))]
+use rustc_errors::DiagnosticBuilder;
use rustc_errors::{Diagnostic, FatalError};
use rustc_span::Span;
use std::collections::hash_map::Entry;
pub cache_hits: AtomicUsize,
}
-impl<C: QueryCache> Default for QueryCacheStore<C> {
+impl<C: QueryCache + Default> Default for QueryCacheStore<C> {
fn default() -> Self {
Self {
cache: C::default(),
id: QueryJobId<D>,
}
+#[cold]
+#[inline(never)]
+#[cfg(not(parallel_compiler))]
+fn mk_cycle<CTX, V, R>(
+ tcx: CTX,
+ root: QueryJobId<CTX::DepKind>,
+ span: Span,
+ handle_cycle_error: fn(CTX, DiagnosticBuilder<'_>) -> V,
+ cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+ CTX: QueryContext,
+ V: std::fmt::Debug,
+ R: Clone,
+{
+ let error: CycleError = root.find_cycle_in_stack(
+ tcx.try_collect_active_jobs().unwrap(),
+ &tcx.current_query_job(),
+ span,
+ );
+ let error = report_cycle(tcx.dep_context().sess(), error);
+ let value = handle_cycle_error(tcx, error);
+ cache.store_nocache(value)
+}
+
impl<'tcx, D, C> JobOwner<'tcx, D, C>
where
D: Copy + Clone + Eq + Hash,
state: &'b QueryState<CTX::DepKind, C::Key>,
cache: &'b QueryCacheStore<C>,
span: Span,
- key: &C::Key,
+ key: C::Key,
lookup: QueryLookup,
query: &QueryVtable<CTX, C::Key, C::Value>,
) -> TryGetJob<'b, CTX::DepKind, C>
let mut state_lock = state.shards.get_shard_by_index(shard).lock();
let lock = &mut *state_lock;
- let (latch, mut _query_blocked_prof_timer) = match lock.active.entry((*key).clone()) {
- Entry::Occupied(mut entry) => {
- match entry.get_mut() {
- QueryResult::Started(job) => {
- // For parallel queries, we'll block and wait until the query running
- // in another thread has completed. Record how long we wait in the
- // self-profiler.
- let _query_blocked_prof_timer = if cfg!(parallel_compiler) {
- Some(tcx.dep_context().profiler().query_blocked())
- } else {
- None
- };
-
- // Create the id of the job we're waiting for
- let id = QueryJobId::new(job.id, shard, query.dep_kind);
-
- (job.latch(id), _query_blocked_prof_timer)
- }
- QueryResult::Poisoned => FatalError.raise(),
- }
- }
+ match lock.active.entry(key) {
Entry::Vacant(entry) => {
- // No job entry for this query. Return a new one to be started later.
-
// Generate an id unique within this shard.
let id = lock.jobs.checked_add(1).unwrap();
lock.jobs = id;
let id = QueryShardJobId(NonZeroU32::new(id).unwrap());
- let global_id = QueryJobId::new(id, shard, query.dep_kind);
-
let job = tcx.current_query_job();
let job = QueryJob::new(id, span, job);
+ let key = entry.key().clone();
entry.insert(QueryResult::Started(job));
- let owner = JobOwner { state, cache, id: global_id, key: (*key).clone() };
+ let global_id = QueryJobId::new(id, shard, query.dep_kind);
+ let owner = JobOwner { state, cache, id: global_id, key };
return TryGetJob::NotYetStarted(owner);
}
- };
- mem::drop(state_lock);
-
- // If we are single-threaded we know that we have cycle error,
- // so we just return the error.
- #[cfg(not(parallel_compiler))]
- return TryGetJob::Cycle(cold_path(|| {
- let error: CycleError = latch.find_cycle_in_stack(
- tcx.try_collect_active_jobs().unwrap(),
- &tcx.current_query_job(),
- span,
- );
- let error = report_cycle(tcx.dep_context().sess(), error);
- let value = query.handle_cycle_error(tcx, error);
- cache.cache.store_nocache(value)
- }));
-
- // With parallel queries we might just have to wait on some other
- // thread.
- #[cfg(parallel_compiler)]
- {
- let result = latch.wait_on(tcx.current_query_job(), span);
-
- if let Err(cycle) = result {
- let cycle = report_cycle(tcx.dep_context().sess(), cycle);
- let value = query.handle_cycle_error(tcx, cycle);
- let value = cache.cache.store_nocache(value);
- return TryGetJob::Cycle(value);
- }
+ Entry::Occupied(mut entry) => {
+ match entry.get_mut() {
+ #[cfg(not(parallel_compiler))]
+ QueryResult::Started(job) => {
+ let id = QueryJobId::new(job.id, shard, query.dep_kind);
- let cached = cache
- .cache
- .lookup(cache, &key, |value, index| {
- if unlikely!(tcx.dep_context().profiler().enabled()) {
- tcx.dep_context().profiler().query_cache_hit(index.into());
+ drop(state_lock);
+
+                        // If we are single-threaded we know that we have a cycle error,
+ // so we just return the error.
+ return TryGetJob::Cycle(mk_cycle(
+ tcx,
+ id,
+ span,
+ query.handle_cycle_error,
+ &cache.cache,
+ ));
}
- #[cfg(debug_assertions)]
- {
- cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+ #[cfg(parallel_compiler)]
+ QueryResult::Started(job) => {
+ // For parallel queries, we'll block and wait until the query running
+ // in another thread has completed. Record how long we wait in the
+ // self-profiler.
+ let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+ // Get the latch out
+ let latch = job.latch();
+ let key = entry.key().clone();
+
+ drop(state_lock);
+
+ // With parallel queries we might just have to wait on some other
+ // thread.
+ let result = latch.wait_on(tcx.current_query_job(), span);
+
+ if let Err(cycle) = result {
+ let cycle = report_cycle(tcx.dep_context().sess(), cycle);
+ let value = (query.handle_cycle_error)(tcx, cycle);
+ let value = cache.cache.store_nocache(value);
+ return TryGetJob::Cycle(value);
+ }
+
+ let cached = cache
+ .cache
+ .lookup(cache, &key, |value, index| {
+ if unlikely!(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ #[cfg(debug_assertions)]
+ {
+ cache.cache_hits.fetch_add(1, Ordering::Relaxed);
+ }
+ (value.clone(), index)
+ })
+ .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+ query_blocked_prof_timer.finish_with_query_invocation_id(cached.1.into());
+
+ return TryGetJob::JobCompleted(cached);
}
- (value.clone(), index)
- })
- .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
-
- if let Some(prof_timer) = _query_blocked_prof_timer.take() {
- prof_timer.finish_with_query_invocation_id(cached.1.into());
+ QueryResult::Poisoned => FatalError.raise(),
+ }
}
-
- return TryGetJob::JobCompleted(cached);
}
}
CTX: QueryContext,
{
let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
- tcx, state, cache, span, &key, lookup, query,
+ tcx,
+ state,
+ cache,
+ span,
+ key.clone(),
+ lookup,
+ query,
) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(result) => return result,
let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
- assert_eq!(
- Some(new_hash),
- old_hash,
- "found unstable fingerprints for {:?}: {:?}",
- dep_node,
- result
- );
+ if Some(new_hash) != old_hash {
+ let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
+ format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+ } else {
+ "`cargo clean`".to_string()
+ };
+ tcx.sess().struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
+ .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
+ .note(&format!("Please follow the instructions below to create a bug report with the provided information"))
+ .note(&format!("See <https://github.com/rust-lang/rust/issues/84970> for more information"))
+ .emit();
+ panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
+ }
}
fn force_query_with_job<C, CTX>(
};
let job = match JobOwner::<'_, CTX::DepKind, C>::try_start(
- tcx, state, cache, span, &key, lookup, query,
+ tcx,
+ state,
+ cache,
+ span,
+ key.clone(),
+ lookup,
+ query,
) {
TryGetJob::NotYetStarted(job) => job,
TryGetJob::Cycle(_) => return,
use rustc_ast::{self as ast, Expr, ExprKind, Item, ItemKind, NodeId, Path, Ty, TyKind};
use rustc_ast_pretty::pprust::path_segment_to_string;
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder};
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, SuggestionStyle};
use rustc_hir as hir;
use rustc_hir::def::Namespace::{self, *};
use rustc_hir::def::{self, CtorKind, CtorOf, DefKind};
impl<'tcx> LifetimeContext<'_, 'tcx> {
crate fn report_missing_lifetime_specifiers(
&self,
- span: Span,
+ spans: Vec<Span>,
count: usize,
) -> DiagnosticBuilder<'tcx> {
struct_span_err!(
self.tcx.sess,
- span,
+ spans,
E0106,
"missing lifetime specifier{}",
pluralize!(count)
crate fn add_missing_lifetime_specifiers_label(
&self,
err: &mut DiagnosticBuilder<'_>,
- span: Span,
- count: usize,
+ spans_with_counts: Vec<(Span, usize)>,
lifetime_names: &FxHashSet<Symbol>,
lifetime_spans: Vec<Span>,
params: &[ElisionFailureInfo],
) {
- let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok();
-
- err.span_label(
- span,
- &format!(
- "expected {} lifetime parameter{}",
- if count == 1 { "named".to_string() } else { count.to_string() },
- pluralize!(count)
- ),
- );
+ let snippets: Vec<Option<String>> = spans_with_counts
+ .iter()
+ .map(|(span, _)| self.tcx.sess.source_map().span_to_snippet(*span).ok())
+ .collect();
- let suggest_existing = |err: &mut DiagnosticBuilder<'_>,
- name: &str,
- formatter: &dyn Fn(&str) -> String| {
- if let Some(MissingLifetimeSpot::HigherRanked { span: for_span, span_type }) =
- self.missing_named_lifetime_spots.iter().rev().next()
- {
- // When we have `struct S<'a>(&'a dyn Fn(&X) -> &X);` we want to not only suggest
- // using `'a`, but also introduce the concept of HRLTs by suggesting
- // `struct S<'a>(&'a dyn for<'b> Fn(&X) -> &'b X);`. (#72404)
- let mut introduce_suggestion = vec![];
+ for (span, count) in &spans_with_counts {
+ err.span_label(
+ *span,
+ format!(
+ "expected {} lifetime parameter{}",
+ if *count == 1 { "named".to_string() } else { count.to_string() },
+ pluralize!(*count),
+ ),
+ );
+ }
- let a_to_z_repeat_n = |n| {
- (b'a'..=b'z').map(move |c| {
- let mut s = '\''.to_string();
- s.extend(std::iter::repeat(char::from(c)).take(n));
- s
- })
- };
+ let suggest_existing =
+ |err: &mut DiagnosticBuilder<'_>,
+ name: &str,
+ formatters: Vec<Option<Box<dyn Fn(&str) -> String>>>| {
+ if let Some(MissingLifetimeSpot::HigherRanked { span: for_span, span_type }) =
+ self.missing_named_lifetime_spots.iter().rev().next()
+ {
+ // When we have `struct S<'a>(&'a dyn Fn(&X) -> &X);` we want to not only suggest
+ // using `'a`, but also introduce the concept of HRLTs by suggesting
+ // `struct S<'a>(&'a dyn for<'b> Fn(&X) -> &'b X);`. (#72404)
+ let mut introduce_suggestion = vec![];
+
+ let a_to_z_repeat_n = |n| {
+ (b'a'..=b'z').map(move |c| {
+ let mut s = '\''.to_string();
+ s.extend(std::iter::repeat(char::from(c)).take(n));
+ s
+ })
+ };
- // If all single char lifetime names are present, we wrap around and double the chars.
- let lt_name = (1..)
- .flat_map(a_to_z_repeat_n)
- .find(|lt| !lifetime_names.contains(&Symbol::intern(<)))
- .unwrap();
- let msg = format!(
- "consider making the {} lifetime-generic with a new `{}` lifetime",
- span_type.descr(),
- lt_name,
- );
- err.note(
- "for more information on higher-ranked polymorphism, visit \
+ // If all single char lifetime names are present, we wrap around and double the chars.
+ let lt_name = (1..)
+ .flat_map(a_to_z_repeat_n)
+ .find(|lt| !lifetime_names.contains(&Symbol::intern(<)))
+ .unwrap();
+ let msg = format!(
+ "consider making the {} lifetime-generic with a new `{}` lifetime",
+ span_type.descr(),
+ lt_name,
+ );
+ err.note(
+ "for more information on higher-ranked polymorphism, visit \
https://doc.rust-lang.org/nomicon/hrtb.html",
- );
- let for_sugg = span_type.suggestion(<_name);
- for param in params {
- if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(param.span) {
- if snippet.starts_with('&') && !snippet.starts_with("&'") {
- introduce_suggestion
- .push((param.span, format!("&{} {}", lt_name, &snippet[1..])));
- } else if let Some(stripped) = snippet.strip_prefix("&'_ ") {
- introduce_suggestion
- .push((param.span, format!("&{} {}", lt_name, stripped)));
+ );
+ let for_sugg = span_type.suggestion(<_name);
+ for param in params {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(param.span)
+ {
+ if snippet.starts_with('&') && !snippet.starts_with("&'") {
+ introduce_suggestion
+ .push((param.span, format!("&{} {}", lt_name, &snippet[1..])));
+ } else if let Some(stripped) = snippet.strip_prefix("&'_ ") {
+ introduce_suggestion
+ .push((param.span, format!("&{} {}", lt_name, stripped)));
+ }
+ }
+ }
+ introduce_suggestion.push((*for_span, for_sugg));
+ for ((span, _), formatter) in spans_with_counts.iter().zip(formatters.iter()) {
+ if let Some(formatter) = formatter {
+ introduce_suggestion.push((*span, formatter(<_name)));
}
}
+ err.multipart_suggestion_with_style(
+ &msg,
+ introduce_suggestion,
+ Applicability::MaybeIncorrect,
+ SuggestionStyle::ShowAlways,
+ );
}
- introduce_suggestion.push((*for_span, for_sugg));
- introduce_suggestion.push((span, formatter(<_name)));
- err.multipart_suggestion(&msg, introduce_suggestion, Applicability::MaybeIncorrect);
- }
- err.span_suggestion_verbose(
- span,
- &format!("consider using the `{}` lifetime", lifetime_names.iter().next().unwrap()),
- formatter(name),
- Applicability::MaybeIncorrect,
- );
- };
- let suggest_new = |err: &mut DiagnosticBuilder<'_>, sugg: &str| {
+ let spans_suggs: Vec<_> = formatters
+ .into_iter()
+ .zip(spans_with_counts.iter())
+ .filter_map(|(fmt, (span, _))| {
+ if let Some(formatter) = fmt { Some((formatter, span)) } else { None }
+ })
+ .map(|(formatter, span)| (*span, formatter(name)))
+ .collect();
+ err.multipart_suggestion_with_style(
+ &format!(
+ "consider using the `{}` lifetime",
+ lifetime_names.iter().next().unwrap()
+ ),
+ spans_suggs,
+ Applicability::MaybeIncorrect,
+ SuggestionStyle::ShowAlways,
+ );
+ };
+ let suggest_new = |err: &mut DiagnosticBuilder<'_>, suggs: Vec<Option<String>>| {
for missing in self.missing_named_lifetime_spots.iter().rev() {
let mut introduce_suggestion = vec![];
let msg;
(*span, span_type.suggestion("'a"))
}
MissingLifetimeSpot::Static => {
- let (span, sugg) = match snippet.as_deref() {
- Some("&") => (span.shrink_to_hi(), "'static ".to_owned()),
- Some("'_") => (span, "'static".to_owned()),
- Some(snippet) if !snippet.ends_with('>') => {
- if snippet == "" {
- (
- span,
- std::iter::repeat("'static")
- .take(count)
- .collect::<Vec<_>>()
- .join(", "),
- )
- } else {
- (
- span.shrink_to_hi(),
- format!(
- "<{}>",
+ let mut spans_suggs = Vec::new();
+ for ((span, count), snippet) in
+ spans_with_counts.iter().copied().zip(snippets.iter())
+ {
+ let (span, sugg) = match snippet.as_deref() {
+ Some("&") => (span.shrink_to_hi(), "'static ".to_owned()),
+ Some("'_") => (span, "'static".to_owned()),
+ Some(snippet) if !snippet.ends_with('>') => {
+ if snippet == "" {
+ (
+ span,
std::iter::repeat("'static")
.take(count)
.collect::<Vec<_>>()
- .join(", ")
- ),
- )
+ .join(", "),
+ )
+ } else {
+ (
+ span.shrink_to_hi(),
+ format!(
+ "<{}>",
+ std::iter::repeat("'static")
+ .take(count)
+ .collect::<Vec<_>>()
+ .join(", ")
+ ),
+ )
+ }
}
- }
- _ => continue,
- };
- err.span_suggestion_verbose(
- span,
+ _ => continue,
+ };
+ spans_suggs.push((span, sugg.to_string()));
+ }
+ err.multipart_suggestion_with_style(
"consider using the `'static` lifetime",
- sugg.to_string(),
+ spans_suggs,
Applicability::MaybeIncorrect,
+ SuggestionStyle::ShowAlways,
);
continue;
}
}
}
}
- introduce_suggestion.push((span, sugg.to_string()));
- err.multipart_suggestion(&msg, introduce_suggestion, Applicability::MaybeIncorrect);
+ for ((span, _), sugg) in spans_with_counts.iter().copied().zip(suggs.iter()) {
+ if let Some(sugg) = sugg {
+ introduce_suggestion.push((span, sugg.to_string()));
+ }
+ }
+ err.multipart_suggestion_with_style(
+ &msg,
+ introduce_suggestion,
+ Applicability::MaybeIncorrect,
+ SuggestionStyle::ShowAlways,
+ );
if should_break {
break;
}
};
let lifetime_names: Vec<_> = lifetime_names.iter().collect();
- match (&lifetime_names[..], snippet.as_deref()) {
- ([name], Some("&")) => {
- suggest_existing(err, &name.as_str()[..], &|name| format!("&{} ", name));
- }
- ([name], Some("'_")) => {
- suggest_existing(err, &name.as_str()[..], &|n| n.to_string());
- }
- ([name], Some("")) => {
- suggest_existing(err, &name.as_str()[..], &|n| format!("{}, ", n).repeat(count));
- }
- ([name], Some(snippet)) if !snippet.ends_with('>') => {
- let f = |name: &str| {
- format!(
- "{}<{}>",
- snippet,
- std::iter::repeat(name.to_string())
- .take(count)
- .collect::<Vec<_>>()
- .join(", ")
- )
- };
- suggest_existing(err, &name.as_str()[..], &f);
- }
- ([], Some("&")) if count == 1 => {
- suggest_new(err, "&'a ");
- }
- ([], Some("'_")) if count == 1 => {
- suggest_new(err, "'a");
+ match &lifetime_names[..] {
+ [name] => {
+ let mut suggs: Vec<Option<Box<dyn Fn(&str) -> String>>> = Vec::new();
+ for (snippet, (_, count)) in snippets.iter().zip(spans_with_counts.iter().copied())
+ {
+ suggs.push(match snippet.as_deref() {
+ Some("&") => Some(Box::new(|name| format!("&{} ", name))),
+ Some("'_") => Some(Box::new(|n| n.to_string())),
+ Some("") => Some(Box::new(move |n| format!("{}, ", n).repeat(count))),
+ Some(snippet) if !snippet.ends_with('>') => Some(Box::new(move |name| {
+ format!(
+ "{}<{}>",
+ snippet,
+ std::iter::repeat(name.to_string())
+ .take(count)
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ })),
+ _ => None,
+ });
+ }
+ suggest_existing(err, &name.as_str()[..], suggs);
}
- ([], Some(snippet)) if !snippet.ends_with('>') => {
- if snippet == "" {
- // This happens when we have `type Bar<'a> = Foo<T>` where we point at the space
- // before `T`. We will suggest `type Bar<'a> = Foo<'a, T>`.
- suggest_new(
- err,
- &std::iter::repeat("'a, ").take(count).collect::<Vec<_>>().join(""),
- );
- } else {
- suggest_new(
- err,
- &format!(
+ [] => {
+ let mut suggs = Vec::new();
+ for (snippet, (_, count)) in
+ snippets.iter().cloned().zip(spans_with_counts.iter().copied())
+ {
+ suggs.push(match snippet.as_deref() {
+ Some("&") => Some("&'a ".to_string()),
+ Some("'_") => Some("'a".to_string()),
+ Some("") => {
+ Some(std::iter::repeat("'a, ").take(count).collect::<Vec<_>>().join(""))
+ }
+ Some(snippet) => Some(format!(
"{}<{}>",
snippet,
- std::iter::repeat("'a").take(count).collect::<Vec<_>>().join(", ")
- ),
- );
+ std::iter::repeat("'a").take(count).collect::<Vec<_>>().join(", "),
+ )),
+ None => None,
+ });
}
+ suggest_new(err, suggs);
}
- (lts, ..) if lts.len() > 1 => {
+ lts if lts.len() > 1 => {
err.span_note(lifetime_spans, "these named lifetimes are available to use");
- if Some("") == snippet.as_deref() {
+
+ let mut spans_suggs: Vec<_> = Vec::new();
+ for ((span, _), snippet) in spans_with_counts.iter().copied().zip(snippets.iter()) {
+ match snippet.as_deref() {
+ Some("") => spans_suggs.push((span, "'lifetime, ".to_string())),
+ Some("&") => spans_suggs.push((span, "&'lifetime ".to_string())),
+ _ => {}
+ }
+ }
+
+ if spans_suggs.len() > 0 {
// This happens when we have `Foo<T>` where we point at the space before `T`,
// but this can be confusing so we give a suggestion with placeholders.
- err.span_suggestion_verbose(
- span,
+ err.multipart_suggestion_with_style(
"consider using one of the available lifetimes here",
- "'lifetime, ".repeat(count),
+ spans_suggs,
Applicability::HasPlaceholders,
+ SuggestionStyle::ShowAlways,
);
}
}
- _ => {}
+ _ => unreachable!(),
}
}
return;
}
- let span = lifetime_refs[0].span;
let mut late_depth = 0;
let mut scope = self.scope;
let mut lifetime_names = FxHashSet::default();
}
};
- let mut err = self.report_missing_lifetime_specifiers(span, lifetime_refs.len());
+ let mut spans: Vec<_> = lifetime_refs.iter().map(|lt| lt.span).collect();
+ spans.sort();
+ let mut spans_dedup = spans.clone();
+ spans_dedup.dedup();
+ let spans_with_counts: Vec<_> = spans_dedup
+ .into_iter()
+ .map(|sp| (sp, spans.iter().filter(|nsp| *nsp == &sp).count()))
+ .collect();
+
+ let mut err = self.report_missing_lifetime_specifiers(spans.clone(), lifetime_refs.len());
if let Some(params) = error {
// If there's no lifetime available, suggest `'static`.
lifetime_names.insert(kw::StaticLifetime);
}
}
+
self.add_missing_lifetime_specifiers_label(
&mut err,
- span,
- lifetime_refs.len(),
+ spans_with_counts,
&lifetime_names,
lifetime_spans,
error.unwrap_or(&[]),
not recorded",
cgu_user_name, cgu_name
);
- diag.span_fatal(error_span.0, &msg).raise();
+ diag.span_fatal(error_span.0, &msg)
}
}
}
use crate::lint;
use crate::search_paths::SearchPath;
-use crate::utils::{CanonicalizedPath, NativeLibKind};
+use crate::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
use crate::{early_error, early_warn, Session};
use rustc_data_structures::fx::FxHashSet;
target_triple: TargetTriple::from_triple(host_triple()),
test: false,
incremental: None,
- debugging_opts: basic_debugging_options(),
+ debugging_opts: Default::default(),
prints: Vec::new(),
borrowck_mode: BorrowckMode::Migrate,
- cg: basic_codegen_options(),
+ cg: Default::default(),
error_format: ErrorOutputType::default(),
externs: Externs(BTreeMap::new()),
extern_dep_specs: ExternDepSpecs(BTreeMap::new()),
user_cfg
}
-pub(super) fn build_target_config(opts: &Options, target_override: Option<Target>) -> Target {
- let target_result = target_override.map_or_else(|| Target::search(&opts.target_triple), Ok);
+pub(super) fn build_target_config(
+ opts: &Options,
+ target_override: Option<Target>,
+ sysroot: &PathBuf,
+) -> Target {
+ let target_result =
+ target_override.map_or_else(|| Target::search(&opts.target_triple, sysroot), Ok);
let target = target_result.unwrap_or_else(|e| {
early_error(
opts.error_format,
"",
"Link the generated crate(s) to the specified native
library NAME. The optional KIND can be one of
- static, framework, or dylib (the default).",
- "[KIND=]NAME",
+ static, framework, or dylib (the default).
+ Optional comma separated MODIFIERS (bundle|verbatim|whole-archive|as-needed)
+ may be specified each with a prefix of either '+' to
+ enable or '-' to disable.",
+ "[KIND[:MODIFIERS]=]NAME[:RENAME]",
),
make_crate_type_option(),
opt::opt_s("", "crate-name", "Specify the name of the crate being built", "NAME"),
}
}
-fn parse_libs(
- matches: &getopts::Matches,
+fn parse_native_lib_kind(kind: &str, error_format: ErrorOutputType) -> NativeLibKind {
+ match kind {
+ "dylib" => NativeLibKind::Dylib { as_needed: None },
+ "framework" => NativeLibKind::Framework { as_needed: None },
+ "static" => NativeLibKind::Static { bundle: None, whole_archive: None },
+ "static-nobundle" => {
+ early_warn(
+ error_format,
+ "library kind `static-nobundle` has been superseded by specifying \
+ `-bundle` on library kind `static`. Try `static:-bundle`",
+ );
+ NativeLibKind::Static { bundle: Some(false), whole_archive: None }
+ }
+ s => early_error(
+ error_format,
+ &format!("unknown library kind `{}`, expected one of dylib, framework, or static", s),
+ ),
+ }
+}
+
+fn parse_native_lib_modifiers(
+ is_nightly: bool,
+ mut kind: NativeLibKind,
+ modifiers: &str,
error_format: ErrorOutputType,
-) -> Vec<(String, Option<String>, NativeLibKind)> {
+) -> (NativeLibKind, Option<bool>) {
+ let mut verbatim = None;
+ for modifier in modifiers.split(',') {
+ let (modifier, value) = match modifier.strip_prefix(&['+', '-'][..]) {
+ Some(m) => (m, modifier.starts_with('+')),
+ None => early_error(
+ error_format,
+ "invalid linking modifier syntax, expected '+' or '-' prefix \
+ before one of: bundle, verbatim, whole-archive, as-needed",
+ ),
+ };
+
+ if !is_nightly {
+ early_error(
+ error_format,
+ "linking modifiers are currently unstable and only accepted on \
+ the nightly compiler",
+ );
+ }
+
+ match (modifier, &mut kind) {
+ ("bundle", NativeLibKind::Static { bundle, .. }) => {
+ *bundle = Some(value);
+ }
+ ("bundle", _) => early_error(
+ error_format,
+ "bundle linking modifier is only compatible with \
+ `static` linking kind",
+ ),
+
+ ("verbatim", _) => verbatim = Some(value),
+
+ ("whole-archive", NativeLibKind::Static { whole_archive, .. }) => {
+ *whole_archive = Some(value);
+ }
+ ("whole-archive", _) => early_error(
+ error_format,
+ "whole-archive linking modifier is only compatible with \
+ `static` linking kind",
+ ),
+
+ ("as-needed", NativeLibKind::Dylib { as_needed })
+ | ("as-needed", NativeLibKind::Framework { as_needed }) => {
+ *as_needed = Some(value);
+ }
+ ("as-needed", _) => early_error(
+ error_format,
+ "as-needed linking modifier is only compatible with \
+ `dylib` and `framework` linking kinds",
+ ),
+
+ _ => early_error(
+ error_format,
+ &format!(
+ "unrecognized linking modifier `{}`, expected one \
+ of: bundle, verbatim, whole-archive, as-needed",
+ modifier
+ ),
+ ),
+ }
+ }
+
+ (kind, verbatim)
+}
+
+fn parse_libs(matches: &getopts::Matches, error_format: ErrorOutputType) -> Vec<NativeLib> {
+ let is_nightly = nightly_options::match_is_nightly_build(matches);
matches
.opt_strs("l")
.into_iter()
.map(|s| {
- // Parse string of the form "[KIND=]lib[:new_name]",
- // where KIND is one of "dylib", "framework", "static".
- let (name, kind) = match s.split_once('=') {
- None => (s, NativeLibKind::Unspecified),
+ // Parse string of the form "[KIND[:MODIFIERS]=]lib[:new_name]",
+ // where KIND is one of "dylib", "framework", "static" and
+ // where MODIFIERS are a comma separated list of supported modifiers
+ // (bundle, verbatim, whole-archive, as-needed). Each modifier is prefixed
+ // with either + or - to indicate whether it is enabled or disabled.
+ // The last value specified for a given modifier wins.
+ let (name, kind, verbatim) = match s.split_once('=') {
+ None => (s, NativeLibKind::Unspecified, None),
Some((kind, name)) => {
- let kind = match kind {
- "dylib" => NativeLibKind::Dylib,
- "framework" => NativeLibKind::Framework,
- "static" => NativeLibKind::StaticBundle,
- "static-nobundle" => NativeLibKind::StaticNoBundle,
- s => {
- early_error(
- error_format,
- &format!(
- "unknown library kind `{}`, expected \
- one of dylib, framework, or static",
- s
- ),
- );
+ let (kind, verbatim) = match kind.split_once(':') {
+ None => (parse_native_lib_kind(kind, error_format), None),
+ Some((kind, modifiers)) => {
+ let kind = parse_native_lib_kind(kind, error_format);
+ parse_native_lib_modifiers(is_nightly, kind, modifiers, error_format)
}
};
- (name.to_string(), kind)
+ (name.to_string(), kind, verbatim)
}
};
- if kind == NativeLibKind::StaticNoBundle
- && !nightly_options::match_is_nightly_build(matches)
- {
- early_error(
- error_format,
- "the library kind 'static-nobundle' is only \
- accepted on the nightly compiler",
- );
- }
+
let (name, new_name) = match name.split_once(':') {
None => (name, None),
Some((name, new_name)) => (name.to_string(), Some(new_name.to_owned())),
};
- (name, new_name, kind)
+ NativeLib { name, new_name, kind, verbatim }
})
.collect()
}
let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format);
- let mut debugging_opts = build_debugging_options(matches, error_format);
+ let mut debugging_opts = DebuggingOptions::build(matches, error_format);
check_debug_option_stability(&debugging_opts, error_format, json_rendered);
if !debugging_opts.unstable_options && json_unused_externs {
let output_types = parse_output_types(&debugging_opts, matches, error_format);
- let mut cg = build_codegen_options(matches, error_format);
+ let mut cg = CodegenOptions::build(matches, error_format);
let (disable_thinlto, mut codegen_units) = should_override_cgus_and_disable_thinlto(
&output_types,
matches,
};
use crate::lint;
use crate::options::WasiExecModel;
- use crate::utils::NativeLibKind;
+ use crate::utils::{NativeLib, NativeLibKind};
use rustc_feature::UnstableFeatures;
use rustc_span::edition::Edition;
use rustc_target::spec::{CodeModel, MergeFunctions, PanicStrategy, RelocModel};
DebugInfo,
UnstableFeatures,
OutputTypes,
+ NativeLib,
NativeLibKind,
SanitizerSet,
CFGuard,
PathBuf,
(PathBuf, PathBuf),
CrateType,
+ NativeLib,
(String, lint::Level),
- (String, Option<String>, NativeLibKind),
(String, u64)
);
pub use self::FileMatch::*;
-use std::borrow::Cow;
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
// Returns a list of directories where target-specific tool binaries are located.
pub fn get_tools_search_paths(&self, self_contained: bool) -> Vec<PathBuf> {
- let mut p = PathBuf::from(self.sysroot);
- p.push(find_libdir(self.sysroot).as_ref());
- p.push(RUST_LIB_DIR);
- p.push(&self.triple);
- p.push("bin");
+ let rustlib_path = rustc_target::target_rustlib_path(self.sysroot, &self.triple);
+ let p = std::array::IntoIter::new([
+ Path::new(&self.sysroot),
+ Path::new(&rustlib_path),
+ Path::new("bin"),
+ ])
+ .collect::<PathBuf>();
if self_contained { vec![p.clone(), p.join("self-contained")] } else { vec![p] }
}
}
-pub fn relative_target_lib_path(sysroot: &Path, target_triple: &str) -> PathBuf {
- let mut p = PathBuf::from(find_libdir(sysroot).as_ref());
- assert!(p.is_relative());
- p.push(RUST_LIB_DIR);
- p.push(target_triple);
- p.push("lib");
- p
-}
-
pub fn make_target_lib_path(sysroot: &Path, target_triple: &str) -> PathBuf {
- sysroot.join(&relative_target_lib_path(sysroot, target_triple))
+ let rustlib_path = rustc_target::target_rustlib_path(sysroot, target_triple);
+ std::array::IntoIter::new([sysroot, Path::new(&rustlib_path), Path::new("lib")])
+ .collect::<PathBuf>()
}
// This function checks if sysroot is found using env::args().next(), and if it
return None;
}
+ // Pop off `bin/rustc`, obtaining the suspected sysroot.
p.pop();
p.pop();
- let mut libdir = PathBuf::from(&p);
- libdir.push(find_libdir(&p).as_ref());
- if libdir.exists() { Some(p) } else { None }
+ // Look for the target rustlib directory in the suspected sysroot.
+ let mut rustlib_path = rustc_target::target_rustlib_path(&p, "dummy");
+ rustlib_path.pop(); // pop off the dummy target.
+ if rustlib_path.exists() { Some(p) } else { None }
}
None => None,
}
// use env::current_exe() to imply sysroot.
from_env_args_next().unwrap_or_else(from_current_exe)
}
-
-// The name of the directory rustc expects libraries to be located.
-fn find_libdir(sysroot: &Path) -> Cow<'static, str> {
- // FIXME: This is a quick hack to make the rustc binary able to locate
- // Rust libraries in Linux environments where libraries might be installed
- // to lib64/lib32. This would be more foolproof by basing the sysroot off
- // of the directory where `librustc_driver` is located, rather than
- // where the rustc binary is.
- // If --libdir is set during configuration to the value other than
- // "lib" (i.e., non-default), this value is used (see issue #16552).
-
- #[cfg(target_pointer_width = "64")]
- const PRIMARY_LIB_DIR: &str = "lib64";
-
- #[cfg(target_pointer_width = "32")]
- const PRIMARY_LIB_DIR: &str = "lib32";
-
- const SECONDARY_LIB_DIR: &str = "lib";
-
- match option_env!("CFG_LIBDIR_RELATIVE") {
- None | Some("lib") => {
- if sysroot.join(PRIMARY_LIB_DIR).join(RUST_LIB_DIR).exists() {
- PRIMARY_LIB_DIR.into()
- } else {
- SECONDARY_LIB_DIR.into()
- }
- }
- Some(libdir) => libdir.into(),
- }
-}
-
-// The name of rustc's own place to organize libraries.
-// Used to be "rustc", now the default is "rustlib"
-const RUST_LIB_DIR: &str = "rustlib";
use crate::early_error;
use crate::lint;
use crate::search_paths::SearchPath;
-use crate::utils::NativeLibKind;
+use crate::utils::NativeLib;
use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy, SanitizerSet};
use rustc_target::spec::{RelocModel, RelroLevel, SplitDebuginfo, TargetTriple, TlsModel};
describe_lints: bool [UNTRACKED],
output_types: OutputTypes [TRACKED],
search_paths: Vec<SearchPath> [UNTRACKED],
- libs: Vec<(String, Option<String>, NativeLibKind)> [TRACKED],
+ libs: Vec<NativeLib> [TRACKED],
maybe_sysroot: Option<PathBuf> [UNTRACKED],
target_triple: TargetTriple [TRACKED],
/// generated code to parse an option into its respective field in the struct. There are a few
/// hand-written parsers for parsing specific types of values in this module.
macro_rules! options {
- ($struct_name:ident, $setter_name:ident, $defaultfn:ident,
- $buildfn:ident, $prefix:expr, $outputname:expr,
- $stat:ident,
+ ($struct_name:ident, $stat:ident, $prefix:expr, $outputname:expr,
$($( #[$attr:meta] )* $opt:ident : $t:ty = (
$init:expr,
$parse:ident,
#[derive(Clone)]
pub struct $struct_name { $(pub $opt: $t),* }
- pub fn $defaultfn() -> $struct_name {
- $struct_name { $( $( #[$attr] )* $opt: $init),* }
- }
-
- pub fn $buildfn(matches: &getopts::Matches, error_format: ErrorOutputType) -> $struct_name
- {
- let mut op = $defaultfn();
- for option in matches.opt_strs($prefix) {
- let (key, value) = match option.split_once('=') {
- None => (option, None),
- Some((k, v)) => (k.to_string(), Some(v)),
- };
- let option_to_lookup = key.replace("-", "_");
- let mut found = false;
- for &(candidate, setter, type_desc, _) in $stat {
- if option_to_lookup != candidate { continue }
- if !setter(&mut op, value) {
- match value {
- None => {
- early_error(error_format, &format!("{0} option `{1}` requires \
- {2} ({3} {1}=<value>)",
- $outputname, key,
- type_desc, $prefix))
- }
- Some(value) => {
- early_error(error_format, &format!("incorrect value `{}` for {} \
- option `{}` - {} was expected",
- value, $outputname,
- key, type_desc))
- }
- }
- }
- found = true;
- break;
- }
- if !found {
- early_error(error_format, &format!("unknown {} option: `{}`",
- $outputname, key));
- }
+ impl Default for $struct_name {
+ fn default() -> $struct_name {
+ $struct_name { $( $( #[$attr] )* $opt: $init),* }
}
- return op;
}
impl $struct_name {
+ pub fn build(
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+ ) -> $struct_name {
+ build_options(matches, $stat, $prefix, $outputname, error_format)
+ }
+
fn dep_tracking_hash(&self, _for_crate_hash: bool, error_format: ErrorOutputType) -> u64 {
let mut sub_hashes = BTreeMap::new();
$({
}
}
- pub type $setter_name = fn(&mut $struct_name, v: Option<&str>) -> bool;
- pub const $stat: &[(&str, $setter_name, &str, &str)] =
- &[ $( (stringify!($opt), $crate::options::parse::$opt, $crate::options::desc::$parse, $desc) ),* ];
-
- // Sometimes different options need to build a common structure.
- // That structure can kept in one of the options' fields, the others become dummy.
- macro_rules! redirect_field {
- ($cg:ident.link_arg) => { $cg.link_args };
- ($cg:ident.pre_link_arg) => { $cg.pre_link_args };
- ($cg:ident.$field:ident) => { $cg.$field };
- }
+ pub const $stat: OptionDescrs<$struct_name> =
+ &[ $( (stringify!($opt), $opt, desc::$parse, $desc) ),* ];
$(
- pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool {
- $crate::options::parse::$parse(&mut redirect_field!(cg.$opt), v)
+ fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool {
+ parse::$parse(&mut redirect_field!(cg.$opt), v)
}
)*
) }
+// Sometimes different options need to build a common structure.
+// That structure can be kept in one of the options' fields, the others become dummy.
+macro_rules! redirect_field {
+ ($cg:ident.link_arg) => {
+ $cg.link_args
+ };
+ ($cg:ident.pre_link_arg) => {
+ $cg.pre_link_args
+ };
+ ($cg:ident.$field:ident) => {
+ $cg.$field
+ };
+}
+
+type OptionSetter<O> = fn(&mut O, v: Option<&str>) -> bool;
+type OptionDescrs<O> = &'static [(&'static str, OptionSetter<O>, &'static str, &'static str)];
+
+fn build_options<O: Default>(
+ matches: &getopts::Matches,
+ descrs: OptionDescrs<O>,
+ prefix: &str,
+ outputname: &str,
+ error_format: ErrorOutputType,
+) -> O {
+ let mut op = O::default();
+ for option in matches.opt_strs(prefix) {
+ let (key, value) = match option.split_once('=') {
+ None => (option, None),
+ Some((k, v)) => (k.to_string(), Some(v)),
+ };
+
+ let option_to_lookup = key.replace("-", "_");
+ match descrs.iter().find(|(name, ..)| *name == option_to_lookup) {
+ Some((_, setter, type_desc, _)) => {
+ if !setter(&mut op, value) {
+ match value {
+ None => early_error(
+ error_format,
+ &format!(
+ "{0} option `{1}` requires {2} ({3} {1}=<value>)",
+ outputname, key, type_desc, prefix
+ ),
+ ),
+ Some(value) => early_error(
+ error_format,
+ &format!(
+ "incorrect value `{}` for {} option `{}` - {} was expected",
+ value, outputname, key, type_desc
+ ),
+ ),
+ }
+ }
+ }
+ None => early_error(error_format, &format!("unknown {} option: `{}`", outputname, key)),
+ }
+ }
+ return op;
+}
+
#[allow(non_upper_case_globals)]
mod desc {
pub const parse_no_flag: &str = "no value";
}
}
-options! {CodegenOptions, CodegenSetter, basic_codegen_options,
- build_codegen_options, "C", "codegen",
- CG_OPTIONS,
+options! {
+ CodegenOptions, CG_OPTIONS, "C", "codegen",
// This list is in alphabetical order.
//
// - src/doc/rustc/src/codegen-options/index.md
}
-options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
- build_debugging_options, "Z", "debugging",
- DB_OPTIONS,
+options! {
+ DebuggingOptions, DB_OPTIONS, "Z", "debugging",
// This list is in alphabetical order.
//
"gather statistics about the input (default: no)"),
instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
"instrument the generated code to support LLVM source-based code coverage \
- reports (note, the compiler build config must include `profiler = true`, \
- and is mutually exclusive with `-C profile-generate`/`-C profile-use`); \
- implies `-Z symbol-mangling-version=v0`; disables/overrides some Rust \
- optimizations. Optional values are: `=all` (default coverage), \
- `=except-unused-generics`, `=except-unused-functions`, or `=off` \
- (default: instrument-coverage=off)"),
+ reports (note, the compiler build config must include `profiler = true`); \
+ implies `-Z symbol-mangling-version=v0`. Optional values are:
+ `=all` (implicit value)
+ `=except-unused-generics`
+ `=except-unused-functions`
+ `=off` (default)"),
instrument_mcount: bool = (false, parse_bool, [TRACKED],
"insert function instrument code for mcount-based tracing (default: no)"),
keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],
"MIR optimization level (0-4; default: 1 in non optimized builds and 2 in optimized builds)"),
mutable_noalias: Option<bool> = (None, parse_opt_bool, [TRACKED],
"emit noalias metadata for mutable references (default: yes for LLVM >= 12, otherwise no)"),
- new_llvm_pass_manager: bool = (false, parse_bool, [TRACKED],
+ new_llvm_pass_manager: Option<bool> = (None, parse_opt_bool, [TRACKED],
"use new LLVM pass manager (default: no)"),
nll_facts: bool = (false, parse_bool, [UNTRACKED],
"dump facts from NLL analysis into side files (default: no)"),
}
pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
- self.diagnostic().span_fatal(sp, msg).raise()
+ self.diagnostic().span_fatal(sp, msg)
}
pub fn span_fatal_with_code<S: Into<MultiSpan>>(
&self,
msg: &str,
code: DiagnosticId,
) -> ! {
- self.diagnostic().span_fatal_with_code(sp, msg, code).raise()
+ self.diagnostic().span_fatal_with_code(sp, msg, code)
}
pub fn fatal(&self, msg: &str) -> ! {
self.diagnostic().fatal(msg).raise()
pub fn warn(&self, msg: &str) {
self.diagnostic().warn(msg)
}
- pub fn opt_span_warn<S: Into<MultiSpan>>(&self, opt_sp: Option<S>, msg: &str) {
- match opt_sp {
- Some(sp) => self.span_warn(sp, msg),
- None => self.warn(msg),
- }
- }
/// Delay a span_bug() call until abort_if_errors()
#[track_caller]
pub fn delay_span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
DiagnosticOutput::Raw(write) => Some(write),
};
- let target_cfg = config::build_target_config(&sopts, target_override);
+ let sysroot = match &sopts.maybe_sysroot {
+ Some(sysroot) => sysroot.clone(),
+ None => filesearch::get_or_default_sysroot(),
+ };
+
+ let target_cfg = config::build_target_config(&sopts, target_override, &sysroot);
let host_triple = TargetTriple::from_triple(config::host_triple());
- let host = Target::search(&host_triple).unwrap_or_else(|e| {
+ let host = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| {
early_error(sopts.error_format, &format!("Error loading host specification: {}", e))
});
let mut parse_sess = ParseSess::with_span_handler(span_diagnostic, source_map);
parse_sess.assume_incomplete_release = sopts.debugging_opts.assume_incomplete_release;
- let sysroot = match &sopts.maybe_sysroot {
- Some(sysroot) => sysroot.clone(),
- None => filesearch::get_or_default_sysroot(),
- };
let host_triple = config::host_triple();
let target_triple = sopts.target_triple.triple();
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
pub enum NativeLibKind {
- /// Static library (e.g. `libfoo.a` on Linux or `foo.lib` on Windows/MSVC) included
- /// when linking a final binary, but not when archiving an rlib.
- StaticNoBundle,
- /// Static library (e.g. `libfoo.a` on Linux or `foo.lib` on Windows/MSVC) included
- /// when linking a final binary, but also included when archiving an rlib.
- StaticBundle,
+ /// Static library (e.g. `libfoo.a` on Linux or `foo.lib` on Windows/MSVC)
+ Static {
+ /// Whether to bundle objects from static library into produced rlib
+ bundle: Option<bool>,
+ /// Whether to link static library without throwing any object files away
+ whole_archive: Option<bool>,
+ },
/// Dynamic library (e.g. `libfoo.so` on Linux)
/// or an import library corresponding to a dynamic library (e.g. `foo.lib` on Windows/MSVC).
- Dylib,
+ Dylib {
+ /// Whether the dynamic library will be linked only if it satisfies some undefined symbols
+ as_needed: Option<bool>,
+ },
/// Dynamic library (e.g. `foo.dll` on Windows) without a corresponding import library.
RawDylib,
/// A macOS-specific kind of dynamic libraries.
- Framework,
+ Framework {
+ /// Whether the framework will be linked only if it satisfies some undefined symbols
+ as_needed: Option<bool>,
+ },
/// The library kind wasn't specified, `Dylib` is currently used as a default.
Unspecified,
}
rustc_data_structures::impl_stable_hash_via_hash!(NativeLibKind);
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+pub struct NativeLib {
+ pub name: String,
+ pub new_name: Option<String>,
+ pub kind: NativeLibKind,
+ pub verbatim: Option<bool>,
+}
+
+rustc_data_structures::impl_stable_hash_via_hash!(NativeLib);
+
/// A path that has been canonicalized along with its original, non-canonicalized form
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct CanonicalizedPath {
impl FileLoader for RealFileLoader {
fn file_exists(&self, path: &Path) -> bool {
- fs::metadata(path).is_ok()
+ path.exists()
}
fn read_file(&self, path: &Path) -> io::Result<String> {
// Interned format.
debug_assert!(self.ctxt_or_zero == 0);
let index = self.base_or_index;
- with_span_interner(|interner| *interner.get(index))
+ with_span_interner(|interner| interner.spans[index as usize])
}
}
}
let (index, _) = self.spans.insert_full(*span_data);
index as u32
}
-
- #[inline]
- fn get(&self, index: u32) -> &SpanData {
- &self.spans[index as usize]
- }
}
// If an interner exists, return it. Otherwise, prepare a fresh one.
minnumf64,
mips_target_feature,
misc,
+ modifiers,
module,
module_path,
more_struct_aliases,
naked,
naked_functions,
name,
+ native_link_modifiers,
+ native_link_modifiers_as_needed,
+ native_link_modifiers_bundle,
+ native_link_modifiers_verbatim,
+ native_link_modifiers_whole_archive,
ne,
nearbyintf32,
nearbyintf64,
#![feature(associated_type_bounds)]
#![feature(exhaustive_patterns)]
+use std::path::{Path, PathBuf};
+
#[macro_use]
extern crate rustc_macros;
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
pub trait HashStableContext {}
+
+/// The name of rustc's own place to organize libraries.
+///
+/// Used to be `rustc`, now the default is `rustlib`.
+const RUST_LIB_DIR: &str = "rustlib";
+
+/// Returns a `rustlib` path for this particular target, relative to the provided sysroot.
+///
+/// For example: `target_rustlib_path("/usr", "x86_64-unknown-linux-gnu")` =>
+/// `"lib*/rustlib/x86_64-unknown-linux-gnu"`.
+pub fn target_rustlib_path(sysroot: &Path, target_triple: &str) -> PathBuf {
+ let libdir = find_libdir(sysroot);
+ std::array::IntoIter::new([
+ Path::new(libdir.as_ref()),
+ Path::new(RUST_LIB_DIR),
+ Path::new(target_triple),
+ ])
+ .collect::<PathBuf>()
+}
+
+/// The name of the directory rustc expects libraries to be located.
+fn find_libdir(sysroot: &Path) -> std::borrow::Cow<'static, str> {
+ // FIXME: This is a quick hack to make the rustc binary able to locate
+ // Rust libraries in Linux environments where libraries might be installed
+ // to lib64/lib32. This would be more foolproof by basing the sysroot off
+ // of the directory where `librustc_driver` is located, rather than
+ // where the rustc binary is.
+ // If --libdir is set during configuration to the value other than
+ // "lib" (i.e., non-default), this value is used (see issue #16552).
+
+ #[cfg(target_pointer_width = "64")]
+ const PRIMARY_LIB_DIR: &str = "lib64";
+
+ #[cfg(target_pointer_width = "32")]
+ const PRIMARY_LIB_DIR: &str = "lib32";
+
+ const SECONDARY_LIB_DIR: &str = "lib";
+
+ match option_env!("CFG_LIBDIR_RELATIVE") {
+ None | Some("lib") => {
+ if sysroot.join(PRIMARY_LIB_DIR).join(RUST_LIB_DIR).exists() {
+ PRIMARY_LIB_DIR.into()
+ } else {
+ SECONDARY_LIB_DIR.into()
+ }
+ }
+ Some(libdir) => libdir.into(),
+ }
+}
late_link_args.insert(
LinkerFlavor::Gcc,
vec![
+ // The illumos libc contains a stack unwinding implementation, as
+ // does libgcc_s. The latter implementation includes several
+ // additional symbols that are not always in base libc. To force
+ // the consistent use of just one unwinder, we ensure libc appears
+ // after libgcc_s in the NEEDED list for the resultant binary by
+ // ignoring any attempts to add it as a dynamic dependency until the
+ // very end.
+ // FIXME: This should be replaced by a more complete and generic
+ // mechanism for controlling the order of library arguments passed
+ // to the linker.
+ "-lc".to_string(),
// LLVM will insert calls to the stack protector functions
// "__stack_chk_fail" and "__stack_chk_guard" into code in native
// object files. Some platforms include these symbols directly in
Ok(base)
}
- /// Search RUST_TARGET_PATH for a JSON file specifying the given target
- /// triple. Note that it could also just be a bare filename already, so also
- /// check for that. If one of the hardcoded targets we know about, just
- /// return it directly.
+ /// Search for a JSON file specifying the given target triple.
///
- /// The error string could come from any of the APIs called, including
- /// filesystem access and JSON decoding.
- pub fn search(target_triple: &TargetTriple) -> Result<Target, String> {
+ /// If none is found in `$RUST_TARGET_PATH`, look for a file called `target.json` inside the
+ /// sysroot under the target-triple's `rustlib` directory. Note that it could also just be a
+ /// bare filename already, so also check for that. If one of the hardcoded targets we know
+ /// about, just return it directly.
+ ///
+ /// The error string could come from any of the APIs called, including filesystem access and
+ /// JSON decoding.
+ pub fn search(target_triple: &TargetTriple, sysroot: &PathBuf) -> Result<Target, String> {
use rustc_serialize::json;
use std::env;
use std::fs;
let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_default();
- // FIXME 16351: add a sane default search path?
-
for dir in env::split_paths(&target_path) {
let p = dir.join(&path);
if p.is_file() {
return load_file(&p);
}
}
+
+ // Additionally look in the sysroot under `lib/rustlib/<triple>/target.json`
+ // as a fallback.
+ let rustlib_path = crate::target_rustlib_path(&sysroot, &target_triple);
+ let p = std::array::IntoIter::new([
+ Path::new(sysroot),
+ Path::new(&rustlib_path),
+ Path::new("target.json"),
+ ])
+ .collect::<PathBuf>();
+ if p.is_file() {
+ return load_file(&p);
+ }
+
Err(format!("Could not find specification for target {:?}", target_triple))
}
TargetTriple::TargetPath(ref target_path) => {
base.stack_probes = StackProbeType::Call;
Target {
- llvm_target: "x86_64-unknown-none-elf".to_string(),
+ llvm_target: "x86_64-unknown-hermit".to_string(),
pointer_width: 64,
data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
.to_string(),
}
/// Returns `true` if the global caches can be used.
- /// Do note that if the type itself is not in the
- /// global tcx, the local caches will be used.
fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
// If there are any inference variables in the `ParamEnv`, then we
// always use a cache local to this particular scope. Otherwise, we
) => false,
(ParamCandidate(other), ParamCandidate(victim)) => {
- if other.value == victim.value && victim.constness == Constness::NotConst {
+ let value_same_except_bound_vars = other.value.skip_binder()
+ == victim.value.skip_binder()
+ && !other.value.skip_binder().has_escaping_bound_vars();
+ if value_same_except_bound_vars {
+ // See issue #84398. In short, we can generate multiple ParamCandidates which are
+ // the same except for unused bound vars. Just pick the one with the fewest bound vars
+ // or the current one if tied (they should both evaluate to the same answer). This is
+ // probably best characterized as a "hack", since we might prefer to just do our
+ // best to *not* create essentially duplicate candidates in the first place.
+ other.value.bound_vars().len() <= victim.value.bound_vars().len()
+ } else if other.value == victim.value && victim.constness == Constness::NotConst {
// Drop otherwise equivalent non-const candidates in favor of const candidates.
true
} else {
pub fn ty_is_representable<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, sp: Span) -> Representability {
debug!("is_type_representable: {:?}", ty);
// To avoid a stack overflow when checking an enum variant or struct that
- // contains a different, structurally recursive type, maintain a stack
- // of seen types and check recursion for each of them (issues #3008, #3779).
+ // contains a different, structurally recursive type, maintain a stack of
+ // seen types and check recursion for each of them (issues #3008, #3779,
+ // #74224, #84611). `shadow_seen` contains the full stack and `seen` only
+ // the one for the current type (e.g. if we have structs A and B, B contains
+ // a field of type A, and we're currently looking at B, then `seen` will be
+ // cleared when recursing to check A, but `shadow_seen` won't, so that we
+ // can catch cases of mutual recursion where A also contains B).
let mut seen: Vec<Ty<'_>> = Vec::new();
+ let mut shadow_seen: Vec<&'tcx ty::AdtDef> = Vec::new();
let mut representable_cache = FxHashMap::default();
- let r = is_type_structurally_recursive(tcx, sp, &mut seen, &mut representable_cache, ty);
+ let mut force_result = false;
+ let r = is_type_structurally_recursive(
+ tcx,
+ sp,
+ &mut seen,
+ &mut shadow_seen,
+ &mut representable_cache,
+ ty,
+ &mut force_result,
+ );
debug!("is_type_representable: {:?} is {:?}", ty, r);
r
}
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
+ force_result: &mut bool,
) -> Representability {
+ debug!("are_inner_types_recursive({:?}, {:?}, {:?})", ty, seen, shadow_seen);
match ty.kind() {
ty::Tuple(..) => {
// Find non representable
- fold_repr(
- ty.tuple_fields().map(|ty| {
- is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty)
- }),
- )
+ fold_repr(ty.tuple_fields().map(|ty| {
+ is_type_structurally_recursive(
+ tcx,
+ sp,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ )
+ }))
}
// Fixed-length vectors.
// FIXME(#11924) Behavior undecided for zero-length vectors.
- ty::Array(ty, _) => is_type_structurally_recursive(tcx, sp, seen, representable_cache, ty),
+ ty::Array(ty, _) => is_type_structurally_recursive(
+ tcx,
+ sp,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ ),
ty::Adt(def, substs) => {
// Find non representable fields with their spans
fold_repr(def.all_fields().map(|field| {
Some(hir::Node::Field(field)) => field.ty.span,
_ => sp,
};
- match is_type_structurally_recursive(tcx, span, seen, representable_cache, ty) {
- Representability::SelfRecursive(_) => {
- Representability::SelfRecursive(vec![span])
+
+ let mut result = None;
+
+ // First, we check whether the field type per se is representable.
+ // This catches cases as in #74224 and #84611. There is a special
+ // case related to mutual recursion, though; consider this example:
+ //
+ // struct A<T> {
+ // z: T,
+ // x: B<T>,
+ // }
+ //
+ // struct B<T> {
+ // y: A<T>
+ // }
+ //
+ // Here, without the following special case, both A and B are
+ // ContainsRecursive, which is a problem because we only report
+ // errors for SelfRecursive. We fix this by detecting this special
+ // case (shadow_seen.first() is the type we are originally
+ // interested in, and if we ever encounter the same AdtDef again,
+ // we know that it must be SelfRecursive) and "forcibly" returning
+ // SelfRecursive (by setting force_result, which tells the calling
+ // invocations of are_inner_types_recursive to forward the
+ // result without adjusting).
+ if shadow_seen.len() > seen.len() && shadow_seen.first() == Some(def) {
+ *force_result = true;
+ result = Some(Representability::SelfRecursive(vec![span]));
+ }
+
+ if result == None {
+ result = Some(Representability::Representable);
+
+ // Now, we check whether the field types per se are representable, e.g.
+ // for struct Foo { x: Option<Foo> }, we first check whether Option<_>
+ // by itself is representable (which it is), and the nesting of Foo
+ // will be detected later. This is necessary for #74224 and #84611.
+
+ // If we have encountered an ADT definition that we have not seen
+ // before (no need to check them twice), recurse to see whether that
+ // definition is SelfRecursive. If so, we must be ContainsRecursive.
+ if shadow_seen.len() > 1
+ && !shadow_seen
+ .iter()
+ .take(shadow_seen.len() - 1)
+ .any(|seen_def| seen_def == def)
+ {
+ let adt_def_id = def.did;
+ let raw_adt_ty = tcx.type_of(adt_def_id);
+ debug!("are_inner_types_recursive: checking nested type: {:?}", raw_adt_ty);
+
+ // Check independently whether the ADT is SelfRecursive. If so,
+ // we must be ContainsRecursive (except for the special case
+ // mentioned above).
+ let mut nested_seen: Vec<Ty<'_>> = vec![];
+ result = Some(
+ match is_type_structurally_recursive(
+ tcx,
+ span,
+ &mut nested_seen,
+ shadow_seen,
+ representable_cache,
+ raw_adt_ty,
+ force_result,
+ ) {
+ Representability::SelfRecursive(_) => {
+ if *force_result {
+ Representability::SelfRecursive(vec![span])
+ } else {
+ Representability::ContainsRecursive
+ }
+ }
+ x => x,
+ },
+ );
+ }
+
+ // We only enter the following block if the type looks representable
+ // so far. This is necessary for cases such as this one (#74224):
+ //
+ // struct A<T> {
+ // x: T,
+ // y: A<A<T>>,
+ // }
+ //
+ // struct B {
+ // z: A<usize>
+ // }
+ //
+ // When checking B, we recurse into A and check field y of type
+ // A<A<usize>>. We haven't seen this exact type before, so we recurse
+ // into A<A<usize>>, which contains A<A<A<usize>>>, and so forth,
+ // ad infinitum. We can prevent this from happening by first checking
+ // A separately (the code above) and only checking for nested Bs if
+ // A actually looks representable (which it wouldn't in this example).
+ if result == Some(Representability::Representable) {
+ // Now, even if the type is representable (e.g. Option<_>),
+ // it might still contribute to a recursive type, e.g.:
+ // struct Foo { x: Option<Option<Foo>> }
+ // These cases are handled by passing the full `seen`
+ // stack to is_type_structurally_recursive (instead of the
+ // empty `nested_seen` above):
+ result = Some(
+ match is_type_structurally_recursive(
+ tcx,
+ span,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ ) {
+ Representability::SelfRecursive(_) => {
+ Representability::SelfRecursive(vec![span])
+ }
+ x => x,
+ },
+ );
}
- x => x,
}
+
+ result.unwrap()
}))
}
ty::Closure(..) => {
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
+ force_result: &mut bool,
) -> Representability {
debug!("is_type_structurally_recursive: {:?} {:?}", ty, sp);
if let Some(representability) = representable_cache.get(ty) {
return representability.clone();
}
- let representability =
- is_type_structurally_recursive_inner(tcx, sp, seen, representable_cache, ty);
+ let representability = is_type_structurally_recursive_inner(
+ tcx,
+ sp,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ );
representable_cache.insert(ty, representability.clone());
representability
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
+ force_result: &mut bool,
) -> Representability {
match ty.kind() {
ty::Adt(def, _) => {
{
+ debug!("is_type_structurally_recursive_inner: adt: {:?}, seen: {:?}", ty, seen);
+
// Iterate through stack of previously seen types.
let mut iter = seen.iter();
// will recurse infinitely for some inputs.
//
// It is important that we DO take generic parameters into account
- // here, so that code like this is considered SelfRecursive, not
- // ContainsRecursive:
+ // here, because nesting e.g. Options is allowed (as long as the
+ // definition of Option doesn't itself include an Option field, which
+ // would be a case of SelfRecursive above). The following, too, counts
+ // as SelfRecursive:
//
// struct Foo { Option<Option<Foo>> }
// For structs and enums, track all previously seen types by pushing them
// onto the 'seen' stack.
seen.push(ty);
- let out = are_inner_types_recursive(tcx, sp, seen, representable_cache, ty);
+ shadow_seen.push(def);
+ let out = are_inner_types_recursive(
+ tcx,
+ sp,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ );
+ shadow_seen.pop();
seen.pop();
out
}
_ => {
// No need to push in other cases.
- are_inner_types_recursive(tcx, sp, seen, representable_cache, ty)
+ are_inner_types_recursive(
+ tcx,
+ sp,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ force_result,
+ )
}
}
}
pub(in super::super) fn select_obligations_where_possible(
&self,
fallback_has_occurred: bool,
- mutate_fullfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+ mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
) {
let result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
if let Err(mut errors) = result {
- mutate_fullfillment_errors(&mut errors);
+ mutate_fulfillment_errors(&mut errors);
self.report_fulfillment_errors(&errors, self.inh.body_id, fallback_has_occurred);
}
}
error.obligation.predicate.kind().skip_binder()
{
// If any of the type arguments in this path segment caused the
- // `FullfillmentError`, point at its span (#61860).
+ // `FulfillmentError`, point at its span (#61860).
for arg in path
.segments
.iter()
impl<'tcx> CheckWfFcxBuilder<'tcx> {
fn with_fcx<F>(&mut self, f: F)
where
- F: for<'b> FnOnce(&FnCtxt<'b, 'tcx>, TyCtxt<'tcx>) -> Vec<Ty<'tcx>>,
+ F: for<'b> FnOnce(&FnCtxt<'b, 'tcx>) -> Vec<Ty<'tcx>>,
{
let id = self.id;
let span = self.span;
// empty `param_env`.
check_false_global_bounds(&fcx, span, id);
}
- let wf_tys = f(&fcx, fcx.tcx);
+ let wf_tys = f(&fcx);
fcx.select_all_obligations_or_error();
fcx.regionck_item(id, span, &wf_tys);
});
debug!("check_associated_item: {:?}", item_id);
let code = ObligationCauseCode::MiscObligation;
- for_id(tcx, item_id, span).with_fcx(|fcx, tcx| {
+ for_id(tcx, item_id, span).with_fcx(|fcx| {
let item = fcx.tcx.associated_item(fcx.tcx.hir().local_def_id(item_id));
let (mut implied_bounds, self_ty) = match item.container {
let sig = fcx.normalize_associated_types_in(span, sig);
let hir_sig = sig_if_method.expect("bad signature for method");
check_fn_or_method(
- tcx,
fcx,
item.ident.span,
sig,
) where
F: for<'fcx> FnMut(&FnCtxt<'fcx, 'tcx>) -> Vec<AdtVariant<'tcx>>,
{
- for_item(tcx, item).with_fcx(|fcx, fcx_tcx| {
+ for_item(tcx, item).with_fcx(|fcx| {
let variants = lookup_fields(fcx);
- let packed = fcx.tcx.adt_def(item.def_id).repr.packed();
+ let packed = tcx.adt_def(item.def_id).repr.packed();
for variant in &variants {
// For DST, or when drop needs to copy things around, all
let needs_drop_copy = || {
packed && {
let ty = variant.fields.last().unwrap().ty;
- let ty = fcx.tcx.erase_regions(ty);
+ let ty = tcx.erase_regions(ty);
if ty.needs_infer() {
- fcx_tcx
- .sess
+ tcx.sess
.delay_span_bug(item.span, &format!("inference variables in {:?}", ty));
// Just treat unresolved type expression as if it needs drop.
true
} else {
- ty.needs_drop(fcx_tcx, fcx_tcx.param_env(item.def_id))
+ ty.needs_drop(tcx, tcx.param_env(item.def_id))
}
}
};
let last = idx == variant.fields.len() - 1;
fcx.register_bound(
field.ty,
- fcx.tcx.require_lang_item(LangItem::Sized, None),
+ tcx.require_lang_item(LangItem::Sized, None),
traits::ObligationCause::new(
field.span,
fcx.body_id,
// Explicit `enum` discriminant values must const-evaluate successfully.
if let Some(discr_def_id) = variant.explicit_discr {
- let discr_substs =
- InternalSubsts::identity_for_item(fcx.tcx, discr_def_id.to_def_id());
+ let discr_substs = InternalSubsts::identity_for_item(tcx, discr_def_id.to_def_id());
let cause = traits::ObligationCause::new(
- fcx.tcx.def_span(discr_def_id),
+ tcx.def_span(discr_def_id),
fcx.body_id,
traits::MiscObligation,
);
ty::WithOptConstParam::unknown(discr_def_id.to_def_id()),
discr_substs,
)
- .to_predicate(fcx.tcx),
+ .to_predicate(tcx),
));
}
}
- check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
+ check_where_clauses(fcx, item.span, item.def_id.to_def_id(), None);
// No implied bounds in a struct definition.
vec![]
}
}
- for_item(tcx, item).with_fcx(|fcx, _| {
- check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
+ // FIXME: this shouldn't use an `FnCtxt` at all.
+ for_item(tcx, item).with_fcx(|fcx| {
+ check_where_clauses(fcx, item.span, item.def_id.to_def_id(), None);
vec![]
});
span: Span,
decl: &hir::FnDecl<'_>,
) {
- for_id(tcx, item_id, span).with_fcx(|fcx, tcx| {
- let def_id = fcx.tcx.hir().local_def_id(item_id);
- let sig = fcx.tcx.fn_sig(def_id);
+ for_id(tcx, item_id, span).with_fcx(|fcx| {
+ let def_id = tcx.hir().local_def_id(item_id);
+ let sig = tcx.fn_sig(def_id);
let sig = fcx.normalize_associated_types_in(span, sig);
let mut implied_bounds = vec![];
- check_fn_or_method(
- tcx,
- fcx,
- ident.span,
- sig,
- decl,
- def_id.to_def_id(),
- &mut implied_bounds,
- );
+ check_fn_or_method(fcx, ident.span, sig, decl, def_id.to_def_id(), &mut implied_bounds);
implied_bounds
})
}
fn check_item_type(tcx: TyCtxt<'_>, item_id: hir::HirId, ty_span: Span, allow_foreign_ty: bool) {
debug!("check_item_type: {:?}", item_id);
- for_id(tcx, item_id, ty_span).with_fcx(|fcx, tcx| {
+ for_id(tcx, item_id, ty_span).with_fcx(|fcx| {
let ty = tcx.type_of(tcx.hir().local_def_id(item_id));
let item_ty = fcx.normalize_associated_types_in(ty_span, ty);
if forbid_unsized {
fcx.register_bound(
item_ty,
- fcx.tcx.require_lang_item(LangItem::Sized, None),
+ tcx.require_lang_item(LangItem::Sized, None),
traits::ObligationCause::new(ty_span, fcx.body_id, traits::MiscObligation),
);
}
) {
debug!("check_impl: {:?}", item);
- for_item(tcx, item).with_fcx(|fcx, tcx| {
+ for_item(tcx, item).with_fcx(|fcx| {
match *ast_trait_ref {
Some(ref ast_trait_ref) => {
// `#[rustc_reservation_impl]` impls are not real impls and
// therefore don't need to be WF (the trait's `Self: Trait` predicate
// won't hold).
- let trait_ref = fcx.tcx.impl_trait_ref(item.def_id).unwrap();
+ let trait_ref = tcx.impl_trait_ref(item.def_id).unwrap();
let trait_ref =
fcx.normalize_associated_types_in(ast_trait_ref.path.span, trait_ref);
let obligations = traits::wf::trait_obligations(
}
}
None => {
- let self_ty = fcx.tcx.type_of(item.def_id);
+ let self_ty = tcx.type_of(item.def_id);
let self_ty = fcx.normalize_associated_types_in(item.span, self_ty);
fcx.register_wf_obligation(
self_ty.into(),
}
}
- check_where_clauses(tcx, fcx, item.span, item.def_id.to_def_id(), None);
+ check_where_clauses(fcx, item.span, item.def_id.to_def_id(), None);
fcx.impl_implied_bounds(item.def_id.to_def_id(), item.span)
});
/// Checks where-clauses and inline bounds that are declared on `def_id`.
fn check_where_clauses<'tcx, 'fcx>(
- tcx: TyCtxt<'tcx>,
fcx: &FnCtxt<'fcx, 'tcx>,
span: Span,
def_id: DefId,
return_ty: Option<(Ty<'tcx>, Span)>,
) {
debug!("check_where_clauses(def_id={:?}, return_ty={:?})", def_id, return_ty);
+ let tcx = fcx.tcx;
- let predicates = fcx.tcx.predicates_of(def_id);
+ let predicates = tcx.predicates_of(def_id);
let generics = tcx.generics_of(def_id);
let is_our_default = |def: &ty::GenericParamDef| match def.kind {
match param.kind {
GenericParamDefKind::Type { .. } => {
if is_our_default(¶m) {
- let ty = fcx.tcx.type_of(param.def_id);
+ let ty = tcx.type_of(param.def_id);
// Ignore dependent defaults -- that is, where the default of one type
// parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
// be sure if it will error or not as user might always specify the other.
if !ty.needs_subst() {
fcx.register_wf_obligation(
ty.into(),
- fcx.tcx.def_span(param.def_id),
+ tcx.def_span(param.def_id),
ObligationCauseCode::MiscObligation,
);
}
let default_ct = tcx.const_param_default(param.def_id);
fcx.register_wf_obligation(
default_ct.into(),
- fcx.tcx.def_span(param.def_id),
+ tcx.def_span(param.def_id),
ObligationCauseCode::MiscObligation,
);
}
// For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`.
//
// First we build the defaulted substitution.
- let substs = InternalSubsts::for_item(fcx.tcx, def_id, |param, _| {
+ let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
match param.kind {
GenericParamDefKind::Lifetime => {
// All regions are identity.
- fcx.tcx.mk_param_from_def(param)
+ tcx.mk_param_from_def(param)
}
GenericParamDefKind::Type { .. } => {
// If the param has a default, ...
if is_our_default(param) {
- let default_ty = fcx.tcx.type_of(param.def_id);
+ let default_ty = tcx.type_of(param.def_id);
// ... and it's not a dependent default, ...
if !default_ty.needs_subst() {
// ... then substitute it with the default.
}
}
- fcx.tcx.mk_param_from_def(param)
+ tcx.mk_param_from_def(param)
}
GenericParamDefKind::Const { .. } => {
// FIXME(const_generics_defaults): I(@lcnr) feel like always
}
}
- fcx.tcx.mk_param_from_def(param)
+ tcx.mk_param_from_def(param)
}
}
});
}
let mut param_count = CountParams::default();
let has_region = pred.visit_with(&mut param_count).is_break();
- let substituted_pred = pred.subst(fcx.tcx, substs);
+ let substituted_pred = pred.subst(tcx, substs);
// Don't check non-defaulted params, dependent defaults (including lifetimes)
// or preds with multiple params.
if substituted_pred.has_param_types_or_consts()
traits::Obligation::new(cause, fcx.param_env, pred)
});
- let predicates = predicates.instantiate_identity(fcx.tcx);
+ let predicates = predicates.instantiate_identity(tcx);
if let Some((mut return_ty, span)) = return_ty {
if return_ty.has_infer_types_or_consts() {
fcx.select_obligations_where_possible(false, |_| {});
return_ty = fcx.resolve_vars_if_possible(return_ty);
}
- check_opaque_types(tcx, fcx, def_id.expect_local(), span, return_ty);
+ check_opaque_types(fcx, def_id.expect_local(), span, return_ty);
}
let predicates = fcx.normalize_associated_types_in(span, predicates);
}
fn check_fn_or_method<'fcx, 'tcx>(
- tcx: TyCtxt<'tcx>,
fcx: &FnCtxt<'fcx, 'tcx>,
span: Span,
sig: ty::PolyFnSig<'tcx>,
// FIXME(#25759) return types should not be implied bounds
implied_bounds.push(sig.output());
- check_where_clauses(tcx, fcx, span, def_id, Some((sig.output(), hir_decl.output.span())));
+ check_where_clauses(fcx, span, def_id, Some((sig.output(), hir_decl.output.span())));
}
/// Checks "defining uses" of opaque `impl Trait` types to ensure that they meet the restrictions
/// ```
///
fn check_opaque_types<'fcx, 'tcx>(
- tcx: TyCtxt<'tcx>,
fcx: &FnCtxt<'fcx, 'tcx>,
fn_def_id: LocalDefId,
span: Span,
ty: Ty<'tcx>,
) {
- trace!("check_opaque_types(ty={:?})", ty);
+ trace!("check_opaque_types(fn_def_id={:?}, ty={:?})", fn_def_id, ty);
+ let tcx = fcx.tcx;
+
ty.fold_with(&mut ty::fold::BottomUpFolder {
- tcx: fcx.tcx,
+ tcx,
ty_op: |ty| {
if let ty::Opaque(def_id, substs) = *ty.kind() {
trace!("check_opaque_types: opaque_ty, {:?}, {:?}", def_id, substs);
let mut inline_span = None;
let mut link_ordinal_span = None;
let mut no_sanitize_span = None;
- let mut no_coverage_feature_enabled = false;
- let mut no_coverage_attr = None;
for attr in attrs.iter() {
if tcx.sess.check_name(attr, sym::cold) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
} else if tcx.sess.check_name(attr, sym::no_mangle) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
- } else if attr.has_name(sym::feature) {
- if let Some(list) = attr.meta_item_list() {
- if list.iter().any(|nested_meta_item| nested_meta_item.has_name(sym::no_coverage)) {
- tcx.sess.mark_attr_used(attr);
- no_coverage_feature_enabled = true;
- }
- }
} else if tcx.sess.check_name(attr, sym::no_coverage) {
- no_coverage_attr = Some(attr);
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
} else if tcx.sess.check_name(attr, sym::rustc_std_internal_symbol) {
codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
} else if tcx.sess.check_name(attr, sym::used) {
}
}
- if let Some(no_coverage_attr) = no_coverage_attr {
- if tcx.sess.features_untracked().no_coverage || no_coverage_feature_enabled {
- codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE
- } else {
- let mut err = feature_err(
- &tcx.sess.parse_sess,
- sym::no_coverage,
- no_coverage_attr.span,
- "the `#[no_coverage]` attribute is an experimental feature",
- );
- if tcx.sess.parse_sess.unstable_features.is_nightly_build() {
- err.help("or, alternatively, add `#[feature(no_coverage)]` to the function");
- }
- err.emit();
- }
- }
-
codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| {
if !attr.has_name(sym::inline) {
return ia;
# This is mostly useful for tools; if you have changes to `compiler/` they will be ignored.
#
# You can set this to "if-unchanged" to only download if `compiler/` has not been modified.
-#
-# FIXME(#82739): currently, this also uses the downloaded compiler for stage0, but that causes unnecessary rebuilds.
#download-rustc = false
# Number of codegen units to use for each compiler invocation. A value of 0
/// See its documentation for more.
///
/// [`into_keys`]: BTreeMap::into_keys
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoKeys<K, V> {
inner: IntoIter<K, V>,
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.inner.iter().map(|(key, _)| key)).finish()
/// See its documentation for more.
///
/// [`into_values`]: BTreeMap::into_values
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoValues<K, V> {
inner: IntoIter<K, V>,
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
/// # Examples
///
/// ```
- /// #![feature(map_into_keys_values)]
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// assert_eq!(keys, [1, 2]);
/// ```
#[inline]
- #[unstable(feature = "map_into_keys_values", issue = "75294")]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_keys(self) -> IntoKeys<K, V> {
IntoKeys { inner: self.into_iter() }
}
/// # Examples
///
/// ```
- /// #![feature(map_into_keys_values)]
/// use std::collections::BTreeMap;
///
/// let mut a = BTreeMap::new();
/// assert_eq!(values, ["hello", "goodbye"]);
/// ```
#[inline]
- #[unstable(feature = "map_into_keys_values", issue = "75294")]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_values(self) -> IntoValues<K, V> {
IntoValues { inner: self.into_iter() }
}
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> Iterator for IntoKeys<K, V> {
type Item = K;
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> DoubleEndedIterator for IntoKeys<K, V> {
fn next_back(&mut self) -> Option<K> {
self.inner.next_back().map(|(k, _)| k)
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> FusedIterator for IntoKeys<K, V> {}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> Iterator for IntoValues<K, V> {
type Item = V;
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> DoubleEndedIterator for IntoValues<K, V> {
fn next_back(&mut self) -> Option<V> {
self.inner.next_back().map(|(_, v)| v)
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> ExactSizeIterator for IntoValues<K, V> {
fn len(&self) -> usize {
self.inner.len()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> FusedIterator for IntoValues<K, V> {}
#[stable(feature = "btree_range", since = "1.17.0")]
#[stable(feature = "from_for_ptrs", since = "1.6.0")]
impl<T> From<T> for Rc<T> {
+ /// Converts a generic type `T` into an `Rc<T>`
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Example
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let x = 5;
+ /// let rc = Rc::new(5);
+ ///
+ /// assert_eq!(Rc::from(x), rc);
+ /// ```
fn from(t: T) -> Self {
Rc::new(t)
}
//
// This should never be implemented by hand.
#[doc(hidden)]
- #[cfg_attr(not(bootstrap), feature(no_coverage))]
- #[cfg_attr(not(bootstrap), no_coverage)]
+ #[cfg_attr(not(bootstrap), no_coverage)] // rust-lang/rust#84605
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
fn assert_receiver_is_total_eq(&self) {}
/// Derive macro generating an impl of the trait `Eq`.
#[rustc_builtin_macro]
#[stable(feature = "builtin_macro_prelude", since = "1.38.0")]
-#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match)]
+#[allow_internal_unstable(core_intrinsics, derive_eq, structural_match, no_coverage)]
pub macro Eq($item:item) {
/* compiler built-in */
}
/// macro, which panics when it is executed, it is *undefined behavior* to
/// reach code marked with this function.
///
- /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`](crate::hint::unreachable_unchecked).
+ /// The stabilized version of this intrinsic is [`core::hint::unreachable_unchecked`].
#[rustc_const_unstable(feature = "const_unreachable_unchecked", issue = "53188")]
pub fn unreachable() -> !;
/// More specifically, this is the offset in bytes between successive
/// items of the same type, including alignment padding.
///
- /// The stabilized version of this intrinsic is [`core::mem::size_of`](crate::mem::size_of).
+ /// The stabilized version of this intrinsic is [`core::mem::size_of`].
#[rustc_const_stable(feature = "const_size_of", since = "1.40.0")]
pub fn size_of<T>() -> usize;
/// The minimum alignment of a type.
///
- /// The stabilized version of this intrinsic is [`core::mem::align_of`](crate::mem::align_of).
+ /// The stabilized version of this intrinsic is [`core::mem::align_of`].
#[rustc_const_stable(feature = "const_min_align_of", since = "1.40.0")]
pub fn min_align_of<T>() -> usize;
/// The preferred alignment of a type.
pub fn size_of_val<T: ?Sized>(_: *const T) -> usize;
/// The required alignment of the referenced value.
///
- /// The stabilized version of this intrinsic is [`core::mem::align_of_val`](crate::mem::align_of_val).
+ /// The stabilized version of this intrinsic is [`core::mem::align_of_val`].
#[rustc_const_unstable(feature = "const_align_of_val", issue = "46571")]
pub fn min_align_of_val<T: ?Sized>(_: *const T) -> usize;
/// Gets a static string slice containing the name of a type.
///
- /// The stabilized version of this intrinsic is [`core::any::type_name`](crate::any::type_name).
+ /// The stabilized version of this intrinsic is [`core::any::type_name`].
#[rustc_const_unstable(feature = "const_type_name", issue = "63084")]
pub fn type_name<T: ?Sized>() -> &'static str;
/// function will return the same value for a type regardless of whichever
/// crate it is invoked in.
///
- /// The stabilized version of this intrinsic is [`core::any::TypeId::of`](crate::any::TypeId::of).
+ /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
#[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
pub fn type_id<T: ?Sized + 'static>() -> u64;
/// Gets a reference to a static `Location` indicating where it was called.
///
- /// Consider using [`core::panic::Location::caller`](crate::panic::Location::caller) instead.
+ /// Consider using [`core::panic::Location::caller`] instead.
#[rustc_const_unstable(feature = "const_caller_location", issue = "76156")]
pub fn caller_location() -> &'static crate::panic::Location<'static>;
/// Performs a volatile load from the `src` pointer.
///
- /// The stabilized version of this intrinsic is [`core::ptr::read_volatile`](crate::ptr::read_volatile).
+ /// The stabilized version of this intrinsic is [`core::ptr::read_volatile`].
pub fn volatile_load<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
///
- /// The stabilized version of this intrinsic is [`core::ptr::write_volatile`](crate::ptr::write_volatile).
+ /// The stabilized version of this intrinsic is [`core::ptr::write_volatile`].
pub fn volatile_store<T>(dst: *mut T, val: T);
/// Performs a volatile load from the `src` pointer
/// Returns the value of the discriminant for the variant in 'v';
/// if `T` has no discriminant, returns `0`.
///
- /// The stabilized version of this intrinsic is [`core::mem::discriminant`](crate::mem::discriminant).
+ /// The stabilized version of this intrinsic is [`core::mem::discriminant`].
#[rustc_const_unstable(feature = "const_discriminant", issue = "69821")]
pub fn discriminant_value<T>(v: &T) -> <T as DiscriminantKind>::Discriminant;
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::forward_unchecked(a, n)` is equivalent to `Step::forward(a, n)`
- #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
Step::forward(start, count)
}
/// For any `a` and `n`, where no overflow occurs:
///
/// * `Step::backward_unchecked(a, n)` is equivalent to `Step::backward(a, n)`
- #[unstable(feature = "unchecked_math", reason = "niche optimization path", issue = "none")]
+ #[unstable(feature = "step_trait_ext", reason = "recently added", issue = "42168")]
unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
Step::backward(start, count)
}
#![feature(const_float_classify)]
#![feature(const_float_bits_conv)]
#![feature(const_int_unchecked_arith)]
+#![feature(const_inherent_unchecked_arith)]
#![feature(const_mut_refs)]
#![feature(const_refs_to_cell)]
#![feature(const_panic)]
#![feature(const_caller_location)]
#![feature(slice_ptr_get)]
#![feature(no_niche)] // rust-lang/rust#68303
+#![cfg_attr(not(bootstrap), feature(no_coverage))] // rust-lang/rust#84605
#![feature(int_error_matching)]
#![deny(unsafe_op_in_unsafe_fn)]
+// allow using `core::` in intra-doc links
+#[allow(unused_extern_crates)]
+extern crate self as core;
+
#[prelude_import]
#[allow(unused)]
use prelude::v1::*;
// SAFETY:
// * The caller guarantees that all elements of the array are initialized
// * `MaybeUninit<T>` and T are guaranteed to have the same layout
- // * MaybeUnint does not drop, so there are no double-frees
+ // * `MaybeUninit` does not drop, so there are no double-frees
// And thus the conversion is safe
unsafe {
intrinsics::assert_inhabited::<[T; N]>();
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_add`.
unsafe { intrinsics::unchecked_add(self, rhs) }
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_sub`.
unsafe { intrinsics::unchecked_sub(self, rhs) }
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_mul`.
unsafe { intrinsics::unchecked_mul(self, rhs) }
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_add`.
unsafe { intrinsics::unchecked_add(self, rhs) }
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_sub`.
unsafe { intrinsics::unchecked_sub(self, rhs) }
#[unstable(
feature = "unchecked_math",
reason = "niche optimization path",
- issue = "none",
+ issue = "85122",
)]
#[must_use = "this returns the result of the operation, \
without modifying the original"]
+ #[rustc_const_unstable(feature = "const_inherent_unchecked_arith", issue = "85122")]
#[inline(always)]
- pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
+ pub const unsafe fn unchecked_mul(self, rhs: Self) -> Self {
// SAFETY: the caller must uphold the safety contract for
// `unchecked_mul`.
unsafe { intrinsics::unchecked_mul(self, rhs) }
/// # Examples
///
/// ```
- /// #![feature(map_into_keys_values)]
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// let vec: Vec<&str> = map.into_keys().collect();
/// ```
#[inline]
- #[unstable(feature = "map_into_keys_values", issue = "75294")]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_keys(self) -> IntoKeys<K, V> {
IntoKeys { inner: self.into_iter() }
}
/// # Examples
///
/// ```
- /// #![feature(map_into_keys_values)]
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// let vec: Vec<i32> = map.into_values().collect();
/// ```
#[inline]
- #[unstable(feature = "map_into_keys_values", issue = "75294")]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub fn into_values(self) -> IntoValues<K, V> {
IntoValues { inner: self.into_iter() }
}
/// # Example
///
/// ```
-/// #![feature(map_into_keys_values)]
-///
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// let iter_keys = map.into_keys();
/// ```
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoKeys<K, V> {
inner: IntoIter<K, V>,
}
/// # Example
///
/// ```
-/// #![feature(map_into_keys_values)]
-///
/// use std::collections::HashMap;
///
/// let mut map = HashMap::new();
/// map.insert("a", 1);
/// let iter_keys = map.into_values();
/// ```
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
pub struct IntoValues<K, V> {
inner: IntoIter<K, V>,
}
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> Iterator for IntoKeys<K, V> {
type Item = K;
self.inner.size_hint()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> FusedIterator for IntoKeys<K, V> {}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K: Debug, V> fmt::Debug for IntoKeys<K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.inner.iter().map(|(k, _)| k)).finish()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> Iterator for IntoValues<K, V> {
type Item = V;
self.inner.size_hint()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> ExactSizeIterator for IntoValues<K, V> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V> FusedIterator for IntoValues<K, V> {}
-#[unstable(feature = "map_into_keys_values", issue = "75294")]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V: Debug> fmt::Debug for IntoValues<K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.inner.iter().map(|(_, v)| v)).finish()
self, Error, ErrorKind, IntoInnerError, IoSlice, Seek, SeekFrom, Write, DEFAULT_BUF_SIZE,
};
use crate::mem;
+use crate::ptr;
/// Wraps a writer and buffers its output.
///
#[stable(feature = "rust1", since = "1.0.0")]
pub struct BufWriter<W: Write> {
inner: Option<W>,
+ // The buffer. Avoid using this like a normal `Vec` in common code paths.
+ // That is, don't use `buf.push`, `buf.extend_from_slice`, or any other
+ // methods that require bounds checking or the like. This makes an enormous
+ // difference to performance (we may want to stop using a `Vec` entirely).
buf: Vec<u8>,
// #30888: If the inner writer panics in a call to write, we don't want to
// write the buffered data a second time in BufWriter's destructor. This
/// data. Writes as much as possible without exceeding capacity. Returns
/// the number of bytes written.
pub(super) fn write_to_buf(&mut self, buf: &[u8]) -> usize {
- let available = self.buf.capacity() - self.buf.len();
+ let available = self.spare_capacity();
let amt_to_buffer = available.min(buf.len());
- self.buf.extend_from_slice(&buf[..amt_to_buffer]);
+
+ // SAFETY: `amt_to_buffer` is <= buffer's spare capacity by construction.
+ unsafe {
+ self.write_to_buffer_unchecked(&buf[..amt_to_buffer]);
+ }
+
amt_to_buffer
}
let buf = if !self.panicked { Ok(buf) } else { Err(WriterPanicked { buf }) };
(self.inner.take().unwrap(), buf)
}
+
+ // Ensure this function does not get inlined into `write`, so that it
+ // remains inlineable and its common path remains as short as possible.
+ // If this function ends up being called frequently relative to `write`,
+ // it's likely a sign that the client is using an improperly sized buffer
+ // or their write patterns are somewhat pathological.
+ #[cold]
+ #[inline(never)]
+ fn write_cold(&mut self, buf: &[u8]) -> io::Result<usize> {
+ if buf.len() > self.spare_capacity() {
+ self.flush_buf()?;
+ }
+
+ // Why not len > capacity? To avoid a needless trip through the buffer when the input
+ // exactly fills it. We'd just need to flush it to the underlying writer anyway.
+ if buf.len() >= self.buf.capacity() {
+ self.panicked = true;
+ let r = self.get_mut().write(buf);
+ self.panicked = false;
+ r
+ } else {
+ // Write to the buffer. In this case, we write to the buffer even if it fills it
+ // exactly. Doing otherwise would mean flushing the buffer, then writing this
+ // input to the inner writer, which in many cases would be a worse strategy.
+
+ // SAFETY: There was either enough spare capacity already, or there wasn't and we
+ // flushed the buffer to ensure that there is. In the latter case, we know that there
+ // is because flushing ensured that our entire buffer is spare capacity, and we entered
+ // this block because the input buffer length is less than that capacity. In either
+ // case, it's safe to write the input buffer to our buffer.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(buf.len())
+ }
+ }
+
+ // Ensure this function does not get inlined into `write_all`, so that it
+ // remains inlineable and its common path remains as short as possible.
+ // If this function ends up being called frequently relative to `write_all`,
+ // it's likely a sign that the client is using an improperly sized buffer
+ // or their write patterns are somewhat pathological.
+ #[cold]
+ #[inline(never)]
+ fn write_all_cold(&mut self, buf: &[u8]) -> io::Result<()> {
+ // Normally, `write_all` just calls `write` in a loop. We can do better
+ // by calling `self.get_mut().write_all()` directly, which avoids
+ // round trips through the buffer in the event of a series of partial
+ // writes in some circumstances.
+
+ if buf.len() > self.spare_capacity() {
+ self.flush_buf()?;
+ }
+
+ // Why not len > capacity? To avoid a needless trip through the buffer when the input
+ // exactly fills it. We'd just need to flush it to the underlying writer anyway.
+ if buf.len() >= self.buf.capacity() {
+ self.panicked = true;
+ let r = self.get_mut().write_all(buf);
+ self.panicked = false;
+ r
+ } else {
+ // Write to the buffer. In this case, we write to the buffer even if it fills it
+ // exactly. Doing otherwise would mean flushing the buffer, then writing this
+ // input to the inner writer, which in many cases would be a worse strategy.
+
+ // SAFETY: There was either enough spare capacity already, or there wasn't and we
+ // flushed the buffer to ensure that there is. In the latter case, we know that there
+ // is because flushing ensured that our entire buffer is spare capacity, and we entered
+ // this block because the input buffer length is less than that capacity. In either
+ // case, it's safe to write the input buffer to our buffer.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ Ok(())
+ }
+ }
+
+ // SAFETY: Requires `buf.len() <= self.buf.capacity() - self.buf.len()`,
+ // i.e., that input buffer length is less than or equal to spare capacity.
+ #[inline]
+ unsafe fn write_to_buffer_unchecked(&mut self, buf: &[u8]) {
+ debug_assert!(buf.len() <= self.spare_capacity());
+ let old_len = self.buf.len();
+ let buf_len = buf.len();
+ let src = buf.as_ptr();
+ let dst = self.buf.as_mut_ptr().add(old_len);
+ ptr::copy_nonoverlapping(src, dst, buf_len);
+ self.buf.set_len(old_len + buf_len);
+ }
+
+ #[inline]
+ fn spare_capacity(&self) -> usize {
+ self.buf.capacity() - self.buf.len()
+ }
}
#[unstable(feature = "bufwriter_into_raw_parts", issue = "80690")]
#[stable(feature = "rust1", since = "1.0.0")]
impl<W: Write> Write for BufWriter<W> {
+ #[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- if self.buf.len() + buf.len() > self.buf.capacity() {
- self.flush_buf()?;
- }
- // FIXME: Why no len > capacity? Why not buffer len == capacity? #72919
- if buf.len() >= self.buf.capacity() {
- self.panicked = true;
- let r = self.get_mut().write(buf);
- self.panicked = false;
- r
- } else {
- self.buf.extend_from_slice(buf);
+ // Use < instead of <= to avoid a needless trip through the buffer in some cases.
+ // See `write_cold` for details.
+ if buf.len() < self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
Ok(buf.len())
+ } else {
+ self.write_cold(buf)
}
}
+ #[inline]
fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
- // Normally, `write_all` just calls `write` in a loop. We can do better
- // by calling `self.get_mut().write_all()` directly, which avoids
- // round trips through the buffer in the event of a series of partial
- // writes in some circumstances.
- if self.buf.len() + buf.len() > self.buf.capacity() {
- self.flush_buf()?;
- }
- // FIXME: Why no len > capacity? Why not buffer len == capacity? #72919
- if buf.len() >= self.buf.capacity() {
- self.panicked = true;
- let r = self.get_mut().write_all(buf);
- self.panicked = false;
- r
- } else {
- self.buf.extend_from_slice(buf);
+ // Use < instead of <= to avoid a needless trip through the buffer in some cases.
+ // See `write_all_cold` for details.
+ if buf.len() < self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
Ok(())
+ } else {
+ self.write_all_cold(buf)
}
}
fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ // FIXME: Consider applying `#[inline]` / `#[inline(never)]` optimizations already applied
+ // to `write` and `write_all`. The performance benefits can be significant. See #79930.
if self.get_ref().is_write_vectored() {
- let total_len = bufs.iter().map(|b| b.len()).sum::<usize>();
- if self.buf.len() + total_len > self.buf.capacity() {
+ // We have to handle the possibility that the total length of the buffers overflows
+ // `usize` (even though this can only happen if multiple `IoSlice`s reference the
+ // same underlying buffer, as otherwise the buffers wouldn't fit in memory). If the
+ // computation overflows, then surely the input cannot fit in our buffer, so we forward
+ // to the inner writer's `write_vectored` method to let it handle it appropriately.
+ let saturated_total_len =
+ bufs.iter().fold(0usize, |acc, b| acc.saturating_add(b.len()));
+
+ if saturated_total_len > self.spare_capacity() {
+ // Flush if the total length of the input exceeds our buffer's spare capacity.
+ // If we would have overflowed, this condition also holds, and we need to flush.
self.flush_buf()?;
}
- if total_len >= self.buf.capacity() {
+
+ if saturated_total_len >= self.buf.capacity() {
+ // Forward to our inner writer if the total length of the input is greater than or
+ // equal to our buffer capacity. If we would have overflowed, this condition also
+ // holds, and we punt to the inner writer.
self.panicked = true;
let r = self.get_mut().write_vectored(bufs);
self.panicked = false;
r
} else {
- bufs.iter().for_each(|b| self.buf.extend_from_slice(b));
- Ok(total_len)
+ // `saturated_total_len < self.buf.capacity()` implies that we did not saturate.
+
+ // SAFETY: We checked whether or not the spare capacity was large enough above. If
+ // it was, then we're safe already. If it wasn't, we flushed, making sufficient
+ // room for any input <= the buffer size, which includes this input.
+ unsafe {
+ bufs.iter().for_each(|b| self.write_to_buffer_unchecked(b));
+ };
+
+ Ok(saturated_total_len)
}
} else {
let mut iter = bufs.iter();
let mut total_written = if let Some(buf) = iter.by_ref().find(|&buf| !buf.is_empty()) {
// This is the first non-empty slice to write, so if it does
// not fit in the buffer, we still get to flush and proceed.
- if self.buf.len() + buf.len() > self.buf.capacity() {
+ if buf.len() > self.spare_capacity() {
self.flush_buf()?;
}
if buf.len() >= self.buf.capacity() {
self.panicked = false;
return r;
} else {
- self.buf.extend_from_slice(buf);
+ // SAFETY: We checked whether or not the spare capacity was large enough above.
+ // If it was, then we're safe already. If it wasn't, we flushed, making
+ // sufficient room for any input <= the buffer size, which includes this input.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
buf.len()
}
} else {
};
debug_assert!(total_written != 0);
for buf in iter {
- if self.buf.len() + buf.len() > self.buf.capacity() {
- break;
- } else {
- self.buf.extend_from_slice(buf);
+ if buf.len() <= self.spare_capacity() {
+ // SAFETY: safe by above conditional.
+ unsafe {
+ self.write_to_buffer_unchecked(buf);
+ }
+
+ // This cannot overflow `usize`. If we are here, we've written all of the bytes
+ // so far to our buffer, and we've ensured that we never exceed the buffer's
+ // capacity. Therefore, `total_written` <= `self.buf.capacity()` <= `usize::MAX`.
total_written += buf.len();
+ } else {
+ break;
}
}
Ok(total_written)
///
/// # Errors
///
+ /// Seeking can fail, for example because it might involve flushing a buffer.
+ ///
/// Seeking to a negative offset is considered an error.
#[stable(feature = "rust1", since = "1.0.0")]
fn seek(&mut self, pos: SeekFrom) -> Result<u64>;
+ /// Rewind to the beginning of a stream.
+ ///
+ /// This is a convenience method, equivalent to `seek(SeekFrom::Start(0))`.
+ ///
+ /// # Errors
+ ///
+ /// Rewinding can fail, for example because it might involve flushing a buffer.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// #![feature(seek_rewind)]
+ /// use std::io::{Read, Seek, Write};
+ /// use std::fs::OpenOptions;
+ ///
+ /// let mut f = OpenOptions::new()
+ /// .write(true)
+ /// .read(true)
+ /// .create(true)
+ /// .open("foo.txt").unwrap();
+ ///
+ /// let hello = "Hello!\n";
+ /// write!(f, "{}", hello).unwrap();
+ /// f.rewind().unwrap();
+ ///
+ /// let mut buf = String::new();
+ /// f.read_to_string(&mut buf).unwrap();
+ /// assert_eq!(&buf, hello);
+ /// ```
+ #[unstable(feature = "seek_rewind", issue = "85149")]
+ fn rewind(&mut self) -> Result<()> {
+ self.seek(SeekFrom::Start(0))?;
+ Ok(())
+ }
+
/// Returns the length of this stream (in bytes).
///
/// This method is implemented using up to three seek operations. If this
#![feature(const_cstr_unchecked)]
#![feature(const_fn_floating_point_arithmetic)]
#![feature(const_fn_transmute)]
-#![feature(const_fn)]
#![feature(const_fn_fn_ptr_basics)]
#![feature(const_io_structs)]
#![feature(const_ip)]
#[stable(feature = "pthread_t", since = "1.8.0")]
pub type pthread_t = c_ulong;
-#[doc(inline)]
#[stable(feature = "raw_ext", since = "1.1.0")]
pub type blkcnt_t = u64;
#[stable(feature = "raw_ext", since = "1.1.0")]
extern "C" fn entry(p1: u64, p2: u64, p3: u64, secondary: bool, p4: u64, p5: u64) -> EntryReturn {
// FIXME: how to support TLS in library mode?
let tls = Box::new(tls::Tls::new());
- let _tls_guard = unsafe { tls.activate() };
+ let tls_guard = unsafe { tls.activate() };
if secondary {
- super::thread::Thread::entry();
+ let join_notifier = super::thread::Thread::entry();
+ drop(tls_guard);
+ drop(join_notifier);
EntryReturn(0, 0)
} else {
+++ /dev/null
-mod sync_bitset;
-
-use self::sync_bitset::*;
-use crate::cell::Cell;
-use crate::mem;
-use crate::num::NonZeroUsize;
-use crate::ptr;
-use crate::sync::atomic::{AtomicUsize, Ordering};
-
-#[cfg(target_pointer_width = "64")]
-const USIZE_BITS: usize = 64;
-const TLS_KEYS: usize = 128; // Same as POSIX minimum
-const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
-
-#[cfg_attr(test, linkage = "available_externally")]
-#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
-static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
-macro_rules! dup {
- ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
- (() $($val:tt)*) => ([$($val),*])
-}
-#[cfg_attr(test, linkage = "available_externally")]
-#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
-static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
-
-extern "C" {
- fn get_tls_ptr() -> *const u8;
- fn set_tls_ptr(tls: *const u8);
-}
-
-#[derive(Copy, Clone)]
-#[repr(C)]
-pub struct Key(NonZeroUsize);
-
-impl Key {
- fn to_index(self) -> usize {
- self.0.get() - 1
- }
-
- fn from_index(index: usize) -> Self {
- Key(NonZeroUsize::new(index + 1).unwrap())
- }
-
- pub fn as_usize(self) -> usize {
- self.0.get()
- }
-
- pub fn from_usize(index: usize) -> Self {
- Key(NonZeroUsize::new(index).unwrap())
- }
-}
-
-#[repr(C)]
-pub struct Tls {
- data: [Cell<*mut u8>; TLS_KEYS],
-}
-
-pub struct ActiveTls<'a> {
- tls: &'a Tls,
-}
-
-impl<'a> Drop for ActiveTls<'a> {
- fn drop(&mut self) {
- let value_with_destructor = |key: usize| {
- let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
- unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
- .map(|dtor| (&self.tls.data[key], dtor))
- };
-
- let mut any_non_null_dtor = true;
- while any_non_null_dtor {
- any_non_null_dtor = false;
- for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
- let value = value.replace(ptr::null_mut());
- if !value.is_null() {
- any_non_null_dtor = true;
- unsafe { dtor(value) }
- }
- }
- }
- }
-}
-
-impl Tls {
- pub fn new() -> Tls {
- Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
- }
-
- pub unsafe fn activate(&self) -> ActiveTls<'_> {
- // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
- unsafe { set_tls_ptr(self as *const Tls as _) };
- ActiveTls { tls: self }
- }
-
- #[allow(unused)]
- pub unsafe fn activate_persistent(self: Box<Self>) {
- // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
- unsafe { set_tls_ptr((&*self) as *const Tls as _) };
- mem::forget(self);
- }
-
- unsafe fn current<'a>() -> &'a Tls {
- // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
- unsafe { &*(get_tls_ptr() as *const Tls) }
- }
-
- pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
- let index = if let Some(index) = TLS_KEY_IN_USE.set() {
- index
- } else {
- rtabort!("TLS limit exceeded")
- };
- TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
- Key::from_index(index)
- }
-
- pub fn set(key: Key, value: *mut u8) {
- let index = key.to_index();
- rtassert!(TLS_KEY_IN_USE.get(index));
- unsafe { Self::current() }.data[index].set(value);
- }
-
- pub fn get(key: Key) -> *mut u8 {
- let index = key.to_index();
- rtassert!(TLS_KEY_IN_USE.get(index));
- unsafe { Self::current() }.data[index].get()
- }
-
- pub fn destroy(key: Key) {
- TLS_KEY_IN_USE.clear(key.to_index());
- }
-}
--- /dev/null
+mod sync_bitset;
+
+use self::sync_bitset::*;
+use crate::cell::Cell;
+use crate::mem;
+use crate::num::NonZeroUsize;
+use crate::ptr;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+#[cfg(target_pointer_width = "64")]
+const USIZE_BITS: usize = 64;
+const TLS_KEYS: usize = 128; // Same as POSIX minimum
+const TLS_KEYS_BITSET_SIZE: usize = (TLS_KEYS + (USIZE_BITS - 1)) / USIZE_BITS;
+
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_KEY_IN_USEE"]
+static TLS_KEY_IN_USE: SyncBitset = SYNC_BITSET_INIT;
+macro_rules! dup {
+ ((* $($exp:tt)*) $($val:tt)*) => (dup!( ($($exp)*) $($val)* $($val)* ));
+ (() $($val:tt)*) => ([$($val),*])
+}
+#[cfg_attr(test, linkage = "available_externally")]
+#[export_name = "_ZN16__rust_internals3std3sys3sgx3abi3tls14TLS_DESTRUCTORE"]
+static TLS_DESTRUCTOR: [AtomicUsize; TLS_KEYS] = dup!((* * * * * * *) (AtomicUsize::new(0)));
+
+extern "C" {
+ fn get_tls_ptr() -> *const u8;
+ fn set_tls_ptr(tls: *const u8);
+}
+
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct Key(NonZeroUsize);
+
+impl Key {
+ fn to_index(self) -> usize {
+ self.0.get() - 1
+ }
+
+ fn from_index(index: usize) -> Self {
+ Key(NonZeroUsize::new(index + 1).unwrap())
+ }
+
+ pub fn as_usize(self) -> usize {
+ self.0.get()
+ }
+
+ pub fn from_usize(index: usize) -> Self {
+ Key(NonZeroUsize::new(index).unwrap())
+ }
+}
+
+#[repr(C)]
+pub struct Tls {
+ data: [Cell<*mut u8>; TLS_KEYS],
+}
+
+pub struct ActiveTls<'a> {
+ tls: &'a Tls,
+}
+
+impl<'a> Drop for ActiveTls<'a> {
+ fn drop(&mut self) {
+ let value_with_destructor = |key: usize| {
+ let ptr = TLS_DESTRUCTOR[key].load(Ordering::Relaxed);
+ unsafe { mem::transmute::<_, Option<unsafe extern "C" fn(*mut u8)>>(ptr) }
+ .map(|dtor| (&self.tls.data[key], dtor))
+ };
+
+ let mut any_non_null_dtor = true;
+ while any_non_null_dtor {
+ any_non_null_dtor = false;
+ for (value, dtor) in TLS_KEY_IN_USE.iter().filter_map(&value_with_destructor) {
+ let value = value.replace(ptr::null_mut());
+ if !value.is_null() {
+ any_non_null_dtor = true;
+ unsafe { dtor(value) }
+ }
+ }
+ }
+ }
+}
+
+impl Tls {
+ pub fn new() -> Tls {
+ Tls { data: dup!((* * * * * * *) (Cell::new(ptr::null_mut()))) }
+ }
+
+ pub unsafe fn activate(&self) -> ActiveTls<'_> {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { set_tls_ptr(self as *const Tls as _) };
+ ActiveTls { tls: self }
+ }
+
+ #[allow(unused)]
+ pub unsafe fn activate_persistent(self: Box<Self>) {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { set_tls_ptr((&*self) as *const Tls as _) };
+ mem::forget(self);
+ }
+
+ unsafe fn current<'a>() -> &'a Tls {
+ // FIXME: Needs safety information. See entry.S for `set_tls_ptr` definition.
+ unsafe { &*(get_tls_ptr() as *const Tls) }
+ }
+
+ pub fn create(dtor: Option<unsafe extern "C" fn(*mut u8)>) -> Key {
+ let index = if let Some(index) = TLS_KEY_IN_USE.set() {
+ index
+ } else {
+ rtabort!("TLS limit exceeded")
+ };
+ TLS_DESTRUCTOR[index].store(dtor.map_or(0, |f| f as usize), Ordering::Relaxed);
+ Key::from_index(index)
+ }
+
+ pub fn set(key: Key, value: *mut u8) {
+ let index = key.to_index();
+ rtassert!(TLS_KEY_IN_USE.get(index));
+ unsafe { Self::current() }.data[index].set(value);
+ }
+
+ pub fn get(key: Key) -> *mut u8 {
+ let index = key.to_index();
+ rtassert!(TLS_KEY_IN_USE.get(index));
+ unsafe { Self::current() }.data[index].get()
+ }
+
+ pub fn destroy(key: Key) {
+ TLS_KEY_IN_USE.clear(key.to_index());
+ }
+}
inner: SpinMutex<WaitVariable<bool>>,
}
-pub type MovableMutex = Box<Mutex>;
+pub type MovableMutex = Mutex;
// Implementation according to “Operating Systems: Three Easy Pieces”, chapter 28
impl Mutex {
pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+pub use self::task_queue::JoinNotifier;
+
mod task_queue {
- use crate::sync::mpsc;
+ use super::wait_notify;
use crate::sync::{Mutex, MutexGuard, Once};
- pub type JoinHandle = mpsc::Receiver<()>;
+ pub type JoinHandle = wait_notify::Waiter;
+
+ pub struct JoinNotifier(Option<wait_notify::Notifier>);
+
+ impl Drop for JoinNotifier {
+ fn drop(&mut self) {
+ self.0.take().unwrap().notify();
+ }
+ }
pub(super) struct Task {
p: Box<dyn FnOnce()>,
- done: mpsc::Sender<()>,
+ done: JoinNotifier,
}
impl Task {
pub(super) fn new(p: Box<dyn FnOnce()>) -> (Task, JoinHandle) {
- let (done, recv) = mpsc::channel();
+ let (done, recv) = wait_notify::new();
+ let done = JoinNotifier(Some(done));
(Task { p, done }, recv)
}
- pub(super) fn run(self) {
+ pub(super) fn run(self) -> JoinNotifier {
(self.p)();
- let _ = self.done.send(());
+ self.done
}
}
}
}
+/// This module provides a synchronization primitive that does not use thread
+/// local variables. This is needed for signaling that a thread has finished
+/// execution. The signal is sent once all TLS destructors have finished at
+/// which point no new thread locals should be created.
+pub mod wait_notify {
+ use super::super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
+ use crate::sync::Arc;
+
+ pub struct Notifier(Arc<SpinMutex<WaitVariable<bool>>>);
+
+ impl Notifier {
+ /// Notify the waiter. The waiter is either notified right away (if
+ /// currently blocked in `Waiter::wait()`) or later when it calls the
+ /// `Waiter::wait()` method.
+ pub fn notify(self) {
+ let mut guard = self.0.lock();
+ *guard.lock_var_mut() = true;
+ let _ = WaitQueue::notify_one(guard);
+ }
+ }
+
+ pub struct Waiter(Arc<SpinMutex<WaitVariable<bool>>>);
+
+ impl Waiter {
+ /// Wait for a notification. If `Notifier::notify()` has already been
+ /// called, this will return immediately, otherwise the current thread
+ /// is blocked until notified.
+ pub fn wait(self) {
+ let guard = self.0.lock();
+ if *guard.lock_var() {
+ return;
+ }
+ WaitQueue::wait(guard, || {});
+ }
+ }
+
+ pub fn new() -> (Notifier, Waiter) {
+ let inner = Arc::new(SpinMutex::new(WaitVariable::new(false)));
+ (Notifier(inner.clone()), Waiter(inner))
+ }
+}
+
impl Thread {
// unsafe: see thread::Builder::spawn_unchecked for safety requirements
pub unsafe fn new(_stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
Ok(Thread(handle))
}
- pub(super) fn entry() {
+ pub(super) fn entry() -> JoinNotifier {
let mut pending_tasks = task_queue::lock();
let task = rtunwrap!(Some, pending_tasks.pop());
drop(pending_tasks); // make sure to not hold the task queue lock longer than necessary
}
pub fn join(self) {
- let _ = self.0.recv();
+ self.0.wait();
}
}
+++ /dev/null
-//! A simple queue implementation for synchronization primitives.
-//!
-//! This queue is used to implement condition variable and mutexes.
-//!
-//! Users of this API are expected to use the `WaitVariable<T>` type. Since
-//! that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to
-//! allow shared access.
-//!
-//! Since userspace may send spurious wake-ups, the wakeup event state is
-//! recorded in the enclave. The wakeup event state is protected by a spinlock.
-//! The queue and associated wait state are stored in a `WaitVariable`.
-
-#[cfg(test)]
-mod tests;
-
-/// A doubly-linked list where callers are in charge of memory allocation
-/// of the nodes in the list.
-mod unsafe_list;
-
-/// Trivial spinlock-based implementation of `sync::Mutex`.
-// FIXME: Perhaps use Intel TSX to avoid locking?
-mod spin_mutex;
-
-use crate::num::NonZeroUsize;
-use crate::ops::{Deref, DerefMut};
-use crate::time::Duration;
-
-use super::abi::thread;
-use super::abi::usercalls;
-use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE};
-
-pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard};
-use self::unsafe_list::{UnsafeList, UnsafeListEntry};
-
-/// An queue entry in a `WaitQueue`.
-struct WaitEntry {
- /// TCS address of the thread that is waiting
- tcs: Tcs,
- /// Whether this thread has been notified to be awoken
- wake: bool,
-}
-
-/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the
-/// queue and the data are synchronized, since the type itself is not `Sync`.
-///
-/// Consumers of this API should use a synchronization primitive for shared
-/// access, such as `SpinMutex`.
-#[derive(Default)]
-pub struct WaitVariable<T> {
- queue: WaitQueue,
- lock: T,
-}
-
-impl<T> WaitVariable<T> {
- pub const fn new(var: T) -> Self {
- WaitVariable { queue: WaitQueue::new(), lock: var }
- }
-
- pub fn queue_empty(&self) -> bool {
- self.queue.is_empty()
- }
-
- pub fn lock_var(&self) -> &T {
- &self.lock
- }
-
- pub fn lock_var_mut(&mut self) -> &mut T {
- &mut self.lock
- }
-}
-
-#[derive(Copy, Clone)]
-pub enum NotifiedTcs {
- Single(Tcs),
- All { count: NonZeroUsize },
-}
-
-/// An RAII guard that will notify a set of target threads as well as unlock
-/// a mutex on drop.
-pub struct WaitGuard<'a, T: 'a> {
- mutex_guard: Option<SpinMutexGuard<'a, WaitVariable<T>>>,
- notified_tcs: NotifiedTcs,
-}
-
-/// A queue of threads that are waiting on some synchronization primitive.
-///
-/// `UnsafeList` entries are allocated on the waiting thread's stack. This
-/// avoids any global locking that might happen in the heap allocator. This is
-/// safe because the waiting thread will not return from that stack frame until
-/// after it is notified. The notifying thread ensures to clean up any
-/// references to the list entries before sending the wakeup event.
-pub struct WaitQueue {
- // We use an inner Mutex here to protect the data in the face of spurious
- // wakeups.
- inner: UnsafeList<SpinMutex<WaitEntry>>,
-}
-unsafe impl Send for WaitQueue {}
-
-impl Default for WaitQueue {
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl<'a, T> WaitGuard<'a, T> {
- /// Returns which TCSes will be notified when this guard drops.
- pub fn notified_tcs(&self) -> NotifiedTcs {
- self.notified_tcs
- }
-
- /// Drop this `WaitGuard`, after dropping another `guard`.
- pub fn drop_after<U>(self, guard: U) {
- drop(guard);
- drop(self);
- }
-}
-
-impl<'a, T> Deref for WaitGuard<'a, T> {
- type Target = SpinMutexGuard<'a, WaitVariable<T>>;
-
- fn deref(&self) -> &Self::Target {
- self.mutex_guard.as_ref().unwrap()
- }
-}
-
-impl<'a, T> DerefMut for WaitGuard<'a, T> {
- fn deref_mut(&mut self) -> &mut Self::Target {
- self.mutex_guard.as_mut().unwrap()
- }
-}
-
-impl<'a, T> Drop for WaitGuard<'a, T> {
- fn drop(&mut self) {
- drop(self.mutex_guard.take());
- let target_tcs = match self.notified_tcs {
- NotifiedTcs::Single(tcs) => Some(tcs),
- NotifiedTcs::All { .. } => None,
- };
- rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs));
- }
-}
-
-impl WaitQueue {
- pub const fn new() -> Self {
- WaitQueue { inner: UnsafeList::new() }
- }
-
- pub fn is_empty(&self) -> bool {
- self.inner.is_empty()
- }
-
- /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
- /// until a wakeup event.
- ///
- /// This function does not return until this thread has been awoken.
- pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) {
- // very unsafe: check requirements of UnsafeList::push
- unsafe {
- let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
- tcs: thread::current(),
- wake: false,
- }));
- let entry = guard.queue.inner.push(&mut entry);
- drop(guard);
- before_wait();
- while !entry.lock().wake {
- // don't panic, this would invalidate `entry` during unwinding
- let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE));
- rtassert!(eventset & EV_UNPARK == EV_UNPARK);
- }
- }
- }
-
- /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
- /// until a wakeup event or timeout. If event was observed, returns true.
- /// If not, it will remove the calling thread from the wait queue.
- pub fn wait_timeout<T, F: FnOnce()>(
- lock: &SpinMutex<WaitVariable<T>>,
- timeout: Duration,
- before_wait: F,
- ) -> bool {
- // very unsafe: check requirements of UnsafeList::push
- unsafe {
- let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
- tcs: thread::current(),
- wake: false,
- }));
- let entry_lock = lock.lock().queue.inner.push(&mut entry);
- before_wait();
- usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake);
- // acquire the wait queue's lock first to avoid deadlock.
- let mut guard = lock.lock();
- let success = entry_lock.lock().wake;
- if !success {
- // nobody is waking us up, so remove our entry from the wait queue.
- guard.queue.inner.remove(&mut entry);
- }
- success
- }
- }
-
- /// Either find the next waiter on the wait queue, or return the mutex
- /// guard unchanged.
- ///
- /// If a waiter is found, a `WaitGuard` is returned which will notify the
- /// waiter when it is dropped.
- pub fn notify_one<T>(
- mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
- ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
- unsafe {
- if let Some(entry) = guard.queue.inner.pop() {
- let mut entry_guard = entry.lock();
- let tcs = entry_guard.tcs;
- entry_guard.wake = true;
- drop(entry);
- Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) })
- } else {
- Err(guard)
- }
- }
- }
-
- /// Either find any and all waiters on the wait queue, or return the mutex
- /// guard unchanged.
- ///
- /// If at least one waiter is found, a `WaitGuard` is returned which will
- /// notify all waiters when it is dropped.
- pub fn notify_all<T>(
- mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
- ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
- unsafe {
- let mut count = 0;
- while let Some(entry) = guard.queue.inner.pop() {
- count += 1;
- let mut entry_guard = entry.lock();
- entry_guard.wake = true;
- }
- if let Some(count) = NonZeroUsize::new(count) {
- Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
- } else {
- Err(guard)
- }
- }
- }
-}
--- /dev/null
+//! A simple queue implementation for synchronization primitives.
+//!
+//! This queue is used to implement condition variable and mutexes.
+//!
+//! Users of this API are expected to use the `WaitVariable<T>` type. Since
+//! that type is not `Sync`, it needs to be protected by e.g., a `SpinMutex` to
+//! allow shared access.
+//!
+//! Since userspace may send spurious wake-ups, the wakeup event state is
+//! recorded in the enclave. The wakeup event state is protected by a spinlock.
+//! The queue and associated wait state are stored in a `WaitVariable`.
+
+#[cfg(test)]
+mod tests;
+
+mod spin_mutex;
+mod unsafe_list;
+
+use crate::num::NonZeroUsize;
+use crate::ops::{Deref, DerefMut};
+use crate::time::Duration;
+
+use super::abi::thread;
+use super::abi::usercalls;
+use fortanix_sgx_abi::{Tcs, EV_UNPARK, WAIT_INDEFINITE};
+
+pub use self::spin_mutex::{try_lock_or_false, SpinMutex, SpinMutexGuard};
+use self::unsafe_list::{UnsafeList, UnsafeListEntry};
+
+/// A queue entry in a `WaitQueue`.
+struct WaitEntry {
+ /// TCS address of the thread that is waiting
+ tcs: Tcs,
+ /// Whether this thread has been notified to be awoken
+ wake: bool,
+}
+
+/// Data stored with a `WaitQueue` alongside it. This ensures accesses to the
+/// queue and the data are synchronized, since the type itself is not `Sync`.
+///
+/// Consumers of this API should use a synchronization primitive for shared
+/// access, such as `SpinMutex`.
+#[derive(Default)]
+pub struct WaitVariable<T> {
+ queue: WaitQueue,
+ lock: T,
+}
+
+impl<T> WaitVariable<T> {
+ pub const fn new(var: T) -> Self {
+ WaitVariable { queue: WaitQueue::new(), lock: var }
+ }
+
+ pub fn queue_empty(&self) -> bool {
+ self.queue.is_empty()
+ }
+
+ pub fn lock_var(&self) -> &T {
+ &self.lock
+ }
+
+ pub fn lock_var_mut(&mut self) -> &mut T {
+ &mut self.lock
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum NotifiedTcs {
+ Single(Tcs),
+ All { count: NonZeroUsize },
+}
+
+/// An RAII guard that will notify a set of target threads as well as unlock
+/// a mutex on drop.
+pub struct WaitGuard<'a, T: 'a> {
+ mutex_guard: Option<SpinMutexGuard<'a, WaitVariable<T>>>,
+ notified_tcs: NotifiedTcs,
+}
+
+/// A queue of threads that are waiting on some synchronization primitive.
+///
+/// `UnsafeList` entries are allocated on the waiting thread's stack. This
+/// avoids any global locking that might happen in the heap allocator. This is
+/// safe because the waiting thread will not return from that stack frame until
+/// after it is notified. The notifying thread ensures to clean up any
+/// references to the list entries before sending the wakeup event.
+pub struct WaitQueue {
+ // We use an inner Mutex here to protect the data in the face of spurious
+ // wakeups.
+ inner: UnsafeList<SpinMutex<WaitEntry>>,
+}
+unsafe impl Send for WaitQueue {}
+
+impl Default for WaitQueue {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<'a, T> WaitGuard<'a, T> {
+ /// Returns which TCSes will be notified when this guard drops.
+ pub fn notified_tcs(&self) -> NotifiedTcs {
+ self.notified_tcs
+ }
+
+ /// Drop this `WaitGuard`, after dropping another `guard`.
+ pub fn drop_after<U>(self, guard: U) {
+ drop(guard);
+ drop(self);
+ }
+}
+
+impl<'a, T> Deref for WaitGuard<'a, T> {
+ type Target = SpinMutexGuard<'a, WaitVariable<T>>;
+
+ fn deref(&self) -> &Self::Target {
+ self.mutex_guard.as_ref().unwrap()
+ }
+}
+
+impl<'a, T> DerefMut for WaitGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.mutex_guard.as_mut().unwrap()
+ }
+}
+
+impl<'a, T> Drop for WaitGuard<'a, T> {
+ fn drop(&mut self) {
+ drop(self.mutex_guard.take());
+ let target_tcs = match self.notified_tcs {
+ NotifiedTcs::Single(tcs) => Some(tcs),
+ NotifiedTcs::All { .. } => None,
+ };
+ rtunwrap!(Ok, usercalls::send(EV_UNPARK, target_tcs));
+ }
+}
+
+impl WaitQueue {
+ pub const fn new() -> Self {
+ WaitQueue { inner: UnsafeList::new() }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+
+ /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
+ /// until a wakeup event.
+ ///
+ /// This function does not return until this thread has been awoken.
+ pub fn wait<T, F: FnOnce()>(mut guard: SpinMutexGuard<'_, WaitVariable<T>>, before_wait: F) {
+ // very unsafe: check requirements of UnsafeList::push
+ unsafe {
+ let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+ tcs: thread::current(),
+ wake: false,
+ }));
+ let entry = guard.queue.inner.push(&mut entry);
+ drop(guard);
+ before_wait();
+ while !entry.lock().wake {
+ // don't panic, this would invalidate `entry` during unwinding
+ let eventset = rtunwrap!(Ok, usercalls::wait(EV_UNPARK, WAIT_INDEFINITE));
+ rtassert!(eventset & EV_UNPARK == EV_UNPARK);
+ }
+ }
+ }
+
+ /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
+ /// until a wakeup event or timeout. If event was observed, returns true.
+ /// If not, it will remove the calling thread from the wait queue.
+ pub fn wait_timeout<T, F: FnOnce()>(
+ lock: &SpinMutex<WaitVariable<T>>,
+ timeout: Duration,
+ before_wait: F,
+ ) -> bool {
+ // very unsafe: check requirements of UnsafeList::push
+ unsafe {
+ let mut entry = UnsafeListEntry::new(SpinMutex::new(WaitEntry {
+ tcs: thread::current(),
+ wake: false,
+ }));
+ let entry_lock = lock.lock().queue.inner.push(&mut entry);
+ before_wait();
+ usercalls::wait_timeout(EV_UNPARK, timeout, || entry_lock.lock().wake);
+ // acquire the wait queue's lock first to avoid deadlock.
+ let mut guard = lock.lock();
+ let success = entry_lock.lock().wake;
+ if !success {
+ // nobody is waking us up, so remove our entry from the wait queue.
+ guard.queue.inner.remove(&mut entry);
+ }
+ success
+ }
+ }
+
+ /// Either find the next waiter on the wait queue, or return the mutex
+ /// guard unchanged.
+ ///
+ /// If a waiter is found, a `WaitGuard` is returned which will notify the
+ /// waiter when it is dropped.
+ pub fn notify_one<T>(
+ mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+ ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+ unsafe {
+ if let Some(entry) = guard.queue.inner.pop() {
+ let mut entry_guard = entry.lock();
+ let tcs = entry_guard.tcs;
+ entry_guard.wake = true;
+ drop(entry);
+ Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::Single(tcs) })
+ } else {
+ Err(guard)
+ }
+ }
+ }
+
+ /// Either find any and all waiters on the wait queue, or return the mutex
+ /// guard unchanged.
+ ///
+ /// If at least one waiter is found, a `WaitGuard` is returned which will
+ /// notify all waiters when it is dropped.
+ pub fn notify_all<T>(
+ mut guard: SpinMutexGuard<'_, WaitVariable<T>>,
+ ) -> Result<WaitGuard<'_, T>, SpinMutexGuard<'_, WaitVariable<T>>> {
+ unsafe {
+ let mut count = 0;
+ while let Some(entry) = guard.queue.inner.pop() {
+ count += 1;
+ let mut entry_guard = entry.lock();
+ entry_guard.wake = true;
+ }
+ if let Some(count) = NonZeroUsize::new(count) {
+ Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
+ } else {
+ Err(guard)
+ }
+ }
+ }
+}
+//! Trivial spinlock-based implementation of `sync::Mutex`.
+// FIXME: Perhaps use Intel TSX to avoid locking?
+
#[cfg(test)]
mod tests;
+//! A doubly-linked list where callers are in charge of memory allocation
+//! of the nodes in the list.
+
#[cfg(test)]
mod tests;
pub fn chdir(p: &path::Path) -> io::Result<()> {
let p: &OsStr = p.as_ref();
let p = CString::new(p.as_bytes())?;
- unsafe {
- match libc::chdir(p.as_ptr()) == (0 as c_int) {
- true => Ok(()),
- false => Err(io::Error::last_os_error()),
- }
+ if unsafe { libc::chdir(p.as_ptr()) } != 0 {
+ return Err(io::Error::last_os_error());
}
+ Ok(())
}
pub struct SplitPaths<'a> {
use crate::ffi::OsString;
+use crate::fmt;
pub struct Args {}
+++ /dev/null
-use crate::ffi::OsString;
-use crate::fmt;
-use crate::vec;
-
-pub fn args() -> Args {
- Args { iter: Vec::new().into_iter() }
-}
-
-pub struct Args {
- iter: vec::IntoIter<OsString>,
-}
-
-impl !Send for Args {}
-impl !Sync for Args {}
-
-impl fmt::Debug for Args {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- self.iter.as_slice().fmt(f)
- }
-}
-
-impl Iterator for Args {
- type Item = OsString;
- fn next(&mut self) -> Option<OsString> {
- self.iter.next()
- }
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-impl ExactSizeIterator for Args {
- fn len(&self) -> usize {
- self.iter.len()
- }
-}
-
-impl DoubleEndedIterator for Args {
- fn next_back(&mut self) -> Option<OsString> {
- self.iter.next_back()
- }
-}
--- /dev/null
+use crate::arch::wasm32;
+use crate::cmp;
+use crate::mem;
+use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+use crate::sys::mutex::Mutex;
+use crate::time::Duration;
+
+pub struct Condvar {
+ cnt: AtomicUsize,
+}
+
+pub type MovableCondvar = Condvar;
+
+// Condition variables are implemented with a simple counter internally that is
+// likely to cause spurious wakeups. Blocking on a condition variable will first
+// read the value of the internal counter, unlock the given mutex, and then
+// block if and only if the counter's value is still the same. Notifying a
+// condition variable will modify the counter (add one for now) and then wake up
+// a thread waiting on the address of the counter.
+//
+// A thread waiting on the condition variable will as a result avoid going to
+// sleep if it's notified after the lock is unlocked but before it fully goes to
+// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
+// only be woken up with a call to `wake`.
+//
+// Note that it's possible for 2 or more threads to be woken up by a call to
+// `notify_one` with this implementation. That can happen where the modification
+// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
+// and the subsequent `wake` may wake up a thread that's actually blocking. We
+// consider this a spurious wakeup, though, which all users of condition
+// variables must already be prepared to handle. As a result, this source of
+// spurious wakeups is currently thought to be ok, although it may be problematic
+// later on if it causes too many spurious wakeups.
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { cnt: AtomicUsize::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {
+ // nothing to do
+ }
+
+ pub unsafe fn notify_one(&self) {
+ self.cnt.fetch_add(1, SeqCst);
+ // SAFETY: ptr() is always valid
+ unsafe {
+ wasm32::memory_atomic_notify(self.ptr(), 1);
+ }
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ self.cnt.fetch_add(1, SeqCst);
+ // SAFETY: ptr() is always valid
+ unsafe {
+ wasm32::memory_atomic_notify(self.ptr(), u32::MAX); // -1 == "wake everyone"
+ }
+ }
+
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ // "atomically block and unlock" implemented by loading our current
+ // counter's value, unlocking the mutex, and blocking if the counter
+ // still has the same value.
+ //
+ // Notifications happen by incrementing the counter and then waking a
+ // thread. Incrementing the counter after we unlock the mutex will
+ // prevent us from sleeping and otherwise the call to `wake` will
+ // wake us up once we're asleep.
+ let ticket = self.cnt.load(SeqCst) as i32;
+ mutex.unlock();
+ let val = wasm32::memory_atomic_wait32(self.ptr(), ticket, -1);
+ // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
+ debug_assert!(val == 0 || val == 1);
+ mutex.lock();
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ let ticket = self.cnt.load(SeqCst) as i32;
+ mutex.unlock();
+ let nanos = dur.as_nanos();
+ let nanos = cmp::min(i64::MAX as u128, nanos);
+
+ // If the return value is 2 then a timeout happened, so we return
+ // `false` as we weren't actually notified.
+ let ret = wasm32::memory_atomic_wait32(self.ptr(), ticket, nanos as i64) != 2;
+ mutex.lock();
+ return ret;
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ // nothing to do
+ }
+
+ #[inline]
+ fn ptr(&self) -> *mut i32 {
+ assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+ self.cnt.as_mut_ptr() as *mut i32
+ }
+}
--- /dev/null
+use crate::arch::wasm32;
+use crate::convert::TryInto;
+use crate::sync::atomic::AtomicI32;
+use crate::time::Duration;
+
+pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
+ let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
+ unsafe {
+ wasm32::memory_atomic_wait32(futex as *const AtomicI32 as *mut i32, expected, timeout);
+ }
+}
+
+pub fn futex_wake(futex: &AtomicI32) {
+ unsafe {
+ wasm32::memory_atomic_notify(futex as *const AtomicI32 as *mut i32, 1);
+ }
+}
--- /dev/null
+use crate::arch::wasm32;
+use crate::cell::UnsafeCell;
+use crate::mem;
+use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
+use crate::sys::thread;
+
+pub struct Mutex {
+ locked: AtomicUsize,
+}
+
+pub type MovableMutex = Mutex;
+
+// Mutexes have a pretty simple implementation where they contain an `i32`
+// internally that is 0 when unlocked and 1 when the mutex is locked.
+// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
+// if it fails it then waits for a notification. Releasing a lock is then done
+// by swapping in 0 and then notifying any waiters, if present.
+
+impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { locked: AtomicUsize::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {
+ // nothing to do
+ }
+
+ pub unsafe fn lock(&self) {
+ while !self.try_lock() {
+ // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
+ let val = unsafe {
+ wasm32::memory_atomic_wait32(
+ self.ptr(),
+ 1, // we expect our mutex is locked
+ -1, // wait infinitely
+ )
+ };
+ // we should have either woke up (0) or got a not-equal due to a
+ // race (1). We should never time out (2)
+ debug_assert!(val == 0 || val == 1);
+ }
+ }
+
+ pub unsafe fn unlock(&self) {
+ let prev = self.locked.swap(0, SeqCst);
+ debug_assert_eq!(prev, 1);
+ wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ // nothing to do
+ }
+
+ #[inline]
+ fn ptr(&self) -> *mut i32 {
+ assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
+ self.locked.as_mut_ptr() as *mut i32
+ }
+}
+
+pub struct ReentrantMutex {
+ owner: AtomicU32,
+ recursions: UnsafeCell<u32>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+// Reentrant mutexes are implemented similarly to the mutexes above, except that
+// instead of "1" meaning unlocked we use the id of a thread to represent
+// whether it has locked a mutex. That way we have an atomic counter which
+// always holds the id of the thread that currently holds the lock (or 0 if the
+// lock is unlocked).
+//
+// Once a thread acquires a lock recursively, which it detects by looking at
+// the value that's already there, it will update a local `recursions` counter
+// in a nonatomic fashion (as we hold the lock). The lock is then fully
+// released when this recursion counter reaches 0.
+
+impl ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
+ ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
+ }
+
+ pub unsafe fn init(&self) {
+ // nothing to do...
+ }
+
+ pub unsafe fn lock(&self) {
+ let me = thread::my_id();
+ while let Err(owner) = self._try_lock(me) {
+        // SAFETY: the caller must guarantee that `self.ptr()` and `owner` are valid i32.
+ let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
+ debug_assert!(val == 0 || val == 1);
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ unsafe { self._try_lock(thread::my_id()).is_ok() }
+ }
+
+ #[inline]
+ unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
+ let id = id.checked_add(1).unwrap();
+ match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
+ // we transitioned from unlocked to locked
+ Ok(_) => {
+ debug_assert_eq!(*self.recursions.get(), 0);
+ Ok(())
+ }
+
+ // we currently own this lock, so let's update our count and return
+ // true.
+ Err(n) if n == id => {
+ *self.recursions.get() += 1;
+ Ok(())
+ }
+
+ // Someone else owns the lock, let our caller take care of it
+ Err(other) => Err(other),
+ }
+ }
+
+ pub unsafe fn unlock(&self) {
+ // If we didn't ever recursively lock the lock then we fully unlock the
+ // mutex and wake up a waiter, if any. Otherwise we decrement our
+        // recursive counter and let someone else take care of the zero.
+ match *self.recursions.get() {
+ 0 => {
+ self.owner.swap(0, SeqCst);
+                // SAFETY: the caller must guarantee that `self.ptr()` is a valid i32.
+ unsafe {
+ wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
+ } // wake up one waiter, if any
+ }
+ ref mut n => *n -= 1,
+ }
+ }
+
+ pub unsafe fn destroy(&self) {
+ // nothing to do...
+ }
+
+ #[inline]
+ fn ptr(&self) -> *mut i32 {
+ self.owner.as_mut_ptr() as *mut i32
+ }
+}
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::sys::condvar::Condvar;
+use crate::sys::mutex::Mutex;
+
+pub struct RWLock {
+ lock: Mutex,
+ cond: Condvar,
+ state: UnsafeCell<State>,
+}
+
+enum State {
+ Unlocked,
+ Reading(usize),
+ Writing,
+}
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+// This rwlock implementation is a relatively simple implementation which has a
+// condition variable for readers/writers as well as a mutex protecting the
+// internal state of the lock. A current downside of the implementation is that
+// unlocking the lock will notify *all* waiters rather than just readers or just
+// writers. This can cause lots of "thundering stampede" problems. While
+// hopefully correct this implementation is very likely to want to be changed in
+// the future.
+
+impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ self.lock.lock();
+ while !(*self.state.get()).inc_readers() {
+ self.cond.wait(&self.lock);
+ }
+ self.lock.unlock();
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ self.lock.lock();
+ let ok = (*self.state.get()).inc_readers();
+ self.lock.unlock();
+ return ok;
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ self.lock.lock();
+ while !(*self.state.get()).inc_writers() {
+ self.cond.wait(&self.lock);
+ }
+ self.lock.unlock();
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ self.lock.lock();
+ let ok = (*self.state.get()).inc_writers();
+ self.lock.unlock();
+ return ok;
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.lock.lock();
+ let notify = (*self.state.get()).dec_readers();
+ self.lock.unlock();
+ if notify {
+ // FIXME: should only wake up one of these some of the time
+ self.cond.notify_all();
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ self.lock.lock();
+ (*self.state.get()).dec_writers();
+ self.lock.unlock();
+ // FIXME: should only wake up one of these some of the time
+ self.cond.notify_all();
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ self.lock.destroy();
+ self.cond.destroy();
+ }
+}
+
+impl State {
+ fn inc_readers(&mut self) -> bool {
+ match *self {
+ State::Unlocked => {
+ *self = State::Reading(1);
+ true
+ }
+ State::Reading(ref mut cnt) => {
+ *cnt += 1;
+ true
+ }
+ State::Writing => false,
+ }
+ }
+
+ fn inc_writers(&mut self) -> bool {
+ match *self {
+ State::Unlocked => {
+ *self = State::Writing;
+ true
+ }
+ State::Reading(_) | State::Writing => false,
+ }
+ }
+
+ fn dec_readers(&mut self) -> bool {
+ let zero = match *self {
+ State::Reading(ref mut cnt) => {
+ *cnt -= 1;
+ *cnt == 0
+ }
+ State::Unlocked | State::Writing => invalid(),
+ };
+ if zero {
+ *self = State::Unlocked;
+ }
+ zero
+ }
+
+ fn dec_writers(&mut self) {
+ match *self {
+ State::Writing => {}
+ State::Unlocked | State::Reading(_) => invalid(),
+ }
+ *self = State::Unlocked;
+ }
+}
+
+fn invalid() -> ! {
+ panic!("inconsistent rwlock");
+}
--- /dev/null
+use crate::ffi::CStr;
+use crate::io;
+use crate::sys::unsupported;
+use crate::time::Duration;
+
+pub struct Thread(!);
+
+pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
+
+impl Thread {
+ // unsafe: see thread::Builder::spawn_unchecked for safety requirements
+ pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
+ unsupported()
+ }
+
+ pub fn yield_now() {}
+
+ pub fn set_name(_name: &CStr) {}
+
+ pub fn sleep(dur: Duration) {
+ use crate::arch::wasm32;
+ use crate::cmp;
+
+ // Use an atomic wait to block the current thread artificially with a
+ // timeout listed. Note that we should never be notified (return value
+ // of 0) or our comparison should never fail (return value of 1) so we
+ // should always only resume execution through a timeout (return value
+ // 2).
+ let mut nanos = dur.as_nanos();
+ while nanos > 0 {
+ let amt = cmp::min(i64::MAX as u128, nanos);
+ let mut x = 0;
+ let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
+ debug_assert_eq!(val, 2);
+ nanos -= amt;
+ }
+ }
+
+ pub fn join(self) {}
+}
+
+pub mod guard {
+ pub type Guard = !;
+ pub unsafe fn current() -> Option<Guard> {
+ None
+ }
+ pub unsafe fn init() -> Option<Guard> {
+ None
+ }
+}
+
+// We currently just use our own thread-local to store our
+// current thread's ID, and then we lazily initialize it to something allocated
+// from a global counter.
+pub fn my_id() -> u32 {
+ use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
+
+ static NEXT_ID: AtomicU32 = AtomicU32::new(0);
+
+ #[thread_local]
+ static mut MY_ID: u32 = 0;
+
+ unsafe {
+ // If our thread ID isn't set yet then we need to allocate one. Do so
+        // with a simple "atomically add to a global counter" strategy.
+        // This strategy doesn't handle what happens when the counter
+ // overflows, however, so just abort everything once the counter
+ // overflows and eventually we could have some sort of recycling scheme
+ // (or maybe this is all totally irrelevant by that point!). In any case
+ // though we're using a CAS loop instead of a `fetch_add` to ensure that
+ // the global counter never overflows.
+ if MY_ID == 0 {
+ let mut cur = NEXT_ID.load(SeqCst);
+ MY_ID = loop {
+ let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
+ match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
+ Ok(_) => break next,
+ Err(i) => cur = i,
+ }
+ };
+ }
+ MY_ID
+ }
+}
+++ /dev/null
-use crate::arch::wasm32;
-use crate::cmp;
-use crate::mem;
-use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::mutex::Mutex;
-use crate::time::Duration;
-
-pub struct Condvar {
- cnt: AtomicUsize,
-}
-
-pub type MovableCondvar = Condvar;
-
-// Condition variables are implemented with a simple counter internally that is
-// likely to cause spurious wakeups. Blocking on a condition variable will first
-// read the value of the internal counter, unlock the given mutex, and then
-// block if and only if the counter's value is still the same. Notifying a
-// condition variable will modify the counter (add one for now) and then wake up
-// a thread waiting on the address of the counter.
-//
-// A thread waiting on the condition variable will as a result avoid going to
-// sleep if it's notified after the lock is unlocked but before it fully goes to
-// sleep. A sleeping thread is guaranteed to be woken up at some point as it can
-// only be woken up with a call to `wake`.
-//
-// Note that it's possible for 2 or more threads to be woken up by a call to
-// `notify_one` with this implementation. That can happen where the modification
-// of `cnt` causes any threads in the middle of `wait` to avoid going to sleep,
-// and the subsequent `wake` may wake up a thread that's actually blocking. We
-// consider this a spurious wakeup, though, which all users of condition
-// variables must already be prepared to handle. As a result, this source of
-// spurious wakeups is currently though to be ok, although it may be problematic
-// later on if it causes too many spurious wakeups.
-
-impl Condvar {
- pub const fn new() -> Condvar {
- Condvar { cnt: AtomicUsize::new(0) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {
- // nothing to do
- }
-
- pub unsafe fn notify_one(&self) {
- self.cnt.fetch_add(1, SeqCst);
- // SAFETY: ptr() is always valid
- unsafe {
- wasm32::memory_atomic_notify(self.ptr(), 1);
- }
- }
-
- #[inline]
- pub unsafe fn notify_all(&self) {
- self.cnt.fetch_add(1, SeqCst);
- // SAFETY: ptr() is always valid
- unsafe {
- wasm32::memory_atomic_notify(self.ptr(), u32::MAX); // -1 == "wake everyone"
- }
- }
-
- pub unsafe fn wait(&self, mutex: &Mutex) {
- // "atomically block and unlock" implemented by loading our current
- // counter's value, unlocking the mutex, and blocking if the counter
- // still has the same value.
- //
- // Notifications happen by incrementing the counter and then waking a
- // thread. Incrementing the counter after we unlock the mutex will
- // prevent us from sleeping and otherwise the call to `wake` will
- // wake us up once we're asleep.
- let ticket = self.cnt.load(SeqCst) as i32;
- mutex.unlock();
- let val = wasm32::memory_atomic_wait32(self.ptr(), ticket, -1);
- // 0 == woken, 1 == not equal to `ticket`, 2 == timeout (shouldn't happen)
- debug_assert!(val == 0 || val == 1);
- mutex.lock();
- }
-
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let ticket = self.cnt.load(SeqCst) as i32;
- mutex.unlock();
- let nanos = dur.as_nanos();
- let nanos = cmp::min(i64::MAX as u128, nanos);
-
- // If the return value is 2 then a timeout happened, so we return
- // `false` as we weren't actually notified.
- let ret = wasm32::memory_atomic_wait32(self.ptr(), ticket, nanos as i64) != 2;
- mutex.lock();
- return ret;
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {
- // nothing to do
- }
-
- #[inline]
- fn ptr(&self) -> *mut i32 {
- assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
- self.cnt.as_mut_ptr() as *mut i32
- }
-}
+++ /dev/null
-use crate::arch::wasm32;
-use crate::convert::TryInto;
-use crate::sync::atomic::AtomicI32;
-use crate::time::Duration;
-
-pub fn futex_wait(futex: &AtomicI32, expected: i32, timeout: Option<Duration>) {
- let timeout = timeout.and_then(|t| t.as_nanos().try_into().ok()).unwrap_or(-1);
- unsafe {
- wasm32::memory_atomic_wait32(futex as *const AtomicI32 as *mut i32, expected, timeout);
- }
-}
-
-pub fn futex_wake(futex: &AtomicI32) {
- unsafe {
- wasm32::memory_atomic_notify(futex as *const AtomicI32 as *mut i32, 1);
- }
-}
#![deny(unsafe_op_in_unsafe_fn)]
pub mod alloc;
+#[path = "../unsupported/args.rs"]
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
pub mod process;
#[path = "../unsupported/stdio.rs"]
pub mod stdio;
-pub mod thread;
#[path = "../unsupported/thread_local_dtor.rs"]
pub mod thread_local_dtor;
#[path = "../unsupported/thread_local_key.rs"]
cfg_if::cfg_if! {
if #[cfg(target_feature = "atomics")] {
- #[path = "condvar_atomics.rs"]
+ #[path = "atomics/condvar.rs"]
pub mod condvar;
- #[path = "mutex_atomics.rs"]
+ #[path = "atomics/mutex.rs"]
pub mod mutex;
- #[path = "rwlock_atomics.rs"]
+ #[path = "atomics/rwlock.rs"]
pub mod rwlock;
- #[path = "futex_atomics.rs"]
+ #[path = "atomics/futex.rs"]
pub mod futex;
+ #[path = "atomics/thread.rs"]
+ pub mod thread;
} else {
#[path = "../unsupported/condvar.rs"]
pub mod condvar;
pub mod mutex;
#[path = "../unsupported/rwlock.rs"]
pub mod rwlock;
+ #[path = "../unsupported/thread.rs"]
+ pub mod thread;
}
}
+++ /dev/null
-use crate::arch::wasm32;
-use crate::cell::UnsafeCell;
-use crate::mem;
-use crate::sync::atomic::{AtomicU32, AtomicUsize, Ordering::SeqCst};
-use crate::sys::thread;
-
-pub struct Mutex {
- locked: AtomicUsize,
-}
-
-pub type MovableMutex = Mutex;
-
-// Mutexes have a pretty simple implementation where they contain an `i32`
-// internally that is 0 when unlocked and 1 when the mutex is locked.
-// Acquisition has a fast path where it attempts to cmpxchg the 0 to a 1, and
-// if it fails it then waits for a notification. Releasing a lock is then done
-// by swapping in 0 and then notifying any waiters, if present.
-
-impl Mutex {
- pub const fn new() -> Mutex {
- Mutex { locked: AtomicUsize::new(0) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {
- // nothing to do
- }
-
- pub unsafe fn lock(&self) {
- while !self.try_lock() {
- // SAFETY: the caller must uphold the safety contract for `memory_atomic_wait32`.
- let val = unsafe {
- wasm32::memory_atomic_wait32(
- self.ptr(),
- 1, // we expect our mutex is locked
- -1, // wait infinitely
- )
- };
- // we should have either woke up (0) or got a not-equal due to a
- // race (1). We should never time out (2)
- debug_assert!(val == 0 || val == 1);
- }
- }
-
- pub unsafe fn unlock(&self) {
- let prev = self.locked.swap(0, SeqCst);
- debug_assert_eq!(prev, 1);
- wasm32::memory_atomic_notify(self.ptr(), 1); // wake up one waiter, if any
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- self.locked.compare_exchange(0, 1, SeqCst, SeqCst).is_ok()
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {
- // nothing to do
- }
-
- #[inline]
- fn ptr(&self) -> *mut i32 {
- assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
- self.locked.as_mut_ptr() as *mut i32
- }
-}
-
-pub struct ReentrantMutex {
- owner: AtomicU32,
- recursions: UnsafeCell<u32>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-// Reentrant mutexes are similarly implemented to mutexs above except that
-// instead of "1" meaning unlocked we use the id of a thread to represent
-// whether it has locked a mutex. That way we have an atomic counter which
-// always holds the id of the thread that currently holds the lock (or 0 if the
-// lock is unlocked).
-//
-// Once a thread acquires a lock recursively, which it detects by looking at
-// the value that's already there, it will update a local `recursions` counter
-// in a nonatomic fashion (as we hold the lock). The lock is then fully
-// released when this recursion counter reaches 0.
-
-impl ReentrantMutex {
- pub const unsafe fn uninitialized() -> ReentrantMutex {
- ReentrantMutex { owner: AtomicU32::new(0), recursions: UnsafeCell::new(0) }
- }
-
- pub unsafe fn init(&self) {
- // nothing to do...
- }
-
- pub unsafe fn lock(&self) {
- let me = thread::my_id();
- while let Err(owner) = self._try_lock(me) {
- // SAFETY: the caller must gurantee that `self.ptr()` and `owner` are valid i32.
- let val = unsafe { wasm32::memory_atomic_wait32(self.ptr(), owner as i32, -1) };
- debug_assert!(val == 0 || val == 1);
- }
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- unsafe { self._try_lock(thread::my_id()).is_ok() }
- }
-
- #[inline]
- unsafe fn _try_lock(&self, id: u32) -> Result<(), u32> {
- let id = id.checked_add(1).unwrap();
- match self.owner.compare_exchange(0, id, SeqCst, SeqCst) {
- // we transitioned from unlocked to locked
- Ok(_) => {
- debug_assert_eq!(*self.recursions.get(), 0);
- Ok(())
- }
-
- // we currently own this lock, so let's update our count and return
- // true.
- Err(n) if n == id => {
- *self.recursions.get() += 1;
- Ok(())
- }
-
- // Someone else owns the lock, let our caller take care of it
- Err(other) => Err(other),
- }
- }
-
- pub unsafe fn unlock(&self) {
- // If we didn't ever recursively lock the lock then we fully unlock the
- // mutex and wake up a waiter, if any. Otherwise we decrement our
- // recursive counter and let some one else take care of the zero.
- match *self.recursions.get() {
- 0 => {
- self.owner.swap(0, SeqCst);
- // SAFETY: the caller must gurantee that `self.ptr()` is valid i32.
- unsafe {
- wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
- } // wake up one waiter, if any
- }
- ref mut n => *n -= 1,
- }
- }
-
- pub unsafe fn destroy(&self) {
- // nothing to do...
- }
-
- #[inline]
- fn ptr(&self) -> *mut i32 {
- self.owner.as_mut_ptr() as *mut i32
- }
-}
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::sys::condvar::Condvar;
-use crate::sys::mutex::Mutex;
-
-pub struct RWLock {
- lock: Mutex,
- cond: Condvar,
- state: UnsafeCell<State>,
-}
-
-enum State {
- Unlocked,
- Reading(usize),
- Writing,
-}
-
-unsafe impl Send for RWLock {}
-unsafe impl Sync for RWLock {}
-
-// This rwlock implementation is a relatively simple implementation which has a
-// condition variable for readers/writers as well as a mutex protecting the
-// internal state of the lock. A current downside of the implementation is that
-// unlocking the lock will notify *all* waiters rather than just readers or just
-// writers. This can cause lots of "thundering stampede" problems. While
-// hopefully correct this implementation is very likely to want to be changed in
-// the future.
-
-impl RWLock {
- pub const fn new() -> RWLock {
- RWLock { lock: Mutex::new(), cond: Condvar::new(), state: UnsafeCell::new(State::Unlocked) }
- }
-
- #[inline]
- pub unsafe fn read(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_readers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_readers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn write(&self) {
- self.lock.lock();
- while !(*self.state.get()).inc_writers() {
- self.cond.wait(&self.lock);
- }
- self.lock.unlock();
- }
-
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- self.lock.lock();
- let ok = (*self.state.get()).inc_writers();
- self.lock.unlock();
- return ok;
- }
-
- #[inline]
- pub unsafe fn read_unlock(&self) {
- self.lock.lock();
- let notify = (*self.state.get()).dec_readers();
- self.lock.unlock();
- if notify {
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
- }
-
- #[inline]
- pub unsafe fn write_unlock(&self) {
- self.lock.lock();
- (*self.state.get()).dec_writers();
- self.lock.unlock();
- // FIXME: should only wake up one of these some of the time
- self.cond.notify_all();
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {
- self.lock.destroy();
- self.cond.destroy();
- }
-}
-
-impl State {
- fn inc_readers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Reading(1);
- true
- }
- State::Reading(ref mut cnt) => {
- *cnt += 1;
- true
- }
- State::Writing => false,
- }
- }
-
- fn inc_writers(&mut self) -> bool {
- match *self {
- State::Unlocked => {
- *self = State::Writing;
- true
- }
- State::Reading(_) | State::Writing => false,
- }
- }
-
- fn dec_readers(&mut self) -> bool {
- let zero = match *self {
- State::Reading(ref mut cnt) => {
- *cnt -= 1;
- *cnt == 0
- }
- State::Unlocked | State::Writing => invalid(),
- };
- if zero {
- *self = State::Unlocked;
- }
- zero
- }
-
- fn dec_writers(&mut self) {
- match *self {
- State::Writing => {}
- State::Unlocked | State::Reading(_) => invalid(),
- }
- *self = State::Unlocked;
- }
-}
-
-fn invalid() -> ! {
- panic!("inconsistent rwlock");
-}
+++ /dev/null
-use crate::ffi::CStr;
-use crate::io;
-use crate::sys::unsupported;
-use crate::time::Duration;
-
-pub struct Thread(!);
-
-pub const DEFAULT_MIN_STACK_SIZE: usize = 4096;
-
-impl Thread {
- // unsafe: see thread::Builder::spawn_unchecked for safety requirements
- pub unsafe fn new(_stack: usize, _p: Box<dyn FnOnce()>) -> io::Result<Thread> {
- unsupported()
- }
-
- pub fn yield_now() {
- // do nothing
- }
-
- pub fn set_name(_name: &CStr) {
- // nope
- }
-
- #[cfg(not(target_feature = "atomics"))]
- pub fn sleep(_dur: Duration) {
- panic!("can't sleep");
- }
-
- #[cfg(target_feature = "atomics")]
- pub fn sleep(dur: Duration) {
- use crate::arch::wasm32;
- use crate::cmp;
-
- // Use an atomic wait to block the current thread artificially with a
- // timeout listed. Note that we should never be notified (return value
- // of 0) or our comparison should never fail (return value of 1) so we
- // should always only resume execution through a timeout (return value
- // 2).
- let mut nanos = dur.as_nanos();
- while nanos > 0 {
- let amt = cmp::min(i64::MAX as u128, nanos);
- let mut x = 0;
- let val = unsafe { wasm32::memory_atomic_wait32(&mut x, 0, amt as i64) };
- debug_assert_eq!(val, 2);
- nanos -= amt;
- }
- }
-
- pub fn join(self) {
- self.0
- }
-}
-
-pub mod guard {
- pub type Guard = !;
- pub unsafe fn current() -> Option<Guard> {
- None
- }
- pub unsafe fn init() -> Option<Guard> {
- None
- }
-}
-
-// This is only used by atomics primitives when the `atomics` feature is
-// enabled. In that mode we currently just use our own thread-local to store our
-// current thread's ID, and then we lazily initialize it to something allocated
-// from a global counter.
-#[cfg(target_feature = "atomics")]
-pub fn my_id() -> u32 {
- use crate::sync::atomic::{AtomicU32, Ordering::SeqCst};
-
- static NEXT_ID: AtomicU32 = AtomicU32::new(0);
-
- #[thread_local]
- static mut MY_ID: u32 = 0;
-
- unsafe {
- // If our thread ID isn't set yet then we need to allocate one. Do so
- // with with a simple "atomically add to a global counter" strategy.
- // This strategy doesn't handled what happens when the counter
- // overflows, however, so just abort everything once the counter
- // overflows and eventually we could have some sort of recycling scheme
- // (or maybe this is all totally irrelevant by that point!). In any case
- // though we're using a CAS loop instead of a `fetch_add` to ensure that
- // the global counter never overflows.
- if MY_ID == 0 {
- let mut cur = NEXT_ID.load(SeqCst);
- MY_ID = loop {
- let next = cur.checked_add(1).unwrap_or_else(|| crate::process::abort());
- match NEXT_ID.compare_exchange(cur, next, SeqCst, SeqCst) {
- Ok(_) => break next,
- Err(i) => cur = i,
- }
- };
- }
- MY_ID
- }
-}
use crate::cell::{Cell, UnsafeCell};
+use crate::sync::atomic::{AtomicU8, Ordering};
use crate::sync::mpsc::{channel, Sender};
use crate::thread::{self, LocalKey};
use crate::thread_local;
});
rx.recv().unwrap();
}
+
+// This test tests that TLS destructors have run before the thread joins. The
+// test has no false positives (meaning: if the test fails, there's actually
+// an ordering problem). It may have false negatives, where the test passes but
+// join is not guaranteed to be after the TLS destructors. However, false
+// negatives should be exceedingly rare due to judicious use of
+// thread::yield_now and running the test several times.
+#[test]
+fn join_orders_after_tls_destructors() {
+ // We emulate a synchronous MPSC rendezvous channel using only atomics and
+ // thread::yield_now. We can't use std::mpsc as the implementation itself
+ // may rely on thread locals.
+ //
+ // The basic state machine for an SPSC rendezvous channel is:
+ // FRESH -> THREAD1_WAITING -> MAIN_THREAD_RENDEZVOUS
+ // where the first transition is done by the “receiving” thread and the 2nd
+ // transition is done by the “sending” thread.
+ //
+ // We add an additional state `THREAD2_LAUNCHED` between `FRESH` and
+ // `THREAD1_WAITING` to block until all threads are actually running.
+ //
+ // A thread that joins on the “receiving” thread completion should never
+ // observe the channel in the `THREAD1_WAITING` state. If this does occur,
+ // we switch to the “poison” state `THREAD2_JOINED` and panic all around.
+ // (This is equivalent to “sending” from an alternate producer thread.)
+ const FRESH: u8 = 0;
+ const THREAD2_LAUNCHED: u8 = 1;
+ const THREAD1_WAITING: u8 = 2;
+ const MAIN_THREAD_RENDEZVOUS: u8 = 3;
+ const THREAD2_JOINED: u8 = 4;
+ static SYNC_STATE: AtomicU8 = AtomicU8::new(FRESH);
+
+ for _ in 0..10 {
+ SYNC_STATE.store(FRESH, Ordering::SeqCst);
+
+ let jh = thread::Builder::new()
+ .name("thread1".into())
+ .spawn(move || {
+ struct TlDrop;
+
+ impl Drop for TlDrop {
+ fn drop(&mut self) {
+ let mut sync_state = SYNC_STATE.swap(THREAD1_WAITING, Ordering::SeqCst);
+ loop {
+ match sync_state {
+ THREAD2_LAUNCHED | THREAD1_WAITING => thread::yield_now(),
+ MAIN_THREAD_RENDEZVOUS => break,
+ THREAD2_JOINED => panic!(
+ "Thread 1 still running after thread 2 joined on thread 1"
+ ),
+ v => unreachable!("sync state: {}", v),
+ }
+ sync_state = SYNC_STATE.load(Ordering::SeqCst);
+ }
+ }
+ }
+
+ thread_local! {
+ static TL_DROP: TlDrop = TlDrop;
+ }
+
+ TL_DROP.with(|_| {});
+
+ loop {
+ match SYNC_STATE.load(Ordering::SeqCst) {
+ FRESH => thread::yield_now(),
+ THREAD2_LAUNCHED => break,
+ v => unreachable!("sync state: {}", v),
+ }
+ }
+ })
+ .unwrap();
+
+ let jh2 = thread::Builder::new()
+ .name("thread2".into())
+ .spawn(move || {
+ assert_eq!(SYNC_STATE.swap(THREAD2_LAUNCHED, Ordering::SeqCst), FRESH);
+ jh.join().unwrap();
+ match SYNC_STATE.swap(THREAD2_JOINED, Ordering::SeqCst) {
+ MAIN_THREAD_RENDEZVOUS => return,
+ THREAD2_LAUNCHED | THREAD1_WAITING => {
+ panic!("Thread 2 running after thread 1 join before main thread rendezvous")
+ }
+ v => unreachable!("sync state: {:?}", v),
+ }
+ })
+ .unwrap();
+
+ loop {
+ match SYNC_STATE.compare_exchange_weak(
+ THREAD1_WAITING,
+ MAIN_THREAD_RENDEZVOUS,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ ) {
+ Ok(_) => break,
+ Err(FRESH) => thread::yield_now(),
+ Err(THREAD2_LAUNCHED) => thread::yield_now(),
+ Err(THREAD2_JOINED) => {
+ panic!("Main thread rendezvous after thread 2 joined thread 1")
+ }
+ v => unreachable!("sync state: {:?}", v),
+ }
+ }
+ jh2.join().unwrap();
+ }
+}
-Subproject commit 19f5459dd0f89e466b7bcaa0f69ecca90f21a4d1
+Subproject commit 6c4f4e1990b76be8a07bde1956d2e3452fd55ee4
target_linker = self.get_toml("linker", build_section)
if target_linker is not None:
env["RUSTFLAGS"] += " -C linker=" + target_linker
- # cfg(bootstrap): Add `-Wsemicolon_in_expressions_from_macros` after the next beta bump
env["RUSTFLAGS"] += " -Wrust_2018_idioms -Wunused_lifetimes"
+ env["RUSTFLAGS"] += " -Wsemicolon_in_expressions_from_macros"
if self.get_toml("deny-warnings", "rust") != "false":
env["RUSTFLAGS"] += " -Dwarnings"
check::Rustdoc,
check::CodegenBackend,
check::Clippy,
+ check::Miri,
+ check::Rls,
check::Bootstrap
),
Kind::Test => describe!(
// some code doesn't go through this `rustc` wrapper.
lint_flags.push("-Wrust_2018_idioms");
lint_flags.push("-Wunused_lifetimes");
- // cfg(bootstrap): unconditionally enable this warning after the next beta bump
- // This is currently disabled for the stage1 libstd, since build scripts
- // will end up using the bootstrap compiler (which doesn't yet support this lint)
- if compiler.stage != 0 && mode != Mode::Std {
- lint_flags.push("-Wsemicolon_in_expressions_from_macros");
- }
+ lint_flags.push("-Wsemicolon_in_expressions_from_macros");
if self.config.deny_warnings {
lint_flags.push("-Dwarnings");
compare_mode: None,
rustfix_coverage: false,
pass: None,
+ run: None,
};
let build = Build::new(config);
compare_mode: None,
rustfix_coverage: false,
pass: None,
+ run: None,
};
let build = Build::new(config);
compare_mode: None,
rustfix_coverage: false,
pass: None,
+ run: None,
};
// Make sure rustfmt binary not being found isn't an error.
config.channel = "beta".to_string();
}
macro_rules! tool_check_step {
- ($name:ident, $path:literal, $($alias:literal, )* $source_type:path) => {
+ ($name:ident, $path:literal, $($alias:literal, )* $source_type:path $(, $default:literal )?) => {
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct $name {
pub target: TargetSelection,
impl Step for $name {
type Output = ();
const ONLY_HOSTS: bool = true;
- const DEFAULT: bool = true;
+ // don't ever check out-of-tree tools by default, they'll fail when toolstate is broken
+ const DEFAULT: bool = matches!($source_type, SourceType::InTree) $( && $default )?;
fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> {
run.paths(&[ $path, $($alias),* ])
// behavior, treat it as in-tree so that any new warnings in clippy will be
// rejected.
tool_check_step!(Clippy, "src/tools/clippy", SourceType::InTree);
+tool_check_step!(Miri, "src/tools/miri", SourceType::Submodule);
+tool_check_step!(Rls, "src/tools/rls", SourceType::Submodule);
-tool_check_step!(Bootstrap, "src/bootstrap", SourceType::InTree);
+tool_check_step!(Bootstrap, "src/bootstrap", SourceType::InTree, false);
/// Cargo's output path for the standard library in a given stage, compiled
/// by a particular compiler for the specified target.
}
if builder.config.rustc_parallel {
cargo.rustflag("--cfg=parallel_compiler");
+ cargo.rustdocflag("--cfg=parallel_compiler");
}
if builder.config.rust_verify_llvm_ir {
cargo.env("RUSTC_VERIFY_LLVM_IR", "1");
paths: Vec<PathBuf>,
},
Format {
+ paths: Vec<PathBuf>,
check: bool,
},
Doc {
bless: bool,
compare_mode: Option<String>,
pass: Option<String>,
+ run: Option<String>,
test_args: Vec<String>,
rustc_args: Vec<String>,
fail_fast: bool,
VALUE overrides the skip-rebuild option in config.toml.",
"VALUE",
);
- opts.optopt("", "rust-profile-generate", "rustc error format", "FORMAT");
- opts.optopt("", "rust-profile-use", "rustc error format", "FORMAT");
+ opts.optopt("", "rust-profile-generate", "generate PGO profile with rustc build", "FORMAT");
+ opts.optopt("", "rust-profile-use", "use PGO profile for rustc build", "FORMAT");
// We can't use getopt to parse the options until we have completed specifying which
// options are valid, but under the current implementation, some options are conditional on
"force {check,build,run}-pass tests to this mode.",
"check | build | run",
);
+ opts.optopt("", "run", "whether to execute run-* tests", "auto | always | never");
opts.optflag(
"",
"rustfix-coverage",
bless: matches.opt_present("bless"),
compare_mode: matches.opt_str("compare-mode"),
pass: matches.opt_str("pass"),
+ run: matches.opt_str("run"),
test_args: matches.opt_strs("test-args"),
rustc_args: matches.opt_strs("rustc-args"),
fail_fast: !matches.opt_present("no-fail-fast"),
Subcommand::Clean { all: matches.opt_present("all") }
}
- "fmt" => Subcommand::Format { check: matches.opt_present("check") },
+ "fmt" => Subcommand::Format { check: matches.opt_present("check"), paths },
"dist" => Subcommand::Dist { paths },
"install" => Subcommand::Install { paths },
"run" | "r" => {
}
}
+ pub fn run(&self) -> Option<&str> {
+ match *self {
+ Subcommand::Test { ref run, .. } => run.as_ref().map(|s| &s[..]),
+ _ => None,
+ }
+ }
+
pub fn open(&self) -> bool {
match *self {
Subcommand::Doc { open, .. } => open,
ignore: Vec<String>,
}
-pub fn format(build: &Build, check: bool) {
+pub fn format(build: &Build, check: bool, paths: &[PathBuf]) {
if build.config.dry_run {
return;
}
.to_path_buf();
let src = build.src.clone();
let (tx, rx): (SyncSender<PathBuf>, _) = std::sync::mpsc::sync_channel(128);
- let walker =
- WalkBuilder::new(src.clone()).types(matcher).overrides(ignore_fmt).build_parallel();
+ let walker = match paths.get(0) {
+ Some(first) => {
+ let mut walker = WalkBuilder::new(first);
+ for path in &paths[1..] {
+ walker.add(path);
+ }
+ walker
+ }
+ None => WalkBuilder::new(src.clone()),
+ }
+ .types(matcher)
+ .overrides(ignore_fmt)
+ .build_parallel();
// there is a lot of blocking involved in spawning a child process and reading files to format.
// spawn more processes than available concurrency to keep the CPU busy
job::setup(self);
}
- if let Subcommand::Format { check } = self.config.cmd {
- return format::format(self, check);
+ if let Subcommand::Format { check, paths } = &self.config.cmd {
+ return format::format(self, *check, &paths);
}
if let Subcommand::Clean { all } = self.config.cmd {
builder,
cmd.arg(&cargo)
.arg(&out_dir)
+ .args(builder.config.cmd.test_args())
.env("RUSTC", builder.rustc(compiler))
.env("RUSTDOC", builder.rustdoc(compiler)),
);
command.arg("src/test/rustdoc-gui/lib.rs").arg("-o").arg(&out_dir);
builder.run(&mut command);
- for file in fs::read_dir("src/test/rustdoc-gui").unwrap() {
- let file = file.unwrap();
- let file_path = file.path();
- let file_name = file.file_name();
-
- if !file_name.to_str().unwrap().ends_with(".goml") {
- continue;
- }
- let mut command = Command::new(&nodejs);
- command
- .arg("src/tools/rustdoc-gui/tester.js")
- .arg("--doc-folder")
- .arg(out_dir.join("test_docs"))
- .arg("--test-file")
- .arg(file_path);
- builder.run(&mut command);
- }
+ let mut command = Command::new(&nodejs);
+ command
+ .arg("src/tools/rustdoc-gui/tester.js")
+ .arg("--doc-folder")
+ .arg(out_dir.join("test_docs"))
+ .arg("--tests-folder")
+ .arg("src/test/rustdoc-gui");
+ builder.run(&mut command);
} else {
builder.info("No nodejs found, skipping \"src/test/rustdoc-gui\" tests");
}
);
std::process::exit(1);
}
- crate::format::format(&builder.build, !builder.config.cmd.bless());
+ crate::format::format(&builder.build, !builder.config.cmd.bless(), &[]);
}
}
cmd.arg(pass);
}
+ if let Some(ref run) = builder.config.cmd.run() {
+ cmd.arg("--run");
+ cmd.arg(run);
+ }
+
if let Some(ref nodejs) = builder.config.nodejs {
cmd.arg("--nodejs").arg(nodejs);
}
# Download and build a single-file stress test benchmark on perf.rust-lang.org.
function pgo_perf_benchmark {
- local PERF=9442def56a39d742bf27ebcc3e0614cf117e1bc2
+ local PERF=1e19fc4c6168d2f7596e512f42f358f245d8f09d
local github_prefix=https://raw.githubusercontent.com/rust-lang/rustc-perf/$PERF
local name=$1
curl -o /tmp/$name.rs $github_prefix/collector/benchmarks/$name/src/lib.rs
bindir="$(xcode-select --print-path)/Toolchains/XcodeDefault.xctoolchain/usr/bin"
else
file="${MIRRORS_BASE}/clang%2Bllvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
- curl -f "${file}" | tar xJf -
+ retry curl -f "${file}" -o "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
+ tar xJf "clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin.tar.xz"
bindir="$(pwd)/clang+llvm-${LLVM_VERSION}-x86_64-apple-darwin/bin"
fi
mkdir -p citools/clang-rust
cd citools
- curl -f "${MIRRORS_BASE}/LLVM-${LLVM_VERSION}-win64.exe" -o "LLVM-${LLVM_VERSION}-win64.exe"
+ retry curl -f "${MIRRORS_BASE}/LLVM-${LLVM_VERSION}-win64.exe" \
+ -o "LLVM-${LLVM_VERSION}-win64.exe"
7z x -oclang-rust/ "LLVM-${LLVM_VERSION}-win64.exe"
ciCommandSetEnv RUST_CONFIGURE_ARGS \
"${RUST_CONFIGURE_ARGS} --set llvm.clang-cl=$(pwd)/clang-rust/bin/clang-cl.exe"
--- /dev/null
+# `instrument-coverage`
+
+The tracking issue for this feature is: [#79121].
+
+[#79121]: https://github.com/rust-lang/rust/issues/79121
+
+---
+
+## Introduction
+
+The Rust compiler includes two code coverage implementations:
+
+- A GCC-compatible, gcov-based coverage implementation, enabled with `-Z profile`, which derives coverage data based on DebugInfo.
+- A source-based code coverage implementation, enabled with `-Z instrument-coverage`, which uses LLVM's native, efficient coverage instrumentation to generate very precise coverage data.
+
+This document describes how to enable and use the LLVM instrumentation-based coverage, via the `-Z instrument-coverage` compiler flag.
+
+## How it works
+
+When `-Z instrument-coverage` is enabled, the Rust compiler enhances rust-based libraries and binaries by:
+
+- Automatically injecting calls to an LLVM intrinsic ([`llvm.instrprof.increment`]), at functions and branches in compiled code, to increment counters when conditional sections of code are executed.
+- Embedding additional information in the data section of each library and binary (using the [LLVM Code Coverage Mapping Format] _Version 4_, supported _only_ in LLVM 11 and up), to define the code regions (start and end positions in the source code) being counted.
+
+When running a coverage-instrumented program, the counter values are written to a `profraw` file at program termination. LLVM bundles tools that read the counter results, combine those results with the coverage map (embedded in the program binary), and generate coverage reports in multiple formats.
+
+[`llvm.instrprof.increment`]: https://llvm.org/docs/LangRef.html#llvm-instrprof-increment-intrinsic
+[llvm code coverage mapping format]: https://llvm.org/docs/CoverageMappingFormat.html
+
+> **Note**: `-Z instrument-coverage` also automatically enables `-Z symbol-mangling-version=v0` (tracking issue [#60705]). The `v0` symbol mangler is strongly recommended, but be aware that this demangler is also experimental. The `v0` demangler can be overridden by explicitly adding `-Z symbol-mangling-version=legacy`.
+
+[#60705]: https://github.com/rust-lang/rust/issues/60705
+
+## Enable coverage profiling in the Rust compiler
+
+Rust's source-based code coverage requires the Rust "profiler runtime". Without it, compiling with `-Z instrument-coverage` generates an error that the profiler runtime is missing.
+
+The Rust `nightly` distribution channel includes the profiler runtime, by default.
+
+> **Important**: If you are building the Rust compiler from the source distribution, the profiler runtime is _not_ enabled in the default `config.toml.example`. Edit your `config.toml` file and ensure the `profiler` feature is set to `true` (either under the `[build]` section, or under the settings for an individual `[target.<triple>]`):
+>
+> ```toml
+> # Build the profiler runtime (required when compiling with options that depend
+> # on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`).
+> profiler = true
+> ```
+
+### Building the demangler
+
+LLVM coverage reporting tools generate results that can include function names and other symbol references, and the raw coverage results report symbols using the compiler's "mangled" version of the symbol names, which can be difficult to interpret. To work around this issue, LLVM coverage tools also support a user-specified symbol name demangler.
+
+One option for a Rust demangler is [`rustfilt`], which can be installed with:
+
+```shell
+cargo install rustfilt
+```
+
+Another option, if you are building from the Rust compiler source distribution, is to use the `rust-demangler` tool included in the Rust source distribution, which can be built with:
+
+```shell
+$ ./x.py build rust-demangler
+```
+
+[`rustfilt`]: https://crates.io/crates/rustfilt
+
+## Compiling with coverage enabled
+
+Set the `-Z instrument-coverage` compiler flag in order to enable LLVM source-based code coverage profiling.
+
+The default option generates coverage for all functions, including unused (never called) functions and generics. The compiler flag supports an optional value to tailor this behavior. (See [`-Z instrument-coverage=<options>`](#-z-instrument-coverageoptions), below.)
+
+With `cargo`, you can instrument your program binary _and_ dependencies at the same time.
+
+For example (if your project's Cargo.toml builds a binary by default):
+
+```shell
+$ cd your-project
+$ cargo clean
+$ RUSTFLAGS="-Z instrument-coverage" cargo build
+```
+
+If `cargo` is not configured to use your `profiler`-enabled version of `rustc`, set the path explicitly via the `RUSTC` environment variable. Here is another example, using a `stage1` build of `rustc` to compile an `example` binary (from the [`json5format`] crate):
+
+```shell
+$ RUSTC=$HOME/rust/build/x86_64-unknown-linux-gnu/stage1/bin/rustc \
+ RUSTFLAGS="-Z instrument-coverage" \
+ cargo build --example formatjson5
+```
+
+> **Note**: Some compiler options, combined with `-Z instrument-coverage`, can produce LLVM IR and/or linked binaries that are incompatible with LLVM coverage maps. For example, coverage requires references to actual functions in LLVM IR. If any covered function is optimized out, the coverage tools may not be able to process the coverage results. If you need to pass additional options, with coverage enabled, test them early, to confirm you will get the coverage results you expect.
+
+## Running the instrumented binary to generate raw coverage profiling data
+
+In the previous example, `cargo` generated the coverage-instrumented binary `formatjson5`:
+
+```shell
+$ echo "{some: 'thing'}" | target/debug/examples/formatjson5 -
+```
+
+```json5
+{
+ some: "thing",
+}
+```
+
+After running this program, a new file, `default.profraw`, should be in the current working directory. It's often preferable to set a specific file name or path. You can change the output file using the environment variable `LLVM_PROFILE_FILE`:
+
+```shell
+$ echo "{some: 'thing'}" \
+ | LLVM_PROFILE_FILE="formatjson5.profraw" target/debug/examples/formatjson5 -
+...
+$ ls formatjson5.profraw
+formatjson5.profraw
+```
+
+If `LLVM_PROFILE_FILE` contains a path to a non-existent directory, the missing directory structure will be created. Additionally, the following special pattern strings are rewritten:
+
+- `%p` - The process ID.
+- `%h` - The hostname of the machine running the program.
+- `%t` - The value of the TMPDIR environment variable.
+- `%Nm` - the instrumented binary’s signature: The runtime creates a pool of N raw profiles, used for on-line profile merging. The runtime takes care of selecting a raw profile from the pool, locking it, and updating it before the program exits. `N` must be between `1` and `9`, and defaults to `1` if omitted (with simply `%m`).
+- `%c` - Does not add anything to the filename, but enables a mode (on some platforms, including Darwin) in which profile counter updates are continuously synced to a file. This means that if the instrumented program crashes, or is killed by a signal, perfect coverage information can still be recovered.
+
+## Installing LLVM coverage tools
+
+LLVM supplies two tools—`llvm-profdata` and `llvm-cov`—that process coverage data and generate reports. There are several ways to find and/or install these tools, but note that the coverage mapping data generated by the Rust compiler requires LLVM version 11 or higher. (`llvm-cov --version` typically shows the tool's LLVM version number.):
+
+- The LLVM tools may be installed (or installable) directly to your OS (such as via `apt-get`, for Linux).
+- If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`.
+- You can install compatible versions of these tools via `rustup`.
+
+The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-binutils`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
+
+```shell
+$ rustup component add llvm-tools-preview
+$ cargo install cargo-binutils
+$ cargo profdata -- --help # note the additional "--" preceding the tool-specific arguments
+```
+
+[`cargo-binutils`]: https://crates.io/crates/cargo-binutils
+
+## Creating coverage reports
+
+Raw profiles have to be indexed before they can be used to generate coverage reports. This is done using [`llvm-profdata merge`] (or `cargo profdata -- merge`), which can combine multiple raw profiles and index them at the same time:
+
+```shell
+$ llvm-profdata merge -sparse formatjson5.profraw -o formatjson5.profdata
+```
+
+Finally, the `.profdata` file is used, in combination with the coverage map (from the program binary) to generate coverage reports using [`llvm-cov report`] (or `cargo cov -- report`), for coverage summaries; and [`llvm-cov show`] (or `cargo cov -- show`), to see detailed coverage of lines and regions (character ranges) overlaid on the original source code.
+
+These commands have several display and filtering options. For example:
+
+```shell
+$ llvm-cov show -Xdemangler=rustfilt target/debug/examples/formatjson5 \
+ -instr-profile=formatjson5.profdata \
+ -show-line-counts-or-regions \
+ -show-instantiations \
+ -name=add_quoted_string
+```
+
+<img alt="Screenshot of sample `llvm-cov show` result, for function add_quoted_string" src="img/llvm-cov-show-01.png" class="center"/>
+<br/>
+<br/>
+
+Some of the more notable options in this example include:
+
+- `--Xdemangler=rustfilt` - the command name or path used to demangle Rust symbols (`rustfilt` in the example, but this could also be a path to the `rust-demangler` tool)
+- `target/debug/examples/formatjson5` - the instrumented binary (from which to extract the coverage map)
+- `--instr-profile=<path-to-file>.profdata` - the location of the `.profdata` file created by `llvm-profdata merge` (from the `.profraw` file generated by the instrumented binary)
+- `--name=<exact-function-name>` - to show coverage for a specific function (or, consider using another filter option, such as `--name-regex=<pattern>`)
+
+[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
+[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
+[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show
+
+> **Note**: Coverage can also be disabled on an individual function by annotating the function with the [`no_coverage` attribute] (which requires the feature flag `#![feature(no_coverage)]`).
+
+[`no_coverage` attribute]: ../language-features/no-coverage.md
+
+## Interpreting reports
+
+There are four statistics tracked in a coverage summary:
+
+- Function coverage is the percentage of functions that have been executed at least once. A function is considered to be executed if any of its instantiations are executed.
+- Instantiation coverage is the percentage of function instantiations that have been executed at least once. Generic functions and functions generated from macros are two kinds of functions that may have multiple instantiations.
+- Line coverage is the percentage of code lines that have been executed at least once. Only executable lines within function bodies are considered to be code lines.
+- Region coverage is the percentage of code regions that have been executed at least once. A code region may span multiple lines: for example, in a large function body with no control flow. In other cases, a single line can contain multiple code regions: `return x || (y && z)` has countable code regions for `x` (which may resolve the expression, if `x` is `true`), `|| (y && z)` (executed only if `x` was `false`), and `return` (executed in either situation).
+
+Of these four statistics, function coverage is usually the least granular while region coverage is the most granular. The project-wide totals for each statistic are listed in the summary.
+
+## Test coverage
+
+A typical use case for coverage analysis is test coverage. Rust's source-based coverage tools can both measure your tests' code coverage as a percentage, and pinpoint functions and branches not tested.
+
+The following example (using the [`json5format`] crate, for demonstration purposes) shows how to generate and analyze coverage results for all tests in a crate.
+
+Since `cargo test` both builds and runs the tests, we set both the additional `RUSTFLAGS`, to add the `-Z instrument-coverage` flag, and `LLVM_PROFILE_FILE`, to set a custom filename for the raw profiling data generated during the test runs. Since there may be more than one test binary, apply `%m` in the filename pattern. This generates unique names for each test binary. (Otherwise, each executed test binary would overwrite the coverage results from the previous binary.)
+
+```shell
+$ RUSTFLAGS="-Z instrument-coverage" \
+ LLVM_PROFILE_FILE="json5format-%m.profraw" \
+ cargo test --tests
+```
+
+Make note of the test binary file paths, displayed after the word "`Running`" in the test output:
+
+```text
+ ...
+ Compiling json5format v0.1.3 ($HOME/json5format)
+ Finished test [unoptimized + debuginfo] target(s) in 14.60s
+
+ Running target/debug/deps/json5format-fececd4653271682
+running 25 tests
+...
+test result: ok. 25 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+
+ Running target/debug/deps/lib-30768f9c53506dc5
+running 31 tests
+...
+test result: ok. 31 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
+```
+
+You should have one or more `.profraw` files now, one for each test binary. Run the `profdata` tool to merge them:
+
+```shell
+$ cargo profdata -- merge \
+ -sparse json5format-*.profraw -o json5format.profdata
+```
+
+Then run the `cov` tool, with the `profdata` file and all test binaries:
+
+```shell
+$ cargo cov -- report \
+ --use-color --ignore-filename-regex='/.cargo/registry' \
+ --instr-profile=json5format.profdata \
+ --object target/debug/deps/lib-30768f9c53506dc5 \
+ --object target/debug/deps/json5format-fececd4653271682
+$ cargo cov -- show \
+ --use-color --ignore-filename-regex='/.cargo/registry' \
+ --instr-profile=json5format.profdata \
+ --object target/debug/deps/lib-30768f9c53506dc5 \
+ --object target/debug/deps/json5format-fececd4653271682 \
+ --show-instantiations --show-line-counts-or-regions \
+ --Xdemangler=rustfilt | less -R
+```
+
+> **Note**: The command line option `--ignore-filename-regex=/.cargo/registry` excludes the sources for dependencies from the coverage results.
+
+### Tips for listing the binaries automatically
+
+For `bash` users, one suggested way to automatically complete the `cov` command with the list of binaries is with a command like:
+
+```bash
+$ cargo cov -- report \
+ $( \
+ for file in \
+ $( \
+ RUSTFLAGS="-Z instrument-coverage" \
+ cargo test --tests --no-run --message-format=json \
+ | jq -r "select(.profile.test == true) | .filenames[]" \
+ | grep -v dSYM - \
+ ); \
+ do \
+ printf "%s %s " -object $file; \
+ done \
+ ) \
+ --instr-profile=json5format.profdata --summary-only # and/or other options
+```
+
+Adding `--no-run --message-format=json` to the _same_ `cargo test` command used to run
+the tests (including the same environment variables and flags) generates output in a JSON
+format that `jq` can easily query.
+
+The `printf` command takes this list and generates the `--object <binary>` arguments
+for each listed test binary.
+
+### Including doc tests
+
+The previous examples run `cargo test` with `--tests`, which excludes doc tests.[^79417]
+
+To include doc tests in the coverage results, drop the `--tests` flag, and apply the
+`-Z instrument-coverage` flag, and some doc-test-specific options in the
+`RUSTDOCFLAGS` environment variable. (The `cargo profdata` command does not change.)
+
+```bash
+$ RUSTFLAGS="-Z instrument-coverage" \
+ RUSTDOCFLAGS="-Z instrument-coverage -Z unstable-options --persist-doctests target/debug/doctestbins" \
+ LLVM_PROFILE_FILE="json5format-%m.profraw" \
+ cargo test
+$ cargo profdata -- merge \
+ -sparse json5format-*.profraw -o json5format.profdata
+```
+
+The `-Z unstable-options --persist-doctests` flag is required, to save the test binaries
+(with their coverage maps) for `llvm-cov`.
+
+```bash
+$ cargo cov -- report \
+ $( \
+ for file in \
+ $( \
+ RUSTFLAGS="-Z instrument-coverage" \
+ RUSTDOCFLAGS="-Z instrument-coverage -Z unstable-options --persist-doctests target/debug/doctestbins" \
+ cargo test --no-run --message-format=json \
+ | jq -r "select(.profile.test == true) | .filenames[]" \
+ | grep -v dSYM - \
+ ) \
+ target/debug/doctestbins/*/rust_out; \
+ do \
+ [[ -x $file ]] && printf "%s %s " -object $file; \
+ done \
+ ) \
+ --instr-profile=json5format.profdata --summary-only # and/or other options
+```
+
+> **Note**: The differences in this `cargo cov` command, compared with the version without
+> doc tests, include:
+
+- The `cargo test ... --no-run` command is updated with the same environment variables
+ and flags used to _build_ the tests, _including_ the doc tests. (`LLVM_PROFILE_FILE`
+ is only used when _running_ the tests.)
+- The file glob pattern `target/debug/doctestbins/*/rust_out` adds the `rust_out`
+ binaries generated for doc tests (note, however, that some `rust_out` files may not
+ be executable binaries).
+- `[[ -x $file ]] &&` filters the files passed on to the `printf`, to include only
+ executable binaries.
+
+[^79417]:
+ There is ongoing work to resolve a known issue
+ [(#79417)](https://github.com/rust-lang/rust/issues/79417) that doc test coverage
+ generates incorrect source line numbers in `llvm-cov show` results.
+
+## `-Z instrument-coverage=<options>`
+
+- `-Z instrument-coverage=all`: Instrument all functions, including unused functions and unused generics. (This is the same as `-Z instrument-coverage`, with no value.)
+- `-Z instrument-coverage=except-unused-generics`: Instrument all functions except unused generics.
+- `-Z instrument-coverage=except-unused-functions`: Instrument only used (called) functions and instantiated generic functions.
+- `-Z instrument-coverage=off`: Do not instrument any functions. (This is the same as simply not including the `-Z instrument-coverage` option.)
+
+## Other references
+
+Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang]. (This document is partially based on the Clang guide.)
+
+[source-based code coverage in clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
+[`json5format`]: https://crates.io/crates/json5format
# `source-based-code-coverage`
-The tracking issue for this feature is: [#79121].
+See compiler flag [`-Z instrument-coverage`].
-------------------------
-
-## Introduction
-
-The Rust compiler includes two code coverage implementations:
-
-* A GCC-compatible, gcov-based coverage implementation, enabled with [`-Zprofile`], which operates on DebugInfo.
-* A source-based code coverage implementation, enabled with `-Zinstrument-coverage`, which uses LLVM's native coverage instrumentation to generate very precise coverage data.
-
-This document describes how to enable and use the LLVM instrumentation-based coverage, via the `-Zinstrument-coverage` compiler flag.
-
-## How it works
-
-When `-Zinstrument-coverage` is enabled, the Rust compiler enhances rust-based libraries and binaries by:
-
-* Automatically injecting calls to an LLVM intrinsic ([`llvm.instrprof.increment`]), at functions and branches in compiled code, to increment counters when conditional sections of code are executed.
-* Embedding additional information in the data section of each library and binary (using the [LLVM Code Coverage Mapping Format] _Version 4_, supported _only_ in LLVM 11 and up), to define the code regions (start and end positions in the source code) being counted.
-
-When running a coverage-instrumented program, the counter values are written to a `profraw` file at program termination. LLVM bundles tools that read the counter results, combine those results with the coverage map (embedded in the program binary), and generate coverage reports in multiple formats.
-
-## Enable coverage profiling in the Rust compiler
-
-Rust's source-based code coverage requires the Rust "profiler runtime". Without it, compiling with `-Zinstrument-coverage` generates an error that the profiler runtime is missing.
-
-The Rust `nightly` distribution channel should include the profiler runtime, by default.
-
-*IMPORTANT:* If you are building the Rust compiler from the source distribution, the profiler runtime is *not* enabled in the default `config.toml.example`. Edit your `config.toml` file and ensure the `profiler` feature is set it to `true`:
-
-```toml
-# Build the profiler runtime (required when compiling with options that depend
-# on this runtime, such as `-C profile-generate` or `-Z instrument-coverage`).
-profiler = true
-```
-
-If changed, rebuild the Rust compiler (see [rustc-dev-guide-how-to-build-and-run]).
-
-### Building the demangler
-
-LLVM coverage reporting tools generate results that can include function names and other symbol references, and the raw coverage results report symbols using the compiler's "mangled" version of the symbol names, which can be difficult to interpret. To work around this issue, LLVM coverage tools also support a user-specified symbol name demangler.
-
-One option for a Rust demangler is [`rustfilt`], which can be installed with:
-
-```shell
-cargo install rustfilt
-```
-
-Another option, if you are building from the Rust compiler source distribution, is to use the `rust-demangler` tool included in the Rust source distribution, which can be built with:
-
-```shell
-$ ./x.py build rust-demangler
-```
-
-## Compiling with coverage enabled
-
-Set the `-Zinstrument-coverage` compiler flag in order to enable LLVM source-based code coverage profiling.
-
-With `cargo`, you can instrument your program binary *and* dependencies at the same time.
-
-For example (if your project's Cargo.toml builds a binary by default):
-
-```shell
-$ cd your-project
-$ cargo clean
-$ RUSTFLAGS="-Zinstrument-coverage" cargo build
-```
-
-If `cargo` is not configured to use your `profiler`-enabled version of `rustc`, set the path explicitly via the `RUSTC` environment variable. Here is another example, using a `stage1` build of `rustc` to compile an `example` binary (from the [`json5format`] crate):
-
-```shell
-$ RUSTC=$HOME/rust/build/x86_64-unknown-linux-gnu/stage1/bin/rustc \
- RUSTFLAGS="-Zinstrument-coverage" \
- cargo build --example formatjson5
-```
-
-Note that some compiler options, combined with `-Zinstrument-coverage`, can produce LLVM IR and/or linked binaries that are incompatible with LLVM coverage maps. For example, coverage requires references to actual functions in LLVM IR. If any covered function is optimized out, the coverage tools may not be able to process the coverage results. If you need to pass additional options, with coverage enabled, test them early, to confirm you will get the coverage results you expect.
-
-## Running the instrumented binary to generate raw coverage profiling data
-
-In the previous example, `cargo` generated the coverage-instrumented binary `formatjson5`:
-
-```shell
-$ echo "{some: 'thing'}" | target/debug/examples/formatjson5 -
-```
-```json5
-{
- some: 'thing',
-}
-```
-
-After running this program, a new file, `default.profraw`, should be in the current working directory. It's often preferable to set a specific file name or path. You can change the output file using the environment variable `LLVM_PROFILE_FILE`:
-
-
-```shell
-$ echo "{some: 'thing'}" \
- | LLVM_PROFILE_FILE="formatjson5.profraw" target/debug/examples/formatjson5 -
-...
-$ ls formatjson5.profraw
-formatjson5.profraw
-```
-
-If `LLVM_PROFILE_FILE` contains a path to a non-existent directory, the missing directory structure will be created. Additionally, the following special pattern strings are rewritten:
-
-* `%p` - The process ID.
-* `%h` - The hostname of the machine running the program.
-* `%t` - The value of the TMPDIR environment variable.
-* `%Nm` - the instrumented binary’s signature: The runtime creates a pool of N raw profiles, used for on-line profile merging. The runtime takes care of selecting a raw profile from the pool, locking it, and updating it before the program exits. `N` must be between `1` and `9`, and defaults to `1` if omitted (with simply `%m`).
-* `%c` - Does not add anything to the filename, but enables a mode (on some platforms, including Darwin) in which profile counter updates are continuously synced to a file. This means that if the instrumented program crashes, or is killed by a signal, perfect coverage information can still be recovered.
-
-## Installing LLVM coverage tools
-
-LLVM's supplies two tools—`llvm-profdata` and `llvm-cov`—that process coverage data and generate reports. There are several ways to find and/or install these tools, but note that the coverage mapping data generated by the Rust compiler requires LLVM version 11 or higher. (`llvm-cov --version` typically shows the tool's LLVM version number.):
-
-* The LLVM tools may be installed (or installable) directly to your OS (such as via `apt-get`, for Linux).
-* If you are building the Rust compiler from source, you can optionally use the bundled LLVM tools, built from source. Those tool binaries can typically be found in your build platform directory at something like: `rust/build/x86_64-unknown-linux-gnu/llvm/bin/llvm-*`.
-* You can install compatible versions of these tools via `rustup`.
-
-The `rustup` option is guaranteed to install a compatible version of the LLVM tools, but they can be hard to find. We recommend [`cargo-binutils`], which installs Rust-specific wrappers around these and other LLVM tools, so you can invoke them via `cargo` commands!
-
-```shell
-$ rustup component add llvm-tools-preview
-$ cargo install cargo-binutils
-$ cargo profdata -- --help # note the additional "--" preceding the tool-specific arguments
-```
-
-## Creating coverage reports
-
-Raw profiles have to be indexed before they can be used to generate coverage reports. This is done using [`llvm-profdata merge`] (or `cargo profdata -- merge`), which can combine multiple raw profiles and index them at the same time:
-
-```shell
-$ llvm-profdata merge -sparse formatjson5.profraw -o formatjson5.profdata
-```
-
-Finally, the `.profdata` file is used, in combination with the coverage map (from the program binary) to generate coverage reports using [`llvm-cov report`] (or `cargo cov -- report`), for a coverage summaries; and [`llvm-cov show`] (or `cargo cov -- show`), to see detailed coverage of lines and regions (character ranges) overlaid on the original source code.
-
-These commands have several display and filtering options. For example:
-
-```shell
-$ llvm-cov show -Xdemangler=rustfilt target/debug/examples/formatjson5 \
- -instr-profile=formatjson5.profdata \
- -show-line-counts-or-regions \
- -show-instantiations \
- -name=add_quoted_string
-```
-
-<img alt="Screenshot of sample `llvm-cov show` result, for function add_quoted_string" src="img/llvm-cov-show-01.png" class="center"/>
-<br/>
-<br/>
-
-Some of the more notable options in this example include:
-
-* `--Xdemangler=rustfilt` - the command name or path used to demangle Rust symbols (`rustfilt` in the example, but this could also be a path to the `rust-demangler` tool)
-* `target/debug/examples/formatjson5` - the instrumented binary (from which to extract the coverage map)
-* `--instr-profile=<path-to-file>.profdata` - the location of the `.profdata` file created by `llvm-profdata merge` (from the `.profraw` file generated by the instrumented binary)
-* `--name=<exact-function-name>` - to show coverage for a specific function (or, consider using another filter option, such as `--name-regex=<pattern>`)
-
-## Interpreting reports
-
-There are four statistics tracked in a coverage summary:
-
-* Function coverage is the percentage of functions that have been executed at least once. A function is considered to be executed if any of its instantiations are executed.
-* Instantiation coverage is the percentage of function instantiations that have been executed at least once. Generic functions and functions generated from macros are two kinds of functions that may have multiple instantiations.
-* Line coverage is the percentage of code lines that have been executed at least once. Only executable lines within function bodies are considered to be code lines.
-* Region coverage is the percentage of code regions that have been executed at least once. A code region may span multiple lines: for example, in a large function body with no control flow. In other cases, a single line can contain multiple code regions: `return x || (y && z)` has countable code regions for `x` (which may resolve the expression, if `x` is `true`), `|| (y && z)` (executed only if `x` was `false`), and `return` (executed in either situation).
-
-Of these four statistics, function coverage is usually the least granular while region coverage is the most granular. The project-wide totals for each statistic are listed in the summary.
-
-## Test coverage
-
-A typical use case for coverage analysis is test coverage. Rust's source-based coverage tools can both measure your tests' code coverage as percentage, and pinpoint functions and branches not tested.
-
-The following example (using the [`json5format`] crate, for demonstration purposes) show how to generate and analyze coverage results for all tests in a crate.
-
-Since `cargo test` both builds and runs the tests, we set both the additional `RUSTFLAGS`, to add the `-Zinstrument-coverage` flag, and `LLVM_PROFILE_FILE`, to set a custom filename for the raw profiling data generated during the test runs. Since there may be more than one test binary, apply `%m` in the filename pattern. This generates unique names for each test binary. (Otherwise, each executed test binary would overwrite the coverage results from the previous binary.)
-
-```shell
-$ RUSTFLAGS="-Zinstrument-coverage" \
- LLVM_PROFILE_FILE="json5format-%m.profraw" \
- cargo test --tests
-```
-
-Make note of the test binary file paths, displayed after the word "`Running`" in the test output:
-
-```text
- ...
- Compiling json5format v0.1.3 ($HOME/json5format)
- Finished test [unoptimized + debuginfo] target(s) in 14.60s
-
- Running target/debug/deps/json5format-fececd4653271682
-running 25 tests
-...
-test result: ok. 25 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-
- Running target/debug/deps/lib-30768f9c53506dc5
-running 31 tests
-...
-test result: ok. 31 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out
-```
-
-You should have one or more `.profraw` files now, one for each test binary. Run the `profdata` tool to merge them:
-
-```shell
-$ cargo profdata -- merge \
- -sparse json5format-*.profraw -o json5format.profdata
-```
-
-Then run the `cov` tool, with the `profdata` file and all test binaries:
-
-```shell
-$ cargo cov -- report \
- --use-color --ignore-filename-regex='/.cargo/registry' \
- --instr-profile=json5format.profdata \
- --object target/debug/deps/lib-30768f9c53506dc5 \
- --object target/debug/deps/json5format-fececd4653271682
-$ cargo cov -- show \
- --use-color --ignore-filename-regex='/.cargo/registry' \
- --instr-profile=json5format.profdata \
- --object target/debug/deps/lib-30768f9c53506dc5 \
- --object target/debug/deps/json5format-fececd4653271682 \
- --show-instantiations --show-line-counts-or-regions \
- --Xdemangler=rustfilt | less -R
-```
-
-_Note the command line option `--ignore-filename-regex=/.cargo/registry`, which excludes the sources for dependencies from the coverage results._
-
-### Tips for listing the binaries automatically
-
-For `bash` users, one suggested way to automatically complete the `cov` command with the list of binaries is with a command like:
-
-```bash
-$ cargo cov -- report \
- $( \
- for file in \
- $( \
- RUSTFLAGS="-Zinstrument-coverage" \
- cargo test --tests --no-run --message-format=json \
- | jq -r "select(.profile.test == true) | .filenames[]" \
- | grep -v dSYM - \
- ); \
- do \
- printf "%s %s " -object $file; \
- done \
- ) \
- --instr-profile=json5format.profdata --summary-only # and/or other options
-```
-
-Adding `--no-run --message-format=json` to the _same_ `cargo test` command used to run
-the tests (including the same environment variables and flags) generates output in a JSON
-format that `jq` can easily query.
-
-The `printf` command takes this list and generates the `--object <binary>` arguments
-for each listed test binary.
-
-### Including doc tests
-
-The previous examples run `cargo test` with `--tests`, which excludes doc tests.[^79417]
-
-To include doc tests in the coverage results, drop the `--tests` flag, and apply the
-`-Zinstrument-coverage` flag, and some doc-test-specific options in the
-`RUSTDOCFLAGS` environment variable. (The `cargo profdata` command does not change.)
-
-```bash
-$ RUSTFLAGS="-Zinstrument-coverage" \
- RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
- LLVM_PROFILE_FILE="json5format-%m.profraw" \
- cargo test
-$ cargo profdata -- merge \
- -sparse json5format-*.profraw -o json5format.profdata
-```
-
-The `-Zunstable-options --persist-doctests` flag is required, to save the test binaries
-(with their coverage maps) for `llvm-cov`.
-
-```bash
-$ cargo cov -- report \
- $( \
- for file in \
- $( \
- RUSTFLAGS="-Zinstrument-coverage" \
- RUSTDOCFLAGS="-Zinstrument-coverage -Zunstable-options --persist-doctests target/debug/doctestbins" \
- cargo test --no-run --message-format=json \
- | jq -r "select(.profile.test == true) | .filenames[]" \
- | grep -v dSYM - \
- ) \
- target/debug/doctestbins/*/rust_out; \
- do \
- [[ -x $file ]] && printf "%s %s " -object $file; \
- done \
- ) \
- --instr-profile=json5format.profdata --summary-only # and/or other options
-```
-
-Note, the differences in this `cargo cov` command, compared with the version without
-doc tests, include:
-
-* The `cargo test ... --no-run` command is updated with the same environment variables
- and flags used to _build_ the tests, _including_ the doc tests. (`LLVM_PROFILE_FILE`
- is only used when _running_ the tests.)
-* The file glob pattern `target/debug/doctestbins/*/rust_out` adds the `rust_out`
- binaries generated for doc tests (note, however, that some `rust_out` files may not
- be executable binaries).
-* `[[ -x $file ]] &&` filters the files passed on to the `printf`, to include only
- executable binaries.
-
-[^79417]: There is ongoing work to resolve a known issue
-[(#79417)](https://github.com/rust-lang/rust/issues/79417) that doc test coverage
-generates incorrect source line numbers in `llvm-cov show` results.
-
-## Other references
-
-Rust's implementation and workflow for source-based code coverage is based on the same library and tools used to implement [source-based code coverage in Clang]. (This document is partially based on the Clang guide.)
-
-[#79121]: https://github.com/rust-lang/rust/issues/79121
-[`-Zprofile`]: profile.md
-[`llvm.instrprof.increment`]: https://llvm.org/docs/LangRef.html#llvm-instrprof-increment-intrinsic
-[LLVM Code Coverage Mapping Format]: https://llvm.org/docs/CoverageMappingFormat.html
-[rustc-dev-guide-how-to-build-and-run]: https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html
-[`rustfilt`]: https://crates.io/crates/rustfilt
-[`json5format`]: https://crates.io/crates/json5format
-[`cargo-binutils`]: https://crates.io/crates/cargo-binutils
-[`llvm-profdata merge`]: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
-[`llvm-cov report`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-report
-[`llvm-cov show`]: https://llvm.org/docs/CommandGuide/llvm-cov.html#llvm-cov-show
-[source-based code coverage in Clang]: https://clang.llvm.org/docs/SourceBasedCodeCoverage.html
+[`-z instrument-coverage`]: ./instrument-coverage.html
+++ /dev/null
-# `const_fn`
-
-The tracking issue for this feature is: [#57563]
-
-[#57563]: https://github.com/rust-lang/rust/issues/57563
-
-------------------------
-
-The `const_fn` feature enables additional functionality not stabilized in the
-[minimal subset of `const_fn`](https://github.com/rust-lang/rust/issues/53555)
--- /dev/null
+# `native_link_modifiers_as_needed`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_as_needed` feature allows you to use the `as-needed` modifier.
+
+`as-needed` is only compatible with the `dynamic` and `framework` linking kinds. Using any other kind will result in a compiler error.
+
+`+as-needed` means that the library will actually be linked only if it satisfies some undefined symbols at the point at which it is specified on the command line, making it similar to static libraries in this regard.
+
+This modifier translates to `--as-needed` for ld-like linkers, and to `-dead_strip_dylibs` / `-needed_library` / `-needed_framework` for ld64.
+The modifier does nothing for linkers that don't support it (e.g. `link.exe`).
+
+The default for this modifier is unclear, some targets currently specify it as `+as-needed`, some do not. We may want to try making `+as-needed` a default for all targets.
--- /dev/null
+# `native_link_modifiers_bundle`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_bundle` feature allows you to use the `bundle` modifier.
+
+Only compatible with the `static` linking kind. Using any other kind will result in a compiler error.
+
+`+bundle` means objects from the static library are bundled into the produced crate (a rlib, for example) and are used from this crate later during linking of the final binary.
+
+`-bundle` means the static library is included in the produced rlib "by name" and object files from it are included only during linking of the final binary; the file search by that name is also performed during final linking.
+
+This modifier is supposed to supersede the `static-nobundle` linking kind defined by [RFC 1717](https://github.com/rust-lang/rfcs/pull/1717).
+
+The default for this modifier is currently `+bundle`, but it could be changed later on some future edition boundary.
--- /dev/null
+# `native_link_modifiers_verbatim`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_verbatim` feature allows you to use the `verbatim` modifier.
+
+`+verbatim` means that rustc itself won't add any target-specified library prefixes or suffixes (like `lib` or `.a`) to the library name, and will try its best to ask for the same thing from the linker.
+
+For `ld`-like linkers rustc will use the `-l:filename` syntax (note the colon) when passing the library, so the linker won't add any prefixes or suffixes either.
+See [`-l namespec`](https://sourceware.org/binutils/docs/ld/Options.html) in ld documentation for more details.
+For linkers not supporting any verbatim modifiers (e.g. `link.exe` or `ld64`) the library name will be passed as is.
+
+The default for this modifier is `-verbatim`.
+
+This feature changes the behavior of the `raw-dylib` linking kind specified by [RFC 2627](https://github.com/rust-lang/rfcs/pull/2627). The `.dll` suffix (or other target-specified suffixes for other targets) is now added automatically.
+If your DLL doesn't have the `.dll` suffix, it can be specified with `+verbatim`.
--- /dev/null
+# `native_link_modifiers_whole_archive`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers_whole_archive` feature allows you to use the `whole-archive` modifier.
+
+Only compatible with the `static` linking kind. Using any other kind will result in a compiler error.
+
+`+whole-archive` means that the static library is linked as a whole archive without throwing any object files away.
+
+This modifier translates to `--whole-archive` for `ld`-like linkers, to `/WHOLEARCHIVE` for `link.exe`, and to `-force_load` for `ld64`.
+The modifier does nothing for linkers that don't support it.
+
+The default for this modifier is `-whole-archive`.
--- /dev/null
+# `native_link_modifiers`
+
+The tracking issue for this feature is: [#81490]
+
+[#81490]: https://github.com/rust-lang/rust/issues/81490
+
+------------------------
+
+The `native_link_modifiers` feature allows you to use the `modifiers` syntax with the `#[link(..)]` attribute.
+
+Modifiers are specified as a comma-delimited string with each modifier prefixed with either a `+` or `-` to indicate that the modifier is enabled or disabled, respectively. The last boolean value specified for a given modifier wins.
--- /dev/null
+# `no_coverage`
+
+The tracking issue for this feature is: [#84605]
+
+[#84605]: https://github.com/rust-lang/rust/issues/84605
+
+---
+
+The `no_coverage` attribute can be used to selectively disable coverage
+instrumentation in an annotated function. This might be useful to:
+
+- Avoid instrumentation overhead in a performance critical function
+- Avoid generating coverage for a function that is not meant to be executed,
+ but still target 100% coverage for the rest of the program.
+
+## Example
+
+```rust
+#![feature(no_coverage)]
+
+// `foo()` will get coverage instrumentation (by default)
+fn foo() {
+ // ...
+}
+
+#[no_coverage]
+fn bar() {
+ // ...
+}
+```
} else {
Attributes::from_ast(&both, None)
},
- both.cfg(cx.sess().diagnostic()),
+ both.cfg(cx.sess()),
)
} else {
- (old_attrs.clean(cx), old_attrs.cfg(cx.sess().diagnostic()))
+ (old_attrs.clean(cx), old_attrs.cfg(cx.sess()))
}
}
def_id: crate_def_id.into(),
visibility: krate.vis.clean(cx),
kind: box ExternCrateItem { src: orig_name },
- cfg: attrs.cfg(cx.sess().diagnostic()),
+ cfg: attrs.cfg(cx.sess()),
}]
}
use rustc_data_structures::thin_vec::ThinVec;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind, Res};
-use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_hir::{BodyId, Mutability};
use rustc_index::vec::IndexVec;
}
#[inline]
- crate fn as_local(self) -> Option<LocalDefId> {
- match self {
- FakeDefId::Real(id) => id.as_local(),
- FakeDefId::Fake(idx, krate) => {
- (krate == LOCAL_CRATE).then(|| LocalDefId { local_def_index: idx })
- }
- }
- }
-
- #[inline]
- crate fn expect_local(self) -> LocalDefId {
- self.as_local()
- .unwrap_or_else(|| panic!("FakeDefId::expect_local: `{:?}` isn't local", self))
- }
-
- #[inline]
+ #[track_caller]
crate fn expect_real(self) -> rustc_hir::def_id::DefId {
self.as_real().unwrap_or_else(|| panic!("FakeDefId::expect_real: `{:?}` isn't real", self))
}
kind,
box ast_attrs.clean(cx),
cx,
- ast_attrs.cfg(cx.sess().diagnostic()),
+ ast_attrs.cfg(cx.sess()),
)
}
cx: &mut DocContext<'_>,
cfg: Option<Arc<Cfg>>,
) -> Item {
- debug!("name={:?}, def_id={:?}", name, def_id);
+ trace!("name={:?}, def_id={:?}", name, def_id);
Item {
def_id: def_id.into(),
Some(ExternalLocation::Remote(ref s)) => {
format!("{}/std/", s.trim_end_matches('/'))
}
- Some(ExternalLocation::Unknown) | None => format!(
- "https://doc.rust-lang.org/{}/std/",
- crate::doc_rust_lang_org_channel(),
- ),
+ Some(ExternalLocation::Unknown) | None => {
+ "https://doc.rust-lang.org/nightly/std/".to_string()
+ }
};
// This is a primitive so the url is done "by hand".
let tail = fragment.find('#').unwrap_or_else(|| fragment.len());
fn other_attrs(&self) -> Vec<ast::Attribute>;
- fn cfg(&self, diagnostic: &::rustc_errors::Handler) -> Option<Arc<Cfg>>;
+ fn cfg(&self, sess: &Session) -> Option<Arc<Cfg>>;
}
impl AttributesExt for [ast::Attribute] {
self.iter().filter(|attr| attr.doc_str().is_none()).cloned().collect()
}
- fn cfg(&self, diagnostic: &::rustc_errors::Handler) -> Option<Arc<Cfg>> {
+ fn cfg(&self, sess: &Session) -> Option<Arc<Cfg>> {
let mut cfg = Cfg::True;
for attr in self.iter() {
+ // #[doc]
if attr.doc_str().is_none() && attr.has_name(sym::doc) {
- if let Some(mi) = attr.meta() {
- if let Some(cfg_mi) = Attributes::extract_cfg(&mi) {
- // Extracted #[doc(cfg(...))]
- match Cfg::parse(cfg_mi) {
- Ok(new_cfg) => cfg &= new_cfg,
- Err(e) => diagnostic.span_err(e.span, e.msg),
+ // #[doc(...)]
+ if let Some(list) = attr.meta().as_ref().and_then(|mi| mi.meta_item_list()) {
+ for item in list {
+ // #[doc(include)]
+ if !item.has_name(sym::cfg) {
+ continue;
+ }
+ // #[doc(cfg(...))]
+ if let Some(cfg_mi) = item
+ .meta_item()
+ .and_then(|item| rustc_expand::config::parse_cfg(&item, sess))
+ {
+ match Cfg::parse(&cfg_mi) {
+ Ok(new_cfg) => cfg &= new_cfg,
+ Err(e) => sess.span_err(e.span, e.msg),
+ }
}
}
}
self.other_attrs.lists(name)
}
- /// Extracts the content from an attribute `#[doc(cfg(content))]`.
- crate fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> {
- use rustc_ast::NestedMetaItem::MetaItem;
-
- if let ast::MetaItemKind::List(ref nmis) = mi.kind {
- if nmis.len() == 1 {
- if let MetaItem(ref cfg_mi) = nmis[0] {
- if cfg_mi.has_name(sym::cfg) {
- if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.kind {
- if cfg_nmis.len() == 1 {
- if let MetaItem(ref content_mi) = cfg_nmis[0] {
- return Some(content_mi);
- }
- }
- }
- }
- }
- }
- }
-
- None
- }
-
/// Reads a `MetaItem` from within an attribute, looks for whether it is a
/// `#[doc(include="file")]`, and returns the filename and contents of the file as loaded from
/// its expansion.
}
}
RawPointer(..) => Some(PrimitiveType::RawPointer),
- BorrowedRef { type_: box Generic(..), .. } => Some(PrimitiveType::Reference),
BareFunction(..) => Some(PrimitiveType::Fn),
Never => Some(PrimitiveType::Never),
_ => None,
}
crate fn is_primitive(&self) -> bool {
- match self {
- Self::Primitive(_) => true,
- Self::BorrowedRef { ref type_, .. } | Self::RawPointer(_, ref type_) => {
- type_.is_primitive()
- }
- _ => false,
- }
+ self.primitive_type().is_some()
}
crate fn projection(&self) -> Option<(&Type, DefId, Symbol)> {
&& attr.meta_item_list().map_or(false, |l| rustc_attr::list_contains_name(&l, flag))
})
}
-
-/// Return a channel suitable for using in a `doc.rust-lang.org/{channel}` format string.
-crate fn doc_rust_lang_org_channel() -> &'static str {
- match env!("CFG_RELEASE_CHANNEL") {
- "stable" => env!("CFG_RELEASE_NUM"),
- "beta" => "beta",
- "nightly" | "dev" => "nightly",
- // custom build of rustdoc maybe? link to the stable docs just in case
- _ => "",
- }
-}
use rustc_data_structures::fx::FxHashMap;
use rustc_session::config::{self, parse_crate_types_from_list, parse_externs, CrateType};
-use rustc_session::config::{
- build_codegen_options, build_debugging_options, get_cmd_lint_options, host_triple,
- nightly_options,
-};
+use rustc_session::config::{get_cmd_lint_options, host_triple, nightly_options};
use rustc_session::config::{CodegenOptions, DebuggingOptions, ErrorOutputType, Externs};
use rustc_session::getopts;
use rustc_session::lint::Level;
config::parse_json(&matches);
let error_format = config::parse_error_format(&matches, color, json_rendered);
- let codegen_options = build_codegen_options(matches, error_format);
- let debugging_opts = build_debugging_options(matches, error_format);
+ let codegen_options = CodegenOptions::build(matches, error_format);
+ let debugging_opts = DebuggingOptions::build(matches, error_format);
let diag = new_handler(error_format, None, &debugging_opts);
lints_to_show.extend(crate::lint::RUSTDOC_LINTS.iter().map(|lint| lint.name.to_string()));
let (lint_opts, lint_caps) = crate::lint::init_lints(lints_to_show, lint_opts, |lint| {
- // FIXME: why is this necessary?
- if lint.name == crate::lint::BROKEN_INTRA_DOC_LINKS.name
- || lint.name == crate::lint::INVALID_CODEBLOCK_ATTRIBUTES.name
- {
- None
- } else {
- Some((lint.name_lower(), lint::Allow))
- }
+ Some((lint.name_lower(), lint::Allow))
});
let crate_types =
let mut krate = tcx.sess.time("clean_crate", || clean::krate(&mut ctxt));
if krate.module.doc_value().map(|d| d.is_empty()).unwrap_or(true) {
- let help = format!(
- "The following guide may be of use:\n\
- https://doc.rust-lang.org/{}/rustdoc/how-to-write-documentation.html",
- crate::doc_rust_lang_org_channel(),
- );
+ let help = "The following guide may be of use:\n\
+ https://doc.rust-lang.org/nightly/rustdoc/how-to-write-documentation.html";
tcx.struct_lint_node(
crate::lint::MISSING_CRATE_LEVEL_DOCS,
DocContext::as_local_hir_id(tcx, krate.module.def_id).unwrap(),
|lint| {
let mut diag =
lint.build("no documentation found for this crate's top-level module");
- diag.help(&help);
+ diag.help(help);
diag.emit();
},
);
externs: options.externs.clone(),
unstable_features: options.render_options.unstable_features,
actually_rustdoc: true,
- debugging_opts: config::DebuggingOptions { ..config::basic_debugging_options() },
edition: options.edition,
target_triple: options.target.clone(),
crate_name: options.crate_name.clone(),
let ast_attrs = self.tcx.hir().attrs(hir_id);
let mut attrs = Attributes::from_ast(ast_attrs, None);
- if let Some(ref cfg) = ast_attrs.cfg(self.sess.diagnostic()) {
+ if let Some(ref cfg) = ast_attrs.cfg(self.sess) {
if !cfg.matches(&self.sess.parse_sess, Some(&self.sess.features_untracked())) {
return;
}
use rustc_lexer::{LiteralKind, TokenKind};
use rustc_span::edition::Edition;
use rustc_span::symbol::Symbol;
-use rustc_span::with_default_session_globals;
use super::format::Buffer;
playground_button: Option<&str>,
tooltip: Option<(Option<Edition>, &str)>,
edition: Edition,
+ extra_content: Option<Buffer>,
) {
debug!("highlighting: ================\n{}\n==============", src);
if let Some((edition_info, class)) = tooltip {
);
}
- write_header(out, class);
+ write_header(out, class, extra_content);
write_code(out, &src, edition);
write_footer(out, playground_button);
}
-fn write_header(out: &mut Buffer, class: Option<&str>) {
- writeln!(out, "<div class=\"example-wrap\"><pre class=\"rust {}\">", class.unwrap_or_default());
+fn write_header(out: &mut Buffer, class: Option<&str>, extra_content: Option<Buffer>) {
+ write!(out, "<div class=\"example-wrap\">");
+ if let Some(extra) = extra_content {
+ out.push_buffer(extra);
+ }
+ if let Some(class) = class {
+ writeln!(out, "<pre class=\"rust {}\">", class);
+ } else {
+ writeln!(out, "<pre class=\"rust\">");
+ }
}
fn write_code(out: &mut Buffer, src: &str, edition: Edition) {
/// possibly giving it an HTML span with a class specifying what flavor of
/// token is used.
fn highlight(mut self, sink: &mut dyn FnMut(Highlight<'a>)) {
- with_default_session_globals(|| {
- loop {
- if self
- .tokens
- .peek()
- .map(|t| matches!(t.0, TokenKind::Colon | TokenKind::Ident))
- .unwrap_or(false)
- {
- let tokens = self.get_full_ident_path();
- for (token, start, end) in tokens {
- let text = &self.src[start..end];
- self.advance(token, text, sink);
- self.byte_pos += text.len() as u32;
- }
- }
- if let Some((token, text)) = self.next() {
+ loop {
+ if self
+ .tokens
+ .peek()
+ .map(|t| matches!(t.0, TokenKind::Colon | TokenKind::Ident))
+ .unwrap_or(false)
+ {
+ let tokens = self.get_full_ident_path();
+ for (token, start, end) in tokens {
+ let text = &self.src[start..end];
self.advance(token, text, sink);
- } else {
- break;
+ self.byte_pos += text.len() as u32;
}
}
- })
+ if let Some((token, text)) = self.next() {
+ self.advance(token, text, sink);
+ } else {
+ break;
+ }
+ }
}
/// Single step of highlighting. This will classify `token`, but maybe also
use crate::html::format::Buffer;
use expect_test::expect_file;
use rustc_span::edition::Edition;
+use rustc_span::with_default_session_globals;
const STYLE: &str = r#"
<style>
#[test]
fn test_html_highlighting() {
- let src = include_str!("fixtures/sample.rs");
- let html = {
- let mut out = Buffer::new();
- write_code(&mut out, src, Edition::Edition2018);
- format!("{}<pre><code>{}</code></pre>\n", STYLE, out.into_inner())
- };
- expect_file!["fixtures/sample.html"].assert_eq(&html);
+ with_default_session_globals(|| {
+ let src = include_str!("fixtures/sample.rs");
+ let html = {
+ let mut out = Buffer::new();
+ write_code(&mut out, src, Edition::Edition2018);
+ format!("{}<pre><code>{}</code></pre>\n", STYLE, out.into_inner())
+ };
+ expect_file!["fixtures/sample.html"].assert_eq(&html);
+ });
}
#[test]
fn test_dos_backline() {
- let src = "pub fn foo() {\r\n\
+ with_default_session_globals(|| {
+ let src = "pub fn foo() {\r\n\
println!(\"foo\");\r\n\
}\r\n";
- let mut html = Buffer::new();
- write_code(&mut html, src, Edition::Edition2018);
- expect_file!["fixtures/dos_line.html"].assert_eq(&html.into_inner());
+ let mut html = Buffer::new();
+ write_code(&mut html, src, Edition::Edition2018);
+ expect_file!["fixtures/dos_line.html"].assert_eq(&html.into_inner());
+ });
}
crate static_extra_scripts: &'a [&'a str],
}
+impl<'a> Page<'a> {
+ crate fn get_static_root_path(&self) -> &str {
+ self.static_root_path.unwrap_or(self.root_path)
+ }
+}
+
crate fn render<T: Print, S: Print>(
layout: &Layout,
page: &Page<'_>,
t: T,
style_files: &[StylePath],
) -> String {
- let static_root_path = page.static_root_path.unwrap_or(page.root_path);
+ let static_root_path = page.get_static_root_path();
format!(
"<!DOCTYPE html>\
<html lang=\"en\">\
playground_button.as_deref(),
tooltip,
edition,
+ None,
);
Some(Event::Html(s.into_inner().into()))
}
&self.cache
}
- fn sess(&self) -> &'tcx Session {
+ pub(super) fn sess(&self) -> &'tcx Session {
&self.shared.tcx.sess
}
&self.shared.layout,
&page,
|buf: &mut _| print_sidebar(self, it, buf),
- |buf: &mut _| print_item(self, it, buf),
+ |buf: &mut _| print_item(self, it, buf, &page),
&self.shared.style_files,
)
} else {
info!("Documenting {}", name);
}
document_item_info(w, cx, item, parent);
- document_full(w, item, cx);
+ document_full_collapsible(w, item, cx);
}
/// Render md_text as markdown.
}
}
+fn document_full_collapsible(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>) {
+ document_full_inner(w, item, cx, true);
+}
+
fn document_full(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>) {
+ document_full_inner(w, item, cx, false);
+}
+
+fn document_full_inner(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>, is_collapsible: bool) {
if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) {
debug!("Doc block: =====\n{}\n=====", s);
- render_markdown(w, cx, &s, item.links(cx));
+ if is_collapsible {
+ w.write_str(
+ "<details class=\"rustdoc-toggle top-doc\" open>\
+ <summary class=\"hideme\">\
+ <span>Expand description</span>\
+ </summary>",
+ );
+ render_markdown(w, cx, &s, item.links(cx));
+ w.write_str("</details>");
+ } else {
+ render_markdown(w, cx, &s, item.links(cx));
+ }
}
}
use crate::html::escape::Escape;
use crate::html::format::{print_abi_with_space, print_where_clause, Buffer, PrintWithSpace};
use crate::html::highlight;
+use crate::html::layout::Page;
use crate::html::markdown::MarkdownSummaryLine;
-pub(super) fn print_item(cx: &Context<'_>, item: &clean::Item, buf: &mut Buffer) {
+pub(super) fn print_item(cx: &Context<'_>, item: &clean::Item, buf: &mut Buffer, page: &Page<'_>) {
debug_assert!(!item.is_stripped());
// Write the breadcrumb trail header for the top
buf.write_str("<h1 class=\"fqn\"><span class=\"in-band\">");
}
}
write!(buf, "<a class=\"{}\" href=\"\">{}</a>", item.type_(), item.name.as_ref().unwrap());
- write!(buf, "<button id=\"copy-path\" onclick=\"copy_path(this)\">⎘</button>");
+ write!(
+ buf,
+ "<button id=\"copy-path\" onclick=\"copy_path(this)\">\
+ <img src=\"{static_root_path}clipboard{suffix}.svg\" \
+ width=\"19\" height=\"18\" \
+ alt=\"Copy item import\">\
+ </button>",
+ static_root_path = page.get_static_root_path(),
+ suffix = page.resource_suffix,
+ );
buf.write_str("</span>"); // in-band
buf.write_str("<span class=\"out-of-band\">");
let import_item = clean::Item {
def_id: import_def_id.into(),
attrs: import_attrs,
- cfg: ast_attrs.cfg(cx.tcx().sess.diagnostic()),
+ cfg: ast_attrs.cfg(cx.sess()),
..myitem.clone()
};
None,
None,
it.span(cx.tcx()).inner().edition(),
+ None,
);
});
document(w, cx, it, None)
fn document_non_exhaustive(w: &mut Buffer, item: &clean::Item) {
if item.is_non_exhaustive() {
- write!(w, "<div class=\"docblock non-exhaustive non-exhaustive-{}\">", {
- if item.is_struct() {
- "struct"
- } else if item.is_enum() {
- "enum"
- } else if item.is_variant() {
- "variant"
- } else {
- "type"
+ write!(
+ w,
+ "<details class=\"rustdoc-toggle non-exhaustive\">\
+ <summary class=\"hideme\"><span>{}</span></summary>\
+ <div class=\"docblock\">",
+ {
+ if item.is_struct() {
+ "This struct is marked as non-exhaustive"
+ } else if item.is_enum() {
+ "This enum is marked as non-exhaustive"
+ } else if item.is_variant() {
+ "This variant is marked as non-exhaustive"
+ } else {
+ "This type is marked as non-exhaustive"
+ }
}
- });
+ );
if item.is_struct() {
w.write_str(
);
}
- w.write_str("</div>");
+ w.write_str("</div></details>");
}
}
}
write_toolchain("brush.svg", static_files::BRUSH_SVG)?;
write_toolchain("wheel.svg", static_files::WHEEL_SVG)?;
+ write_toolchain("clipboard.svg", static_files::CLIPBOARD_SVG)?;
write_toolchain("down-arrow.svg", static_files::DOWN_ARROW_SVG)?;
let mut themes: Vec<&String> = themes.iter().collect();
/// adding line numbers to the left-hand side.
fn print_src(buf: &mut Buffer, s: &str, edition: Edition) {
let lines = s.lines().count();
+ let mut line_numbers = Buffer::empty_from(buf);
let mut cols = 0;
let mut tmp = lines;
while tmp > 0 {
cols += 1;
tmp /= 10;
}
- buf.write_str("<pre class=\"line-numbers\">");
+ line_numbers.write_str("<pre class=\"line-numbers\">");
for i in 1..=lines {
- writeln!(buf, "<span id=\"{0}\">{0:1$}</span>", i, cols);
+ writeln!(line_numbers, "<span id=\"{0}\">{0:1$}</span>", i, cols);
}
- buf.write_str("</pre>");
- highlight::render_with_highlighting(s, buf, None, None, None, edition);
+ line_numbers.write_str("</pre>");
+ highlight::render_with_highlighting(s, buf, None, None, None, edition, Some(line_numbers));
}
--- /dev/null
+<svg width="24" height="25" viewBox="0 0 24 25" xmlns="http://www.w3.org/2000/svg" aria-label="Copy to clipboard"><path d="M18 20h2v3c0 1-1 2-2 2H2c-.998 0-2-1-2-2V5c0-.911.755-1.667 1.667-1.667h5A3.323 3.323 0 0110 0a3.323 3.323 0 013.333 3.333h5C19.245 3.333 20 4.09 20 5v8.333h-2V9H2v14h16v-3zM3 7h14c0-.911-.793-1.667-1.75-1.667H13.5c-.957 0-1.75-.755-1.75-1.666C11.75 2.755 10.957 2 10 2s-1.75.755-1.75 1.667c0 .911-.793 1.666-1.75 1.666H4.75C3.793 5.333 3 6.09 3 7z"/><path d="M4 19h6v2H4zM12 11H4v2h8zM4 17h4v-2H4zM15 15v-3l-4.5 4.5L15 21v-3l8.027-.032L23 15z"/></svg>
if (savedHash.length === 0) {
return;
}
- elem = document.getElementById(savedHash.slice(1)); // we remove the '#'
- if (!elem || !isHidden(elem)) {
- return;
- }
- var parent = elem.parentNode;
- if (parent && hasClass(parent, "impl-items")) {
- // In case this is a trait implementation item, we first need to toggle
- // the "Show hidden undocumented items".
- onEachLazy(parent.getElementsByClassName("collapsed"), function(e) {
- if (e.parentNode === parent) {
- // Only click on the toggle we're looking for.
- e.click();
- return true;
- }
- });
- if (isHidden(elem)) {
- // The whole parent is collapsed. We need to click on its toggle as well!
- if (hasClass(parent.lastElementChild, "collapse-toggle")) {
- parent.lastElementChild.click();
- }
- }
- }
+ expandSection(savedHash.slice(1)); // we remove the '#'
}
}
}
function expandSection(id) {
- var elem = document.getElementById(id);
- if (elem && isHidden(elem)) {
- var h3 = elem.parentNode.previousElementSibling;
- if (h3 && h3.tagName !== "H3") {
- h3 = h3.previousElementSibling; // skip div.docblock
- }
-
- if (h3) {
- var collapses = h3.getElementsByClassName("collapse-toggle");
- if (collapses.length > 0) {
- // The element is not visible, we need to make it appear!
- collapseDocs(collapses[0], "show");
- }
- // Open all ancestor <details> to make this element visible.
- openParentDetails(h3.parentNode);
- } else {
- openParentDetails(elem.parentNode);
- }
- }
+ openParentDetails(document.getElementById(id));
}
function getHelpElement(build) {
var helpElem = getHelpElement(false);
if (hasClass(ev.target, "help-button")) {
displayHelp(true, ev);
- } else if (hasClass(ev.target, "collapse-toggle")) {
- collapseDocs(ev.target, "toggle");
- } else if (hasClass(ev.target.parentNode, "collapse-toggle")) {
- collapseDocs(ev.target.parentNode, "toggle");
} else if (ev.target.tagName === "SPAN" && hasClass(ev.target.parentNode, "line-numbers")) {
handleSourceHighlight(ev);
} else if (helpElem && hasClass(helpElem, "hidden") === false) {
return "\u2212"; // "\u2212" is "−" minus sign
}
- function onEveryMatchingChild(elem, className, func) {
- if (elem && className && func) {
- var length = elem.childNodes.length;
- var nodes = elem.childNodes;
- for (var i = 0; i < length; ++i) {
- if (hasClass(nodes[i], className)) {
- func(nodes[i]);
- } else {
- onEveryMatchingChild(nodes[i], className, func);
- }
- }
- }
- }
-
- function toggleAllDocs(fromAutoCollapse) {
+ function toggleAllDocs() {
var innerToggle = document.getElementById(toggleAllDocsId);
if (!innerToggle) {
return;
}
+ var sectionIsCollapsed = false;
if (hasClass(innerToggle, "will-expand")) {
removeClass(innerToggle, "will-expand");
- onEachLazy(document.getElementsByTagName("details"), function(e) {
- e.open = true;
- });
- onEveryMatchingChild(innerToggle, "inner", function(e) {
- e.innerHTML = labelForToggleButton(false);
+ onEachLazy(document.getElementsByClassName("rustdoc-toggle"), function(e) {
+ if (!hasClass(e, "type-contents-toggle")) {
+ e.open = true;
+ }
});
innerToggle.title = "collapse all docs";
- if (fromAutoCollapse !== true) {
- onEachLazy(document.getElementsByClassName("collapse-toggle"), function(e) {
- collapseDocs(e, "show");
- });
- }
} else {
addClass(innerToggle, "will-expand");
- onEachLazy(document.getElementsByTagName("details"), function(e) {
- e.open = false;
- });
- onEveryMatchingChild(innerToggle, "inner", function(e) {
- var parent = e.parentNode;
- var superParent = null;
-
- if (parent) {
- superParent = parent.parentNode;
- }
- if (!parent || !superParent || superParent.id !== "main" ||
- hasClass(parent, "impl") === false) {
- e.innerHTML = labelForToggleButton(true);
+ onEachLazy(document.getElementsByClassName("rustdoc-toggle"), function(e) {
+ if (e.parentNode.id !== "main" ||
+ (!hasClass(e, "implementors-toggle") &&
+ !hasClass(e, "type-contents-toggle")))
+ {
+ e.open = false;
}
});
+ sectionIsCollapsed = true;
innerToggle.title = "expand all docs";
- if (fromAutoCollapse !== true) {
- onEachLazy(document.getElementsByClassName("collapse-toggle"), function(e) {
- var parent = e.parentNode;
- var superParent = null;
-
- if (parent) {
- superParent = parent.parentNode;
- }
- if (!parent || !superParent || superParent.id !== "main" ||
- hasClass(parent, "impl") === false) {
- collapseDocs(e, "hide");
- }
- });
- }
}
+ innerToggle.children[0].innerText = labelForToggleButton(sectionIsCollapsed);
}
function collapseDocs(toggle, mode) {
referenceNode.parentNode.insertBefore(newNode, referenceNode.nextSibling);
}
- function createSimpleToggle(sectionIsCollapsed) {
- var toggle = document.createElement("a");
- toggle.href = "javascript:void(0)";
- toggle.className = "collapse-toggle";
- toggle.innerHTML = "[<span class=\"inner\">" + labelForToggleButton(sectionIsCollapsed) +
- "</span>]";
- return toggle;
- }
-
- function createToggle(toggle, otherMessage, fontSize, extraClass, show) {
- var span = document.createElement("span");
- span.className = "toggle-label";
- if (show) {
- span.style.display = "none";
- }
- if (!otherMessage) {
- span.innerHTML = " Expand description";
- } else {
- span.innerHTML = otherMessage;
- }
-
- if (fontSize) {
- span.style.fontSize = fontSize;
- }
-
- var mainToggle = toggle.cloneNode(true);
- mainToggle.appendChild(span);
-
- var wrapper = document.createElement("div");
- wrapper.className = "toggle-wrapper";
- if (!show) {
- addClass(wrapper, "collapsed");
- var inner = mainToggle.getElementsByClassName("inner");
- if (inner && inner.length > 0) {
- inner[0].innerHTML = "+";
- }
- }
- if (extraClass) {
- addClass(wrapper, extraClass);
- }
- wrapper.appendChild(mainToggle);
- return wrapper;
- }
-
(function() {
var toggles = document.getElementById(toggleAllDocsId);
if (toggles) {
toggles.onclick = toggleAllDocs;
}
- var toggle = createSimpleToggle(false);
var hideMethodDocs = getSettingValue("auto-hide-method-docs") === "true";
var hideImplementors = getSettingValue("auto-collapse-implementors") !== "false";
var hideLargeItemContents = getSettingValue("auto-hide-large-items") !== "false";
var impl_list = document.getElementById("trait-implementations-list");
if (impl_list !== null) {
- onEachLazy(impl_list.getElementsByClassName("collapse-toggle"), function(e) {
+ onEachLazy(impl_list.getElementsByClassName("rustdoc-toggle"), function(e) {
collapseNonInherent(e);
});
}
var blanket_list = document.getElementById("blanket-implementations-list");
if (blanket_list !== null) {
- onEachLazy(blanket_list.getElementsByClassName("collapse-toggle"), function(e) {
+ onEachLazy(blanket_list.getElementsByClassName("rustdoc-toggle"), function(e) {
collapseNonInherent(e);
});
}
}
}
- function buildToggleWrapper(e) {
- if (hasClass(e, "autohide")) {
- var wrap = e.previousElementSibling;
- if (wrap && hasClass(wrap, "toggle-wrapper")) {
- var inner_toggle = wrap.childNodes[0];
- var extra = e.childNodes[0].tagName === "H3";
-
- e.style.display = "none";
- addClass(wrap, "collapsed");
- onEachLazy(inner_toggle.getElementsByClassName("inner"), function(e) {
- e.innerHTML = labelForToggleButton(true);
- });
- onEachLazy(inner_toggle.getElementsByClassName("toggle-label"), function(e) {
- e.style.display = "inline-block";
- if (extra === true) {
- e.innerHTML = " Show " + e.childNodes[0].innerHTML;
- }
- });
- }
- }
- if (e.parentNode.id === "main") {
- var otherMessage = "";
- var fontSize;
- var extraClass;
-
- if (hasClass(e, "type-decl")) {
- // We do something special for these
- return;
- } else if (hasClass(e, "non-exhaustive")) {
- otherMessage = " This ";
- if (hasClass(e, "non-exhaustive-struct")) {
- otherMessage += "struct";
- } else if (hasClass(e, "non-exhaustive-enum")) {
- otherMessage += "enum";
- } else if (hasClass(e, "non-exhaustive-variant")) {
- otherMessage += "enum variant";
- } else if (hasClass(e, "non-exhaustive-type")) {
- otherMessage += "type";
- }
- otherMessage += " is marked as non-exhaustive";
- } else if (hasClass(e.childNodes[0], "impl-items")) {
- extraClass = "marg-left";
- }
-
- e.parentNode.insertBefore(
- createToggle(
- toggle,
- otherMessage,
- fontSize,
- extraClass,
- true),
- e);
- if (hasClass(e, "non-exhaustive") === true) {
- collapseDocs(e.previousSibling.childNodes[0], "toggle");
- }
- }
- }
-
- onEachLazy(document.getElementsByClassName("docblock"), buildToggleWrapper);
-
var pageId = getPageId();
if (pageId !== null) {
expandSection(pageId);
document.execCommand('copy');
document.body.removeChild(el);
- but.textContent = '✓';
+ // There is always exactly one child element, but there can be multiple childNodes.
+ but.children[0].style.display = 'none';
+
+ var tmp;
+ if (but.childNodes.length < 2) {
+ tmp = document.createTextNode('✓');
+ but.appendChild(tmp);
+ } else {
+ onEachLazy(but.childNodes, function(e) {
+ if (e.nodeType === Node.TEXT_NODE) {
+ tmp = e;
+ return true;
+ }
+ });
+ tmp.textContent = '✓';
+ }
if (reset_button_timeout !== null) {
window.clearTimeout(reset_button_timeout);
}
function reset_button() {
- but.textContent = '⎘';
+ tmp.textContent = '';
reset_button_timeout = null;
+ but.children[0].style.display = "";
}
reset_button_timeout = window.setTimeout(reset_button, 1000);
max-width: none;
overflow: visible;
margin-left: 0px;
- min-width: 70em;
}
nav.sub {
padding-left: 0;
}
-.rustdoc:not(.source) .example-wrap {
+.rustdoc .example-wrap {
display: inline-flex;
margin-bottom: 10px;
}
.example-wrap > pre.line-number {
overflow: initial;
border: 1px solid;
- border-top-left-radius: 5px;
- border-bottom-left-radius: 5px;
padding: 13px 8px;
text-align: right;
}
overflow-x: auto;
}
-.rustdoc:not(.source) .example-wrap > pre {
+.rustdoc .example-wrap > pre {
margin: 0;
}
table-layout: fixed;
}
-.content pre.line-numbers {
- float: left;
- border: none;
+.content > .example-wrap pre.line-numbers {
position: relative;
-
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
+ border-top-left-radius: 5px;
+ border-bottom-left-radius: 5px;
}
.line-numbers span {
cursor: pointer;
}
#copy-path {
- height: 30px;
- font-size: 18px;
margin-left: 10px;
- padding: 0 6px;
- width: 28px;
+ padding: 0;
+ padding-left: 2px;
+}
+#copy-path> img {
+ margin-bottom: 2px;
}
#theme-choices {
cursor: pointer;
}
+details.rustdoc-toggle.top-doc > summary,
+details.rustdoc-toggle.top-doc > summary::before,
+details.rustdoc-toggle.non-exhaustive > summary,
+details.rustdoc-toggle.non-exhaustive > summary::before {
+ font-family: 'Fira Sans';
+ font-size: 16px;
+}
+
+details.non-exhaustive {
+ margin-bottom: 8px;
+}
+
details.rustdoc-toggle > summary.hideme::before {
position: relative;
}
.docblock code, .docblock-short code {
background-color: #191f26;
}
-pre {
+pre, .rustdoc.source .example-wrap {
color: #e6e1cf;
background-color: #191f26;
}
color: #fff;
}
-#theme-picker > img, #settings-menu > img {
+#theme-picker > img, #settings-menu > img, #copy-path > img {
filter: invert(100);
}
.docblock code, .docblock-short code {
background-color: #2A2A2A;
}
-pre {
+pre, .rustdoc.source .example-wrap {
background-color: #2A2A2A;
}
.docblock code, .docblock-short code {
background-color: #F5F5F5;
}
-pre {
+pre, .rustdoc.source .example-wrap {
background-color: #F5F5F5;
}
/// The file contents of `wheel.svg`, the icon used for the settings button.
crate static WHEEL_SVG: &[u8] = include_bytes!("static/wheel.svg");
+/// The file contents of `clipboard.svg`, the icon used for the "copy path" button.
+crate static CLIPBOARD_SVG: &[u8] = include_bytes!("static/clipboard.svg");
+
/// The file contents of `down-arrow.svg`, the icon used for the crate choice combobox.
crate static DOWN_ARROW_SVG: &[u8] = include_bytes!("static/down-arrow.svg");
use rustc_session::getopts;
use rustc_session::{early_error, early_warn};
-use crate::clean::utils::doc_rust_lang_org_channel;
-
/// A macro to create a FxHashMap.
///
/// Example:
}
println!("{}", options.usage(&format!("{} [options] <input>", argv0)));
println!(" @path Read newline separated options from `path`\n");
- println!(
- "More information available at https://doc.rust-lang.org/{}/rustdoc/what-is-rustdoc.html",
- doc_rust_lang_org_channel()
- );
+ println!("More information available at https://doc.rust-lang.org/rustdoc/what-is-rustdoc.html")
}
/// A result type used by several functions under `main()`.
let filename = i.span(self.ctx.tcx).filename(self.ctx.sess());
let has_doc_example = tests.found_tests != 0;
- let hir_id = self.ctx.tcx.hir().local_def_id_to_hir_id(i.def_id.expect_local());
+ // The `expect_real()` should be okay because `local_def_id_to_hir_id`
+ // would presumably panic if a fake `DefIndex` were passed.
+ let hir_id = self
+ .ctx
+ .tcx
+ .hir()
+ .local_def_id_to_hir_id(i.def_id.expect_real().expect_local());
let (level, source) = self.ctx.tcx.lint_level_at_node(MISSING_DOCS, hir_id);
// `missing_docs` is allow-by-default, so don't treat this as ignoring the item
// unless the user had an explicit `allow`
// item can be non-local e.g. when using #[doc(primitive = "pointer")]
if let Some((src_id, dst_id)) = id
.as_local()
- .and_then(|dst_id| item.def_id.as_local().map(|src_id| (src_id, dst_id)))
+ // The `expect_real()` should be okay because `local_def_id_to_hir_id`
+ // would presumably panic if a fake `DefIndex` were passed.
+ .and_then(|dst_id| {
+ item.def_id.expect_real().as_local().map(|src_id| (src_id, dst_id))
+ })
{
use rustc_hir::def_id::LOCAL_CRATE;
) {
diag_info.link_range = disambiguator_range;
report_diagnostic(cx.tcx, BROKEN_INTRA_DOC_LINKS, msg, &diag_info, |diag, _sp| {
- let msg = format!(
- "see https://doc.rust-lang.org/{}/rustdoc/linking-to-items-by-name.html#namespaces-and-disambiguators \
- for more info about disambiguators",
- crate::doc_rust_lang_org_channel(),
- );
- diag.note(&msg);
+ let msg = "see https://doc.rust-lang.org/nightly/rustdoc/linking-to-items-by-name.html#namespaces-and-disambiguators for more info about disambiguators";
+ diag.note(msg);
});
}
{
return false;
}
- let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_local());
+ // The `expect_real()` should be okay because `local_def_id_to_hir_id`
+ // would presumably panic if a fake `DefIndex` were passed.
+ let hir_id = cx.tcx.hir().local_def_id_to_hir_id(item.def_id.expect_real().expect_local());
if cx.tcx.hir().attrs(hir_id).lists(sym::doc).has_word(sym::hidden)
|| inherits_doc_hidden(cx.tcx, hir_id)
{
-Subproject commit b61c24f3521303d442fa86fe691bc8e6acc15103
+Subproject commit c78cf18a07f19faa3e51f15220bca39f47d437e0
// == Test [gdb|lldb]-[command|check] are parsed correctly ===
// should-fail
+// needs-run-enabled
// compile-flags:-g
// === GDB TESTS ===================================================================================
12| 1| if b {
13| 1| println!("non_async_func println in block");
14| 1| }
+ ^0
15| 1|}
16| |
- 17| |// FIXME(#83985): The auto-generated closure in an async function is failing to include
- 18| |// the println!() and `let` assignment lines in the coverage code region(s), as it does in the
- 19| |// non-async function above, unless the `println!()` is inside a covered block.
+ 17| |
+ 18| |
+ 19| |
20| 1|async fn async_func() {
21| 1| println!("async_func was covered");
22| 1| let b = true;
^0
26| 1|}
27| |
- 28| |// FIXME(#83985): As above, this async function only has the `println!()` macro call, which is not
- 29| |// showing coverage, so the entire async closure _appears_ uncovered; but this is not exactly true.
- 30| |// It's only certain kinds of lines and/or their context that results in missing coverage.
+ 28| |
+ 29| |
+ 30| |
31| 1|async fn async_func_just_println() {
32| 1| println!("async_func_just_println was covered");
33| 1|}
37| 0| countdown = 10;
38| 0| }
39| 0| "alt string 2".to_owned()
- 40| 1| };
+ 40| 0| };
41| 1| println!(
42| 1| "The string or alt: {}"
43| 1| ,
125| 0| countdown = 10;
126| 0| }
127| 0| "closure should be unused".to_owned()
- 128| 1| };
- 129| 1|
+ 128| 0| };
+ 129| |
130| 1| let mut countdown = 10;
131| 1| let _short_unused_closure = | _unused_arg: u8 | countdown += 1;
^0
- 132| 1|
- 133| 1| // Macros can sometimes confuse the coverage results. Compare this next assignment, with an
- 134| 1| // unused closure that invokes the `println!()` macro, with the closure assignment above, that
- 135| 1| // does not use a macro. The closure above correctly shows `0` executions.
- 136| 1| let _short_unused_closure = | _unused_arg: u8 | println!("not called");
- 137| 1| // The closure assignment above is executed, with a line count of `1`, but the `println!()`
- 138| 1| // could not have been called, and yet, there is no indication that it wasn't...
- 139| 1|
- 140| 1| // ...but adding block braces gives the expected result, showing the block was not executed.
+ 132| |
+ 133| |
+ 134| 1| let short_used_covered_closure_macro = | used_arg: u8 | println!("called");
+ 135| 1| let short_used_not_covered_closure_macro = | used_arg: u8 | println!("not called");
+ ^0
+ 136| 1| let _short_unused_closure_macro = | _unused_arg: u8 | println!("not called");
+ ^0
+ 137| |
+ 138| |
+ 139| |
+ 140| |
141| 1| let _short_unused_closure_block = | _unused_arg: u8 | { println!("not called") };
^0
- 142| 1|
+ 142| |
143| 1| let _shortish_unused_closure = | _unused_arg: u8 | {
144| 0| println!("not called")
- 145| 1| };
- 146| 1|
+ 145| 0| };
+ 146| |
147| 1| let _as_short_unused_closure = |
148| | _unused_arg: u8
- 149| 1| | { println!("not called") };
- ^0
- 150| 1|
+ 149| 0| | { println!("not called") };
+ 150| |
151| 1| let _almost_as_short_unused_closure = |
152| | _unused_arg: u8
- 153| 1| | { println!("not called") }
- ^0
- 154| 1| ;
- 155| 1|}
+ 153| 0| | { println!("not called") }
+ 154| | ;
+ 155| |
+ 156| |
+ 157| |
+ 158| |
+ 159| |
+ 160| 1| let _short_unused_closure_line_break_no_block = | _unused_arg: u8 |
+ 161| 0|println!("not called")
+ 162| | ;
+ 163| |
+ 164| 1| let _short_unused_closure_line_break_no_block2 =
+ 165| | | _unused_arg: u8 |
+ 166| 0| println!(
+ 167| 0| "not called"
+ 168| 0| )
+ 169| | ;
+ 170| |
+ 171| 1| let short_used_not_covered_closure_line_break_no_block_embedded_branch =
+ 172| 1| | _unused_arg: u8 |
+ 173| 0| println!(
+ 174| 0| "not called: {}",
+ 175| 0| if is_true { "check" } else { "me" }
+ 176| 0| )
+ 177| | ;
+ 178| |
+ 179| 1| let short_used_not_covered_closure_line_break_block_embedded_branch =
+ 180| 1| | _unused_arg: u8 |
+ 181| 0| {
+ 182| 0| println!(
+ 183| 0| "not called: {}",
+ 184| 0| if is_true { "check" } else { "me" }
+ 185| | )
+ 186| 0| }
+ 187| | ;
+ 188| |
+ 189| 1| let short_used_covered_closure_line_break_no_block_embedded_branch =
+ 190| 1| | _unused_arg: u8 |
+ 191| 1| println!(
+ 192| 1| "not called: {}",
+ 193| 1| if is_true { "check" } else { "me" }
+ ^0
+ 194| 1| )
+ 195| | ;
+ 196| |
+ 197| 1| let short_used_covered_closure_line_break_block_embedded_branch =
+ 198| 1| | _unused_arg: u8 |
+ 199| 1| {
+ 200| 1| println!(
+ 201| 1| "not called: {}",
+ 202| 1| if is_true { "check" } else { "me" }
+ ^0
+ 203| | )
+ 204| 1| }
+ 205| | ;
+ 206| |
+ 207| 1| if is_false {
+ 208| 0| short_used_not_covered_closure_macro(0);
+ 209| 0| short_used_not_covered_closure_line_break_no_block_embedded_branch(0);
+ 210| 0| short_used_not_covered_closure_line_break_block_embedded_branch(0);
+ 211| 1| }
+ 212| 1| short_used_covered_closure_macro(0);
+ 213| 1| short_used_covered_closure_line_break_no_block_embedded_branch(0);
+ 214| 1| short_used_covered_closure_line_break_block_embedded_branch(0);
+ 215| 1|}
14| |
15| |macro_rules! on_error {
16| | ($value:expr, $error_message:expr) => {
- 17| 0| $value.or_else(|e| {
- 18| 0| let message = format!($error_message, e);
- 19| 0| if message.len() > 0 {
- 20| 0| println!("{}", message);
- 21| 0| Ok(String::from("ok"))
+ 17| | $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
+ 18| | let message = format!($error_message, e);
+ 19| | if message.len() > 0 {
+ 20| | println!("{}", message);
+ 21| | Ok(String::from("ok"))
22| | } else {
- 23| 0| bail!("error");
+ 23| | bail!("error");
24| | }
- 25| 0| })
+ 25| | })
26| | };
27| |}
28| |
14| |
15| |macro_rules! on_error {
16| | ($value:expr, $error_message:expr) => {
- 17| 0| $value.or_else(|e| {
- 18| 0| let message = format!($error_message, e);
- 19| 0| if message.len() > 0 {
- 20| 0| println!("{}", message);
- 21| 0| Ok(String::from("ok"))
+ 17| | $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
+ 18| | let message = format!($error_message, e);
+ 19| | if message.len() > 0 {
+ 20| | println!("{}", message);
+ 21| | Ok(String::from("ok"))
22| | } else {
- 23| 0| bail!("error");
+ 23| | bail!("error");
24| | }
- 25| 0| })
+ 25| | })
26| | };
27| |}
28| |
5| 1| if true {
6| 1| countdown = 10;
7| 1| }
+ ^0
8| |
9| | const B: u32 = 100;
10| 1| let x = if countdown > 7 {
24| 1| if true {
25| 1| countdown = 10;
26| 1| }
+ ^0
27| |
28| 1| if countdown > 7 {
29| 1| countdown -= 4;
41| 1| if true {
42| 1| countdown = 10;
43| 1| }
+ ^0
44| |
45| 1| if countdown > 7 {
46| 1| countdown -= 4;
53| | } else {
54| 0| return;
55| | }
- 56| | } // Note: closing brace shows uncovered (vs. `0` for implicit else) because condition literal
- 57| | // `true` was const-evaluated. The compiler knows the `if` block will be executed.
+ 56| 0| }
+ 57| |
58| |
59| 1| let mut countdown = 0;
60| 1| if true {
61| 1| countdown = 1;
62| 1| }
+ ^0
63| |
64| 1| let z = if countdown > 7 {
^0
8| 1|//! assert_eq!(1, 1);
9| |//! } else {
10| |//! // this is not!
- 11| |//! assert_eq!(1, 2);
+ 11| 0|//! assert_eq!(1, 2);
12| |//! }
13| 1|//! ```
14| |//!
74| 1| if true {
75| 1| assert_eq!(1, 1);
76| | } else {
- 77| | assert_eq!(1, 2);
+ 77| 0| assert_eq!(1, 2);
78| | }
79| 1|}
80| |
19| 1| if true {
20| 1| println!("Exiting with error...");
21| 1| return Err(1);
- 22| | }
- 23| |
- 24| | let _ = Firework { strength: 1000 };
- 25| |
- 26| | Ok(())
+ 22| 0| }
+ 23| 0|
+ 24| 0| let _ = Firework { strength: 1000 };
+ 25| 0|
+ 26| 0| Ok(())
27| 1|}
28| |
29| |// Expected program output:
30| 1| if true {
31| 1| println!("Exiting with error...");
32| 1| return Err(1);
- 33| | } // The remaining lines below have no coverage because `if true` (with the constant literal
- 34| | // `true`) is guaranteed to execute the `then` block, which is also guaranteed to `return`.
- 35| | // Thankfully, in the normal case, conditions are not guaranteed ahead of time, and as shown
- 36| | // in other tests, the lines below would have coverage (which would show they had `0`
- 37| | // executions, assuming the condition still evaluated to `true`).
- 38| |
- 39| | let _ = Firework { strength: 1000 };
- 40| |
- 41| | Ok(())
+ 33| 0| }
+ 34| 0|
+ 35| 0|
+ 36| 0|
+ 37| 0|
+ 38| 0|
+ 39| 0| let _ = Firework { strength: 1000 };
+ 40| 0|
+ 41| 0| Ok(())
42| 1|}
43| |
44| |// Expected program output:
9| 1| fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
10| 1| if true {
11| 1| if false {
- 12| | while true {
- 13| | }
+ 12| 0| while true {
+ 13| 0| }
14| 1| }
- 15| 1| write!(f, "error")?;
- ^0
- 16| | } else {
- 17| | }
+ 15| 1| write!(f, "cool")?;
+ ^0
+ 16| 0| } else {
+ 17| 0| }
18| |
19| 10| for i in 0..10 {
20| 10| if true {
21| 10| if false {
- 22| | while true {}
+ 22| 0| while true {}
23| 10| }
- 24| 10| write!(f, "error")?;
- ^0
- 25| | } else {
- 26| | }
+ 24| 10| write!(f, "cool")?;
+ ^0
+ 25| 0| } else {
+ 26| 0| }
27| | }
28| 1| Ok(())
29| 1| }
34| |impl std::fmt::Display for DisplayTest {
35| 1| fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
36| 1| if false {
- 37| | } else {
+ 37| 0| } else {
38| 1| if false {
- 39| | while true {}
+ 39| 0| while true {}
40| 1| }
- 41| 1| write!(f, "error")?;
- ^0
+ 41| 1| write!(f, "cool")?;
+ ^0
42| | }
43| 10| for i in 0..10 {
44| 10| if false {
- 45| | } else {
+ 45| 0| } else {
46| 10| if false {
- 47| | while true {}
+ 47| 0| while true {}
48| 10| }
- 49| 10| write!(f, "error")?;
- ^0
+ 49| 10| write!(f, "cool")?;
+ ^0
50| | }
51| | }
52| 1| Ok(())
11| | println!("called but not covered");
12| |}
13| |
- 14| 1|fn main() {
- 15| 1| do_not_add_coverage_1();
- 16| 1| do_not_add_coverage_2();
- 17| 1|}
+ 14| |#[no_coverage]
+ 15| |fn do_not_add_coverage_not_called() {
+ 16| | println!("not called and not covered");
+ 17| |}
+ 18| |
+ 19| 1|fn add_coverage_1() {
+ 20| 1| println!("called and covered");
+ 21| 1|}
+ 22| |
+ 23| 1|fn add_coverage_2() {
+ 24| 1| println!("called and covered");
+ 25| 1|}
+ 26| |
+ 27| 0|fn add_coverage_not_called() {
+ 28| 0| println!("not called but covered");
+ 29| 0|}
+ 30| |
+ 31| 1|fn main() {
+ 32| 1| do_not_add_coverage_1();
+ 33| 1| do_not_add_coverage_2();
+ 34| 1| add_coverage_1();
+ 35| 1| add_coverage_2();
+ 36| 1|}
+++ /dev/null
- 1| |// Enables `no_coverage` on individual functions
- 2| |
- 3| |#[feature(no_coverage)]
- 4| |#[no_coverage]
- 5| |fn do_not_add_coverage_1() {
- 6| | println!("called but not covered");
- 7| |}
- 8| |
- 9| |#[no_coverage]
- 10| |#[feature(no_coverage)]
- 11| |fn do_not_add_coverage_2() {
- 12| | println!("called but not covered");
- 13| |}
- 14| |
- 15| 1|fn main() {
- 16| 1| do_not_add_coverage_1();
- 17| 1| do_not_add_coverage_2();
- 18| 1|}
-
29| |// 2. Since the `panic_unwind.rs` test is allowed to unwind, it is also allowed to execute the
30| |// normal program exit cleanup, including writing out the current values of the coverage
31| |// counters.
- 32| |// 3. The coverage results show (interestingly) that the `panic!()` call did execute, but it does
- 33| |// not show coverage of the `if countdown == 1` branch in `main()` that calls
- 34| |// `might_panic(true)` (causing the call to `panic!()`).
- 35| |// 4. The reason `main()`s `if countdown == 1` branch, calling `might_panic(true)`, appears
- 36| |// "uncovered" is, InstrumentCoverage (intentionally) treats `TerminatorKind::Call` terminators
- 37| |// as non-branching, because when a program executes normally, they always are. Errors handled
- 38| |// via the try `?` operator produce error handling branches that *are* treated as branches in
- 39| |// coverage results. By treating calls without try `?` operators as non-branching (assumed to
- 40| |// return normally and continue) the coverage graph can be simplified, producing smaller,
- 41| |// faster binaries, and cleaner coverage results.
- 42| |// 5. The reason the coverage results actually show `panic!()` was called is most likely because
- 43| |// `panic!()` is a macro, not a simple function call, and there are other `Statement`s and/or
- 44| |// `Terminator`s that execute with a coverage counter before the panic and unwind occur.
- 45| |// 6. Since the common practice is not to use `panic!()` for error handling, the coverage
- 46| |// implementation avoids incurring an additional cost (in program size and execution time) to
- 47| |// improve coverage results for an event that is generally not "supposed" to happen.
- 48| |// 7. FIXME(#78544): This issue describes a feature request for a proposed option to enable
- 49| |// more accurate coverage results for tests that intentionally panic.
1| 1|fn main() {
2| 1| if false {
- 3| | loop {}
+ 3| 0| loop {}
4| 1| }
5| 1|}
}
}
-// FIXME(#83985): The auto-generated closure in an async function is failing to include
-// the println!() and `let` assignment lines in the coverage code region(s), as it does in the
-// non-async function above, unless the `println!()` is inside a covered block.
+
+
+
async fn async_func() {
println!("async_func was covered");
let b = true;
}
}
-// FIXME(#83985): As above, this async function only has the `println!()` macro call, which is not
-// showing coverage, so the entire async closure _appears_ uncovered; but this is not exactly true.
-// It's only certain kinds of lines and/or their context that results in missing coverage.
+
+
+
async fn async_func_just_println() {
println!("async_func_just_println was covered");
}
let mut countdown = 10;
let _short_unused_closure = | _unused_arg: u8 | countdown += 1;
- // Macros can sometimes confuse the coverage results. Compare this next assignment, with an
- // unused closure that invokes the `println!()` macro, with the closure assignment above, that
- // does not use a macro. The closure above correctly shows `0` executions.
- let _short_unused_closure = | _unused_arg: u8 | println!("not called");
- // The closure assignment above is executed, with a line count of `1`, but the `println!()`
- // could not have been called, and yet, there is no indication that it wasn't...
-
- // ...but adding block braces gives the expected result, showing the block was not executed.
+
+ let short_used_covered_closure_macro = | used_arg: u8 | println!("called");
+ let short_used_not_covered_closure_macro = | used_arg: u8 | println!("not called");
+ let _short_unused_closure_macro = | _unused_arg: u8 | println!("not called");
+
+
+
+
let _short_unused_closure_block = | _unused_arg: u8 | { println!("not called") };
let _shortish_unused_closure = | _unused_arg: u8 | {
_unused_arg: u8
| { println!("not called") }
;
+
+
+
+
+
+ let _short_unused_closure_line_break_no_block = | _unused_arg: u8 |
+println!("not called")
+ ;
+
+ let _short_unused_closure_line_break_no_block2 =
+ | _unused_arg: u8 |
+ println!(
+ "not called"
+ )
+ ;
+
+ let short_used_not_covered_closure_line_break_no_block_embedded_branch =
+ | _unused_arg: u8 |
+ println!(
+ "not called: {}",
+ if is_true { "check" } else { "me" }
+ )
+ ;
+
+ let short_used_not_covered_closure_line_break_block_embedded_branch =
+ | _unused_arg: u8 |
+ {
+ println!(
+ "not called: {}",
+ if is_true { "check" } else { "me" }
+ )
+ }
+ ;
+
+ let short_used_covered_closure_line_break_no_block_embedded_branch =
+ | _unused_arg: u8 |
+ println!(
+ "not called: {}",
+ if is_true { "check" } else { "me" }
+ )
+ ;
+
+ let short_used_covered_closure_line_break_block_embedded_branch =
+ | _unused_arg: u8 |
+ {
+ println!(
+ "not called: {}",
+ if is_true { "check" } else { "me" }
+ )
+ }
+ ;
+
+ if is_false {
+ short_used_not_covered_closure_macro(0);
+ short_used_not_covered_closure_line_break_no_block_embedded_branch(0);
+ short_used_not_covered_closure_line_break_block_embedded_branch(0);
+ }
+ short_used_covered_closure_macro(0);
+ short_used_covered_closure_line_break_no_block_embedded_branch(0);
+ short_used_covered_closure_line_break_block_embedded_branch(0);
}
macro_rules! on_error {
($value:expr, $error_message:expr) => {
- $value.or_else(|e| {
+ $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
let message = format!($error_message, e);
if message.len() > 0 {
println!("{}", message);
macro_rules! on_error {
($value:expr, $error_message:expr) => {
- $value.or_else(|e| {
+ $value.or_else(|e| { // FIXME(85000): no coverage in closure macros
let message = format!($error_message, e);
if message.len() > 0 {
println!("{}", message);
} else {
return;
}
- } // Note: closing brace shows uncovered (vs. `0` for implicit else) because condition literal
- // `true` was const-evaluated. The compiler knows the `if` block will be executed.
+ }
+
let mut countdown = 0;
if true {
if true {
println!("Exiting with error...");
return Err(1);
- } // The remaining lines below have no coverage because `if true` (with the constant literal
- // `true`) is guaranteed to execute the `then` block, which is also guaranteed to `return`.
- // Thankfully, in the normal case, conditions are not guaranteed ahead of time, and as shown
- // in other tests, the lines below would have coverage (which would show they had `0`
- // executions, assuming the condition still evaluated to `true`).
+ }
+
+
+
+
let _ = Firework { strength: 1000 };
while true {
}
}
- write!(f, "error")?;
+ write!(f, "cool")?;
} else {
}
if false {
while true {}
}
- write!(f, "error")?;
+ write!(f, "cool")?;
} else {
}
}
if false {
while true {}
}
- write!(f, "error")?;
+ write!(f, "cool")?;
}
for i in 0..10 {
if false {
if false {
while true {}
}
- write!(f, "error")?;
+ write!(f, "cool")?;
}
}
Ok(())
println!("called but not covered");
}
+#[no_coverage]
+fn do_not_add_coverage_not_called() {
+ println!("not called and not covered");
+}
+
+fn add_coverage_1() {
+ println!("called and covered");
+}
+
+fn add_coverage_2() {
+ println!("called and covered");
+}
+
+fn add_coverage_not_called() {
+ println!("not called but covered");
+}
+
fn main() {
do_not_add_coverage_1();
do_not_add_coverage_2();
+ add_coverage_1();
+ add_coverage_2();
}
+++ /dev/null
-// Enables `no_coverage` on individual functions
-
-#[feature(no_coverage)]
-#[no_coverage]
-fn do_not_add_coverage_1() {
- println!("called but not covered");
-}
-
-#[no_coverage]
-#[feature(no_coverage)]
-fn do_not_add_coverage_2() {
- println!("called but not covered");
-}
-
-fn main() {
- do_not_add_coverage_1();
- do_not_add_coverage_2();
-}
// 2. Since the `panic_unwind.rs` test is allowed to unwind, it is also allowed to execute the
// normal program exit cleanup, including writing out the current values of the coverage
// counters.
-// 3. The coverage results show (interestingly) that the `panic!()` call did execute, but it does
-// not show coverage of the `if countdown == 1` branch in `main()` that calls
-// `might_panic(true)` (causing the call to `panic!()`).
-// 4. The reason `main()`s `if countdown == 1` branch, calling `might_panic(true)`, appears
-// "uncovered" is, InstrumentCoverage (intentionally) treats `TerminatorKind::Call` terminators
-// as non-branching, because when a program executes normally, they always are. Errors handled
-// via the try `?` operator produce error handling branches that *are* treated as branches in
-// coverage results. By treating calls without try `?` operators as non-branching (assumed to
-// return normally and continue) the coverage graph can be simplified, producing smaller,
-// faster binaries, and cleaner coverage results.
-// 5. The reason the coverage results actually show `panic!()` was called is most likely because
-// `panic!()` is a macro, not a simple function call, and there are other `Statement`s and/or
-// `Terminator`s that execute with a coverage counter before the panic and unwind occur.
-// 6. Since the common practice is not to use `panic!()` for error handling, the coverage
-// implementation avoids incurring an additional cost (in program size and execution time) to
-// improve coverage results for an event that is generally not "supposed" to happen.
-// 7. FIXME(#78544): This issue describes a feature request for a proposed option to enable
-// more accurate coverage results for tests that intentionally panic.
--- /dev/null
+goto: file://|DOC_PATH|/../src/test_docs/lib.rs.html
+// Check that we can click on the line number.
+click: (40, 224) // This is the position of the span for line 4.
+// Unfortunately, "#4" isn't a valid query selector, so we have to go around that limitation
+// by instead getting the nth span.
+assert: (".line-numbers > span:nth-child(4)", "class", "line-highlighted")
+// We now check that the good spans are highlighted
+goto: file://|DOC_PATH|/../src/test_docs/lib.rs.html#4-6
+assert-false: (".line-numbers > span:nth-child(3)", "class", "line-highlighted")
+assert: (".line-numbers > span:nth-child(4)", "class", "line-highlighted")
+assert: (".line-numbers > span:nth-child(5)", "class", "line-highlighted")
+assert: (".line-numbers > span:nth-child(6)", "class", "line-highlighted")
+assert-false: (".line-numbers > span:nth-child(7)", "class", "line-highlighted")
+++ /dev/null
-#![crate_type = "lib"]
-#![deny(warnings)]
-
-#[doc(test(no_crate_inject))] //~ ERROR
-//~^ WARN
-pub fn foo() {}
-
-pub mod bar {
- #![doc(test(no_crate_inject))] //~ ERROR
- //~^ WARN
-}
+++ /dev/null
-error: `#![doc(test(...)]` is only allowed as a crate-level attribute
- --> $DIR/doc-attr2.rs:4:7
- |
-LL | #[doc(test(no_crate_inject))]
- | ^^^^^^^^^^^^^^^^^^^^^
- |
-note: the lint level is defined here
- --> $DIR/doc-attr2.rs:2:9
- |
-LL | #![deny(warnings)]
- | ^^^^^^^^
- = note: `#[deny(invalid_doc_attributes)]` implied by `#[deny(warnings)]`
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
-
-error: `#![doc(test(...)]` is only allowed as a crate-level attribute
- --> $DIR/doc-attr2.rs:9:12
- |
-LL | #![doc(test(no_crate_inject))]
- | ^^^^^^^^^^^^^^^^^^^^^
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
-
-error: aborting due to 2 previous errors
-
--- /dev/null
+#![feature(doc_cfg)]
+#[doc(cfg = "x")] //~ ERROR not followed by parentheses
+#[doc(cfg(x, y))] //~ ERROR multiple `cfg` predicates
+struct S {}
--- /dev/null
+error: `cfg` is not followed by parentheses
+ --> $DIR/invalid-cfg.rs:2:7
+ |
+LL | #[doc(cfg = "x")]
+ | ^^^^^^^^^ help: expected syntax is: `cfg(/* predicate */)`
+
+error: multiple `cfg` predicates are specified
+ --> $DIR/invalid-cfg.rs:3:14
+ |
+LL | #[doc(cfg(x, y))]
+ | ^
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+#![crate_type = "lib"]
+#![deny(warnings)]
+
+#[doc(test(no_crate_inject))]
+//~^ ERROR can only be applied at the crate level
+//~| WARN is being phased out
+//~| HELP to apply to the crate, use an inner attribute
+//~| SUGGESTION #![doc(test(no_crate_inject))]
+#[doc(inline)]
+//~^ ERROR can only be applied to a `use` item
+//~| WARN is being phased out
+pub fn foo() {}
+
+pub mod bar {
+ #![doc(test(no_crate_inject))]
+ //~^ ERROR can only be applied at the crate level
+ //~| WARN is being phased out
+
+ #[doc(test(no_crate_inject))]
+ //~^ ERROR can only be applied at the crate level
+ //~| WARN is being phased out
+ #[doc(inline)]
+ //~^ ERROR can only be applied to a `use` item
+ //~| WARN is being phased out
+ pub fn baz() {}
+}
+
+#[doc(inline)]
+#[doc(no_inline)]
+//~^^ ERROR conflicting doc inlining attributes
+//~| HELP remove one of the conflicting attributes
+pub use bar::baz;
--- /dev/null
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:4:7
+ |
+LL | #[doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/invalid-doc-attr.rs:2:9
+ |
+LL | #![deny(warnings)]
+ | ^^^^^^^^
+ = note: `#[deny(invalid_doc_attributes)]` implied by `#[deny(warnings)]`
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+help: to apply to the crate, use an inner attribute
+ |
+LL | #![doc(test(no_crate_inject))]
+ |
+
+error: this attribute can only be applied to a `use` item
+ --> $DIR/invalid-doc-attr.rs:9:7
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ only applicable on `use` items
+...
+LL | pub fn foo() {}
+ | ------------ not a `use` item
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:15:12
+ |
+LL | #![doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+
+error: conflicting doc inlining attributes
+ --> $DIR/invalid-doc-attr.rs:28:7
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ this attribute...
+LL | #[doc(no_inline)]
+ | ^^^^^^^^^ ...conflicts with this attribute
+ |
+ = help: remove one of the conflicting attributes
+
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:19:11
+ |
+LL | #[doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+
+error: this attribute can only be applied to a `use` item
+ --> $DIR/invalid-doc-attr.rs:22:11
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ only applicable on `use` items
+...
+LL | pub fn baz() {}
+ | ------------ not a `use` item
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+
+error: aborting due to 6 previous errors
+
pub fn uses_cfg_target_feature() {
uses_target_feature();
}
+
+// multiple attributes should be allowed
+// @has doc_cfg/fn.multiple_attrs.html \
+// '//*[@id="main"]/*[@class="item-info"]/*[@class="stab portability"]' \
+// 'This is supported on x and y and z only.'
+#[doc(inline, cfg(x))]
+#[doc(cfg(y), cfg(z))]
+pub fn multiple_attrs() {}
// @has issue_55364/subone/index.html
// These foo/bar links in the module's documentation should refer inside `subone`
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
+// @has - '//section[@id="main"]/details[@open=""]/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
+// @has - '//section[@id="main"]/details[@open=""]/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
pub mod subone {
//! See either [foo] or [bar].
// This should refer to subone's `bar`
// @has issue_55364/subone/fn.foo.html
- // @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
+ // @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
/// See [bar]
pub fn foo() {}
// This should refer to subone's `foo`
// @has issue_55364/subone/fn.bar.html
- // @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
+ // @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
/// See [foo]
pub fn bar() {}
}
// @!has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
// @!has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
// Instead it should be referencing the top level functions
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="../fn.foo.html"]' 'foo'
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="../fn.bar.html"]' 'bar'
+// @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="../fn.foo.html"]' 'foo'
+// @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="../fn.bar.html"]' 'bar'
// Though there should be such links later
// @has - '//section[@id="main"]/table//tr[@class="module-item"]/td/a[@class="fn"][@href="fn.foo.html"]' 'foo'
// @has - '//section[@id="main"]/table//tr[@class="module-item"]/td/a[@class="fn"][@href="fn.bar.html"]' 'bar'
// Despite the module's docs referring to the top level foo/bar,
// this should refer to subtwo's `bar`
// @has issue_55364/subtwo/fn.foo.html
- // @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
+ // @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="fn.bar.html"]' 'bar'
/// See [bar]
pub fn foo() {}
// Despite the module's docs referring to the top level foo/bar,
// this should refer to subtwo's `foo`
// @has issue_55364/subtwo/fn.bar.html
- // @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
+ // @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="fn.foo.html"]' 'foo'
/// See [foo]
pub fn bar() {}
}
// @has issue_55364/subthree/index.html
// This module should also refer to the top level foo/bar
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="../fn.foo.html"]' 'foo'
-// @has - '//section[@id="main"]/div[@class="docblock"]//a[@href="../fn.bar.html"]' 'bar'
+// @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="../fn.foo.html"]' 'foo'
+// @has - '//section[@id="main"]/details/div[@class="docblock"]//a[@href="../fn.bar.html"]' 'bar'
pub mod subthree {
//! See either [foo][super::foo] or [bar][super::bar]
}
+++ /dev/null
-#![crate_type = "lib"]
-#![deny(warnings)]
-
-#[doc(test(no_crate_inject))] //~ ERROR
-//~^ WARN
-pub fn foo() {}
-
-pub mod bar {
- #![doc(test(no_crate_inject))] //~ ERROR
- //~^ WARN
-}
+++ /dev/null
-error: `#![doc(test(...)]` is only allowed as a crate-level attribute
- --> $DIR/doc-attr2.rs:4:7
- |
-LL | #[doc(test(no_crate_inject))]
- | ^^^^^^^^^^^^^^^^^^^^^
- |
-note: the lint level is defined here
- --> $DIR/doc-attr2.rs:2:9
- |
-LL | #![deny(warnings)]
- | ^^^^^^^^
- = note: `#[deny(invalid_doc_attributes)]` implied by `#[deny(warnings)]`
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
-
-error: `#![doc(test(...)]` is only allowed as a crate-level attribute
- --> $DIR/doc-attr2.rs:9:12
- |
-LL | #![doc(test(no_crate_inject))]
- | ^^^^^^^^^^^^^^^^^^^^^
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
-
-error: aborting due to 2 previous errors
-
--- /dev/null
+#![crate_type = "lib"]
+#![deny(warnings)]
+
+#[doc(test(no_crate_inject))]
+//~^ ERROR can only be applied at the crate level
+//~| WARN is being phased out
+//~| HELP to apply to the crate, use an inner attribute
+//~| SUGGESTION #![doc(test(no_crate_inject))]
+#[doc(inline)]
+//~^ ERROR can only be applied to a `use` item
+//~| WARN is being phased out
+pub fn foo() {}
+
+pub mod bar {
+ #![doc(test(no_crate_inject))]
+ //~^ ERROR can only be applied at the crate level
+ //~| WARN is being phased out
+
+ #[doc(test(no_crate_inject))]
+ //~^ ERROR can only be applied at the crate level
+ //~| WARN is being phased out
+ #[doc(inline)]
+ //~^ ERROR can only be applied to a `use` item
+ //~| WARN is being phased out
+ pub fn baz() {}
+}
+
+#[doc(inline)]
+#[doc(no_inline)]
+//~^^ ERROR conflicting doc inlining attributes
+//~| HELP remove one of the conflicting attributes
+pub use bar::baz;
--- /dev/null
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:4:7
+ |
+LL | #[doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/invalid-doc-attr.rs:2:9
+ |
+LL | #![deny(warnings)]
+ | ^^^^^^^^
+ = note: `#[deny(invalid_doc_attributes)]` implied by `#[deny(warnings)]`
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+help: to apply to the crate, use an inner attribute
+ |
+LL | #![doc(test(no_crate_inject))]
+ |
+
+error: this attribute can only be applied to a `use` item
+ --> $DIR/invalid-doc-attr.rs:9:7
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ only applicable on `use` items
+...
+LL | pub fn foo() {}
+ | ------------ not a `use` item
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:15:12
+ |
+LL | #![doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+
+error: conflicting doc inlining attributes
+ --> $DIR/invalid-doc-attr.rs:28:7
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ this attribute...
+LL | #[doc(no_inline)]
+ | ^^^^^^^^^ ...conflicts with this attribute
+ |
+ = help: remove one of the conflicting attributes
+
+error: this attribute can only be applied at the crate level
+ --> $DIR/invalid-doc-attr.rs:19:11
+ |
+LL | #[doc(test(no_crate_inject))]
+ | ^^^^^^^^^^^^^^^^^^^^^
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level for more information
+
+error: this attribute can only be applied to a `use` item
+ --> $DIR/invalid-doc-attr.rs:22:11
+ |
+LL | #[doc(inline)]
+ | ^^^^^^ only applicable on `use` items
+...
+LL | pub fn baz() {}
+ | ------------ not a `use` item
+ |
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #82730 <https://github.com/rust-lang/rust/issues/82730>
+ = note: read https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#docno_inlinedocinline for more information
+
+error: aborting due to 6 previous errors
+
// Unfortunately, LLVM has no "disable" option for this, so we have to set
// "enable" to 0 instead.
-// compile-flags:-g -Cllvm-args=-enable-tail-merge=0 -Cllvm-args=-opt-bisect-limit=0
+// compile-flags:-g -Copt-level=0 -Cllvm-args=-enable-tail-merge=0
// compile-flags:-Cforce-frame-pointers=yes
// ignore-pretty issue #37195
// ignore-emscripten spawning processes is not supported
// ignore-sgx no processes
-// normalize-stderr-test ".*\n" -> ""
-
-// Note that above `-opt-bisect-limit=0` is used to basically disable
-// optimizations. It creates tons of output on stderr, hence we normalize
-// that away entirely.
use std::env;
// build-pass
// compile-flags: -C panic=unwind
+// needs-unwind
// ignore-emscripten no panic_unwind implementation
// ignore-wasm32 no panic_unwind implementation
// ignore-wasm64 no panic_unwind implementation
const fn no_codegen<T>() {
if false {
+ // This bad constant is only used in dead code in a no-codegen function... and yet we still
+ // must make sure that the build fails.
let _ = PrintName::<T>::VOID; //~ERROR could not evaluate static initializer
}
}
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
error[E0080]: could not evaluate static initializer
- --> $DIR/erroneous-const.rs:13:17
+ --> $DIR/erroneous-const.rs:15:17
|
LL | let _ = PrintName::<T>::VOID;
| ^^^^^^^^^^^^^^^^^^^^
| |
| referenced constant has errors
- | inside `no_codegen::<i32>` at $DIR/erroneous-const.rs:13:17
+ | inside `no_codegen::<i32>` at $DIR/erroneous-const.rs:15:17
...
LL | pub static FOO: () = no_codegen::<i32>();
- | ------------------- inside `FOO` at $DIR/erroneous-const.rs:17:22
+ | ------------------- inside `FOO` at $DIR/erroneous-const.rs:19:22
error: aborting due to previous error; 2 warnings emitted
--- /dev/null
+//! Make sure we error on erroneous consts even if they are unused.
+#![warn(const_err, unconditional_panic)]
+
+struct PrintName<T>(T);
+impl<T> PrintName<T> {
+ const VOID: () = [()][2]; //~WARN any use of this value will cause an error
+ //~^ WARN this operation will panic at runtime
+ //~| WARN this was previously accepted by the compiler but is being phased out
+}
+
+pub static FOO: () = {
+ if false {
+ // This bad constant is only used in dead code in a static initializer... and yet we still
+ // must make sure that the build fails.
+ let _ = PrintName::<i32>::VOID; //~ERROR could not evaluate static initializer
+ }
+};
+
+fn main() {
+ FOO
+}
--- /dev/null
+warning: this operation will panic at runtime
+ --> $DIR/erroneous-const2.rs:6:22
+ |
+LL | const VOID: () = [()][2];
+ | ^^^^^^^ index out of bounds: the length is 1 but the index is 2
+ |
+note: the lint level is defined here
+ --> $DIR/erroneous-const2.rs:2:20
+ |
+LL | #![warn(const_err, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: any use of this value will cause an error
+ --> $DIR/erroneous-const2.rs:6:22
+ |
+LL | const VOID: () = [()][2];
+ | -----------------^^^^^^^-
+ | |
+ | index out of bounds: the length is 1 but the index is 2
+ |
+note: the lint level is defined here
+ --> $DIR/erroneous-const2.rs:2:9
+ |
+LL | #![warn(const_err, unconditional_panic)]
+ | ^^^^^^^^^
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
+
+error[E0080]: could not evaluate static initializer
+ --> $DIR/erroneous-const2.rs:15:17
+ |
+LL | let _ = PrintName::<i32>::VOID;
+ | ^^^^^^^^^^^^^^^^^^^^^^ referenced constant has errors
+
+error: aborting due to previous error; 2 warnings emitted
+
+For more information about this error, try `rustc --explain E0080`.
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:13:5
+ --> $DIR/promoted_errors.rs:15:5
|
LL | 0 - 1
| ^^^^^
| |
| attempt to compute `0_u32 - 1_u32`, which would overflow
- | inside `overflow` at $DIR/promoted_errors.rs:13:5
- | inside `X` at $DIR/promoted_errors.rs:33:29
+ | inside `overflow` at $DIR/promoted_errors.rs:15:5
+ | inside `X` at $DIR/promoted_errors.rs:38:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
| |__-
|
note: the lint level is defined here
- --> $DIR/promoted_errors.rs:9:9
+ --> $DIR/promoted_errors.rs:11:9
|
LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
| ^^^^^^^^^
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:33:28
+ --> $DIR/promoted_errors.rs:38:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:18:5
+ --> $DIR/promoted_errors.rs:20:5
|
LL | 1 / 0
| ^^^^^
| |
| attempt to divide `1_i32` by zero
- | inside `div_by_zero1` at $DIR/promoted_errors.rs:18:5
- | inside `X` at $DIR/promoted_errors.rs:36:29
+ | inside `div_by_zero1` at $DIR/promoted_errors.rs:20:5
+ | inside `X` at $DIR/promoted_errors.rs:41:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
| |__-
|
note: the lint level is defined here
- --> $DIR/promoted_errors.rs:9:9
+ --> $DIR/promoted_errors.rs:11:9
|
LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
| ^^^^^^^^^
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:36:28
+ --> $DIR/promoted_errors.rs:41:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:13:5
+ --> $DIR/promoted_errors.rs:15:5
|
LL | 0 - 1
| ^^^^^
| |
| attempt to compute `0_u32 - 1_u32`, which would overflow
- | inside `overflow` at $DIR/promoted_errors.rs:13:5
- | inside `X` at $DIR/promoted_errors.rs:33:29
+ | inside `overflow` at $DIR/promoted_errors.rs:15:5
+ | inside `X` at $DIR/promoted_errors.rs:38:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
| |__-
|
note: the lint level is defined here
- --> $DIR/promoted_errors.rs:9:9
+ --> $DIR/promoted_errors.rs:11:9
|
LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
| ^^^^^^^^^
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:33:28
+ --> $DIR/promoted_errors.rs:38:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
// build-pass
// ignore-pass (test emits codegen-time warnings and verifies that they are not errors)
+//! This test ensures that when we promote code that fails to evaluate, the build still succeeds.
+
#![warn(const_err, arithmetic_overflow, unconditional_panic)]
// The only way to have promoteds that fail is in `const fn` called from `const`/`static`.
[1, 2, 3][4]
}
+// An unused constant containing failing promoteds.
+// This should work as long as `const_err` can be turned into just a warning;
+// once it turns into a hard error, just remove `X`.
const X: () = {
let _x: &'static u32 = &overflow();
//[opt_with_overflow_checks,noopt]~^ WARN any use of this value will cause an error
let _x: &'static i32 = &oob();
};
-fn main() {}
+const fn mk_false() -> bool { false }
+
+// An actually used constant referencing failing promoteds in dead code.
+// This needs to always work.
+const Y: () = {
+ if mk_false() {
+ let _x: &'static u32 = &overflow();
+ let _x: &'static i32 = &div_by_zero1();
+ let _x: &'static i32 = &div_by_zero2();
+ let _x: &'static i32 = &div_by_zero3();
+ let _x: &'static i32 = &oob();
+ }
+ ()
+};
+
+fn main() {
+ let _y = Y;
+}
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ inbounds test failed: 0x0 is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not a valid pointer for this operation
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ inbounds test failed: 0x0 is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not a valid pointer for this operation
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5
-// only-x86_64
+#![feature(rustc_attrs)]
-#[cfg(target_arch = "x86")]
-use std::arch::x86::*;
-#[cfg(target_arch = "x86_64")]
-use std::arch::x86_64::*;
+#[rustc_args_required_const(0)]
+fn foo(_imm8: i32) {}
-unsafe fn pclmul(a: __m128i, b: __m128i) -> __m128i {
+fn bar() {
let imm8 = 3;
- _mm_clmulepi64_si128(a, b, imm8) //~ ERROR argument 3 is required to be a constant
+ foo(imm8) //~ ERROR argument 1 is required to be a constant
}
fn main() {}
-error: argument 3 is required to be a constant
- --> $DIR/const_arg_local.rs:10:5
+error: argument 1 is required to be a constant
+ --> $DIR/const_arg_local.rs:8:5
|
-LL | _mm_clmulepi64_si128(a, b, imm8)
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL | foo(imm8)
+ | ^^^^^^^^^
error: aborting due to previous error
-// only-x86_64
+#![feature(rustc_attrs)]
-#[cfg(target_arch = "x86")]
-use std::arch::x86::*;
-#[cfg(target_arch = "x86_64")]
-use std::arch::x86_64::*;
+#[rustc_args_required_const(0)]
+fn foo(_imm8: i32) {}
-unsafe fn pclmul(a: __m128i, b: __m128i) -> __m128i {
- _mm_clmulepi64_si128(a, b, *&mut 42) //~ ERROR argument 3 is required to be a constant
+fn bar() {
+ foo(*&mut 42) //~ ERROR argument 1 is required to be a constant
}
fn main() {}
-error: argument 3 is required to be a constant
- --> $DIR/const_arg_promotable.rs:9:5
+error: argument 1 is required to be a constant
+ --> $DIR/const_arg_promotable.rs:7:5
|
-LL | _mm_clmulepi64_si128(a, b, *&mut 42)
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL | foo(*&mut 42)
+ | ^^^^^^^^^^^^^
error: aborting due to previous error
-// only-x86_64
+#![feature(rustc_attrs)]
-#[cfg(target_arch = "x86")]
-use std::arch::x86::*;
-#[cfg(target_arch = "x86_64")]
-use std::arch::x86_64::*;
+#[rustc_args_required_const(0)]
+fn foo(_imm8: i32) {}
-unsafe fn pclmul(a: __m128i, b: __m128i, imm8: i32) -> __m128i {
- _mm_clmulepi64_si128(a, b, imm8) //~ ERROR argument 3 is required to be a constant
+fn bar(imm8: i32) {
+ foo(imm8) //~ ERROR argument 1 is required to be a constant
}
fn main() {}
-error: argument 3 is required to be a constant
- --> $DIR/const_arg_wrapper.rs:9:5
+error: argument 1 is required to be a constant
+ --> $DIR/const_arg_wrapper.rs:7:5
|
-LL | _mm_clmulepi64_si128(a, b, imm8)
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+LL | foo(imm8)
+ | ^^^^^^^^^
error: aborting due to previous error
LL | unsafe { intrinsics::ptr_offset_from(self, origin) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: 0x0 is not a valid pointer
+ | null pointer is not a valid pointer for this operation
| inside `ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `OFFSET_FROM_NULL` at $DIR/offset_from_ub.rs:36:14
|
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
+ | pointer arithmetic failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `AFTER_END` at $DIR/offset_ub.rs:7:43
|
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
+ | pointer arithmetic failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `AFTER_ARRAY` at $DIR/offset_ub.rs:8:45
|
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
+ | pointer arithmetic failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:15:50
|
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: 0x0 is not a valid pointer
+ | pointer arithmetic failed: 0x0 is not a valid pointer
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:19:50
|
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | inbounds test failed: pointer must be in-bounds at offset $TWO_WORDS, but is outside bounds of alloc2 which has size $WORD
+ | pointer arithmetic failed: pointer must be in-bounds at offset $TWO_WORDS, but is outside bounds of alloc2 which has size $WORD
| inside `ptr::const_ptr::<impl *const usize>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| inside `_` at $DIR/ptr_comparisons.rs:61:34
|
-// build-fail
+// run-pass
// aux-build:main_functions.rs
#![feature(imported_main)]
extern crate main_functions;
-pub use main_functions::boilerplate as main; //~ ERROR entry symbol `main` from foreign crate
-
-// FIXME: Should be run-pass
+pub use main_functions::boilerplate as main;
+++ /dev/null
-error: entry symbol `main` from foreign crate is not yet supported.
- --> $DIR/imported_main_from_extern_crate.rs:7:9
- |
-LL | pub use main_functions::boilerplate as main;
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
- = note: see issue #28937 <https://github.com/rust-lang/rust/issues/28937> for more information
-
-error: aborting due to previous error
-
LL | mod module_that_doesnt_exist;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
- = help: to create the module `module_that_doesnt_exist`, create file "$DIR/module_that_doesnt_exist.rs"
+ = help: to create the module `module_that_doesnt_exist`, create file "$DIR/module_that_doesnt_exist.rs" or "$DIR/module_that_doesnt_exist/mod.rs"
error: aborting due to previous error
+++ /dev/null
-// Test use of advanced const fn without the `const_fn` feature gate.
-
-const fn foo() -> usize { 0 } // ok
-
-trait Foo {
- const fn foo() -> u32; //~ ERROR functions in traits cannot be declared const
- const fn bar() -> u32 { 0 } //~ ERROR functions in traits cannot be declared const
-}
-
-impl Foo for u32 {
- const fn foo() -> u32 { 0 } //~ ERROR functions in traits cannot be declared const
-}
-
-trait Bar {}
-
-impl dyn Bar {
- const fn baz() -> u32 { 0 } // ok
-}
-
-static FOO: usize = foo();
-const BAR: usize = foo();
-
-macro_rules! constant {
- ($n:ident: $t:ty = $v:expr) => {
- const $n: $t = $v;
- }
-}
-
-constant! {
- BAZ: usize = foo()
-}
-
-fn main() {
- let x: [usize; foo()] = [];
-}
+++ /dev/null
-error[E0379]: functions in traits cannot be declared const
- --> $DIR/feature-gate-const_fn.rs:6:5
- |
-LL | const fn foo() -> u32;
- | ^^^^^ functions in traits cannot be const
-
-error[E0379]: functions in traits cannot be declared const
- --> $DIR/feature-gate-const_fn.rs:7:5
- |
-LL | const fn bar() -> u32 { 0 }
- | ^^^^^ functions in traits cannot be const
-
-error[E0379]: functions in traits cannot be declared const
- --> $DIR/feature-gate-const_fn.rs:11:5
- |
-LL | const fn foo() -> u32 { 0 }
- | ^^^^^ functions in traits cannot be const
-
-error: aborting due to 3 previous errors
-
-For more information about this error, try `rustc --explain E0379`.
--- /dev/null
+#[link(name = "foo", modifiers = "")]
+//~^ ERROR: native link modifiers are experimental
+extern "C" {}
+
+fn main() {}
--- /dev/null
+error[E0658]: native link modifiers are experimental
+ --> $DIR/feature-gate-native_link_modifiers.rs:1:22
+ |
+LL | #[link(name = "foo", modifiers = "")]
+ | ^^^^^^^^^^^^^^
+ |
+ = note: see issue #81490 <https://github.com/rust-lang/rust/issues/81490> for more information
+ = help: add `#![feature(native_link_modifiers)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#![allow(incomplete_features)]
+#![feature(native_link_modifiers)]
+
+#[link(name = "foo", modifiers = "+as-needed")]
+//~^ ERROR: `#[link(modifiers="as-needed")]` is unstable
+extern "C" {}
+
+fn main() {}
--- /dev/null
+error[E0658]: `#[link(modifiers="as-needed")]` is unstable
+ --> $DIR/feature-gate-native_link_modifiers_as_needed.rs:4:34
+ |
+LL | #[link(name = "foo", modifiers = "+as-needed")]
+ | ^^^^^^^^^^^^
+ |
+ = note: see issue #81490 <https://github.com/rust-lang/rust/issues/81490> for more information
+ = help: add `#![feature(native_link_modifiers_as_needed)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#![allow(incomplete_features)]
+#![feature(native_link_modifiers)]
+
+#[link(name = "foo", modifiers = "+bundle")]
+//~^ ERROR: `#[link(modifiers="bundle")]` is unstable
+extern "C" {}
+
+fn main() {}
--- /dev/null
+error[E0658]: `#[link(modifiers="bundle")]` is unstable
+ --> $DIR/feature-gate-native_link_modifiers_bundle.rs:4:34
+ |
+LL | #[link(name = "foo", modifiers = "+bundle")]
+ | ^^^^^^^^^
+ |
+ = note: see issue #81490 <https://github.com/rust-lang/rust/issues/81490> for more information
+ = help: add `#![feature(native_link_modifiers_bundle)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#![allow(incomplete_features)]
+#![feature(native_link_modifiers)]
+
+#[link(name = "foo", modifiers = "+verbatim")]
+//~^ ERROR: `#[link(modifiers="verbatim")]` is unstable
+extern "C" {}
+
+fn main() {}
--- /dev/null
+error[E0658]: `#[link(modifiers="verbatim")]` is unstable
+ --> $DIR/feature-gate-native_link_modifiers_verbatim.rs:4:34
+ |
+LL | #[link(name = "foo", modifiers = "+verbatim")]
+ | ^^^^^^^^^^^
+ |
+ = note: see issue #81490 <https://github.com/rust-lang/rust/issues/81490> for more information
+ = help: add `#![feature(native_link_modifiers_verbatim)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+#![allow(incomplete_features)]
+#![feature(native_link_modifiers)]
+
+#[link(name = "foo", modifiers = "+whole-archive")]
+//~^ ERROR: `#[link(modifiers="whole-archive")]` is unstable
+extern "C" {}
+
+fn main() {}
--- /dev/null
+error[E0658]: `#[link(modifiers="whole-archive")]` is unstable
+ --> $DIR/feature-gate-native_link_modifiers_whole_archive.rs:4:34
+ |
+LL | #[link(name = "foo", modifiers = "+whole-archive")]
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: see issue #81490 <https://github.com/rust-lang/rust/issues/81490> for more information
+ = help: add `#![feature(native_link_modifiers_whole_archive)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
#![crate_type = "lib"]
-#[no_coverage]
-#[feature(no_coverage)] // does not have to be enabled before `#[no_coverage]`
-fn no_coverage_is_enabled_on_this_function() {}
+#[derive(PartialEq, Eq)] // ensure deriving `Eq` does not enable `feature(no_coverage)`
+struct Foo {
+ a: u8,
+ b: u32,
+}
#[no_coverage] //~ ERROR the `#[no_coverage]` attribute is an experimental feature
-fn requires_feature_no_coverage() {}
+fn requires_feature_no_coverage() -> bool {
+ let bar = Foo { a: 0, b: 0 };
+ bar == Foo { a: 0, b: 0 }
+}
error[E0658]: the `#[no_coverage]` attribute is an experimental feature
- --> $DIR/feature-gate-no_coverage.rs:7:1
+ --> $DIR/feature-gate-no_coverage.rs:9:1
|
LL | #[no_coverage]
| ^^^^^^^^^^^^^^
|
= note: see issue #84605 <https://github.com/rust-lang/rust/issues/84605> for more information
= help: add `#![feature(no_coverage)]` to the crate attributes to enable
- = help: or, alternatively, add `#[feature(no_coverage)]` to the function
error: aborting due to previous error
// Test internal const fn feature gate.
-#![feature(const_fn)]
-
#[rustc_const_unstable(feature="fzzzzzt")] //~ stability attributes may not be used outside
pub const fn bazinga() {}
error[E0734]: stability attributes may not be used outside of the standard library
- --> $DIR/feature-gate-rustc_const_unstable.rs:5:1
+ --> $DIR/feature-gate-rustc_const_unstable.rs:3:1
|
LL | #[rustc_const_unstable(feature="fzzzzzt")]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+warning: library kind `static-nobundle` has been superseded by specifying `-bundle` on library kind `static`. Try `static:-bundle`
+
error[E0658]: kind="static-nobundle" is unstable
|
= note: see issue #37403 <https://github.com/rust-lang/rust/issues/37403> for more information
#[link(name = "foo", kind = "static-nobundle")]
-//~^ ERROR: kind="static-nobundle" is unstable
+//~^ WARNING: library kind `static-nobundle` has been superseded by specifying modifier `-bundle` with library kind `static`
+//~^^ ERROR: kind="static-nobundle" is unstable
extern "C" {}
fn main() {}
+warning: library kind `static-nobundle` has been superseded by specifying modifier `-bundle` with library kind `static`
+ --> $DIR/feature-gate-static-nobundle.rs:1:22
+ |
+LL | #[link(name = "foo", kind = "static-nobundle")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+
error[E0658]: kind="static-nobundle" is unstable
--> $DIR/feature-gate-static-nobundle.rs:1:1
|
= note: see issue #37403 <https://github.com/rust-lang/rust/issues/37403> for more information
= help: add `#![feature(static_nobundle)]` to the crate attributes to enable
-error: aborting due to previous error
+error: aborting due to previous error; 1 warning emitted
For more information about this error, try `rustc --explain E0658`.
#![stable(feature = "rust1", since = "1.0.0")]
#![feature(staged_api)]
-#![feature(const_transmute, const_fn)]
+#![feature(const_transmute)]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "rust1", since = "1.0.0")]
target_os = "dragonfly",
target_os = "emscripten",
target_os = "freebsd",
+ target_os = "fuchsia",
target_os = "linux",
target_os = "macos",
target_os = "netbsd",
LL | pub mod baz;
| ^^^^^^^^^^^^
|
- = help: to create the module `baz`, create file "$DIR/auxiliary/foo/bar/baz.rs"
+ = help: to create the module `baz`, create file "$DIR/auxiliary/foo/bar/baz.rs" or "$DIR/auxiliary/foo/bar/baz/mod.rs"
error: aborting due to previous error
--- /dev/null
+// build-fail
+// compile-flags: -Cpasses=unknown-pass -Z new-llvm-pass-manager=yes
+
+fn main() {}
--- /dev/null
+error: failed to run LLVM passes: unknown pass name 'unknown-pass'
+
+error: aborting due to previous error
+
-#![feature(const_fn)]
-
const ARR_LEN: usize = Tt::const_val::<[i8; 123]>();
//~^ ERROR type annotations needed
error[E0379]: functions in traits cannot be declared const
- --> $DIR/issue-54954.rs:7:5
+ --> $DIR/issue-54954.rs:5:5
|
LL | const fn const_val<T: Sized>() -> usize {
| ^^^^^ functions in traits cannot be const
error[E0283]: type annotations needed
- --> $DIR/issue-54954.rs:3:24
+ --> $DIR/issue-54954.rs:1:24
|
LL | const ARR_LEN: usize = Tt::const_val::<[i8; 123]>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ cannot infer type
// run-pass
// compile-flags: -Zlink-native-libraries=no -Cdefault-linker-libraries=yes
// ignore-windows - this will probably only work on unixish systems
+// ignore-fuchsia - missing __libc_start_main for some reason (#84733)
#[link(name = "some-random-non-existent-library", kind = "static")]
extern "C" {}
--- /dev/null
+// check-pass
+
+pub trait Deserialize<'de>: Sized {}
+pub trait DeserializeOwned: for<'de> Deserialize<'de> {}
+
+pub trait Extensible {
+ type Config;
+}
+
+// The `C` here generates a `C: Sized` candidate
+pub trait Installer<C> {
+ fn init<B: Extensible<Config = C>>(&mut self) -> ()
+ where
+ // This clause generates a `for<'de> C: Sized` candidate
+ B::Config: DeserializeOwned,
+ {
+ }
+}
+
+fn main() {}
// run-fail
// revisions: foo bar
// should-fail
+// needs-run-enabled
//[foo] error-pattern:bar
//[bar] error-pattern:foo
-#![feature(const_fn)]
-
trait Foo {
fn f() -> u32;
const fn g(); //~ ERROR cannot be declared const
error[E0379]: functions in traits cannot be declared const
- --> $DIR/const-fn-in-trait.rs:5:5
+ --> $DIR/const-fn-in-trait.rs:3:5
|
LL | const fn g();
| ^^^^^ functions in traits cannot be const
error[E0379]: functions in traits cannot be declared const
- --> $DIR/const-fn-in-trait.rs:9:5
+ --> $DIR/const-fn-in-trait.rs:7:5
|
LL | const fn f() -> u32 { 22 }
| ^^^^^ functions in traits cannot be const
LL | mod missing;
| ^^^^^^^^^^^^
|
- = help: to create the module `missing`, create file "$DIR/foo/missing.rs"
+ = help: to create the module `missing`, create file "$DIR/foo/missing.rs" or "$DIR/foo/missing/mod.rs"
error: aborting due to previous error
LL | mod missing;
| ^^^^^^^^^^^^
|
- = help: to create the module `missing`, create file "$DIR/foo_inline/inline/missing.rs"
+ = help: to create the module `missing`, create file "$DIR/foo_inline/inline/missing.rs" or "$DIR/foo_inline/inline/missing/mod.rs"
error: aborting due to previous error
--- /dev/null
+// Unspecified kind should fail with an error
+
+// compile-flags: -l =mylib
+// error-pattern: unknown library kind ``, expected one of dylib, framework, or static
+
+fn main() {}
--- /dev/null
+error: unknown library kind ``, expected one of dylib, framework, or static
+
--- /dev/null
+// Unspecified kind should fail with an error
+
+// compile-flags: -l :+bundle=mylib
+// error-pattern: unknown library kind ``, expected one of dylib, framework, or static
+
+fn main() {}
--- /dev/null
+error: unknown library kind ``, expected one of dylib, framework, or static
+
panic!(a!()); //~ WARN panic message is not a string literal
panic!(format!("{}", 1)); //~ WARN panic message is not a string literal
+ assert!(false, format!("{}", 1)); //~ WARN panic message is not a string literal
+ debug_assert!(false, format!("{}", 1)); //~ WARN panic message is not a string literal
panic![123]; //~ WARN panic message is not a string literal
panic!{123}; //~ WARN panic message is not a string literal
| -- --
warning: panic message is not a string literal
- --> $DIR/non-fmt-panic.rs:40:12
+ --> $DIR/non-fmt-panic.rs:39:20
+ |
+LL | assert!(false, format!("{}", 1));
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: this is no longer accepted in Rust 2021
+ = note: the assert!() macro supports formatting, so there's no need for the format!() macro here
+help: remove the `format!(..)` macro call
+ |
+LL | assert!(false, "{}", 1);
+ | -- --
+
+warning: panic message is not a string literal
+ --> $DIR/non-fmt-panic.rs:40:26
+ |
+LL | debug_assert!(false, format!("{}", 1));
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: this is no longer accepted in Rust 2021
+ = note: the debug_assert!() macro supports formatting, so there's no need for the format!() macro here
+help: remove the `format!(..)` macro call
+ |
+LL | debug_assert!(false, "{}", 1);
+ | -- --
+
+warning: panic message is not a string literal
+ --> $DIR/non-fmt-panic.rs:42:12
|
LL | panic![123];
| ^^^
| ^^^^^^^^^^^^^^^^^^^^^^ ^
warning: panic message is not a string literal
- --> $DIR/non-fmt-panic.rs:41:12
+ --> $DIR/non-fmt-panic.rs:43:12
|
LL | panic!{123};
| ^^^
LL | std::panic::panic_any(123);
| ^^^^^^^^^^^^^^^^^^^^^^ ^
-warning: 18 warnings emitted
+warning: 20 warnings emitted
// aux-build:weak-lang-items.rs
// error-pattern: `#[panic_handler]` function required, but not found
// error-pattern: language item required, but not found: `eh_personality`
+// needs-unwind since it affects the error output
// ignore-emscripten compiled with panic=abort, personality not required
#![no_std]
error[E0259]: the name `core` is defined multiple times
- --> $DIR/weak-lang-item.rs:8:1
+ --> $DIR/weak-lang-item.rs:9:1
|
LL | extern crate core;
| ^^^^^^^^^^^^^^^^^^ `core` reimported here
// build-fail
// compile-flags:-C panic=abort -C prefer-dynamic
+// needs-unwind
// ignore-musl - no dylibs here
// ignore-emscripten
// ignore-sgx no dynamic lib support
#![allow(unused_variables)]
// compile-flags:-C lto -C panic=unwind
+// needs-unwind
// no-prefer-dynamic
// ignore-emscripten no processes
// ignore-sgx no processes
// build-fail
+// needs-unwind
// aux-build:panic-runtime-unwind.rs
// aux-build:panic-runtime-abort.rs
// aux-build:wants-panic-runtime-unwind.rs
// build-fail
+// needs-unwind
// error-pattern:is incompatible with this crate's strategy of `unwind`
// aux-build:panic-runtime-abort.rs
// aux-build:panic-runtime-lang-items.rs
// build-fail
+// needs-unwind
// error-pattern:is incompatible with this crate's strategy of `unwind`
// aux-build:panic-runtime-abort.rs
// aux-build:wants-panic-runtime-abort.rs
// edition:2018
#![feature(const_extern_fn)]
-#![feature(const_fn)]
fn main() {
async fn ff1() {} // OK.
error: functions cannot be both `const` and `async`
- --> $DIR/fn-header-semantic-fail.rs:13:5
+ --> $DIR/fn-header-semantic-fail.rs:12:5
|
LL | const async unsafe extern "C" fn ff5() {} // OK.
| ^^^^^-^^^^^------------------------------
| `const` because of this
error[E0706]: functions in traits cannot be declared `async`
- --> $DIR/fn-header-semantic-fail.rs:17:9
+ --> $DIR/fn-header-semantic-fail.rs:16:9
|
LL | async fn ft1();
| -----^^^^^^^^^^
= note: consider using the `async-trait` crate: https://crates.io/crates/async-trait
error[E0379]: functions in traits cannot be declared const
- --> $DIR/fn-header-semantic-fail.rs:19:9
+ --> $DIR/fn-header-semantic-fail.rs:18:9
|
LL | const fn ft3();
| ^^^^^ functions in traits cannot be const
error[E0379]: functions in traits cannot be declared const
- --> $DIR/fn-header-semantic-fail.rs:21:9
+ --> $DIR/fn-header-semantic-fail.rs:20:9
|
LL | const async unsafe extern "C" fn ft5();
| ^^^^^ functions in traits cannot be const
error[E0706]: functions in traits cannot be declared `async`
- --> $DIR/fn-header-semantic-fail.rs:21:9
+ --> $DIR/fn-header-semantic-fail.rs:20:9
|
LL | const async unsafe extern "C" fn ft5();
| ^^^^^^-----^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: consider using the `async-trait` crate: https://crates.io/crates/async-trait
error: functions cannot be both `const` and `async`
- --> $DIR/fn-header-semantic-fail.rs:21:9
+ --> $DIR/fn-header-semantic-fail.rs:20:9
|
LL | const async unsafe extern "C" fn ft5();
| ^^^^^-^^^^^----------------------------
| `const` because of this
error[E0706]: functions in traits cannot be declared `async`
- --> $DIR/fn-header-semantic-fail.rs:29:9
+ --> $DIR/fn-header-semantic-fail.rs:28:9
|
LL | async fn ft1() {}
| -----^^^^^^^^^^^^
= note: consider using the `async-trait` crate: https://crates.io/crates/async-trait
error[E0379]: functions in traits cannot be declared const
- --> $DIR/fn-header-semantic-fail.rs:32:9
+ --> $DIR/fn-header-semantic-fail.rs:31:9
|
LL | const fn ft3() {}
| ^^^^^ functions in traits cannot be const
error[E0379]: functions in traits cannot be declared const
- --> $DIR/fn-header-semantic-fail.rs:34:9
+ --> $DIR/fn-header-semantic-fail.rs:33:9
|
LL | const async unsafe extern "C" fn ft5() {}
| ^^^^^ functions in traits cannot be const
error[E0706]: functions in traits cannot be declared `async`
- --> $DIR/fn-header-semantic-fail.rs:34:9
+ --> $DIR/fn-header-semantic-fail.rs:33:9
|
LL | const async unsafe extern "C" fn ft5() {}
| ^^^^^^-----^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: consider using the `async-trait` crate: https://crates.io/crates/async-trait
error: functions cannot be both `const` and `async`
- --> $DIR/fn-header-semantic-fail.rs:34:9
+ --> $DIR/fn-header-semantic-fail.rs:33:9
|
LL | const async unsafe extern "C" fn ft5() {}
| ^^^^^-^^^^^------------------------------
| `const` because of this
error: functions cannot be both `const` and `async`
- --> $DIR/fn-header-semantic-fail.rs:46:9
+ --> $DIR/fn-header-semantic-fail.rs:45:9
|
LL | const async unsafe extern "C" fn fi5() {}
| ^^^^^-^^^^^------------------------------
| `const` because of this
error: functions in `extern` blocks cannot have qualifiers
- --> $DIR/fn-header-semantic-fail.rs:51:18
+ --> $DIR/fn-header-semantic-fail.rs:50:18
|
LL | extern "C" {
| ---------- in this `extern` block
| ^^
error: functions in `extern` blocks cannot have qualifiers
- --> $DIR/fn-header-semantic-fail.rs:52:19
+ --> $DIR/fn-header-semantic-fail.rs:51:19
|
LL | extern "C" {
| ---------- in this `extern` block
| ^^
error: functions in `extern` blocks cannot have qualifiers
- --> $DIR/fn-header-semantic-fail.rs:53:18
+ --> $DIR/fn-header-semantic-fail.rs:52:18
|
LL | extern "C" {
| ---------- in this `extern` block
| ^^
error: functions in `extern` blocks cannot have qualifiers
- --> $DIR/fn-header-semantic-fail.rs:54:23
+ --> $DIR/fn-header-semantic-fail.rs:53:23
|
LL | extern "C" {
| ---------- in this `extern` block
| ^^
error: functions in `extern` blocks cannot have qualifiers
- --> $DIR/fn-header-semantic-fail.rs:55:42
+ --> $DIR/fn-header-semantic-fail.rs:54:42
|
LL | extern "C" {
| ---------- in this `extern` block
| ^^
error: functions cannot be both `const` and `async`
- --> $DIR/fn-header-semantic-fail.rs:55:9
+ --> $DIR/fn-header-semantic-fail.rs:54:9
|
LL | const async unsafe extern "C" fn fe5();
| ^^^^^-^^^^^----------------------------
| `const` because of this
error[E0053]: method `ft1` has an incompatible type for trait
- --> $DIR/fn-header-semantic-fail.rs:29:24
+ --> $DIR/fn-header-semantic-fail.rs:28:24
|
LL | async fn ft1();
| - type in trait
found fn pointer `fn() -> impl Future`
error[E0053]: method `ft5` has an incompatible type for trait
- --> $DIR/fn-header-semantic-fail.rs:34:48
+ --> $DIR/fn-header-semantic-fail.rs:33:48
|
LL | const async unsafe extern "C" fn ft5();
| - type in trait
LL | mod not_a_real_file;
| ^^^^^^^^^^^^^^^^^^^^
|
- = help: to create the module `not_a_real_file`, create file "$DIR/not_a_real_file.rs"
+ = help: to create the module `not_a_real_file`, create file "$DIR/not_a_real_file.rs" or "$DIR/not_a_real_file/mod.rs"
error[E0433]: failed to resolve: use of undeclared crate or module `mod_file_aux`
--> $DIR/mod_file_not_exist.rs:7:16
LL | mod not_a_real_file;
| ^^^^^^^^^^^^^^^^^^^^
|
- = help: to create the module `not_a_real_file`, create file "$DIR/not_a_real_file.rs"
+ = help: to create the module `not_a_real_file`, create file "$DIR/not_a_real_file.rs" or "$DIR/not_a_real_file/mod.rs"
error[E0433]: failed to resolve: use of undeclared crate or module `mod_file_aux`
--> $DIR/mod_file_not_exist_windows.rs:7:16
--- /dev/null
+// edition:2018
+
+fn foo1(_: &dyn Drop + AsRef<str>) {} //~ ERROR ambiguous `+` in a type
+//~^ ERROR only auto traits can be used as additional traits in a trait object
+
+fn foo2(_: &dyn (Drop + AsRef<str>)) {} //~ ERROR incorrect braces around trait bounds
+
+fn foo3(_: &dyn {Drop + AsRef<str>}) {} //~ ERROR expected parameter name, found `{`
+//~^ ERROR expected one of `!`, `(`, `)`, `,`, `?`, `for`, lifetime, or path, found `{`
+//~| ERROR at least one trait is required for an object type
+
+fn foo4(_: &dyn <Drop + AsRef<str>>) {} //~ ERROR expected identifier, found `<`
+
+fn foo5(_: &(dyn Drop + dyn AsRef<str>)) {} //~ ERROR invalid `dyn` keyword
+//~^ ERROR only auto traits can be used as additional traits in a trait object
+
+fn main() {}
--- /dev/null
+error: ambiguous `+` in a type
+ --> $DIR/trait-object-delimiters.rs:3:13
+ |
+LL | fn foo1(_: &dyn Drop + AsRef<str>) {}
+ | ^^^^^^^^^^^^^^^^^^^^^ help: use parentheses to disambiguate: `(dyn Drop + AsRef<str>)`
+
+error: incorrect braces around trait bounds
+ --> $DIR/trait-object-delimiters.rs:6:17
+ |
+LL | fn foo2(_: &dyn (Drop + AsRef<str>)) {}
+ | ^ ^
+ |
+help: remove the parentheses
+ |
+LL | fn foo2(_: &dyn Drop + AsRef<str>) {}
+ | -- --
+
+error: expected parameter name, found `{`
+ --> $DIR/trait-object-delimiters.rs:8:17
+ |
+LL | fn foo3(_: &dyn {Drop + AsRef<str>}) {}
+ | ^ expected parameter name
+
+error: expected one of `!`, `(`, `)`, `,`, `?`, `for`, lifetime, or path, found `{`
+ --> $DIR/trait-object-delimiters.rs:8:17
+ |
+LL | fn foo3(_: &dyn {Drop + AsRef<str>}) {}
+ | -^ expected one of 8 possible tokens
+ | |
+ | help: missing `,`
+
+error: expected identifier, found `<`
+ --> $DIR/trait-object-delimiters.rs:12:17
+ |
+LL | fn foo4(_: &dyn <Drop + AsRef<str>>) {}
+ | ^ expected identifier
+
+error: invalid `dyn` keyword
+ --> $DIR/trait-object-delimiters.rs:14:25
+ |
+LL | fn foo5(_: &(dyn Drop + dyn AsRef<str>)) {}
+ | ^^^ help: remove this keyword
+ |
+ = help: `dyn` is only needed at the start of a trait `+`-separated list
+
+error[E0225]: only auto traits can be used as additional traits in a trait object
+ --> $DIR/trait-object-delimiters.rs:3:24
+ |
+LL | fn foo1(_: &dyn Drop + AsRef<str>) {}
+ | ---- ^^^^^^^^^^ additional non-auto trait
+ | |
+ | first non-auto trait
+ |
+ = help: consider creating a new trait with all of these as super-traits and using that trait here instead: `trait NewTrait: Drop + AsRef<str> {}`
+ = note: auto-traits like `Send` and `Sync` are traits that have special properties; for more information on them, visit <https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits>
+
+error[E0224]: at least one trait is required for an object type
+ --> $DIR/trait-object-delimiters.rs:8:13
+ |
+LL | fn foo3(_: &dyn {Drop + AsRef<str>}) {}
+ | ^^^
+
+error[E0225]: only auto traits can be used as additional traits in a trait object
+ --> $DIR/trait-object-delimiters.rs:14:29
+ |
+LL | fn foo5(_: &(dyn Drop + dyn AsRef<str>)) {}
+ | ---- ^^^^^^^^^^ additional non-auto trait
+ | |
+ | first non-auto trait
+ |
+ = help: consider creating a new trait with all of these as super-traits and using that trait here instead: `trait NewTrait: Drop + AsRef<str> {}`
+ = note: auto-traits like `Send` and `Sync` are traits that have special properties; for more information on them, visit <https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits>
+
+error: aborting due to 9 previous errors
+
+Some errors have detailed explanations: E0224, E0225.
+For more information about an error, try `rustc --explain E0224`.
LL | unsafe mod n;
| ^^^^^^^^^^^^^
|
- = help: to create the module `n`, create file "$DIR/n.rs"
+ = help: to create the module `n`, create file "$DIR/n.rs" or "$DIR/n/mod.rs"
error: module cannot be declared unsafe
--> $DIR/unsafe-mod.rs:1:1
// run-pass
// compile-flags: -Z unleash-the-miri-inside-of-you
-#![feature(core_intrinsics, const_caller_location, const_fn)]
+#![feature(core_intrinsics, const_caller_location)]
type L = &'static std::panic::Location<'static>;
// revisions: default mir-opt
//[mir-opt] compile-flags: -Zmir-opt-level=4
-#![feature(const_caller_location, const_fn)]
+#![feature(const_caller_location)]
use std::panic::Location;
LL | mod řųśť;
| ^^^^^^^^^
|
- = help: to create the module `řųśť`, create file "$DIR/řųśť.rs"
+ = help: to create the module `řųśť`, create file "$DIR/řųśť.rs" or "$DIR/řųśť/mod.rs"
error[E0754]: trying to load file for module `řųśť` with non-ascii identifier name
--> $DIR/mod_file_nonascii_forbidden.rs:1:5
-#![feature(rustc_attrs, const_fn)]
+#![feature(rustc_attrs)]
#[rustc_args_required_const(0)]
fn foo(_a: i32) {
// Various checks that stability attributes are used correctly, per RFC 507
-#![feature(const_fn, staged_api)]
+#![feature(staged_api)]
#![stable(feature = "rust1", since = "1.0.0")]
-#![feature(const_fn)]
-
struct WithDtor;
impl Drop for WithDtor {
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:9:60
+ --> $DIR/static-drop-scope.rs:7:60
|
LL | static PROMOTION_FAIL_S: Option<&'static WithDtor> = Some(&WithDtor);
| ^^^^^^^^- value is dropped here
| statics cannot evaluate destructors
error[E0716]: temporary value dropped while borrowed
- --> $DIR/static-drop-scope.rs:9:60
+ --> $DIR/static-drop-scope.rs:7:60
|
LL | static PROMOTION_FAIL_S: Option<&'static WithDtor> = Some(&WithDtor);
| ------^^^^^^^^-
| using this value as a static requires that borrow lasts for `'static`
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:13:59
+ --> $DIR/static-drop-scope.rs:11:59
|
LL | const PROMOTION_FAIL_C: Option<&'static WithDtor> = Some(&WithDtor);
| ^^^^^^^^- value is dropped here
| constants cannot evaluate destructors
error[E0716]: temporary value dropped while borrowed
- --> $DIR/static-drop-scope.rs:13:59
+ --> $DIR/static-drop-scope.rs:11:59
|
LL | const PROMOTION_FAIL_C: Option<&'static WithDtor> = Some(&WithDtor);
| ------^^^^^^^^-
| using this value as a constant requires that borrow lasts for `'static`
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:17:28
+ --> $DIR/static-drop-scope.rs:15:28
|
LL | static EARLY_DROP_S: i32 = (WithDtor, 0).1;
| ^^^^^^^^^^^^^ - value is dropped here
| statics cannot evaluate destructors
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:20:27
+ --> $DIR/static-drop-scope.rs:18:27
|
LL | const EARLY_DROP_C: i32 = (WithDtor, 0).1;
| ^^^^^^^^^^^^^ - value is dropped here
| constants cannot evaluate destructors
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:23:24
+ --> $DIR/static-drop-scope.rs:21:24
|
LL | const fn const_drop<T>(_: T) {}
| ^ - value is dropped here
| constant functions cannot evaluate destructors
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:27:5
+ --> $DIR/static-drop-scope.rs:25:5
|
LL | (x, ()).1
| ^^^^^^^ constant functions cannot evaluate destructors
| - value is dropped here
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:31:34
+ --> $DIR/static-drop-scope.rs:29:34
|
LL | const EARLY_DROP_C_OPTION: i32 = (Some(WithDtor), 0).1;
| ^^^^^^^^^^^^^^^^^^^ - value is dropped here
| constants cannot evaluate destructors
error[E0493]: destructors cannot be evaluated at compile-time
- --> $DIR/static-drop-scope.rs:36:43
+ --> $DIR/static-drop-scope.rs:34:43
|
LL | const EARLY_DROP_C_OPTION_CONSTANT: i32 = (HELPER, 0).1;
| ^^^^^^^^^^^ - value is dropped here
target_os = "dragonfly",
target_os = "emscripten",
target_os = "freebsd",
+ target_os = "fuchsia",
target_os = "linux",
target_os = "macos",
target_os = "netbsd",
--- /dev/null
+struct A<T> {
+//~^ ERROR recursive type `A` has infinite size
+ x: T,
+ y: A<A<T>>,
+}
+
+struct B {
+ z: A<usize>
+}
+
+fn main() {}
--- /dev/null
+error[E0072]: recursive type `A` has infinite size
+ --> $DIR/issue-74224.rs:1:1
+ |
+LL | struct A<T> {
+ | ^^^^^^^^^^^ recursive type has infinite size
+...
+LL | y: A<A<T>>,
+ | ------- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `A` representable
+ |
+LL | y: Box<A<A<T>>>,
+ | ^^^^ ^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0072`.
--- /dev/null
+struct Foo<T> {
+//~^ ERROR recursive type `Foo` has infinite size
+ x: Foo<[T; 1]>,
+ y: T,
+}
+
+struct Bar {
+ x: Foo<Bar>,
+}
+
+fn main() {}
--- /dev/null
+error[E0072]: recursive type `Foo` has infinite size
+ --> $DIR/issue-84611.rs:1:1
+ |
+LL | struct Foo<T> {
+ | ^^^^^^^^^^^^^ recursive type has infinite size
+LL |
+LL | x: Foo<[T; 1]>,
+ | ----------- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `Foo` representable
+ |
+LL | x: Box<Foo<[T; 1]>>,
+ | ^^^^ ^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0072`.
--- /dev/null
+struct A<T> {
+//~^ ERROR recursive type `A` has infinite size
+ x: T,
+ y: B<T>,
+}
+
+struct B<T> {
+//~^ ERROR recursive type `B` has infinite size
+ z: A<T>
+}
+
+struct C<T> {
+//~^ ERROR recursive type `C` has infinite size
+ x: T,
+ y: Option<Option<D<T>>>,
+}
+
+struct D<T> {
+//~^ ERROR recursive type `D` has infinite size
+ z: Option<Option<C<T>>>,
+}
+
+fn main() {}
--- /dev/null
+error[E0072]: recursive type `A` has infinite size
+ --> $DIR/mutual-struct-recursion.rs:1:1
+ |
+LL | struct A<T> {
+ | ^^^^^^^^^^^ recursive type has infinite size
+...
+LL | y: B<T>,
+ | ---- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `A` representable
+ |
+LL | y: Box<B<T>>,
+ | ^^^^ ^
+
+error[E0072]: recursive type `B` has infinite size
+ --> $DIR/mutual-struct-recursion.rs:7:1
+ |
+LL | struct B<T> {
+ | ^^^^^^^^^^^ recursive type has infinite size
+LL |
+LL | z: A<T>
+ | ---- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `B` representable
+ |
+LL | z: Box<A<T>>
+ | ^^^^ ^
+
+error[E0072]: recursive type `C` has infinite size
+ --> $DIR/mutual-struct-recursion.rs:12:1
+ |
+LL | struct C<T> {
+ | ^^^^^^^^^^^ recursive type has infinite size
+...
+LL | y: Option<Option<D<T>>>,
+ | -------------------- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `C` representable
+ |
+LL | y: Box<Option<Option<D<T>>>>,
+ | ^^^^ ^
+
+error[E0072]: recursive type `D` has infinite size
+ --> $DIR/mutual-struct-recursion.rs:18:1
+ |
+LL | struct D<T> {
+ | ^^^^^^^^^^^ recursive type has infinite size
+LL |
+LL | z: Option<Option<C<T>>>,
+ | -------------------- recursive without indirection
+ |
+help: insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `D` representable
+ |
+LL | z: Box<Option<Option<C<T>>>>,
+ | ^^^^ ^
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0072`.
--- /dev/null
+// It might be intuitive for a user coming from languages like Java
+// to declare a method directly in a struct's definition. Make sure
+// rustc can give a helpful suggestion.
+// Suggested in issue #76421
+
+struct S {
+ field: usize,
+
+ fn foo() {}
+ //~^ ERROR functions are not allowed in struct definitions
+ //~| HELP unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ //~| HELP see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+}
+
+union U {
+ variant: usize,
+
+ fn foo() {}
+ //~^ ERROR functions are not allowed in union definitions
+ //~| HELP unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ //~| HELP see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+}
+
+enum E {
+ Variant,
+
+ fn foo() {}
+ //~^ ERROR functions are not allowed in enum definitions
+ //~| HELP unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ //~| HELP see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+}
+
+fn main() {}
--- /dev/null
+error: functions are not allowed in struct definitions
+ --> $DIR/struct-fn-in-definition.rs:9:5
+ |
+LL | fn foo() {}
+ | ^^^^^^^^^^^
+ |
+ = help: unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ = help: see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+
+error: functions are not allowed in union definitions
+ --> $DIR/struct-fn-in-definition.rs:18:5
+ |
+LL | fn foo() {}
+ | ^^^^^^^^^^^
+ |
+ = help: unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ = help: see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+
+error: functions are not allowed in enum definitions
+ --> $DIR/struct-fn-in-definition.rs:27:5
+ |
+LL | fn foo() {}
+ | ^^^^^^^^^^^
+ |
+ = help: unlike in C++, Java, and C#, functions are declared in `impl` blocks
+ = help: see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information
+
+error: aborting due to 3 previous errors
+
--- /dev/null
+use std::alloc::{GlobalAlloc, Layout};
+
+struct Test(u32);
+
+unsafe impl GlobalAlloc for Test {
+ unsafe fn alloc(&self, _layout: Layout) -> *mut u8 {
+ self.0 += 1; //~ ERROR cannot assign
+ 0 as *mut u8
+ }
+
+ unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
+ unimplemented!();
+ }
+}
+
+fn main() { }
--- /dev/null
+error[E0594]: cannot assign to `self.0` which is behind a `&` reference
+ --> $DIR/issue-68049-1.rs:7:9
+ |
+LL | self.0 += 1;
+ | ^^^^^^^^^^^ `self` is a `&` reference, so the data it refers to cannot be written
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0594`.
--- /dev/null
+trait Hello {
+ fn example(&self, input: &i32); // should suggest here
+}
+
+struct Test1(i32);
+
+impl Hello for Test1 {
+ fn example(&self, input: &i32) { // should not suggest here
+ *input = self.0; //~ ERROR cannot assign
+ }
+}
+
+struct Test2(i32);
+
+impl Hello for Test2 {
+ fn example(&self, input: &i32) { // should not suggest here
+ self.0 += *input; //~ ERROR cannot assign
+ }
+}
+
+fn main() { }
--- /dev/null
+error[E0594]: cannot assign to `*input` which is behind a `&` reference
+ --> $DIR/issue-68049-2.rs:9:7
+ |
+LL | fn example(&self, input: &i32); // should suggest here
+ | ---- help: consider changing that to be a mutable reference: `&mut i32`
+...
+LL | *input = self.0;
+ | ^^^^^^^^^^^^^^^ `input` is a `&` reference, so the data it refers to cannot be written
+
+error[E0594]: cannot assign to `self.0` which is behind a `&` reference
+ --> $DIR/issue-68049-2.rs:17:5
+ |
+LL | fn example(&self, input: &i32); // should suggest here
+ | ----- help: consider changing that to be a mutable reference: `&mut self`
+...
+LL | self.0 += *input;
+ | ^^^^^^^^^^^^^^^^ `self` is a `&` reference, so the data it refers to cannot be written
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0594`.
--- /dev/null
+/* Checks whether issue #84592 has been resolved. The issue was
+ * that in this example, there are two expected/missing lifetime
+ * parameters with *different spans*, leading to incorrect
+ * suggestions from rustc.
+ */
+
+struct TwoLifetimes<'x, 'y> {
+ x: &'x (),
+ y: &'y (),
+}
+
+fn two_lifetimes_needed(a: &(), b: &()) -> TwoLifetimes<'_, '_> {
+//~^ ERROR missing lifetime specifiers [E0106]
+ TwoLifetimes { x: &(), y: &() }
+}
+
+fn main() {}
--- /dev/null
+error[E0106]: missing lifetime specifiers
+ --> $DIR/issue-84592.rs:12:57
+ |
+LL | fn two_lifetimes_needed(a: &(), b: &()) -> TwoLifetimes<'_, '_> {
+ | --- --- ^^ ^^ expected named lifetime parameter
+ | |
+ | expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+help: consider introducing a named lifetime parameter
+ |
+LL | fn two_lifetimes_needed<'a>(a: &'a (), b: &'a ()) -> TwoLifetimes<'a, 'a> {
+ | ^^^^ ^^^^^^ ^^^^^^ ^^ ^^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0106`.
|
LL | struct V<'a>(&'a dyn for<'b> Fn(&X) -> &X);
| ^^ ^^
+help: consider using one of the available lifetimes here
+ |
+LL | struct V<'a>(&'a dyn for<'b> Fn(&X) -> &'lifetime X);
+ | ^^^^^^^^^^
error[E0106]: missing lifetime specifier
--> $DIR/missing-lt-for-hrtb.rs:5:41
--- /dev/null
+/* Checks all four scenarios possible in report_elision_failure() of
+ * rustc_resolve::late::lifetimes::LifetimeContext related to returning
+ * borrowed values, in various configurations.
+ */
+
+fn f1() -> &i32 { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+fn f1_() -> (&i32, &i32) { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ ERROR missing lifetime specifier [E0106]
+
+fn f2(a: i32, b: i32) -> &i32 { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+fn f2_(a: i32, b: i32) -> (&i32, &i32) { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ ERROR missing lifetime specifier [E0106]
+
+struct S<'a, 'b> { a: &'a i32, b: &'b i32 }
+fn f3(s: &S) -> &i32 { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+fn f3_(s: &S, t: &S) -> (&i32, &i32) { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ ERROR missing lifetime specifier [E0106]
+
+fn f4<'a, 'b>(a: &'a i32, b: &'b i32) -> &i32 { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &i32) { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ ERROR missing lifetime specifier [E0106]
+
+fn f5<'a>(a: &'a i32, b: &i32) -> &i32 { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+fn f5_<'a>(a: &'a i32, b: &i32) -> (&i32, &i32) { loop {} }
+//~^ ERROR missing lifetime specifier [E0106]
+//~^^ ERROR missing lifetime specifier [E0106]
+
+fn main() {}
--- /dev/null
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:6:12
+ |
+LL | fn f1() -> &i32 { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but there is no value for it to be borrowed from
+help: consider using the `'static` lifetime
+ |
+LL | fn f1() -> &'static i32 { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:8:14
+ |
+LL | fn f1_() -> (&i32, &i32) { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but there is no value for it to be borrowed from
+help: consider using the `'static` lifetime
+ |
+LL | fn f1_() -> (&'static i32, &i32) { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:8:20
+ |
+LL | fn f1_() -> (&i32, &i32) { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but there is no value for it to be borrowed from
+help: consider using the `'static` lifetime
+ |
+LL | fn f1_() -> (&i32, &'static i32) { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:12:26
+ |
+LL | fn f2(a: i32, b: i32) -> &i32 { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value with an elided lifetime, but the lifetime cannot be derived from the arguments
+help: consider using the `'static` lifetime
+ |
+LL | fn f2(a: i32, b: i32) -> &'static i32 { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:14:28
+ |
+LL | fn f2_(a: i32, b: i32) -> (&i32, &i32) { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value with an elided lifetime, but the lifetime cannot be derived from the arguments
+help: consider using the `'static` lifetime
+ |
+LL | fn f2_(a: i32, b: i32) -> (&'static i32, &i32) { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:14:34
+ |
+LL | fn f2_(a: i32, b: i32) -> (&i32, &i32) { loop {} }
+ | ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value with an elided lifetime, but the lifetime cannot be derived from the arguments
+help: consider using the `'static` lifetime
+ |
+LL | fn f2_(a: i32, b: i32) -> (&i32, &'static i32) { loop {} }
+ | ^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:19:17
+ |
+LL | fn f3(s: &S) -> &i32 { loop {} }
+ | -- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say which one of `s`'s 3 lifetimes it is borrowed from
+help: consider introducing a named lifetime parameter
+ |
+LL | fn f3<'a>(s: &'a S) -> &'a i32 { loop {} }
+ | ^^^^ ^^^^^ ^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:21:26
+ |
+LL | fn f3_(s: &S, t: &S) -> (&i32, &i32) { loop {} }
+ | -- -- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from one of `s`'s 3 lifetimes or one of `t`'s 3 lifetimes
+help: consider introducing a named lifetime parameter
+ |
+LL | fn f3_<'a>(s: &'a S, t: &'a S) -> (&'a i32, &i32) { loop {} }
+ | ^^^^ ^^^^^ ^^^^^ ^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:21:32
+ |
+LL | fn f3_(s: &S, t: &S) -> (&i32, &i32) { loop {} }
+ | -- -- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from one of `s`'s 3 lifetimes or one of `t`'s 3 lifetimes
+help: consider introducing a named lifetime parameter
+ |
+LL | fn f3_<'a>(s: &'a S, t: &'a S) -> (&i32, &'a i32) { loop {} }
+ | ^^^^ ^^^^^ ^^^^^ ^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:25:42
+ |
+LL | fn f4<'a, 'b>(a: &'a i32, b: &'b i32) -> &i32 { loop {} }
+ | ------- ------- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+note: these named lifetimes are available to use
+ --> $DIR/return-elided-lifetime.rs:25:7
+ |
+LL | fn f4<'a, 'b>(a: &'a i32, b: &'b i32) -> &i32 { loop {} }
+ | ^^ ^^
+help: consider using one of the available lifetimes here
+ |
+LL | fn f4<'a, 'b>(a: &'a i32, b: &'b i32) -> &'lifetime i32 { loop {} }
+ | ^^^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:27:44
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &i32) { loop {} }
+ | ------- ------- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+note: these named lifetimes are available to use
+ --> $DIR/return-elided-lifetime.rs:27:8
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &i32) { loop {} }
+ | ^^ ^^
+help: consider using one of the available lifetimes here
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&'lifetime i32, &i32) { loop {} }
+ | ^^^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:27:50
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &i32) { loop {} }
+ | ------- ------- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+note: these named lifetimes are available to use
+ --> $DIR/return-elided-lifetime.rs:27:8
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &i32) { loop {} }
+ | ^^ ^^
+help: consider using one of the available lifetimes here
+ |
+LL | fn f4_<'a, 'b>(a: &'a i32, b: &'b i32) -> (&i32, &'lifetime i32) { loop {} }
+ | ^^^^^^^^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:31:35
+ |
+LL | fn f5<'a>(a: &'a i32, b: &i32) -> &i32 { loop {} }
+ | ------- ---- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+help: consider using the `'a` lifetime
+ |
+LL | fn f5<'a>(a: &'a i32, b: &i32) -> &'a i32 { loop {} }
+ | ^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:33:37
+ |
+LL | fn f5_<'a>(a: &'a i32, b: &i32) -> (&i32, &i32) { loop {} }
+ | ------- ---- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+help: consider using the `'a` lifetime
+ |
+LL | fn f5_<'a>(a: &'a i32, b: &i32) -> (&'a i32, &i32) { loop {} }
+ | ^^^
+
+error[E0106]: missing lifetime specifier
+ --> $DIR/return-elided-lifetime.rs:33:43
+ |
+LL | fn f5_<'a>(a: &'a i32, b: &i32) -> (&i32, &i32) { loop {} }
+ | ------- ---- ^ expected named lifetime parameter
+ |
+ = help: this function's return type contains a borrowed value, but the signature does not say whether it is borrowed from `a` or `b`
+help: consider using the `'a` lifetime
+ |
+LL | fn f5_<'a>(a: &'a i32, b: &i32) -> (&i32, &'a i32) { loop {} }
+ | ^^^
+
+error: aborting due to 15 previous errors
+
+For more information about this error, try `rustc --explain E0106`.
--- /dev/null
+// run-rustfix
+
+#![allow(dead_code, unused_variables)]
+
+fn foo1(bar: &str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn foo2(_bar: &str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn foo3(_: &str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn main() {}
--- /dev/null
+// run-rustfix
+
+#![allow(dead_code, unused_variables)]
+
+fn foo1(bar: str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn foo2(_bar: str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn foo3(_: str) {}
+//~^ ERROR the size for values of type `str` cannot be known at compilation time
+//~| HELP the trait `Sized` is not implemented for `str`
+//~| HELP unsized fn params are gated as an unstable feature
+//~| HELP function arguments must have a statically known size, borrowed types always have a known size
+
+fn main() {}
--- /dev/null
+error[E0277]: the size for values of type `str` cannot be known at compilation time
+ --> $DIR/unsized-function-parameter.rs:5:9
+ |
+LL | fn foo1(bar: str) {}
+ | ^^^ doesn't have a size known at compile-time
+ |
+ = help: the trait `Sized` is not implemented for `str`
+ = help: unsized fn params are gated as an unstable feature
+help: function arguments must have a statically known size, borrowed types always have a known size
+ |
+LL | fn foo1(bar: &str) {}
+ | ^
+
+error[E0277]: the size for values of type `str` cannot be known at compilation time
+ --> $DIR/unsized-function-parameter.rs:11:9
+ |
+LL | fn foo2(_bar: str) {}
+ | ^^^^ doesn't have a size known at compile-time
+ |
+ = help: the trait `Sized` is not implemented for `str`
+ = help: unsized fn params are gated as an unstable feature
+help: function arguments must have a statically known size, borrowed types always have a known size
+ |
+LL | fn foo2(_bar: &str) {}
+ | ^
+
+error[E0277]: the size for values of type `str` cannot be known at compilation time
+ --> $DIR/unsized-function-parameter.rs:17:9
+ |
+LL | fn foo3(_: str) {}
+ | ^ doesn't have a size known at compile-time
+ |
+ = help: the trait `Sized` is not implemented for `str`
+ = help: unsized fn params are gated as an unstable feature
+help: function arguments must have a statically known size, borrowed types always have a known size
+ |
+LL | fn foo3(_: &str) {}
+ | ^
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0277`.
--- /dev/null
+// build-pass
+// only-x86
+// compile-flags: -C target-feature=+pclmulqdq
+
+fn main() {}
// error-pattern:building tests with panic=abort is not supported
// no-prefer-dynamic
-// compile-flags: --test -Cpanic=abort
+// compile-flags: --test -Cpanic=abort -Zpanic-abort-tests=no
// run-flags: --test-threads=1
// ignore-wasm no panic or subprocess support
-#![feature(const_fn, thread_local)]
+#![feature(thread_local)]
#[thread_local]
static A: u32 = 1;
-#![feature(cfg_target_thread_local, const_fn, thread_local)]
+#![feature(cfg_target_thread_local, thread_local)]
#![crate_type = "lib"]
#[cfg(target_thread_local)]
-#![feature(const_fn)]
#![feature(thread_local)]
#![feature(cfg_target_thread_local, thread_local_internals)]
error[E0133]: call to unsafe function is unsafe and requires unsafe function or block
- --> $DIR/issue-43733.rs:18:5
+ --> $DIR/issue-43733.rs:17:5
|
LL | __KEY.get(Default::default)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function
= note: consult the function's documentation for information on how to avoid undefined behavior
error[E0133]: call to unsafe function is unsafe and requires unsafe function or block
- --> $DIR/issue-43733.rs:22:5
+ --> $DIR/issue-43733.rs:21:5
|
LL | std::thread::LocalKey::new(__getit);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ call to unsafe function
// run-pass
-#![feature(const_fn)]
-
type Field1 = (i32, u32);
type Field2 = f32;
type Field3 = i64;
#![stable(feature = "foo", since = "1.33.0")]
#![feature(staged_api)]
#![feature(const_raw_ptr_deref)]
-#![feature(const_fn)]
#[stable(feature = "foo", since = "1.33.0")]
#[rustc_const_unstable(feature = "const_foo", issue = "none")]
error[E0133]: dereference of raw pointer is unsafe and requires unsafe function or block
- --> $DIR/unsafe-unstable-const-fn.rs:9:5
+ --> $DIR/unsafe-unstable-const-fn.rs:8:5
|
LL | *a == b
| ^^ dereference of raw pointer
// run-pass
+// needs-unwind
// ignore-windows target requires uwtable
// ignore-wasm32-bare no proper panic=unwind support
// compile-flags: -C panic=unwind -C force-unwind-tables=n
target_os = "dragonfly",
target_os = "emscripten",
target_os = "freebsd",
+ target_os = "fuchsia",
target_os = "linux",
target_os = "macos",
target_os = "netbsd",
-Subproject commit f3e13226d6d17a2bc5f325303494b43a45f53b7f
+Subproject commit e51522ab3db23b0d8f1de54eb1f0113924896331
let cargo = &Path::new(cargo);
for test in TEST_REPOS.iter().rev() {
- test_repo(cargo, out_dir, test);
+ if args[3..].is_empty() || args[3..].iter().any(|s| s.contains(test.name)) {
+ test_repo(cargo, out_dir, test);
+ }
}
}
# gh pages docs
util/gh-pages/lints.json
+**/metadata_collection.json
# rustfmt backups
*.rs.bk
Run `cargo dev ide_setup --repo-path <repo-path>` where `<repo-path>` is a path to the rustc repo
you just cloned.
The command will add path-dependencies pointing towards rustc-crates inside the rustc repo to
-Clippys `Cargo.toml`s and should allow rust-analyzer to understand most of the types that Clippy uses.
+Clippys `Cargo.toml`s and should allow `IntelliJ Rust` to understand most of the types that Clippy uses.
Just make sure to remove the dependencies again before finally making a pull request!
[rustc_repo]: https://github.com/rust-lang/rust/
[package]
name = "clippy"
-version = "0.1.53"
+version = "0.1.54"
authors = ["The Rust Clippy Developers"]
description = "A bunch of helpful lints to avoid common pitfalls in Rust"
repository = "https://github.com/rust-lang/rust-clippy"
deny-warnings = []
integration = ["tempfile"]
internal-lints = ["clippy_lints/internal-lints"]
+metadata-collector-lint = ["internal-lints", "clippy_lints/metadata-collector-lint"]
[package.metadata.rust-analyzer]
# This package uses #[feature(rustc_private)]
[package]
name = "clippy_lints"
# begin automatic update
-version = "0.1.53"
+version = "0.1.54"
# end automatic update
authors = ["The Rust Clippy Developers"]
description = "A bunch of helpful lints to avoid common pitfalls in Rust"
quine-mc_cluskey = "0.2.2"
regex-syntax = "0.6"
serde = { version = "1.0", features = ["derive"] }
+serde_json = { version = "1.0", optional = true }
toml = "0.5.3"
unicode-normalization = "0.1"
semver = "0.11"
deny-warnings = []
# build clippy with internal lints enabled, off by default
internal-lints = ["clippy_utils/internal-lints"]
+metadata-collector-lint = ["serde_json", "clippy_utils/metadata-collector-lint"]
[package.metadata.rust-analyzer]
# This crate uses #[feature(rustc_private)]
use clippy_utils::diagnostics::span_lint_and_help;
use clippy_utils::ty::implements_trait;
-use clippy_utils::{get_trait_def_id, if_sequence, is_else_clause, paths, SpanlessEq};
+use clippy_utils::{get_trait_def_id, if_sequence, in_constant, is_else_clause, paths, SpanlessEq};
use rustc_hir::{BinOpKind, Expr, ExprKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
return;
}
+ if in_constant(cx, expr.hir_id) {
+ return;
+ }
+
// Check that there exists at least one explicit else condition
let (conds, _) = if_sequence(expr);
if conds.len() < 2 {
use clippy_utils::diagnostics::{span_lint_and_note, span_lint_and_sugg};
use clippy_utils::source::snippet_with_macro_callsite;
-use clippy_utils::{any_parent_is_automatically_derived, contains_name, match_def_path, paths};
+use clippy_utils::{any_parent_is_automatically_derived, contains_name, in_macro, match_def_path, paths};
use if_chain::if_chain;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Applicability;
impl LateLintPass<'_> for Default {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
if_chain! {
+ if !in_macro(expr.span);
// Avoid cases already linted by `field_reassign_with_default`
if !self.reassigned_linted.contains(&expr.span);
if let ExprKind::Call(path, ..) = expr.kind;
use clippy_utils::diagnostics::{span_lint, span_lint_and_note};
use clippy_utils::{get_parent_expr, path_to_local, path_to_local_id};
+use if_chain::if_chain;
use rustc_hir::intravisit::{walk_expr, NestedVisitorMap, Visitor};
use rustc_hir::{BinOpKind, Block, Expr, ExprKind, Guard, HirId, Local, Node, Stmt, StmtKind};
use rustc_lint::{LateContext, LateLintPass};
impl<'tcx> LateLintPass<'tcx> for EvalOrderDependence {
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
// Find a write to a local variable.
- match expr.kind {
- ExprKind::Assign(lhs, ..) | ExprKind::AssignOp(_, lhs, _) => {
- if let Some(var) = path_to_local(lhs) {
- let mut visitor = ReadVisitor {
- cx,
- var,
- write_expr: expr,
- last_expr: expr,
- };
- check_for_unsequenced_reads(&mut visitor);
- }
- },
- _ => {},
- }
+ let var = if_chain! {
+ if let ExprKind::Assign(lhs, ..) | ExprKind::AssignOp(_, lhs, _) = expr.kind;
+ if let Some(var) = path_to_local(lhs);
+ if expr.span.desugaring_kind().is_none();
+ then { var } else { return; }
+ };
+ let mut visitor = ReadVisitor {
+ cx,
+ var,
+ write_expr: expr,
+ last_expr: expr,
+ };
+ check_for_unsequenced_reads(&mut visitor);
}
fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx Stmt<'_>) {
match stmt.kind {
self.cx,
EVAL_ORDER_DEPENDENCE,
expr.span,
- "unsequenced read of a variable",
+ &format!("unsequenced read of `{}`", self.cx.tcx.hir().name(self.var)),
Some(self.write_expr.span),
"whether read occurs before this write depends on evaluation order",
);
-use clippy_utils::diagnostics::span_lint_and_then;
-use clippy_utils::match_panic_def_id;
-use clippy_utils::source::snippet_opt;
-use if_chain::if_chain;
+use clippy_utils::{
+ diagnostics::span_lint_and_sugg,
+ get_async_fn_body, is_async_fn,
+ source::{snippet_with_applicability, snippet_with_context, walk_span_to_context},
+ visitors::visit_break_exprs,
+};
use rustc_errors::Applicability;
use rustc_hir::intravisit::FnKind;
-use rustc_hir::{Body, Expr, ExprKind, FnDecl, HirId, MatchSource, StmtKind};
-use rustc_lint::{LateContext, LateLintPass};
+use rustc_hir::{Block, Body, Expr, ExprKind, FnDecl, FnRetTy, HirId};
+use rustc_lint::{LateContext, LateLintPass, LintContext};
+use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
-use rustc_span::source_map::Span;
+use rustc_span::{Span, SyntaxContext};
declare_clippy_lint! {
/// **What it does:** Checks for missing return statements at the end of a block.
declare_lint_pass!(ImplicitReturn => [IMPLICIT_RETURN]);
-static LINT_BREAK: &str = "change `break` to `return` as shown";
-static LINT_RETURN: &str = "add `return` as shown";
-
-fn lint(cx: &LateContext<'_>, outer_span: Span, inner_span: Span, msg: &str) {
- let outer_span = outer_span.source_callsite();
- let inner_span = inner_span.source_callsite();
-
- span_lint_and_then(cx, IMPLICIT_RETURN, outer_span, "missing `return` statement", |diag| {
- if let Some(snippet) = snippet_opt(cx, inner_span) {
- diag.span_suggestion(
- outer_span,
- msg,
- format!("return {}", snippet),
- Applicability::MachineApplicable,
- );
- }
- });
+fn lint_return(cx: &LateContext<'_>, span: Span) {
+ let mut app = Applicability::MachineApplicable;
+ let snip = snippet_with_applicability(cx, span, "..", &mut app);
+ span_lint_and_sugg(
+ cx,
+ IMPLICIT_RETURN,
+ span,
+ "missing `return` statement",
+ "add `return` as shown",
+ format!("return {}", snip),
+ app,
+ );
+}
+
+fn lint_break(cx: &LateContext<'_>, break_span: Span, expr_span: Span) {
+ let mut app = Applicability::MachineApplicable;
+ let snip = snippet_with_context(cx, expr_span, break_span.ctxt(), "..", &mut app).0;
+ span_lint_and_sugg(
+ cx,
+ IMPLICIT_RETURN,
+ break_span,
+ "missing `return` statement",
+ "change `break` to `return` as shown",
+ format!("return {}", snip),
+ app,
+ )
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum LintLocation {
+ /// The lint was applied to a parent expression.
+ Parent,
+ /// The lint was applied to this expression, a child, or not applied.
+ Inner,
+}
+impl LintLocation {
+ fn still_parent(self, b: bool) -> Self {
+ if b { self } else { Self::Inner }
+ }
+
+ fn is_parent(self) -> bool {
+ self == Self::Parent
+ }
+}
+
+// Gets the call site if the span is in a child context. Otherwise returns `None`.
+fn get_call_site(span: Span, ctxt: SyntaxContext) -> Option<Span> {
+ (span.ctxt() != ctxt).then(|| walk_span_to_context(span, ctxt).unwrap_or(span))
}
-fn expr_match(cx: &LateContext<'_>, expr: &Expr<'_>) {
+fn lint_implicit_returns(
+ cx: &LateContext<'tcx>,
+ expr: &'tcx Expr<'_>,
+ // The context of the function body.
+ ctxt: SyntaxContext,
+ // Whether the expression is from a macro expansion.
+ call_site_span: Option<Span>,
+) -> LintLocation {
match expr.kind {
- // loops could be using `break` instead of `return`
- ExprKind::Block(block, ..) | ExprKind::Loop(block, ..) => {
- if let Some(expr) = &block.expr {
- expr_match(cx, expr);
- }
- // only needed in the case of `break` with `;` at the end
- else if let Some(stmt) = block.stmts.last() {
- if_chain! {
- if let StmtKind::Semi(expr, ..) = &stmt.kind;
- // make sure it's a break, otherwise we want to skip
- if let ExprKind::Break(.., Some(break_expr)) = &expr.kind;
- then {
- lint(cx, expr.span, break_expr.span, LINT_BREAK);
- }
- }
- }
- },
- // use `return` instead of `break`
- ExprKind::Break(.., break_expr) => {
- if let Some(break_expr) = break_expr {
- lint(cx, expr.span, break_expr.span, LINT_BREAK);
+ ExprKind::Block(
+ Block {
+ expr: Some(block_expr), ..
+ },
+ _,
+ ) => lint_implicit_returns(
+ cx,
+ block_expr,
+ ctxt,
+ call_site_span.or_else(|| get_call_site(block_expr.span, ctxt)),
+ )
+ .still_parent(call_site_span.is_some()),
+
+ ExprKind::If(_, then_expr, Some(else_expr)) => {
+ // Both `then_expr` or `else_expr` are required to be blocks in the same context as the `if`. Don't
+ // bother checking.
+ let res = lint_implicit_returns(cx, then_expr, ctxt, call_site_span).still_parent(call_site_span.is_some());
+ if res.is_parent() {
+ // The return was added as a parent of this if expression.
+ return res;
}
+ lint_implicit_returns(cx, else_expr, ctxt, call_site_span).still_parent(call_site_span.is_some())
},
- ExprKind::If(.., if_expr, else_expr) => {
- expr_match(cx, if_expr);
- if let Some(else_expr) = else_expr {
- expr_match(cx, else_expr);
+ ExprKind::Match(_, arms, _) => {
+ for arm in arms {
+ let res = lint_implicit_returns(
+ cx,
+ arm.body,
+ ctxt,
+ call_site_span.or_else(|| get_call_site(arm.body.span, ctxt)),
+ )
+ .still_parent(call_site_span.is_some());
+ if res.is_parent() {
+ // The return was added as a parent of this match expression.
+ return res;
+ }
}
+ LintLocation::Inner
},
- ExprKind::Match(.., arms, source) => {
- let check_all_arms = match source {
- MatchSource::IfLetDesugar {
- contains_else_clause: has_else,
- } => has_else,
- _ => true,
- };
-
- if check_all_arms {
- for arm in arms {
- expr_match(cx, arm.body);
+
+ ExprKind::Loop(block, ..) => {
+ let mut add_return = false;
+ visit_break_exprs(block, |break_expr, dest, sub_expr| {
+ if dest.target_id.ok() == Some(expr.hir_id) {
+ if call_site_span.is_none() && break_expr.span.ctxt() == ctxt {
+ lint_break(cx, break_expr.span, sub_expr.unwrap().span);
+ } else {
+ // the break expression is from a macro call, add a return to the loop
+ add_return = true;
+ }
+ }
+ });
+ if add_return {
+ #[allow(clippy::option_if_let_else)]
+ if let Some(span) = call_site_span {
+ lint_return(cx, span);
+ LintLocation::Parent
+ } else {
+ lint_return(cx, expr.span);
+ LintLocation::Inner
}
} else {
- expr_match(cx, arms.first().expect("`if let` doesn't have a single arm").body);
+ LintLocation::Inner
}
},
- // skip if it already has a return statement
- ExprKind::Ret(..) => (),
- // make sure it's not a call that panics
- ExprKind::Call(expr, ..) => {
- if_chain! {
- if let ExprKind::Path(qpath) = &expr.kind;
- if let Some(path_def_id) = cx.qpath_res(qpath, expr.hir_id).opt_def_id();
- if match_panic_def_id(cx, path_def_id);
- then { }
- else {
- lint(cx, expr.span, expr.span, LINT_RETURN)
- }
+
+ // If expressions without an else clause, and blocks without a final expression can only be the final expression
+ // if they are divergent, or return the unit type.
+ ExprKind::If(_, _, None) | ExprKind::Block(Block { expr: None, .. }, _) | ExprKind::Ret(_) => {
+ LintLocation::Inner
+ },
+
+ // Any divergent expression doesn't need a return statement.
+ ExprKind::MethodCall(..)
+ | ExprKind::Call(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Index(..)
+ if cx.typeck_results().expr_ty(expr).is_never() =>
+ {
+ LintLocation::Inner
+ },
+
+ _ =>
+ {
+ #[allow(clippy::option_if_let_else)]
+ if let Some(span) = call_site_span {
+ lint_return(cx, span);
+ LintLocation::Parent
+ } else {
+ lint_return(cx, expr.span);
+ LintLocation::Inner
}
},
- // everything else is missing `return`
- _ => lint(cx, expr.span, expr.span, LINT_RETURN),
}
}
fn check_fn(
&mut self,
cx: &LateContext<'tcx>,
- _: FnKind<'tcx>,
- _: &'tcx FnDecl<'_>,
+ kind: FnKind<'tcx>,
+ decl: &'tcx FnDecl<'_>,
body: &'tcx Body<'_>,
span: Span,
_: HirId,
) {
- if span.from_expansion() {
+ if (!matches!(kind, FnKind::Closure) && matches!(decl.output, FnRetTy::DefaultReturn(_)))
+ || span.ctxt() != body.value.span.ctxt()
+ || in_external_macro(cx.sess(), span)
+ {
return;
}
- let body = cx.tcx.hir().body(body.id());
- if cx.typeck_results().expr_ty(&body.value).is_unit() {
+
+ let res_ty = cx.typeck_results().expr_ty(&body.value);
+ if res_ty.is_unit() || res_ty.is_never() {
return;
}
- expr_match(cx, &body.value);
+
+ let expr = if is_async_fn(kind) {
+ match get_async_fn_body(cx.tcx, body) {
+ Some(e) => e,
+ None => return,
+ }
+ } else {
+ &body.value
+ };
+ lint_implicit_returns(cx, expr, expr.span.ctxt(), None);
}
}
// end lints modules, do not remove this comment, it’s used in `update_lints`
pub use crate::utils::conf::Conf;
+use crate::utils::conf::TryConf;
/// Register all pre expansion lints
///
}
#[doc(hidden)]
-pub fn read_conf(args: &[rustc_ast::NestedMetaItem], sess: &Session) -> Conf {
+pub fn read_conf(sess: &Session) -> Conf {
use std::path::Path;
- match utils::conf::file_from_args(args) {
- Ok(file_name) => {
- // if the user specified a file, it must exist, otherwise default to `clippy.toml` but
- // do not require the file to exist
- let file_name = match file_name {
- Some(file_name) => file_name,
- None => match utils::conf::lookup_conf_file() {
- Ok(Some(path)) => path,
- Ok(None) => return Conf::default(),
- Err(error) => {
- sess.struct_err(&format!("error finding Clippy's configuration file: {}", error))
- .emit();
- return Conf::default();
- },
- },
- };
-
- let file_name = if file_name.is_relative() {
- sess.local_crate_source_file
- .as_deref()
- .and_then(Path::parent)
- .unwrap_or_else(|| Path::new(""))
- .join(file_name)
- } else {
- file_name
- };
-
- let (conf, errors) = utils::conf::read(&file_name);
-
- // all conf errors are non-fatal, we just use the default conf in case of error
- for error in errors {
- sess.struct_err(&format!(
- "error reading Clippy's configuration file `{}`: {}",
- file_name.display(),
- error
- ))
- .emit();
- }
-
- conf
- },
- Err((err, span)) => {
- sess.struct_span_err(span, err)
- .span_note(span, "Clippy will use default configuration")
+ let file_name = match utils::conf::lookup_conf_file() {
+ Ok(Some(path)) => path,
+ Ok(None) => return Conf::default(),
+ Err(error) => {
+ sess.struct_err(&format!("error finding Clippy's configuration file: {}", error))
.emit();
- Conf::default()
+ return Conf::default();
},
+ };
+
+ let file_name = if file_name.is_relative() {
+ sess.local_crate_source_file
+ .as_deref()
+ .and_then(Path::parent)
+ .unwrap_or_else(|| Path::new(""))
+ .join(file_name)
+ } else {
+ file_name
+ };
+
+ let TryConf { conf, errors } = utils::conf::read(&file_name);
+ // all conf errors are non-fatal, we just use the default conf in case of error
+ for error in errors {
+ sess.struct_err(&format!(
+ "error reading Clippy's configuration file `{}`: {}",
+ file_name.display(),
+ error
+ ))
+ .emit();
}
+
+ conf
}
/// Register all lints and lint groups with the rustc plugin registry
store.register_late_pass(|| box utils::internal_lints::MatchTypeOnDiagItem);
store.register_late_pass(|| box utils::internal_lints::OuterExpnDataPass);
}
+ #[cfg(feature = "metadata-collector-lint")]
+ {
+ if std::env::var("ENABLE_METADATA_COLLECTION").eq(&Ok("1".to_string())) {
+ store.register_late_pass(|| box utils::internal_lints::metadata_collector::MetadataCollector::default());
+ }
+ }
+
store.register_late_pass(|| box utils::author::Author);
store.register_late_pass(|| box await_holding_invalid::AwaitHolding);
store.register_late_pass(|| box serde_api::SerdeApi);
use if_chain::if_chain;
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_block, walk_expr, NestedVisitorMap, Visitor};
-use rustc_hir::{Block, Expr, ExprKind, GenericArg, HirId, Local, Pat, PatKind, QPath, StmtKind};
+use rustc_hir::{Block, Expr, ExprKind, GenericArg, GenericArgs, HirId, Local, Pat, PatKind, QPath, StmtKind, Ty};
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
+
use rustc_span::symbol::{sym, Ident};
use rustc_span::{MultiSpan, Span};
if chain_method.ident.name == sym!(collect) && is_trait_method(cx, &args[0], sym::Iterator);
if let Some(generic_args) = chain_method.args;
if let Some(GenericArg::Type(ref ty)) = generic_args.args.get(0);
- let ty = cx.typeck_results().node_type(ty.hir_id);
+ if let Some(ty) = cx.typeck_results().node_type_opt(ty.hir_id);
if is_type_diagnostic_item(cx, ty, sym::vec_type)
|| is_type_diagnostic_item(cx, ty, sym::vecdeque_type)
|| match_type(cx, ty, &paths::BTREEMAP)
}
fn check_needless_collect_indirect_usage<'tcx>(expr: &'tcx Expr<'_>, cx: &LateContext<'tcx>) {
+ fn get_hir_id<'tcx>(ty: Option<&Ty<'tcx>>, method_args: Option<&GenericArgs<'tcx>>) -> Option<HirId> {
+ if let Some(ty) = ty {
+ return Some(ty.hir_id);
+ }
+
+ if let Some(generic_args) = method_args {
+ if let Some(GenericArg::Type(ref ty)) = generic_args.args.get(0) {
+ return Some(ty.hir_id);
+ }
+ }
+
+ None
+ }
if let ExprKind::Block(block, _) = expr.kind {
for stmt in block.stmts {
if_chain! {
if let StmtKind::Local(
Local { pat: Pat { hir_id: pat_id, kind: PatKind::Binding(_, _, ident, .. ), .. },
- init: Some(init_expr), .. }
+ init: Some(init_expr), ty, .. }
) = stmt.kind;
if let ExprKind::MethodCall(method_name, collect_span, &[ref iter_source], ..) = init_expr.kind;
if method_name.ident.name == sym!(collect) && is_trait_method(cx, init_expr, sym::Iterator);
- if let Some(generic_args) = method_name.args;
- if let Some(GenericArg::Type(ref ty)) = generic_args.args.get(0);
- if let ty = cx.typeck_results().node_type(ty.hir_id);
+ if let Some(hir_id) = get_hir_id(*ty, method_name.args);
+ if let Some(ty) = cx.typeck_results().node_type_opt(hir_id);
if is_type_diagnostic_item(cx, ty, sym::vec_type) ||
is_type_diagnostic_item(cx, ty, sym::vecdeque_type) ||
+ is_type_diagnostic_item(cx, ty, sym::BinaryHeap) ||
match_type(cx, ty, &paths::LINKED_LIST);
if let Some(iter_calls) = detect_iter_and_into_iters(block, *ident);
if let [iter_call] = &*iter_calls;
return;
}
let used_in_condition = &var_visitor.ids;
- let no_cond_variable_mutated = if let Some(used_mutably) = mutated_variables(expr, cx) {
- used_in_condition.is_disjoint(&used_mutably)
- } else {
- return;
- };
+ let mutated_in_body = mutated_variables(expr, cx);
+ let mutated_in_condition = mutated_variables(cond, cx);
+ let no_cond_variable_mutated =
+ if let (Some(used_mutably_body), Some(used_mutably_cond)) = (mutated_in_body, mutated_in_condition) {
+ used_in_condition.is_disjoint(&used_mutably_body) && used_in_condition.is_disjoint(&used_mutably_cond)
+ } else {
+ return;
+ };
let mutable_static_in_cond = var_visitor.def_ids.iter().any(|(_, v)| *v);
let mut has_break_or_return_visitor = HasBreakOrReturnVisitor {
// Checks if arm has the form `Some(ref v) => Some(v)` (checks for `ref` and `ref mut`)
fn is_ref_some_arm(cx: &LateContext<'_>, arm: &Arm<'_>) -> Option<BindingAnnotation> {
if_chain! {
- if let PatKind::TupleStruct(ref qpath, pats, _) = arm.pat.kind;
+ if let PatKind::TupleStruct(ref qpath, [first_pat, ..], _) = arm.pat.kind;
if is_lang_ctor(cx, qpath, OptionSome);
- if let PatKind::Binding(rb, .., ident, _) = pats[0].kind;
+ if let PatKind::Binding(rb, .., ident, _) = first_pat.kind;
if rb == BindingAnnotation::Ref || rb == BindingAnnotation::RefMut;
if let ExprKind::Call(e, args) = remove_blocks(arm.body).kind;
if let ExprKind::Path(ref some_path) = e.kind;
use clippy_utils::{is_lang_ctor, is_qpath_def_path, is_trait_method, paths};
use if_chain::if_chain;
use rustc_ast::ast::LitKind;
+ use rustc_data_structures::fx::FxHashSet;
use rustc_errors::Applicability;
use rustc_hir::LangItem::{OptionNone, OptionSome, PollPending, PollReady, ResultErr, ResultOk};
use rustc_hir::{
/// deallocate memory. For these types, and composites containing them, changing the drop order
/// won't result in any observable side effects.
fn type_needs_ordered_drop(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
+ type_needs_ordered_drop_inner(cx, ty, &mut FxHashSet::default())
+ }
+
+ fn type_needs_ordered_drop_inner(cx: &LateContext<'tcx>, ty: Ty<'tcx>, seen: &mut FxHashSet<Ty<'tcx>>) -> bool {
+ if !seen.insert(ty) {
+ return false;
+ }
if !ty.needs_drop(cx.tcx, cx.param_env) {
false
} else if !cx
// This type doesn't implement drop, so no side effects here.
// Check if any component type has any.
match ty.kind() {
- ty::Tuple(_) => ty.tuple_fields().any(|ty| type_needs_ordered_drop(cx, ty)),
- ty::Array(ty, _) => type_needs_ordered_drop(cx, ty),
+ ty::Tuple(_) => ty.tuple_fields().any(|ty| type_needs_ordered_drop_inner(cx, ty, seen)),
+ ty::Array(ty, _) => type_needs_ordered_drop_inner(cx, ty, seen),
ty::Adt(adt, subs) => adt
.all_fields()
.map(|f| f.ty(cx.tcx, subs))
- .any(|ty| type_needs_ordered_drop(cx, ty)),
+ .any(|ty| type_needs_ordered_drop_inner(cx, ty, seen)),
_ => true,
}
}
{
// Check all of the generic arguments.
if let ty::Adt(_, subs) = ty.kind() {
- subs.types().any(|ty| type_needs_ordered_drop(cx, ty))
+ subs.types().any(|ty| type_needs_ordered_drop_inner(cx, ty, seen))
} else {
true
}
ShouldImplTraitCase::new("std::ops::Sub", "sub", 2, FN_HEADER, SelfKind::Value, OutType::Any, true),
];
-#[rustfmt::skip]
-const PATTERN_METHODS: [(&str, usize); 17] = [
- ("contains", 1),
- ("starts_with", 1),
- ("ends_with", 1),
- ("find", 1),
- ("rfind", 1),
- ("split", 1),
- ("rsplit", 1),
- ("split_terminator", 1),
- ("rsplit_terminator", 1),
- ("splitn", 2),
- ("rsplitn", 2),
- ("matches", 1),
- ("rmatches", 1),
- ("match_indices", 1),
- ("rmatch_indices", 1),
- ("trim_start_matches", 1),
- ("trim_end_matches", 1),
-];
-
#[derive(Clone, Copy, PartialEq, Debug)]
enum SelfKind {
Value,
use super::SINGLE_CHAR_PATTERN;
+const PATTERN_METHODS: [(&str, usize); 19] = [
+ ("contains", 1),
+ ("starts_with", 1),
+ ("ends_with", 1),
+ ("find", 1),
+ ("rfind", 1),
+ ("split", 1),
+ ("rsplit", 1),
+ ("split_terminator", 1),
+ ("rsplit_terminator", 1),
+ ("splitn", 2),
+ ("rsplitn", 2),
+ ("matches", 1),
+ ("rmatches", 1),
+ ("match_indices", 1),
+ ("rmatch_indices", 1),
+ ("strip_prefix", 1),
+ ("strip_suffix", 1),
+ ("trim_start_matches", 1),
+ ("trim_end_matches", 1),
+];
+
/// lint for length-1 `str`s for methods in `PATTERN_METHODS`
pub(super) fn check(cx: &LateContext<'_>, _expr: &hir::Expr<'_>, method_name: Symbol, args: &[hir::Expr<'_>]) {
- for &(method, pos) in &crate::methods::PATTERN_METHODS {
+ for &(method, pos) in &PATTERN_METHODS {
if_chain! {
if let ty::Ref(_, ty, _) = cx.typeck_results().expr_ty_adjusted(&args[0]).kind();
if *ty.kind() == ty::Str;
use rustc_hir::LangItem::{OptionNone, OptionSome};
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
+use rustc_middle::ty::{self, TyS};
use rustc_span::sym;
use super::UNNECESSARY_FILTER_MAP;
found_mapping |= return_visitor.found_mapping;
found_filtering |= return_visitor.found_filtering;
- if !found_filtering {
- span_lint(
- cx,
- UNNECESSARY_FILTER_MAP,
- expr.span,
- "this `.filter_map` can be written more simply using `.map`",
- );
- return;
- }
-
- if !found_mapping && !mutates_arg {
- span_lint(
- cx,
- UNNECESSARY_FILTER_MAP,
- expr.span,
- "this `.filter_map` can be written more simply using `.filter`",
- );
+ let sugg = if !found_filtering {
+ "map"
+ } else if !found_mapping && !mutates_arg {
+ let in_ty = cx.typeck_results().node_type(body.params[0].hir_id);
+ match cx.typeck_results().expr_ty(&body.value).kind() {
+ ty::Adt(adt, subst)
+ if cx.tcx.is_diagnostic_item(sym::option_type, adt.did)
+ && TyS::same_type(in_ty, subst.type_at(0)) =>
+ {
+ "filter"
+ },
+ _ => return,
+ }
+ } else {
return;
- }
+ };
+ span_lint(
+ cx,
+ UNNECESSARY_FILTER_MAP,
+ expr.span,
+ &format!("this `.filter_map` can be written more simply using `.{}`", sugg),
+ );
}
}
+++ /dev/null
-use clippy_utils::diagnostics::{span_lint, span_lint_and_help, span_lint_and_sugg, span_lint_and_then};
-use clippy_utils::source::snippet_opt;
-use rustc_ast::ast::{
- BindingMode, Expr, ExprKind, GenericParamKind, Generics, Lit, LitFloatType, LitIntType, LitKind, Mutability,
- NodeId, Pat, PatKind, UnOp,
-};
-use rustc_ast::visit::FnKind;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::Applicability;
-use rustc_hir::PrimTy;
-use rustc_lint::{EarlyContext, EarlyLintPass, LintContext};
-use rustc_middle::lint::in_external_macro;
-use rustc_session::{declare_lint_pass, declare_tool_lint};
-use rustc_span::source_map::Span;
-
-declare_clippy_lint! {
- /// **What it does:** Checks for structure field patterns bound to wildcards.
- ///
- /// **Why is this bad?** Using `..` instead is shorter and leaves the focus on
- /// the fields that are actually bound.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// # struct Foo {
- /// # a: i32,
- /// # b: i32,
- /// # c: i32,
- /// # }
- /// let f = Foo { a: 0, b: 0, c: 0 };
- ///
- /// // Bad
- /// match f {
- /// Foo { a: _, b: 0, .. } => {},
- /// Foo { a: _, b: _, c: _ } => {},
- /// }
- ///
- /// // Good
- /// match f {
- /// Foo { b: 0, .. } => {},
- /// Foo { .. } => {},
- /// }
- /// ```
- pub UNNEEDED_FIELD_PATTERN,
- restriction,
- "struct fields bound to a wildcard instead of using `..`"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Checks for function arguments having the similar names
- /// differing by an underscore.
- ///
- /// **Why is this bad?** It affects code readability.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// // Bad
- /// fn foo(a: i32, _a: i32) {}
- ///
- /// // Good
- /// fn bar(a: i32, _b: i32) {}
- /// ```
- pub DUPLICATE_UNDERSCORE_ARGUMENT,
- style,
- "function arguments having names which only differ by an underscore"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Detects expressions of the form `--x`.
- ///
- /// **Why is this bad?** It can mislead C/C++ programmers to think `x` was
- /// decremented.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// let mut x = 3;
- /// --x;
- /// ```
- pub DOUBLE_NEG,
- style,
- "`--x`, which is a double negation of `x` and not a pre-decrement as in C/C++"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Warns on hexadecimal literals with mixed-case letter
- /// digits.
- ///
- /// **Why is this bad?** It looks confusing.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// // Bad
- /// let y = 0x1a9BAcD;
- ///
- /// // Good
- /// let y = 0x1A9BACD;
- /// ```
- pub MIXED_CASE_HEX_LITERALS,
- style,
- "hex literals whose letter digits are not consistently upper- or lowercased"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Warns if literal suffixes are not separated by an
- /// underscore.
- ///
- /// **Why is this bad?** It is much less readable.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// // Bad
- /// let y = 123832i32;
- ///
- /// // Good
- /// let y = 123832_i32;
- /// ```
- pub UNSEPARATED_LITERAL_SUFFIX,
- pedantic,
- "literals whose suffix is not separated by an underscore"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Warns if an integral constant literal starts with `0`.
- ///
- /// **Why is this bad?** In some languages (including the infamous C language
- /// and most of its
- /// family), this marks an octal constant. In Rust however, this is a decimal
- /// constant. This could
- /// be confusing for both the writer and a reader of the constant.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- ///
- /// In Rust:
- /// ```rust
- /// fn main() {
- /// let a = 0123;
- /// println!("{}", a);
- /// }
- /// ```
- ///
- /// prints `123`, while in C:
- ///
- /// ```c
- /// #include <stdio.h>
- ///
- /// int main() {
- /// int a = 0123;
- /// printf("%d\n", a);
- /// }
- /// ```
- ///
- /// prints `83` (as `83 == 0o123` while `123 == 0o173`).
- pub ZERO_PREFIXED_LITERAL,
- complexity,
- "integer literals starting with `0`"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Warns if a generic shadows a built-in type.
- ///
- /// **Why is this bad?** This gives surprising type errors.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- ///
- /// ```ignore
- /// impl<u32> Foo<u32> {
- /// fn impl_func(&self) -> u32 {
- /// 42
- /// }
- /// }
- /// ```
- pub BUILTIN_TYPE_SHADOW,
- style,
- "shadowing a builtin type"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Checks for patterns in the form `name @ _`.
- ///
- /// **Why is this bad?** It's almost always more readable to just use direct
- /// bindings.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// # let v = Some("abc");
- ///
- /// // Bad
- /// match v {
- /// Some(x) => (),
- /// y @ _ => (),
- /// }
- ///
- /// // Good
- /// match v {
- /// Some(x) => (),
- /// y => (),
- /// }
- /// ```
- pub REDUNDANT_PATTERN,
- style,
- "using `name @ _` in a pattern"
-}
-
-declare_clippy_lint! {
- /// **What it does:** Checks for tuple patterns with a wildcard
- /// pattern (`_`) is next to a rest pattern (`..`).
- ///
- /// _NOTE_: While `_, ..` means there is at least one element left, `..`
- /// means there are 0 or more elements left. This can make a difference
- /// when refactoring, but shouldn't result in errors in the refactored code,
- /// since the wildcard pattern isn't used anyway.
- /// **Why is this bad?** The wildcard pattern is unneeded as the rest pattern
- /// can match that element as well.
- ///
- /// **Known problems:** None.
- ///
- /// **Example:**
- /// ```rust
- /// # struct TupleStruct(u32, u32, u32);
- /// # let t = TupleStruct(1, 2, 3);
- /// // Bad
- /// match t {
- /// TupleStruct(0, .., _) => (),
- /// _ => (),
- /// }
- ///
- /// // Good
- /// match t {
- /// TupleStruct(0, ..) => (),
- /// _ => (),
- /// }
- /// ```
- pub UNNEEDED_WILDCARD_PATTERN,
- complexity,
- "tuple patterns with a wildcard pattern (`_`) is next to a rest pattern (`..`)"
-}
-
-declare_lint_pass!(MiscEarlyLints => [
- UNNEEDED_FIELD_PATTERN,
- DUPLICATE_UNDERSCORE_ARGUMENT,
- DOUBLE_NEG,
- MIXED_CASE_HEX_LITERALS,
- UNSEPARATED_LITERAL_SUFFIX,
- ZERO_PREFIXED_LITERAL,
- BUILTIN_TYPE_SHADOW,
- REDUNDANT_PATTERN,
- UNNEEDED_WILDCARD_PATTERN,
-]);
-
-impl EarlyLintPass for MiscEarlyLints {
- fn check_generics(&mut self, cx: &EarlyContext<'_>, gen: &Generics) {
- for param in &gen.params {
- if let GenericParamKind::Type { .. } = param.kind {
- if let Some(prim_ty) = PrimTy::from_name(param.ident.name) {
- span_lint(
- cx,
- BUILTIN_TYPE_SHADOW,
- param.ident.span,
- &format!("this generic shadows the built-in type `{}`", prim_ty.name()),
- );
- }
- }
- }
- }
-
- fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &Pat) {
- if let PatKind::Struct(ref npat, ref pfields, _) = pat.kind {
- let mut wilds = 0;
- let type_name = npat
- .segments
- .last()
- .expect("A path must have at least one segment")
- .ident
- .name;
-
- for field in pfields {
- if let PatKind::Wild = field.pat.kind {
- wilds += 1;
- }
- }
- if !pfields.is_empty() && wilds == pfields.len() {
- span_lint_and_help(
- cx,
- UNNEEDED_FIELD_PATTERN,
- pat.span,
- "all the struct fields are matched to a wildcard pattern, consider using `..`",
- None,
- &format!("try with `{} {{ .. }}` instead", type_name),
- );
- return;
- }
- if wilds > 0 {
- for field in pfields {
- if let PatKind::Wild = field.pat.kind {
- wilds -= 1;
- if wilds > 0 {
- span_lint(
- cx,
- UNNEEDED_FIELD_PATTERN,
- field.span,
- "you matched a field with a wildcard pattern, consider using `..` instead",
- );
- } else {
- let mut normal = vec![];
-
- for field in pfields {
- match field.pat.kind {
- PatKind::Wild => {},
- _ => {
- if let Ok(n) = cx.sess().source_map().span_to_snippet(field.span) {
- normal.push(n);
- }
- },
- }
- }
-
- span_lint_and_help(
- cx,
- UNNEEDED_FIELD_PATTERN,
- field.span,
- "you matched a field with a wildcard pattern, consider using `..` \
- instead",
- None,
- &format!("try with `{} {{ {}, .. }}`", type_name, normal[..].join(", ")),
- );
- }
- }
- }
- }
- }
-
- if let PatKind::Ident(left, ident, Some(ref right)) = pat.kind {
- let left_binding = match left {
- BindingMode::ByRef(Mutability::Mut) => "ref mut ",
- BindingMode::ByRef(Mutability::Not) => "ref ",
- BindingMode::ByValue(..) => "",
- };
-
- if let PatKind::Wild = right.kind {
- span_lint_and_sugg(
- cx,
- REDUNDANT_PATTERN,
- pat.span,
- &format!(
- "the `{} @ _` pattern can be written as just `{}`",
- ident.name, ident.name,
- ),
- "try",
- format!("{}{}", left_binding, ident.name),
- Applicability::MachineApplicable,
- );
- }
- }
-
- check_unneeded_wildcard_pattern(cx, pat);
- }
-
- fn check_fn(&mut self, cx: &EarlyContext<'_>, fn_kind: FnKind<'_>, _: Span, _: NodeId) {
- let mut registered_names: FxHashMap<String, Span> = FxHashMap::default();
-
- for arg in &fn_kind.decl().inputs {
- if let PatKind::Ident(_, ident, None) = arg.pat.kind {
- let arg_name = ident.to_string();
-
- if let Some(arg_name) = arg_name.strip_prefix('_') {
- if let Some(correspondence) = registered_names.get(arg_name) {
- span_lint(
- cx,
- DUPLICATE_UNDERSCORE_ARGUMENT,
- *correspondence,
- &format!(
- "`{}` already exists, having another argument having almost the same \
- name makes code comprehension and documentation more difficult",
- arg_name
- ),
- );
- }
- } else {
- registered_names.insert(arg_name, arg.pat.span);
- }
- }
- }
- }
-
- fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &Expr) {
- if in_external_macro(cx.sess(), expr.span) {
- return;
- }
- match expr.kind {
- ExprKind::Unary(UnOp::Neg, ref inner) => {
- if let ExprKind::Unary(UnOp::Neg, _) = inner.kind {
- span_lint(
- cx,
- DOUBLE_NEG,
- expr.span,
- "`--x` could be misinterpreted as pre-decrement by C programmers, is usually a no-op",
- );
- }
- },
- ExprKind::Lit(ref lit) => Self::check_lit(cx, lit),
- _ => (),
- }
- }
-}
-
-impl MiscEarlyLints {
- fn check_lit(cx: &EarlyContext<'_>, lit: &Lit) {
- // We test if first character in snippet is a number, because the snippet could be an expansion
- // from a built-in macro like `line!()` or a proc-macro like `#[wasm_bindgen]`.
- // Note that this check also covers special case that `line!()` is eagerly expanded by compiler.
- // See <https://github.com/rust-lang/rust-clippy/issues/4507> for a regression.
- // FIXME: Find a better way to detect those cases.
- let lit_snip = match snippet_opt(cx, lit.span) {
- Some(snip) if snip.chars().next().map_or(false, |c| c.is_digit(10)) => snip,
- _ => return,
- };
-
- if let LitKind::Int(value, lit_int_type) = lit.kind {
- let suffix = match lit_int_type {
- LitIntType::Signed(ty) => ty.name_str(),
- LitIntType::Unsigned(ty) => ty.name_str(),
- LitIntType::Unsuffixed => "",
- };
-
- let maybe_last_sep_idx = if let Some(val) = lit_snip.len().checked_sub(suffix.len() + 1) {
- val
- } else {
- return; // It's useless so shouldn't lint.
- };
- // Do not lint when literal is unsuffixed.
- if !suffix.is_empty() && lit_snip.as_bytes()[maybe_last_sep_idx] != b'_' {
- span_lint_and_sugg(
- cx,
- UNSEPARATED_LITERAL_SUFFIX,
- lit.span,
- "integer type suffix should be separated by an underscore",
- "add an underscore",
- format!("{}_{}", &lit_snip[..=maybe_last_sep_idx], suffix),
- Applicability::MachineApplicable,
- );
- }
-
- if lit_snip.starts_with("0x") {
- if maybe_last_sep_idx <= 2 {
- // It's meaningless or causes range error.
- return;
- }
- let mut seen = (false, false);
- for ch in lit_snip.as_bytes()[2..=maybe_last_sep_idx].iter() {
- match ch {
- b'a'..=b'f' => seen.0 = true,
- b'A'..=b'F' => seen.1 = true,
- _ => {},
- }
- if seen.0 && seen.1 {
- span_lint(
- cx,
- MIXED_CASE_HEX_LITERALS,
- lit.span,
- "inconsistent casing in hexadecimal literal",
- );
- break;
- }
- }
- } else if lit_snip.starts_with("0b") || lit_snip.starts_with("0o") {
- /* nothing to do */
- } else if value != 0 && lit_snip.starts_with('0') {
- span_lint_and_then(
- cx,
- ZERO_PREFIXED_LITERAL,
- lit.span,
- "this is a decimal constant",
- |diag| {
- diag.span_suggestion(
- lit.span,
- "if you mean to use a decimal constant, remove the `0` to avoid confusion",
- lit_snip.trim_start_matches(|c| c == '_' || c == '0').to_string(),
- Applicability::MaybeIncorrect,
- );
- diag.span_suggestion(
- lit.span,
- "if you mean to use an octal constant, use `0o`",
- format!("0o{}", lit_snip.trim_start_matches(|c| c == '_' || c == '0')),
- Applicability::MaybeIncorrect,
- );
- },
- );
- }
- } else if let LitKind::Float(_, LitFloatType::Suffixed(float_ty)) = lit.kind {
- let suffix = float_ty.name_str();
- let maybe_last_sep_idx = if let Some(val) = lit_snip.len().checked_sub(suffix.len() + 1) {
- val
- } else {
- return; // It's useless so shouldn't lint.
- };
- if lit_snip.as_bytes()[maybe_last_sep_idx] != b'_' {
- span_lint_and_sugg(
- cx,
- UNSEPARATED_LITERAL_SUFFIX,
- lit.span,
- "float type suffix should be separated by an underscore",
- "add an underscore",
- format!("{}_{}", &lit_snip[..=maybe_last_sep_idx], suffix),
- Applicability::MachineApplicable,
- );
- }
- }
- }
-}
-
-fn check_unneeded_wildcard_pattern(cx: &EarlyContext<'_>, pat: &Pat) {
- if let PatKind::TupleStruct(_, ref patterns) | PatKind::Tuple(ref patterns) = pat.kind {
- fn span_lint(cx: &EarlyContext<'_>, span: Span, only_one: bool) {
- span_lint_and_sugg(
- cx,
- UNNEEDED_WILDCARD_PATTERN,
- span,
- if only_one {
- "this pattern is unneeded as the `..` pattern can match that element"
- } else {
- "these patterns are unneeded as the `..` pattern can match those elements"
- },
- if only_one { "remove it" } else { "remove them" },
- "".to_string(),
- Applicability::MachineApplicable,
- );
- }
-
- if let Some(rest_index) = patterns.iter().position(|pat| pat.is_rest()) {
- if let Some((left_index, left_pat)) = patterns[..rest_index]
- .iter()
- .rev()
- .take_while(|pat| matches!(pat.kind, PatKind::Wild))
- .enumerate()
- .last()
- {
- span_lint(cx, left_pat.span.until(patterns[rest_index].span), left_index == 0);
- }
-
- if let Some((right_index, right_pat)) = patterns[rest_index + 1..]
- .iter()
- .take_while(|pat| matches!(pat.kind, PatKind::Wild))
- .enumerate()
- .last()
- {
- span_lint(
- cx,
- patterns[rest_index].span.shrink_to_hi().to(right_pat.span),
- right_index == 0,
- );
- }
- }
- }
-}
--- /dev/null
+use clippy_utils::diagnostics::span_lint;
+use rustc_ast::ast::{GenericParam, GenericParamKind};
+use rustc_hir::PrimTy;
+use rustc_lint::EarlyContext;
+
+use super::BUILTIN_TYPE_SHADOW;
+
+pub(super) fn check(cx: &EarlyContext<'_>, param: &GenericParam) {
+ if let GenericParamKind::Type { .. } = param.kind {
+ if let Some(prim_ty) = PrimTy::from_name(param.ident.name) {
+ span_lint(
+ cx,
+ BUILTIN_TYPE_SHADOW,
+ param.ident.span,
+ &format!("this generic shadows the built-in type `{}`", prim_ty.name()),
+ );
+ }
+ }
+}
--- /dev/null
+use super::MiscEarlyLints;
+use clippy_utils::diagnostics::span_lint;
+use rustc_ast::ast::{Expr, ExprKind, UnOp};
+use rustc_lint::EarlyContext;
+
+use super::DOUBLE_NEG;
+
+pub(super) fn check(cx: &EarlyContext<'_>, expr: &Expr) {
+ match expr.kind {
+ ExprKind::Unary(UnOp::Neg, ref inner) => {
+ if let ExprKind::Unary(UnOp::Neg, _) = inner.kind {
+ span_lint(
+ cx,
+ DOUBLE_NEG,
+ expr.span,
+ "`--x` could be misinterpreted as pre-decrement by C programmers, is usually a no-op",
+ );
+ }
+ },
+ ExprKind::Lit(ref lit) => MiscEarlyLints::check_lit(cx, lit),
+ _ => (),
+ }
+}
--- /dev/null
+use clippy_utils::diagnostics::span_lint;
+use rustc_ast::ast::Lit;
+use rustc_lint::EarlyContext;
+
+use super::MIXED_CASE_HEX_LITERALS;
+
+pub(super) fn check(cx: &EarlyContext<'_>, lit: &Lit, suffix: &str, lit_snip: &str) {
+ let maybe_last_sep_idx = if let Some(val) = lit_snip.len().checked_sub(suffix.len() + 1) {
+ val
+ } else {
+ return; // It's useless so shouldn't lint.
+ };
+ if maybe_last_sep_idx <= 2 {
+ // It's meaningless or causes range error.
+ return;
+ }
+ let mut seen = (false, false);
+ for ch in lit_snip.as_bytes()[2..=maybe_last_sep_idx].iter() {
+ match ch {
+ b'a'..=b'f' => seen.0 = true,
+ b'A'..=b'F' => seen.1 = true,
+ _ => {},
+ }
+ if seen.0 && seen.1 {
+ span_lint(
+ cx,
+ MIXED_CASE_HEX_LITERALS,
+ lit.span,
+ "inconsistent casing in hexadecimal literal",
+ );
+ break;
+ }
+ }
+}
--- /dev/null
+mod builtin_type_shadow;
+mod double_neg;
+mod mixed_case_hex_literals;
+mod redundant_pattern;
+mod unneeded_field_pattern;
+mod unneeded_wildcard_pattern;
+mod unseparated_literal_suffix;
+mod zero_prefixed_literal;
+
+use clippy_utils::diagnostics::span_lint;
+use clippy_utils::source::snippet_opt;
+use rustc_ast::ast::{Expr, Generics, Lit, LitFloatType, LitIntType, LitKind, NodeId, Pat, PatKind};
+use rustc_ast::visit::FnKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_lint::{EarlyContext, EarlyLintPass, LintContext};
+use rustc_middle::lint::in_external_macro;
+use rustc_session::{declare_lint_pass, declare_tool_lint};
+use rustc_span::source_map::Span;
+
+declare_clippy_lint! {
+ /// **What it does:** Checks for structure field patterns bound to wildcards.
+ ///
+ /// **Why is this bad?** Using `..` instead is shorter and leaves the focus on
+ /// the fields that are actually bound.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// # struct Foo {
+ /// # a: i32,
+ /// # b: i32,
+ /// # c: i32,
+ /// # }
+ /// let f = Foo { a: 0, b: 0, c: 0 };
+ ///
+ /// // Bad
+ /// match f {
+ /// Foo { a: _, b: 0, .. } => {},
+ /// Foo { a: _, b: _, c: _ } => {},
+ /// }
+ ///
+ /// // Good
+ /// match f {
+ /// Foo { b: 0, .. } => {},
+ /// Foo { .. } => {},
+ /// }
+ /// ```
+ pub UNNEEDED_FIELD_PATTERN,
+ restriction,
+ "struct fields bound to a wildcard instead of using `..`"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Checks for function arguments having the similar names
+ /// differing by an underscore.
+ ///
+ /// **Why is this bad?** It affects code readability.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// // Bad
+ /// fn foo(a: i32, _a: i32) {}
+ ///
+ /// // Good
+ /// fn bar(a: i32, _b: i32) {}
+ /// ```
+ pub DUPLICATE_UNDERSCORE_ARGUMENT,
+ style,
+ "function arguments having names which only differ by an underscore"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Detects expressions of the form `--x`.
+ ///
+ /// **Why is this bad?** It can mislead C/C++ programmers to think `x` was
+ /// decremented.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// let mut x = 3;
+ /// --x;
+ /// ```
+ pub DOUBLE_NEG,
+ style,
+ "`--x`, which is a double negation of `x` and not a pre-decrement as in C/C++"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Warns on hexadecimal literals with mixed-case letter
+ /// digits.
+ ///
+ /// **Why is this bad?** It looks confusing.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// // Bad
+ /// let y = 0x1a9BAcD;
+ ///
+ /// // Good
+ /// let y = 0x1A9BACD;
+ /// ```
+ pub MIXED_CASE_HEX_LITERALS,
+ style,
+ "hex literals whose letter digits are not consistently upper- or lowercased"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Warns if literal suffixes are not separated by an
+ /// underscore.
+ ///
+ /// **Why is this bad?** It is much less readable.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// // Bad
+ /// let y = 123832i32;
+ ///
+ /// // Good
+ /// let y = 123832_i32;
+ /// ```
+ pub UNSEPARATED_LITERAL_SUFFIX,
+ pedantic,
+ "literals whose suffix is not separated by an underscore"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Warns if an integral constant literal starts with `0`.
+ ///
+ /// **Why is this bad?** In some languages (including the infamous C language
+ /// and most of its
+ /// family), this marks an octal constant. In Rust however, this is a decimal
+ /// constant. This could
+ /// be confusing for both the writer and a reader of the constant.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ ///
+ /// In Rust:
+ /// ```rust
+ /// fn main() {
+ /// let a = 0123;
+ /// println!("{}", a);
+ /// }
+ /// ```
+ ///
+ /// prints `123`, while in C:
+ ///
+ /// ```c
+ /// #include <stdio.h>
+ ///
+ /// int main() {
+ /// int a = 0123;
+ /// printf("%d\n", a);
+ /// }
+ /// ```
+ ///
+ /// prints `83` (as `83 == 0o123` while `123 == 0o173`).
+ pub ZERO_PREFIXED_LITERAL,
+ complexity,
+ "integer literals starting with `0`"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Warns if a generic shadows a built-in type.
+ ///
+ /// **Why is this bad?** This gives surprising type errors.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ ///
+ /// ```ignore
+ /// impl<u32> Foo<u32> {
+ /// fn impl_func(&self) -> u32 {
+ /// 42
+ /// }
+ /// }
+ /// ```
+ pub BUILTIN_TYPE_SHADOW,
+ style,
+ "shadowing a builtin type"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Checks for patterns in the form `name @ _`.
+ ///
+ /// **Why is this bad?** It's almost always more readable to just use direct
+ /// bindings.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// # let v = Some("abc");
+ ///
+ /// // Bad
+ /// match v {
+ /// Some(x) => (),
+ /// y @ _ => (),
+ /// }
+ ///
+ /// // Good
+ /// match v {
+ /// Some(x) => (),
+ /// y => (),
+ /// }
+ /// ```
+ pub REDUNDANT_PATTERN,
+ style,
+ "using `name @ _` in a pattern"
+}
+
+declare_clippy_lint! {
+ /// **What it does:** Checks for tuple patterns with a wildcard
+ /// pattern (`_`) that is next to a rest pattern (`..`).
+ ///
+ /// _NOTE_: While `_, ..` means there is at least one element left, `..`
+ /// means there are 0 or more elements left. This can make a difference
+ /// when refactoring, but shouldn't result in errors in the refactored code,
+ /// since the wildcard pattern isn't used anyway.
+ /// **Why is this bad?** The wildcard pattern is unneeded as the rest pattern
+ /// can match that element as well.
+ ///
+ /// **Known problems:** None.
+ ///
+ /// **Example:**
+ /// ```rust
+ /// # struct TupleStruct(u32, u32, u32);
+ /// # let t = TupleStruct(1, 2, 3);
+ /// // Bad
+ /// match t {
+ /// TupleStruct(0, .., _) => (),
+ /// _ => (),
+ /// }
+ ///
+ /// // Good
+ /// match t {
+ /// TupleStruct(0, ..) => (),
+ /// _ => (),
+ /// }
+ /// ```
+ pub UNNEEDED_WILDCARD_PATTERN,
+ complexity,
+ "tuple patterns with a wildcard pattern (`_`) is next to a rest pattern (`..`)"
+}
+
+declare_lint_pass!(MiscEarlyLints => [
+ UNNEEDED_FIELD_PATTERN,
+ DUPLICATE_UNDERSCORE_ARGUMENT,
+ DOUBLE_NEG,
+ MIXED_CASE_HEX_LITERALS,
+ UNSEPARATED_LITERAL_SUFFIX,
+ ZERO_PREFIXED_LITERAL,
+ BUILTIN_TYPE_SHADOW,
+ REDUNDANT_PATTERN,
+ UNNEEDED_WILDCARD_PATTERN,
+]);
+
+impl EarlyLintPass for MiscEarlyLints {
+ fn check_generics(&mut self, cx: &EarlyContext<'_>, gen: &Generics) {
+ for param in &gen.params {
+ builtin_type_shadow::check(cx, param);
+ }
+ }
+
+ fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &Pat) {
+ unneeded_field_pattern::check(cx, pat);
+ redundant_pattern::check(cx, pat);
+ unneeded_wildcard_pattern::check(cx, pat);
+ }
+
+ fn check_fn(&mut self, cx: &EarlyContext<'_>, fn_kind: FnKind<'_>, _: Span, _: NodeId) {
+ let mut registered_names: FxHashMap<String, Span> = FxHashMap::default();
+
+ for arg in &fn_kind.decl().inputs {
+ if let PatKind::Ident(_, ident, None) = arg.pat.kind {
+ let arg_name = ident.to_string();
+
+ if let Some(arg_name) = arg_name.strip_prefix('_') {
+ if let Some(correspondence) = registered_names.get(arg_name) {
+ span_lint(
+ cx,
+ DUPLICATE_UNDERSCORE_ARGUMENT,
+ *correspondence,
+ &format!(
+ "`{}` already exists, having another argument having almost the same \
+ name makes code comprehension and documentation more difficult",
+ arg_name
+ ),
+ );
+ }
+ } else {
+ registered_names.insert(arg_name, arg.pat.span);
+ }
+ }
+ }
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &Expr) {
+ if in_external_macro(cx.sess(), expr.span) {
+ return;
+ }
+ double_neg::check(cx, expr)
+ }
+}
+
+impl MiscEarlyLints {
+ fn check_lit(cx: &EarlyContext<'_>, lit: &Lit) {
+ // We test if first character in snippet is a number, because the snippet could be an expansion
+ // from a built-in macro like `line!()` or a proc-macro like `#[wasm_bindgen]`.
+ // Note that this check also covers special case that `line!()` is eagerly expanded by compiler.
+ // See <https://github.com/rust-lang/rust-clippy/issues/4507> for a regression.
+ // FIXME: Find a better way to detect those cases.
+ let lit_snip = match snippet_opt(cx, lit.span) {
+ Some(snip) if snip.chars().next().map_or(false, |c| c.is_digit(10)) => snip,
+ _ => return,
+ };
+
+ if let LitKind::Int(value, lit_int_type) = lit.kind {
+ let suffix = match lit_int_type {
+ LitIntType::Signed(ty) => ty.name_str(),
+ LitIntType::Unsigned(ty) => ty.name_str(),
+ LitIntType::Unsuffixed => "",
+ };
+ unseparated_literal_suffix::check(cx, lit, &lit_snip, suffix, "integer");
+ if lit_snip.starts_with("0x") {
+ mixed_case_hex_literals::check(cx, lit, suffix, &lit_snip)
+ } else if lit_snip.starts_with("0b") || lit_snip.starts_with("0o") {
+ /* nothing to do */
+ } else if value != 0 && lit_snip.starts_with('0') {
+ zero_prefixed_literal::check(cx, lit, &lit_snip)
+ }
+ } else if let LitKind::Float(_, LitFloatType::Suffixed(float_ty)) = lit.kind {
+ let suffix = float_ty.name_str();
+ unseparated_literal_suffix::check(cx, lit, &lit_snip, suffix, "float")
+ }
+ }
+}
--- /dev/null
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use rustc_ast::ast::{BindingMode, Mutability, Pat, PatKind};
+use rustc_errors::Applicability;
+use rustc_lint::EarlyContext;
+
+use super::REDUNDANT_PATTERN;
+
+pub(super) fn check(cx: &EarlyContext<'_>, pat: &Pat) {
+ if let PatKind::Ident(left, ident, Some(ref right)) = pat.kind {
+ let left_binding = match left {
+ BindingMode::ByRef(Mutability::Mut) => "ref mut ",
+ BindingMode::ByRef(Mutability::Not) => "ref ",
+ BindingMode::ByValue(..) => "",
+ };
+
+ if let PatKind::Wild = right.kind {
+ span_lint_and_sugg(
+ cx,
+ REDUNDANT_PATTERN,
+ pat.span,
+ &format!(
+ "the `{} @ _` pattern can be written as just `{}`",
+ ident.name, ident.name,
+ ),
+ "try",
+ format!("{}{}", left_binding, ident.name),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+}
--- /dev/null
+use clippy_utils::diagnostics::{span_lint, span_lint_and_help};
+use rustc_ast::ast::{Pat, PatKind};
+use rustc_lint::{EarlyContext, LintContext};
+
+use super::UNNEEDED_FIELD_PATTERN;
+
+pub(super) fn check(cx: &EarlyContext<'_>, pat: &Pat) {
+ if let PatKind::Struct(ref npat, ref pfields, _) = pat.kind {
+ let mut wilds = 0;
+ let type_name = npat
+ .segments
+ .last()
+ .expect("A path must have at least one segment")
+ .ident
+ .name;
+
+ for field in pfields {
+ if let PatKind::Wild = field.pat.kind {
+ wilds += 1;
+ }
+ }
+ if !pfields.is_empty() && wilds == pfields.len() {
+ span_lint_and_help(
+ cx,
+ UNNEEDED_FIELD_PATTERN,
+ pat.span,
+ "all the struct fields are matched to a wildcard pattern, consider using `..`",
+ None,
+ &format!("try with `{} {{ .. }}` instead", type_name),
+ );
+ return;
+ }
+ if wilds > 0 {
+ for field in pfields {
+ if let PatKind::Wild = field.pat.kind {
+ wilds -= 1;
+ if wilds > 0 {
+ span_lint(
+ cx,
+ UNNEEDED_FIELD_PATTERN,
+ field.span,
+ "you matched a field with a wildcard pattern, consider using `..` instead",
+ );
+ } else {
+ let mut normal = vec![];
+
+ for field in pfields {
+ match field.pat.kind {
+ PatKind::Wild => {},
+ _ => {
+ if let Ok(n) = cx.sess().source_map().span_to_snippet(field.span) {
+ normal.push(n);
+ }
+ },
+ }
+ }
+
+ span_lint_and_help(
+ cx,
+ UNNEEDED_FIELD_PATTERN,
+ field.span,
+ "you matched a field with a wildcard pattern, consider using `..` \
+ instead",
+ None,
+ &format!("try with `{} {{ {}, .. }}`", type_name, normal[..].join(", ")),
+ );
+ }
+ }
+ }
+ }
+ }
+}
--- /dev/null
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use rustc_ast::ast::{Pat, PatKind};
+use rustc_errors::Applicability;
+use rustc_lint::EarlyContext;
+use rustc_span::source_map::Span;
+
+use super::UNNEEDED_WILDCARD_PATTERN;
+
+pub(super) fn check(cx: &EarlyContext<'_>, pat: &Pat) {
+ if let PatKind::TupleStruct(_, ref patterns) | PatKind::Tuple(ref patterns) = pat.kind {
+ if let Some(rest_index) = patterns.iter().position(|pat| pat.is_rest()) {
+ if let Some((left_index, left_pat)) = patterns[..rest_index]
+ .iter()
+ .rev()
+ .take_while(|pat| matches!(pat.kind, PatKind::Wild))
+ .enumerate()
+ .last()
+ {
+ span_lint(cx, left_pat.span.until(patterns[rest_index].span), left_index == 0);
+ }
+
+ if let Some((right_index, right_pat)) = patterns[rest_index + 1..]
+ .iter()
+ .take_while(|pat| matches!(pat.kind, PatKind::Wild))
+ .enumerate()
+ .last()
+ {
+ span_lint(
+ cx,
+ patterns[rest_index].span.shrink_to_hi().to(right_pat.span),
+ right_index == 0,
+ );
+ }
+ }
+ }
+}
+
+fn span_lint(cx: &EarlyContext<'_>, span: Span, only_one: bool) {
+ span_lint_and_sugg(
+ cx,
+ UNNEEDED_WILDCARD_PATTERN,
+ span,
+ if only_one {
+ "this pattern is unneeded as the `..` pattern can match that element"
+ } else {
+ "these patterns are unneeded as the `..` pattern can match those elements"
+ },
+ if only_one { "remove it" } else { "remove them" },
+ "".to_string(),
+ Applicability::MachineApplicable,
+ );
+}
--- /dev/null
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use rustc_ast::ast::Lit;
+use rustc_errors::Applicability;
+use rustc_lint::EarlyContext;
+
+use super::UNSEPARATED_LITERAL_SUFFIX;
+
+pub(super) fn check(cx: &EarlyContext<'_>, lit: &Lit, lit_snip: &str, suffix: &str, sugg_type: &str) {
+ let maybe_last_sep_idx = if let Some(val) = lit_snip.len().checked_sub(suffix.len() + 1) {
+ val
+ } else {
+ return; // It's useless so shouldn't lint.
+ };
+ // Do not lint when literal is unsuffixed.
+ if !suffix.is_empty() && lit_snip.as_bytes()[maybe_last_sep_idx] != b'_' {
+ span_lint_and_sugg(
+ cx,
+ UNSEPARATED_LITERAL_SUFFIX,
+ lit.span,
+ &format!("{} type suffix should be separated by an underscore", sugg_type),
+ "add an underscore",
+ format!("{}_{}", &lit_snip[..=maybe_last_sep_idx], suffix),
+ Applicability::MachineApplicable,
+ );
+ }
+}
--- /dev/null
+use clippy_utils::diagnostics::span_lint_and_then;
+use rustc_ast::ast::Lit;
+use rustc_errors::Applicability;
+use rustc_lint::EarlyContext;
+
+use super::ZERO_PREFIXED_LITERAL;
+
+pub(super) fn check(cx: &EarlyContext<'_>, lit: &Lit, lit_snip: &str) {
+ span_lint_and_then(
+ cx,
+ ZERO_PREFIXED_LITERAL,
+ lit.span,
+ "this is a decimal constant",
+ |diag| {
+ diag.span_suggestion(
+ lit.span,
+ "if you mean to use a decimal constant, remove the `0` to avoid confusion",
+ lit_snip.trim_start_matches(|c| c == '_' || c == '0').to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ diag.span_suggestion(
+ lit.span,
+ "if you mean to use an octal constant, use `0o`",
+ format!("0o{}", lit_snip.trim_start_matches(|c| c == '_' || c == '0')),
+ Applicability::MaybeIncorrect,
+ );
+ },
+ );
+}
use rustc_errors::Applicability;
use rustc_hir::intravisit::{walk_block, walk_expr, walk_stmt, NestedVisitorMap, Visitor};
use rustc_hir::{BindingAnnotation, Block, Expr, ExprKind, HirId, PatKind, QPath, Stmt, StmtKind};
-use rustc_lint::{LateContext, LateLintPass, Lint};
+use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::hir::map::Map;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::symbol::sym;
vec_alloc: &VecAllocation<'_>,
) {
match initialization {
- InitializationType::Extend(e) | InitializationType::Resize(e) => Self::emit_lint(
- cx,
- e,
- vec_alloc,
- "slow zero-filling initialization",
- SLOW_VECTOR_INITIALIZATION,
- ),
+ InitializationType::Extend(e) | InitializationType::Resize(e) => {
+ Self::emit_lint(cx, e, vec_alloc, "slow zero-filling initialization")
+ },
};
}
- fn emit_lint<'tcx>(
- cx: &LateContext<'tcx>,
- slow_fill: &Expr<'_>,
- vec_alloc: &VecAllocation<'_>,
- msg: &str,
- lint: &'static Lint,
- ) {
+ fn emit_lint<'tcx>(cx: &LateContext<'tcx>, slow_fill: &Expr<'_>, vec_alloc: &VecAllocation<'_>, msg: &str) {
let len_expr = Sugg::hir(cx, vec_alloc.len_expr, "len");
- span_lint_and_then(cx, lint, slow_fill.span, msg, |diag| {
+ span_lint_and_then(cx, SLOW_VECTOR_INITIALIZATION, slow_fill.span, msg, |diag| {
diag.span_suggestion(
vec_alloc.allocation_expr.span,
"consider replace allocation with",
if_chain! {
if let Some(stmt) = block.stmts.last();
if let ast::StmtKind::Expr(ref expr) = stmt.kind;
- if is_unit_expr(expr) && !stmt.span.from_expansion();
+ if is_unit_expr(expr);
+ let ctxt = block.span.ctxt();
+ if stmt.span.ctxt() == ctxt && expr.span.ctxt() == ctxt;
then {
let sp = expr.span;
span_lint_and_sugg(
//! Read configurations files.
-#![deny(clippy::missing_docs_in_private_items)]
+#![allow(clippy::module_name_repetitions)]
-use rustc_ast::ast::{LitKind, MetaItemKind, NestedMetaItem};
-use rustc_span::source_map;
-use source_map::Span;
-use std::lazy::SyncLazy;
+use serde::de::{Deserializer, IgnoredAny, IntoDeserializer, MapAccess, Visitor};
+use serde::Deserialize;
+use std::error::Error;
use std::path::{Path, PathBuf};
-use std::sync::Mutex;
use std::{env, fmt, fs, io};
-/// Gets the configuration file from arguments.
-pub fn file_from_args(args: &[NestedMetaItem]) -> Result<Option<PathBuf>, (&'static str, Span)> {
- for arg in args.iter().filter_map(NestedMetaItem::meta_item) {
- if arg.has_name(sym!(conf_file)) {
- return match arg.kind {
- MetaItemKind::Word | MetaItemKind::List(_) => Err(("`conf_file` must be a named value", arg.span)),
- MetaItemKind::NameValue(ref value) => {
- if let LitKind::Str(ref file, _) = value.kind {
- Ok(Some(file.to_string().into()))
- } else {
- Err(("`conf_file` value must be a string", value.span))
- }
- },
- };
- }
- }
-
- Ok(None)
-}
-
-/// Error from reading a configuration file.
-#[derive(Debug)]
-pub enum Error {
- /// An I/O error.
- Io(io::Error),
- /// Not valid toml or doesn't fit the expected config format
- Toml(String),
+/// Conf with parse errors
+#[derive(Default)]
+pub struct TryConf {
+ pub conf: Conf,
+ pub errors: Vec<String>,
}
-impl fmt::Display for Error {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- Self::Io(err) => err.fmt(f),
- Self::Toml(err) => err.fmt(f),
+impl TryConf {
+ fn from_error(error: impl Error) -> Self {
+ Self {
+ conf: Conf::default(),
+ errors: vec![error.to_string()],
}
}
}
-impl From<io::Error> for Error {
- fn from(e: io::Error) -> Self {
- Self::Io(e)
- }
-}
+macro_rules! define_Conf {
+ ($(
+ #[$doc:meta]
+ $(#[conf_deprecated($dep:literal)])?
+ ($name:ident: $ty:ty = $default:expr),
+ )*) => {
+ /// Clippy lint configuration
+ pub struct Conf {
+ $(#[$doc] pub $name: $ty,)*
+ }
-/// Vec of errors that might be collected during config toml parsing
-static ERRORS: SyncLazy<Mutex<Vec<Error>>> = SyncLazy::new(|| Mutex::new(Vec::new()));
+ mod defaults {
+ $(pub fn $name() -> $ty { $default })*
+ }
-macro_rules! define_Conf {
- ($(#[$doc:meta] ($config:ident, $config_str:literal: $Ty:ty, $default:expr),)+) => {
- mod helpers {
- use serde::Deserialize;
- /// Type used to store lint configuration.
- #[derive(Deserialize)]
- #[serde(rename_all = "kebab-case", deny_unknown_fields)]
- pub struct Conf {
- $(
- #[$doc]
- #[serde(default = $config_str)]
- #[serde(with = $config_str)]
- pub $config: $Ty,
- )+
- #[allow(dead_code)]
- #[serde(default)]
- third_party: Option<::toml::Value>,
+ impl Default for Conf {
+ fn default() -> Self {
+ Self { $($name: defaults::$name(),)* }
}
+ }
- $(
- mod $config {
- use serde::Deserialize;
- pub fn deserialize<'de, D: serde::Deserializer<'de>>(deserializer: D) -> Result<$Ty, D::Error> {
- use super::super::{ERRORS, Error};
+ impl<'de> Deserialize<'de> for TryConf {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de> {
+ deserializer.deserialize_map(ConfVisitor)
+ }
+ }
- Ok(
- <$Ty>::deserialize(deserializer).unwrap_or_else(|e| {
- ERRORS
- .lock()
- .expect("no threading here")
- .push(Error::Toml(e.to_string()));
- super::$config()
- })
- )
- }
- }
+ #[derive(Deserialize)]
+ #[serde(field_identifier, rename_all = "kebab-case")]
+ #[allow(non_camel_case_types)]
+ enum Field { $($name,)* third_party, }
+
+ struct ConfVisitor;
+
+ impl<'de> Visitor<'de> for ConfVisitor {
+ type Value = TryConf;
- #[must_use]
- fn $config() -> $Ty {
- let x = $default;
- x
+ fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ formatter.write_str("Conf")
+ }
+
+ fn visit_map<V>(self, mut map: V) -> Result<Self::Value, V::Error> where V: MapAccess<'de> {
+ let mut errors = Vec::new();
+ $(let mut $name = None;)*
+ // could get `Field` here directly, but get `str` first for diagnostics
+ while let Some(name) = map.next_key::<&str>()? {
+ match Field::deserialize(name.into_deserializer())? {
+ $(Field::$name => {
+ $(errors.push(format!("deprecated field `{}`. {}", name, $dep));)?
+ match map.next_value() {
+ Err(e) => errors.push(e.to_string()),
+ Ok(value) => match $name {
+ Some(_) => errors.push(format!("duplicate field `{}`", name)),
+ None => $name = Some(value),
+ }
+ }
+ })*
+ // white-listed; ignore
+ Field::third_party => drop(map.next_value::<IgnoredAny>())
+ }
}
- )+
+ let conf = Conf { $($name: $name.unwrap_or_else(defaults::$name),)* };
+ Ok(TryConf { conf, errors })
+ }
}
};
}
-pub use self::helpers::Conf;
+// N.B., this macro is parsed by util/lintlib.py
define_Conf! {
- /// Lint: CLONED_INSTEAD_OF_COPIED, REDUNDANT_FIELD_NAMES, REDUNDANT_STATIC_LIFETIMES, FILTER_MAP_NEXT, CHECKED_CONVERSIONS, MANUAL_RANGE_CONTAINS, USE_SELF, MEM_REPLACE_WITH_DEFAULT, MANUAL_NON_EXHAUSTIVE, OPTION_AS_REF_DEREF, MAP_UNWRAP_OR, MATCH_LIKE_MATCHES_MACRO, MANUAL_STRIP, MISSING_CONST_FOR_FN, UNNESTED_OR_PATTERNS, FROM_OVER_INTO, PTR_AS_PTR. The minimum rust version that the project supports
- (msrv, "msrv": Option<String>, None),
+ /// Lint: CLONED_INSTEAD_OF_COPIED, REDUNDANT_FIELD_NAMES, REDUNDANT_STATIC_LIFETIMES, FILTER_MAP_NEXT, CHECKED_CONVERSIONS, MANUAL_RANGE_CONTAINS, USE_SELF, MEM_REPLACE_WITH_DEFAULT, MANUAL_NON_EXHAUSTIVE, OPTION_AS_REF_DEREF, MAP_UNWRAP_OR, MATCH_LIKE_MATCHES_MACRO, MANUAL_STRIP, MISSING_CONST_FOR_FN, UNNESTED_OR_PATTERNS, FROM_OVER_INTO, PTR_AS_PTR, IF_THEN_SOME_ELSE_NONE. The minimum rust version that the project supports
+ (msrv: Option<String> = None),
/// Lint: BLACKLISTED_NAME. The list of blacklisted names to lint about. NB: `bar` is not here since it has legitimate uses
- (blacklisted_names, "blacklisted_names": Vec<String>, ["foo", "baz", "quux"].iter().map(ToString::to_string).collect()),
+ (blacklisted_names: Vec<String> = ["foo", "baz", "quux"].iter().map(ToString::to_string).collect()),
/// Lint: COGNITIVE_COMPLEXITY. The maximum cognitive complexity a function can have
- (cognitive_complexity_threshold, "cognitive_complexity_threshold": u64, 25),
+ (cognitive_complexity_threshold: u64 = 25),
/// DEPRECATED LINT: CYCLOMATIC_COMPLEXITY. Use the Cognitive Complexity lint instead.
- (cyclomatic_complexity_threshold, "cyclomatic_complexity_threshold": Option<u64>, None),
+ #[conf_deprecated("Please use `cognitive-complexity-threshold` instead")]
+ (cyclomatic_complexity_threshold: Option<u64> = None),
/// Lint: DOC_MARKDOWN. The list of words this lint should not consider as identifiers needing ticks
- (doc_valid_idents, "doc_valid_idents": Vec<String>, [
+ (doc_valid_idents: Vec<String> = [
"KiB", "MiB", "GiB", "TiB", "PiB", "EiB",
"DirectX",
"ECMAScript",
"CamelCase",
].iter().map(ToString::to_string).collect()),
/// Lint: TOO_MANY_ARGUMENTS. The maximum number of argument a function or method can have
- (too_many_arguments_threshold, "too_many_arguments_threshold": u64, 7),
+ (too_many_arguments_threshold: u64 = 7),
/// Lint: TYPE_COMPLEXITY. The maximum complexity a type can have
- (type_complexity_threshold, "type_complexity_threshold": u64, 250),
+ (type_complexity_threshold: u64 = 250),
/// Lint: MANY_SINGLE_CHAR_NAMES. The maximum number of single char bindings a scope may have
- (single_char_binding_names_threshold, "single_char_binding_names_threshold": u64, 4),
+ (single_char_binding_names_threshold: u64 = 4),
/// Lint: BOXED_LOCAL, USELESS_VEC. The maximum size of objects (in bytes) that will be linted. Larger objects are ok on the heap
- (too_large_for_stack, "too_large_for_stack": u64, 200),
+ (too_large_for_stack: u64 = 200),
/// Lint: ENUM_VARIANT_NAMES. The minimum number of enum variants for the lints about variant names to trigger
- (enum_variant_name_threshold, "enum_variant_name_threshold": u64, 3),
+ (enum_variant_name_threshold: u64 = 3),
/// Lint: LARGE_ENUM_VARIANT. The maximum size of a enum's variant to avoid box suggestion
- (enum_variant_size_threshold, "enum_variant_size_threshold": u64, 200),
+ (enum_variant_size_threshold: u64 = 200),
/// Lint: VERBOSE_BIT_MASK. The maximum allowed size of a bit mask before suggesting to use 'trailing_zeros'
- (verbose_bit_mask_threshold, "verbose_bit_mask_threshold": u64, 1),
+ (verbose_bit_mask_threshold: u64 = 1),
/// Lint: DECIMAL_LITERAL_REPRESENTATION. The lower bound for linting decimal literals
- (literal_representation_threshold, "literal_representation_threshold": u64, 16384),
+ (literal_representation_threshold: u64 = 16384),
/// Lint: TRIVIALLY_COPY_PASS_BY_REF. The maximum size (in bytes) to consider a `Copy` type for passing by value instead of by reference.
- (trivial_copy_size_limit, "trivial_copy_size_limit": Option<u64>, None),
+ (trivial_copy_size_limit: Option<u64> = None),
/// Lint: LARGE_TYPE_PASS_BY_MOVE. The minimum size (in bytes) to consider a type for passing by reference instead of by value.
- (pass_by_value_size_limit, "pass_by_value_size_limit": u64, 256),
+ (pass_by_value_size_limit: u64 = 256),
/// Lint: TOO_MANY_LINES. The maximum number of lines a function or method can have
- (too_many_lines_threshold, "too_many_lines_threshold": u64, 100),
+ (too_many_lines_threshold: u64 = 100),
/// Lint: LARGE_STACK_ARRAYS, LARGE_CONST_ARRAYS. The maximum allowed size for arrays on the stack
- (array_size_threshold, "array_size_threshold": u64, 512_000),
+ (array_size_threshold: u64 = 512_000),
/// Lint: VEC_BOX. The size of the boxed type in bytes, where boxing in a `Vec` is allowed
- (vec_box_size_threshold, "vec_box_size_threshold": u64, 4096),
+ (vec_box_size_threshold: u64 = 4096),
/// Lint: TYPE_REPETITION_IN_BOUNDS. The maximum number of bounds a trait can have to be linted
- (max_trait_bounds, "max_trait_bounds": u64, 3),
+ (max_trait_bounds: u64 = 3),
/// Lint: STRUCT_EXCESSIVE_BOOLS. The maximum number of bools a struct can have
- (max_struct_bools, "max_struct_bools": u64, 3),
+ (max_struct_bools: u64 = 3),
/// Lint: FN_PARAMS_EXCESSIVE_BOOLS. The maximum number of bools function parameters can have
- (max_fn_params_bools, "max_fn_params_bools": u64, 3),
+ (max_fn_params_bools: u64 = 3),
/// Lint: WILDCARD_IMPORTS. Whether to allow certain wildcard imports (prelude, super in tests).
- (warn_on_all_wildcard_imports, "warn_on_all_wildcard_imports": bool, false),
+ (warn_on_all_wildcard_imports: bool = false),
/// Lint: DISALLOWED_METHOD. The list of disallowed methods, written as fully qualified paths.
- (disallowed_methods, "disallowed_methods": Vec<String>, Vec::<String>::new()),
+ (disallowed_methods: Vec<String> = Vec::new()),
/// Lint: UNREADABLE_LITERAL. Should the fraction of a decimal be linted to include separators.
- (unreadable_literal_lint_fractions, "unreadable_literal_lint_fractions": bool, true),
+ (unreadable_literal_lint_fractions: bool = true),
/// Lint: UPPER_CASE_ACRONYMS. Enables verbose mode. Triggers if there is more than one uppercase char next to each other
- (upper_case_acronyms_aggressive, "upper_case_acronyms_aggressive": bool, false),
+ (upper_case_acronyms_aggressive: bool = false),
/// Lint: _CARGO_COMMON_METADATA. For internal testing only, ignores the current `publish` settings in the Cargo manifest.
- (cargo_ignore_publish, "cargo_ignore_publish": bool, false),
-}
-
-impl Default for Conf {
- #[must_use]
- fn default() -> Self {
- toml::from_str("").expect("we never error on empty config files")
- }
+ (cargo_ignore_publish: bool = false),
}
/// Search for the configuration file.
}
}
-/// Produces a `Conf` filled with the default values and forwards the errors
-///
-/// Used internally for convenience
-fn default(errors: Vec<Error>) -> (Conf, Vec<Error>) {
- (Conf::default(), errors)
-}
-
/// Read the `toml` configuration file.
///
/// In case of error, the function tries to continue as much as possible.
-pub fn read(path: &Path) -> (Conf, Vec<Error>) {
+pub fn read(path: &Path) -> TryConf {
let content = match fs::read_to_string(path) {
+ Err(e) => return TryConf::from_error(e),
Ok(content) => content,
- Err(err) => return default(vec![err.into()]),
};
-
- assert!(ERRORS.lock().expect("no threading -> mutex always safe").is_empty());
- match toml::from_str(&content) {
- Ok(toml) => {
- let mut errors = ERRORS.lock().expect("no threading -> mutex always safe").split_off(0);
-
- let toml_ref: &Conf = &toml;
-
- let cyc_field: Option<u64> = toml_ref.cyclomatic_complexity_threshold;
-
- if cyc_field.is_some() {
- let cyc_err = "found deprecated field `cyclomatic-complexity-threshold`. Please use `cognitive-complexity-threshold` instead.".to_string();
- errors.push(Error::Toml(cyc_err));
- }
-
- (toml, errors)
- },
- Err(e) => {
- let mut errors = ERRORS.lock().expect("no threading -> mutex always safe").split_off(0);
- errors.push(Error::Toml(e.to_string()));
-
- default(errors)
- },
- }
+ toml::from_str(&content).unwrap_or_else(TryConf::from_error)
}
use std::borrow::{Borrow, Cow};
+#[cfg(feature = "metadata-collector-lint")]
+pub mod metadata_collector;
+
declare_clippy_lint! {
/// **What it does:** Checks for various things we like to keep tidy in clippy.
///
--- /dev/null
+//! This lint is used to collect metadata about clippy lints. This metadata is exported as a json
+//! file and then used to generate the [clippy lint list](https://rust-lang.github.io/rust-clippy/master/index.html)
+//!
+//! This module and therefore the entire lint is guarded by a feature flag called
+//! `metadata-collector-lint`
+//!
+//! The module transforms all lint names to ascii lowercase to ensure that we don't have mismatches
+//! during any comparison or mapping. (Please take care of this, it's not fun to spend time on such
+//! a simple mistake)
+
+// # NITs
+// - TODO xFrednet 2021-02-13: Collect deprecations and maybe renames
+
+use if_chain::if_chain;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::{
+ self as hir, def::DefKind, intravisit, intravisit::Visitor, ExprKind, Item, ItemKind, Mutability, QPath,
+};
+use rustc_lint::{CheckLintNameResult, LateContext, LateLintPass, LintContext, LintId};
+use rustc_middle::hir::map::Map;
+use rustc_session::{declare_tool_lint, impl_lint_pass};
+use rustc_span::{sym, Loc, Span, Symbol};
+use serde::{ser::SerializeStruct, Serialize, Serializer};
+use std::collections::BinaryHeap;
+use std::fs::{self, OpenOptions};
+use std::io::prelude::*;
+use std::path::Path;
+
+use crate::utils::internal_lints::is_lint_ref_type;
+use clippy_utils::{
+ diagnostics::span_lint, last_path_segment, match_function_call, match_path, paths, ty::match_type,
+ ty::walk_ptrs_ty_depth,
+};
+
+/// This is the output file of the lint collector.
+const OUTPUT_FILE: &str = "../util/gh-pages/metadata_collection.json";
+/// These lints are excluded from the export.
+const BLACK_LISTED_LINTS: [&str; 3] = ["lint_author", "deep_code_inspection", "internal_metadata_collector"];
+/// These groups will be ignored by the lint group matcher. This is useful for collections like
+/// `clippy::all`
+const IGNORED_LINT_GROUPS: [&str; 1] = ["clippy::all"];
+/// Lints within this group will be excluded from the collection
+const EXCLUDED_LINT_GROUPS: [&str; 1] = ["clippy::internal"];
+
+const LINT_EMISSION_FUNCTIONS: [&[&str]; 7] = [
+ &["clippy_utils", "diagnostics", "span_lint"],
+ &["clippy_utils", "diagnostics", "span_lint_and_help"],
+ &["clippy_utils", "diagnostics", "span_lint_and_note"],
+ &["clippy_utils", "diagnostics", "span_lint_hir"],
+ &["clippy_utils", "diagnostics", "span_lint_and_sugg"],
+ &["clippy_utils", "diagnostics", "span_lint_and_then"],
+ &["clippy_utils", "diagnostics", "span_lint_hir_and_then"],
+];
+const SUGGESTION_DIAGNOSTIC_BUILDER_METHODS: [(&str, bool); 9] = [
+ ("span_suggestion", false),
+ ("span_suggestion_short", false),
+ ("span_suggestion_verbose", false),
+ ("span_suggestion_hidden", false),
+ ("tool_only_span_suggestion", false),
+ ("multipart_suggestion", true),
+ ("multipart_suggestions", true),
+ ("tool_only_multipart_suggestion", true),
+ ("span_suggestions", true),
+];
+const SUGGESTION_FUNCTIONS: [&[&str]; 2] = [
+ &["clippy_utils", "diagnostics", "multispan_sugg"],
+ &["clippy_utils", "diagnostics", "multispan_sugg_with_applicability"],
+];
+
+/// The index of the applicability name of `paths::APPLICABILITY_VALUES`
+const APPLICABILITY_NAME_INDEX: usize = 2;
+
+declare_clippy_lint! {
+ /// **What it does:** Collects metadata about clippy lints for the website.
+ ///
+ /// This lint will be used to report problems of syntax parsing. You should hopefully never
+ /// see this but never say never I guess ^^
+ ///
+ /// **Why is this bad?** This is not a bad thing but definitely a hacky way to do it. See
+ /// issue [#4310](https://github.com/rust-lang/rust-clippy/issues/4310) for a discussion
+ /// about the implementation.
+ ///
+ /// **Known problems:** Hopefully none. It would be pretty uncool to have a problem here :)
+ ///
+ /// **Example output:**
+ /// ```json,ignore
+ /// {
+ /// "id": "internal_metadata_collector",
+ /// "id_span": {
+ /// "path": "clippy_lints/src/utils/internal_lints/metadata_collector.rs",
+ /// "line": 1
+ /// },
+ /// "group": "clippy::internal",
+ /// "docs": " **What it does:** Collects metadata about clippy lints for the website. [...] "
+ /// }
+ /// ```
+ pub INTERNAL_METADATA_COLLECTOR,
+ internal_warn,
+ "A busy bee collection metadata about lints"
+}
+
+impl_lint_pass!(MetadataCollector => [INTERNAL_METADATA_COLLECTOR]);
+
+#[allow(clippy::module_name_repetitions)]
+#[derive(Debug, Clone, Default)]
+pub struct MetadataCollector {
+ /// All collected lints
+ ///
+ /// We use a Heap here to have the lints added in alphabetic order in the export
+ lints: BinaryHeap<LintMetadata>,
+ applicability_info: FxHashMap<String, ApplicabilityInfo>,
+}
+
+impl Drop for MetadataCollector {
+ /// You might ask: How hacky is this?
+ /// My answer: YES
+ fn drop(&mut self) {
+ // The metadata collector gets dropped twice, this makes sure that we only write
+ // when the list is full
+ if self.lints.is_empty() {
+ return;
+ }
+
+ let mut applicability_info = std::mem::take(&mut self.applicability_info);
+
+ // Mapping the final data
+ let mut lints = std::mem::take(&mut self.lints).into_sorted_vec();
+ lints
+ .iter_mut()
+ .for_each(|x| x.applicability = applicability_info.remove(&x.id));
+
+ // Outputting
+ if Path::new(OUTPUT_FILE).exists() {
+ fs::remove_file(OUTPUT_FILE).unwrap();
+ }
+ let mut file = OpenOptions::new().write(true).create(true).open(OUTPUT_FILE).unwrap();
+ writeln!(file, "{}", serde_json::to_string_pretty(&lints).unwrap()).unwrap();
+ }
+}
+
+#[derive(Debug, Clone, Serialize, PartialEq, Eq, PartialOrd, Ord)]
+struct LintMetadata {
+ id: String,
+ id_span: SerializableSpan,
+ group: String,
+ docs: String,
+ /// This field is only used in the output and will only be
+ /// mapped shortly before the actual output.
+ applicability: Option<ApplicabilityInfo>,
+}
+
+impl LintMetadata {
+ fn new(id: String, id_span: SerializableSpan, group: String, docs: String) -> Self {
+ Self {
+ id,
+ id_span,
+ group,
+ docs,
+ applicability: None,
+ }
+ }
+}
+
+#[derive(Debug, Clone, Serialize, PartialEq, Eq, PartialOrd, Ord)]
+struct SerializableSpan {
+ path: String,
+ line: usize,
+}
+
+impl std::fmt::Display for SerializableSpan {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}:{}", self.path.rsplit('/').next().unwrap_or_default(), self.line)
+ }
+}
+
+impl SerializableSpan {
+ fn from_item(cx: &LateContext<'_>, item: &Item<'_>) -> Self {
+ Self::from_span(cx, item.ident.span)
+ }
+
+ fn from_span(cx: &LateContext<'_>, span: Span) -> Self {
+ let loc: Loc = cx.sess().source_map().lookup_char_pos(span.lo());
+
+ Self {
+ path: format!("{}", loc.file.name),
+ line: loc.line,
+ }
+ }
+}
+
+#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord)]
+struct ApplicabilityInfo {
+ /// Indicates if any of the lint emissions uses multiple spans. This is related to
+ /// [rustfix#141](https://github.com/rust-lang/rustfix/issues/141) as such suggestions can
+ /// currently not be applied automatically.
+ is_multi_part_suggestion: bool,
+ applicability: Option<usize>,
+}
+
+impl Serialize for ApplicabilityInfo {
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ let index = self.applicability.unwrap_or_default();
+
+ let mut s = serializer.serialize_struct("ApplicabilityInfo", 2)?;
+ s.serialize_field("is_multi_part_suggestion", &self.is_multi_part_suggestion)?;
+ s.serialize_field(
+ "applicability",
+ &paths::APPLICABILITY_VALUES[index][APPLICABILITY_NAME_INDEX],
+ )?;
+ s.end()
+ }
+}
+
+impl<'hir> LateLintPass<'hir> for MetadataCollector {
+ /// Collecting lint declarations like:
+ /// ```rust, ignore
+ /// declare_clippy_lint! {
+ /// /// **What it does:** Something IDK.
+ /// pub SOME_LINT,
+ /// internal,
+ /// "Who am I?"
+ /// }
+ /// ```
+ fn check_item(&mut self, cx: &LateContext<'hir>, item: &'hir Item<'_>) {
+ if_chain! {
+ // item validation
+ if let ItemKind::Static(ref ty, Mutability::Not, _) = item.kind;
+ if is_lint_ref_type(cx, ty);
+ // blacklist check
+ let lint_name = sym_to_string(item.ident.name).to_ascii_lowercase();
+ if !BLACK_LISTED_LINTS.contains(&lint_name.as_str());
+ // metadata extraction
+ if let Some(group) = get_lint_group_or_lint(cx, &lint_name, item);
+ if let Some(docs) = extract_attr_docs_or_lint(cx, item);
+ then {
+ self.lints.push(LintMetadata::new(
+ lint_name,
+ SerializableSpan::from_item(cx, item),
+ group,
+ docs,
+ ));
+ }
+ }
+ }
+
+ /// Collecting constant applicability from the actual lint emissions
+ ///
+ /// Example:
+ /// ```rust, ignore
+ /// span_lint_and_sugg(
+ /// cx,
+ /// SOME_LINT,
+ /// item.span,
+ /// "Le lint message",
+ /// "Here comes help:",
+ /// "#![allow(clippy::all)]",
+ /// Applicability::MachineApplicable, // <-- Extracts this constant value
+ /// );
+ /// ```
+ fn check_expr(&mut self, cx: &LateContext<'hir>, expr: &'hir hir::Expr<'_>) {
+ if let Some(args) = match_lint_emission(cx, expr) {
+ let mut emission_info = extract_emission_info(cx, args);
+ if emission_info.is_empty() {
+ // See:
+ // - src/misc.rs:734:9
+ // - src/methods/mod.rs:3545:13
+ // - src/methods/mod.rs:3496:13
+ // We are basically unable to resolve the lint name itself.
+ return;
+ }
+
+ for (lint_name, applicability, is_multi_part) in emission_info.drain(..) {
+ let app_info = self.applicability_info.entry(lint_name).or_default();
+ app_info.applicability = applicability;
+ app_info.is_multi_part_suggestion = is_multi_part;
+ }
+ }
+ }
+}
+
+// ==================================================================
+// Lint definition extraction
+// ==================================================================
+fn sym_to_string(sym: Symbol) -> String {
+ sym.as_str().to_string()
+}
+
+fn extract_attr_docs_or_lint(cx: &LateContext<'_>, item: &Item<'_>) -> Option<String> {
+ extract_attr_docs(cx, item).or_else(|| {
+ lint_collection_error_item(cx, item, "could not collect the lint documentation");
+ None
+ })
+}
+
+/// This function collects all documentation that has been added to an item using
+/// `#[doc = r""]` attributes. Several attributes are aggregated using line breaks
+///
+/// ```ignore
+/// #[doc = r"Hello world!"]
+/// #[doc = r"=^.^="]
+/// struct SomeItem {}
+/// ```
+///
+/// Would result in `Hello world!\n=^.^=\n`
+fn extract_attr_docs(cx: &LateContext<'_>, item: &Item<'_>) -> Option<String> {
+ cx.tcx
+ .hir()
+ .attrs(item.hir_id())
+ .iter()
+ .filter_map(|ref x| x.doc_str().map(|sym| sym.as_str().to_string()))
+ .reduce(|mut acc, sym| {
+ acc.push_str(&sym);
+ acc.push('\n');
+ acc
+ })
+}
+
+fn get_lint_group_or_lint(cx: &LateContext<'_>, lint_name: &str, item: &'hir Item<'_>) -> Option<String> {
+ let result = cx.lint_store.check_lint_name(lint_name, Some(sym::clippy));
+ if let CheckLintNameResult::Tool(Ok(lint_lst)) = result {
+ get_lint_group(cx, lint_lst[0])
+ .or_else(|| {
+ lint_collection_error_item(cx, item, "Unable to determine lint group");
+ None
+ })
+ .filter(|group| !EXCLUDED_LINT_GROUPS.contains(&group.as_str()))
+ } else {
+ lint_collection_error_item(cx, item, "Unable to find lint in lint_store");
+ None
+ }
+}
+
+fn get_lint_group(cx: &LateContext<'_>, lint_id: LintId) -> Option<String> {
+ for (group_name, lints, _) in &cx.lint_store.get_lint_groups() {
+ if IGNORED_LINT_GROUPS.contains(group_name) {
+ continue;
+ }
+
+ if lints.iter().any(|x| *x == lint_id) {
+ return Some((*group_name).to_string());
+ }
+ }
+
+ None
+}
+
+// ==================================================================
+// Lint emission
+// ==================================================================
+fn lint_collection_error_item(cx: &LateContext<'_>, item: &Item<'_>, message: &str) {
+ span_lint(
+ cx,
+ INTERNAL_METADATA_COLLECTOR,
+ item.ident.span,
+ &format!("metadata collection error for `{}`: {}", item.ident.name, message),
+ );
+}
+
+// ==================================================================
+// Applicability
+// ==================================================================
+/// This function checks if a given expression is equal to a simple lint emission function call.
+/// It will return the function arguments if the emission matched any function.
+fn match_lint_emission<'hir>(cx: &LateContext<'hir>, expr: &'hir hir::Expr<'_>) -> Option<&'hir [hir::Expr<'hir>]> {
+ LINT_EMISSION_FUNCTIONS
+ .iter()
+ .find_map(|emission_fn| match_function_call(cx, expr, emission_fn))
+}
+
+fn take_higher_applicability(a: Option<usize>, b: Option<usize>) -> Option<usize> {
+ a.map_or(b, |a| a.max(b.unwrap_or_default()).into())
+}
+
+fn extract_emission_info<'hir>(
+ cx: &LateContext<'hir>,
+ args: &'hir [hir::Expr<'hir>],
+) -> Vec<(String, Option<usize>, bool)> {
+ let mut lints = Vec::new();
+ let mut applicability = None;
+ let mut multi_part = false;
+
+ for arg in args {
+ let (arg_ty, _) = walk_ptrs_ty_depth(cx.typeck_results().expr_ty(&arg));
+
+ if match_type(cx, arg_ty, &paths::LINT) {
+ // If we found the lint arg, extract the lint name
+ let mut resolved_lints = resolve_lints(cx, arg);
+ lints.append(&mut resolved_lints);
+ } else if match_type(cx, arg_ty, &paths::APPLICABILITY) {
+ applicability = resolve_applicability(cx, arg);
+ } else if arg_ty.is_closure() {
+ multi_part |= check_is_multi_part(cx, arg);
+ // TODO xFrednet 2021-03-01: don't use or_else but rather a comparison
+ applicability = applicability.or_else(|| resolve_applicability(cx, arg));
+ }
+ }
+
+ lints
+ .drain(..)
+ .map(|lint_name| (lint_name, applicability, multi_part))
+ .collect()
+}
+
+/// Resolves the possible lints that this expression could reference
+fn resolve_lints(cx: &LateContext<'hir>, expr: &'hir hir::Expr<'hir>) -> Vec<String> {
+ let mut resolver = LintResolver::new(cx);
+ resolver.visit_expr(expr);
+ resolver.lints
+}
+
+/// This function tries to resolve the linked applicability to the given expression.
+fn resolve_applicability(cx: &LateContext<'hir>, expr: &'hir hir::Expr<'hir>) -> Option<usize> {
+ let mut resolver = ApplicabilityResolver::new(cx);
+ resolver.visit_expr(expr);
+ resolver.complete()
+}
+
+fn check_is_multi_part(cx: &LateContext<'hir>, closure_expr: &'hir hir::Expr<'hir>) -> bool {
+ if let ExprKind::Closure(_, _, body_id, _, _) = closure_expr.kind {
+ let mut scanner = IsMultiSpanScanner::new(cx);
+ intravisit::walk_body(&mut scanner, cx.tcx.hir().body(body_id));
+ return scanner.is_multi_part();
+ } else if let Some(local) = get_parent_local(cx, closure_expr) {
+ if let Some(local_init) = local.init {
+ return check_is_multi_part(cx, local_init);
+ }
+ }
+
+ false
+}
+
+struct LintResolver<'a, 'hir> {
+ cx: &'a LateContext<'hir>,
+ lints: Vec<String>,
+}
+
+impl<'a, 'hir> LintResolver<'a, 'hir> {
+ fn new(cx: &'a LateContext<'hir>) -> Self {
+ Self {
+ cx,
+ lints: Vec::<String>::default(),
+ }
+ }
+}
+
+impl<'a, 'hir> intravisit::Visitor<'hir> for LintResolver<'a, 'hir> {
+ type Map = Map<'hir>;
+
+ fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
+ intravisit::NestedVisitorMap::All(self.cx.tcx.hir())
+ }
+
+ fn visit_expr(&mut self, expr: &'hir hir::Expr<'hir>) {
+ if_chain! {
+ if let ExprKind::Path(qpath) = &expr.kind;
+ if let QPath::Resolved(_, path) = qpath;
+
+ let (expr_ty, _) = walk_ptrs_ty_depth(self.cx.typeck_results().expr_ty(&expr));
+ if match_type(self.cx, expr_ty, &paths::LINT);
+ then {
+ if let hir::def::Res::Def(DefKind::Static, _) = path.res {
+ let lint_name = last_path_segment(qpath).ident.name;
+ self.lints.push(sym_to_string(lint_name).to_ascii_lowercase());
+ } else if let Some(local) = get_parent_local(self.cx, expr) {
+ if let Some(local_init) = local.init {
+ intravisit::walk_expr(self, local_init);
+ }
+ }
+ }
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+/// This visitor finds the highest applicability value in the visited expressions
+struct ApplicabilityResolver<'a, 'hir> {
+ cx: &'a LateContext<'hir>,
+ /// This is the index of the highest `Applicability` for `paths::APPLICABILITY_VALUES`
+ applicability_index: Option<usize>,
+}
+
+impl<'a, 'hir> ApplicabilityResolver<'a, 'hir> {
+ fn new(cx: &'a LateContext<'hir>) -> Self {
+ Self {
+ cx,
+ applicability_index: None,
+ }
+ }
+
+ fn add_new_index(&mut self, new_index: usize) {
+ self.applicability_index = take_higher_applicability(self.applicability_index, Some(new_index));
+ }
+
+ fn complete(self) -> Option<usize> {
+ self.applicability_index
+ }
+}
+
+impl<'a, 'hir> intravisit::Visitor<'hir> for ApplicabilityResolver<'a, 'hir> {
+ type Map = Map<'hir>;
+
+ fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
+ intravisit::NestedVisitorMap::All(self.cx.tcx.hir())
+ }
+
+ fn visit_path(&mut self, path: &'hir hir::Path<'hir>, _id: hir::HirId) {
+ for (index, enum_value) in paths::APPLICABILITY_VALUES.iter().enumerate() {
+ if match_path(path, enum_value) {
+ self.add_new_index(index);
+ return;
+ }
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &'hir hir::Expr<'hir>) {
+ let (expr_ty, _) = walk_ptrs_ty_depth(self.cx.typeck_results().expr_ty(&expr));
+
+ if_chain! {
+ if match_type(self.cx, expr_ty, &paths::APPLICABILITY);
+ if let Some(local) = get_parent_local(self.cx, expr);
+ if let Some(local_init) = local.init;
+ then {
+ intravisit::walk_expr(self, local_init);
+ }
+ };
+
+ // TODO xFrednet 2021-03-01: support function arguments?
+
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+/// This returns the parent local node if the expression refers to a local binding
+fn get_parent_local(cx: &LateContext<'hir>, expr: &'hir hir::Expr<'hir>) -> Option<&'hir hir::Local<'hir>> {
+ if let ExprKind::Path(QPath::Resolved(_, path)) = expr.kind {
+ if let hir::def::Res::Local(local_hir) = path.res {
+ return get_parent_local_hir_id(cx, local_hir);
+ }
+ }
+
+ None
+}
+
+fn get_parent_local_hir_id(cx: &LateContext<'hir>, hir_id: hir::HirId) -> Option<&'hir hir::Local<'hir>> {
+ let map = cx.tcx.hir();
+
+ match map.find(map.get_parent_node(hir_id)) {
+ Some(hir::Node::Local(local)) => Some(local),
+ Some(hir::Node::Pat(pattern)) => get_parent_local_hir_id(cx, pattern.hir_id),
+ _ => None,
+ }
+}
+
+/// This visitor counts the spans of the suggestions emitted for a lint to determine if it produces multi-part suggestions
+struct IsMultiSpanScanner<'a, 'hir> {
+ cx: &'a LateContext<'hir>,
+ suggestion_count: usize,
+}
+
+impl<'a, 'hir> IsMultiSpanScanner<'a, 'hir> {
+ fn new(cx: &'a LateContext<'hir>) -> Self {
+ Self {
+ cx,
+ suggestion_count: 0,
+ }
+ }
+
+ /// Add a new single expression suggestion to the counter
+ fn add_single_span_suggestion(&mut self) {
+ self.suggestion_count += 1;
+ }
+
+ /// Signals that a suggestion with possible multiple spans was found
+ fn add_multi_part_suggestion(&mut self) {
+ self.suggestion_count += 2;
+ }
+
+ /// Checks if the suggestions include multiple spans
+ fn is_multi_part(&self) -> bool {
+ self.suggestion_count > 1
+ }
+}
+
+impl<'a, 'hir> intravisit::Visitor<'hir> for IsMultiSpanScanner<'a, 'hir> {
+ type Map = Map<'hir>;
+
+ fn nested_visit_map(&mut self) -> intravisit::NestedVisitorMap<Self::Map> {
+ intravisit::NestedVisitorMap::All(self.cx.tcx.hir())
+ }
+
+ fn visit_expr(&mut self, expr: &'hir hir::Expr<'hir>) {
+ // Early return if the lint is already multi span
+ if self.is_multi_part() {
+ return;
+ }
+
+ match &expr.kind {
+ ExprKind::Call(fn_expr, _args) => {
+ let found_function = SUGGESTION_FUNCTIONS
+ .iter()
+ .any(|func_path| match_function_call(self.cx, fn_expr, func_path).is_some());
+ if found_function {
+ // NOTE(review): these helpers take multiple spans, yet only a single span is counted here — confirm intent
+ self.add_single_span_suggestion()
+ }
+ },
+ ExprKind::MethodCall(path, _path_span, arg, _arg_span) => {
+ let (self_ty, _) = walk_ptrs_ty_depth(self.cx.typeck_results().expr_ty(&arg[0]));
+ if match_type(self.cx, self_ty, &paths::DIAGNOSTIC_BUILDER) {
+ let called_method = path.ident.name.as_str().to_string();
+ for (method_name, is_multi_part) in &SUGGESTION_DIAGNOSTIC_BUILDER_METHODS {
+ if *method_name == called_method {
+ if *is_multi_part {
+ self.add_multi_part_suggestion();
+ } else {
+ self.add_single_span_suggestion();
+ }
+ break;
+ }
+ }
+ }
+ },
+ _ => {},
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
pub mod author;
pub mod conf;
pub mod inspector;
-#[cfg(feature = "internal-lints")]
+#[cfg(any(feature = "internal-lints", feature = "metadata-collector-lint"))]
pub mod internal_lints;
[package]
name = "clippy_utils"
-version = "0.1.53"
+version = "0.1.54"
authors = ["The Rust Clippy Developers"]
edition = "2018"
publish = false
[features]
internal-lints = []
+metadata-collector-lint = []
[package.metadata.rust-analyzer]
# This crate uses #[feature(rustc_private)]
//! Clippy wrappers around rustc's diagnostic functions.
+//!
+//! These functions are used by the `INTERNAL_METADATA_COLLECTOR` lint to collect the corresponding
+//! lint applicability. Please make sure that you update the `LINT_EMISSION_FUNCTIONS` variable in
+//! `clippy_lints::utils::internal_lints::metadata_collector` when a new function is added
+//! or renamed.
+//!
+//! Thank you!
+//! ~The `INTERNAL_METADATA_COLLECTOR` lint
use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_hir::HirId;
self.hash_expr(e);
for arm in arms {
- // TODO: arm.pat?
+ self.hash_pat(arm.pat);
if let Some(ref e) = arm.guard {
self.hash_guard(e);
}
// self.maybe_typeck_results.unwrap().qpath_res(p, id).hash(&mut self.s);
}
+ pub fn hash_pat(&mut self, pat: &Pat<'_>) {
+ std::mem::discriminant(&pat.kind).hash(&mut self.s);
+ match pat.kind {
+ PatKind::Binding(ann, _, _, pat) => {
+ ann.hash_stable(&mut self.cx.tcx.get_stable_hashing_context(), &mut self.s);
+ if let Some(pat) = pat {
+ self.hash_pat(pat);
+ }
+ },
+ PatKind::Box(pat) => self.hash_pat(pat),
+ PatKind::Lit(expr) => self.hash_expr(expr),
+ PatKind::Or(pats) => {
+ for pat in pats {
+ self.hash_pat(pat);
+ }
+ },
+ PatKind::Path(ref qpath) => self.hash_qpath(qpath),
+ PatKind::Range(s, e, i) => {
+ if let Some(s) = s {
+ self.hash_expr(s);
+ }
+ if let Some(e) = e {
+ self.hash_expr(e);
+ }
+ i.hash_stable(&mut self.cx.tcx.get_stable_hashing_context(), &mut self.s);
+ },
+ PatKind::Ref(pat, m) => {
+ self.hash_pat(pat);
+ m.hash(&mut self.s);
+ },
+ PatKind::Slice(l, m, r) => {
+ for pat in l {
+ self.hash_pat(pat);
+ }
+ if let Some(pat) = m {
+ self.hash_pat(pat);
+ }
+ for pat in r {
+ self.hash_pat(pat);
+ }
+ },
+ PatKind::Struct(ref qpath, fields, e) => {
+ self.hash_qpath(qpath);
+ for f in fields {
+ self.hash_name(f.ident.name);
+ self.hash_pat(f.pat);
+ }
+ e.hash(&mut self.s)
+ },
+ PatKind::Tuple(pats, e) => {
+ for pat in pats {
+ self.hash_pat(pat);
+ }
+ e.hash(&mut self.s);
+ },
+ PatKind::TupleStruct(ref qpath, pats, e) => {
+ self.hash_qpath(qpath);
+ for pat in pats {
+ self.hash_pat(pat);
+ }
+ e.hash(&mut self.s);
+ },
+ PatKind::Wild => {},
+ }
+ }
+
pub fn hash_path(&mut self, path: &Path<'_>) {
match path.res {
// constant hash since equality is dependant on inter-expression context
match &b.kind {
StmtKind::Local(local) => {
+ self.hash_pat(local.pat);
if let Some(ref init) = local.init {
self.hash_expr(init);
}
}
}
- pub fn hash_lifetime(&mut self, lifetime: &Lifetime) {
+ pub fn hash_lifetime(&mut self, lifetime: Lifetime) {
std::mem::discriminant(&lifetime.name).hash(&mut self.s);
if let LifetimeName::Param(ref name) = lifetime.name {
std::mem::discriminant(name).hash(&mut self.s);
}
pub fn hash_ty(&mut self, ty: &Ty<'_>) {
- self.hash_tykind(&ty.kind);
- }
-
- pub fn hash_tykind(&mut self, ty: &TyKind<'_>) {
- std::mem::discriminant(ty).hash(&mut self.s);
- match ty {
+ std::mem::discriminant(&ty.kind).hash(&mut self.s);
+ match ty.kind {
TyKind::Slice(ty) => {
self.hash_ty(ty);
},
self.hash_ty(ty);
self.hash_body(anon_const.body);
},
- TyKind::Ptr(mut_ty) => {
+ TyKind::Ptr(ref mut_ty) => {
self.hash_ty(&mut_ty.ty);
mut_ty.mutbl.hash(&mut self.s);
},
- TyKind::Rptr(lifetime, mut_ty) => {
+ TyKind::Rptr(lifetime, ref mut_ty) => {
self.hash_lifetime(lifetime);
self.hash_ty(&mut_ty.ty);
mut_ty.mutbl.hash(&mut self.s);
bfn.decl.c_variadic.hash(&mut self.s);
},
TyKind::Tup(ty_list) => {
- for ty in *ty_list {
+ for ty in ty_list {
self.hash_ty(ty);
}
},
- TyKind::Path(qpath) => match qpath {
+ TyKind::Path(ref qpath) => match qpath {
QPath::Resolved(ref maybe_ty, ref path) => {
if let Some(ref ty) = maybe_ty {
self.hash_ty(ty);
fn hash_generic_args(&mut self, arg_list: &[GenericArg<'_>]) {
for arg in arg_list {
- match arg {
- GenericArg::Lifetime(ref l) => self.hash_lifetime(l),
- GenericArg::Type(ref ty) => self.hash_ty(&ty),
+ match *arg {
+ GenericArg::Lifetime(l) => self.hash_lifetime(l),
+ GenericArg::Type(ref ty) => self.hash_ty(ty),
GenericArg::Const(ref ca) => self.hash_body(ca.value.body),
}
}
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
-use rustc_hir::intravisit::{self, walk_expr, ErasedMap, NestedVisitorMap, Visitor};
+use rustc_hir::intravisit::{self, walk_expr, ErasedMap, FnKind, NestedVisitorMap, Visitor};
use rustc_hir::LangItem::{ResultErr, ResultOk};
use rustc_hir::{
def, Arm, BindingAnnotation, Block, Body, Constness, Destination, Expr, ExprKind, FnDecl, GenericArgs, HirId, Impl,
- ImplItem, ImplItemKind, Item, ItemKind, LangItem, Local, MatchSource, Node, Param, Pat, PatKind, Path, PathSegment,
- QPath, Stmt, StmtKind, TraitItem, TraitItemKind, TraitRef, TyKind,
+ ImplItem, ImplItemKind, IsAsync, Item, ItemKind, LangItem, Local, MatchSource, Node, Param, Pat, PatKind, Path,
+ PathSegment, QPath, Stmt, StmtKind, TraitItem, TraitItemKind, TraitRef, TyKind,
};
use rustc_lint::{LateContext, Level, Lint, LintContext};
use rustc_middle::hir::exports::Export;
/// Gets the parent expression, if any –- this is useful to constrain a lint.
pub fn get_parent_expr<'tcx>(cx: &LateContext<'tcx>, e: &Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
- match get_parent_node(cx.tcx, e.hir_id) {
+ get_parent_expr_for_hir(cx, e.hir_id)
+}
+
+/// This retrieves the parent for the given `HirId` if it's an expression. This is useful to
+/// constrain a lint
+pub fn get_parent_expr_for_hir<'tcx>(cx: &LateContext<'tcx>, hir_id: hir::HirId) -> Option<&'tcx Expr<'tcx>> {
+ match get_parent_node(cx.tcx, hir_id) {
Some(Node::Expr(parent)) => Some(parent),
_ => None,
}
(conds, blocks)
}
+/// Checks if the given function kind is an async function.
+pub fn is_async_fn(kind: FnKind<'_>) -> bool {
+ matches!(kind, FnKind::ItemFn(_, _, header, _) if header.asyncness == IsAsync::Async)
+}
+
+/// Peels away all the compiler-generated code surrounding the body of an async function.
+pub fn get_async_fn_body(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Option<&'tcx Expr<'tcx>> {
+ if let ExprKind::Call(
+ _,
+ &[Expr {
+ kind: ExprKind::Closure(_, _, body, _, _),
+ ..
+ }],
+ ) = body.value.kind
+ {
+ if let ExprKind::Block(
+ Block {
+ stmts: [],
+ expr:
+ Some(Expr {
+ kind: ExprKind::DropTemps(expr),
+ ..
+ }),
+ ..
+ },
+ _,
+ ) = tcx.hir().body(body).value.kind
+ {
+ return Some(expr);
+ }
+ };
+ None
+}
+
// Finds the `#[must_use]` attribute, if any
pub fn must_use_attr(attrs: &[Attribute]) -> Option<&Attribute> {
attrs.iter().find(|a| a.has_name(sym::must_use))
//! See <https://github.com/rust-lang/rust-clippy/issues/5393> for more information.
pub const ANY_TRAIT: [&str; 3] = ["core", "any", "Any"];
+#[cfg(feature = "metadata-collector-lint")]
+pub const APPLICABILITY: [&str; 2] = ["rustc_lint_defs", "Applicability"];
+#[cfg(feature = "metadata-collector-lint")]
+pub const APPLICABILITY_VALUES: [[&str; 3]; 4] = [
+ ["rustc_lint_defs", "Applicability", "Unspecified"],
+ ["rustc_lint_defs", "Applicability", "HasPlaceholders"],
+ ["rustc_lint_defs", "Applicability", "MaybeIncorrect"],
+ ["rustc_lint_defs", "Applicability", "MachineApplicable"],
+];
+#[cfg(feature = "metadata-collector-lint")]
+pub const DIAGNOSTIC_BUILDER: [&str; 3] = ["rustc_errors", "diagnostic_builder", "DiagnosticBuilder"];
pub const ARC_PTR_EQ: [&str; 4] = ["alloc", "sync", "Arc", "ptr_eq"];
pub const ASMUT_TRAIT: [&str; 3] = ["core", "convert", "AsMut"];
pub const ASREF_TRAIT: [&str; 3] = ["core", "convert", "AsRef"];
#[cfg(feature = "internal-lints")]
pub const LATE_CONTEXT: [&str; 2] = ["rustc_lint", "LateContext"];
pub const LINKED_LIST: [&str; 4] = ["alloc", "collections", "linked_list", "LinkedList"];
-#[cfg(feature = "internal-lints")]
+#[cfg(any(feature = "internal-lints", feature = "metadata-collector-lint"))]
pub const LINT: [&str; 2] = ["rustc_lint_defs", "Lint"];
pub const MEM_DISCRIMINANT: [&str; 3] = ["core", "mem", "discriminant"];
pub const MEM_FORGET: [&str; 3] = ["core", "mem", "forget"];
default: &'a str,
applicability: &mut Applicability,
) -> (Cow<'a, str>, bool) {
- let outer_span = hygiene::walk_chain(span, outer);
- let (span, is_macro_call) = if outer_span.ctxt() == outer {
- (outer_span, span.ctxt() != outer)
- } else {
- // The span is from a macro argument, and the outer context is the macro using the argument
- if *applicability != Applicability::Unspecified {
- *applicability = Applicability::MaybeIncorrect;
- }
- // TODO: get the argument span.
- (span, false)
- };
+ let (span, is_macro_call) = walk_span_to_context(span, outer).map_or_else(
+ || {
+ // The span is from a macro argument, and the outer context is the macro using the argument
+ if *applicability != Applicability::Unspecified {
+ *applicability = Applicability::MaybeIncorrect;
+ }
+ // TODO: get the argument span.
+ (span, false)
+ },
+ |outer_span| (outer_span, span.ctxt() != outer),
+ );
(
snippet_with_applicability(cx, span, default, applicability),
)
}
+/// Walks the span up to the target context, thereby returning the macro call site if the span is
+/// inside a macro expansion, or the original span if it is not. Note this will return `None` in the
+/// case of the span being in a macro expansion, but the target context is from expanding a macro
+/// argument.
+///
+/// Given the following
+///
+/// ```rust,ignore
+/// macro_rules! m { ($e:expr) => { f($e) }; }
+/// g(m!(0))
+/// ```
+///
+/// If called with a span of the call to `f` and a context of the call to `g` this will return a
+/// span containing `m!(0)`. However, if called with a span of the literal `0` this will give a span
+/// containing `0` as the context is the same as the outer context.
+///
+/// This will traverse through multiple macro calls. Given the following:
+///
+/// ```rust,ignore
+/// macro_rules! m { ($e:expr) => { n!($e, 0) }; }
+/// macro_rules! n { ($e:expr, $f:expr) => { f($e, $f) }; }
+/// g(m!(0))
+/// ```
+///
+/// If called with a span of the call to `f` and a context of the call to `g` this will return a
+/// span containing `m!(0)`.
+pub fn walk_span_to_context(span: Span, outer: SyntaxContext) -> Option<Span> {
+ let outer_span = hygiene::walk_chain(span, outer);
+ (outer_span.ctxt() == outer).then(|| outer_span)
+}
+
/// Removes block comments from the given `Vec` of lines.
///
/// # Examples
use crate::path_to_local_id;
use rustc_hir as hir;
-use rustc_hir::intravisit::{self, walk_expr, NestedVisitorMap, Visitor};
-use rustc_hir::{Arm, Body, Expr, HirId, Stmt};
+use rustc_hir::intravisit::{self, walk_expr, ErasedMap, NestedVisitorMap, Visitor};
+use rustc_hir::{Arm, Block, Body, Destination, Expr, ExprKind, HirId, Stmt};
use rustc_lint::LateContext;
use rustc_middle::hir::map::Map;
NestedVisitorMap::OnlyBodies(self.hir)
}
}
+
+pub trait Visitable<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V);
+}
+impl Visitable<'tcx> for &'tcx Expr<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V) {
+ v.visit_expr(self)
+ }
+}
+impl Visitable<'tcx> for &'tcx Block<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V) {
+ v.visit_block(self)
+ }
+}
+impl<'tcx> Visitable<'tcx> for &'tcx Stmt<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V) {
+ v.visit_stmt(self)
+ }
+}
+impl<'tcx> Visitable<'tcx> for &'tcx Body<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V) {
+ v.visit_body(self)
+ }
+}
+impl<'tcx> Visitable<'tcx> for &'tcx Arm<'tcx> {
+ fn visit<V: Visitor<'tcx>>(self, v: &mut V) {
+ v.visit_arm(self)
+ }
+}
+
+pub fn visit_break_exprs<'tcx>(
+ node: impl Visitable<'tcx>,
+ f: impl FnMut(&'tcx Expr<'tcx>, Destination, Option<&'tcx Expr<'tcx>>),
+) {
+ struct V<F>(F);
+ impl<'tcx, F: FnMut(&'tcx Expr<'tcx>, Destination, Option<&'tcx Expr<'tcx>>)> Visitor<'tcx> for V<F> {
+ type Map = ErasedMap<'tcx>;
+ fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> {
+ NestedVisitorMap::None
+ }
+
+ fn visit_expr(&mut self, e: &'tcx Expr<'_>) {
+ if let ExprKind::Break(dest, sub_expr) = e.kind {
+ self.0(e, dest, sub_expr)
+ }
+ walk_expr(self, e);
+ }
+ }
+
+ node.visit(&mut V(f));
+}
```rust
define_Conf! {
/// Lint: LIST, OF, LINTS, <THE_NEWLY_ADDED_LINT>. The minimum rust version that the project supports
- (msrv, "msrv": Option<String>, None),
+ (msrv: Option<String> = None),
...
}
```
like this:
```rust
/// Lint: LINT_NAME. <The configuration field doc comment>
- (configuration_ident, "configuration_value": Type, DefaultValue),
+ (configuration_ident: Type = DefaultValue),
```
The identifier is used both as the field name and as the configuration key. The doc comment will be
automatically added to the lint documentation.
[toolchain]
-channel = "nightly-2021-04-22"
+channel = "nightly-2021-05-06"
components = ["llvm-tools-preview", "rustc-dev", "rust-src"]
(previous)(sess, lint_store);
}
- let conf = clippy_lints::read_conf(&[], sess);
+ let conf = clippy_lints::read_conf(sess);
clippy_lints::register_plugins(lint_store, sess, &conf);
clippy_lints::register_pre_expansion_lints(lint_store);
clippy_lints::register_renamed(lint_store);
+//! This test is a part of quality control and makes clippy eat what it produces. Awesome lints and
+//! long error messages
+//!
+//! See [Eating your own dog food](https://en.wikipedia.org/wiki/Eating_your_own_dog_food) for context
+
// Dogfood cannot run on Windows
#![cfg(not(windows))]
#![feature(once_cell)]
return;
}
let root_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
+ let enable_metadata_collection = std::env::var("ENABLE_METADATA_COLLECTION").unwrap_or_else(|_| "0".to_string());
let mut command = Command::new(&*CLIPPY_PATH);
command
.current_dir(root_dir)
.env("CLIPPY_DOGFOOD", "1")
.env("CARGO_INCREMENTAL", "0")
+ .env("ENABLE_METADATA_COLLECTION", &enable_metadata_collection)
.arg("clippy")
.arg("--all-targets")
.arg("--all-features")
-error: error reading Clippy's configuration file `$DIR/clippy.toml`: invalid type: integer `42`, expected a sequence
+error: error reading Clippy's configuration file `$DIR/clippy.toml`: invalid type: integer `42`, expected a sequence for key `blacklisted-names`
error: aborting due to previous error
-error: error reading Clippy's configuration file `$DIR/clippy.toml`: found deprecated field `cyclomatic-complexity-threshold`. Please use `cognitive-complexity-threshold` instead.
+error: error reading Clippy's configuration file `$DIR/clippy.toml`: deprecated field `cyclomatic-complexity-threshold`. Please use `cognitive-complexity-threshold` instead
error: aborting due to previous error
+++ /dev/null
-#![warn(clippy::builtin_type_shadow)]
-#![allow(non_camel_case_types)]
-
-fn foo<u32>(a: u32) -> u32 {
- 42
- // ^ rustc's type error
-}
-
-fn main() {}
+++ /dev/null
-error: this generic shadows the built-in type `u32`
- --> $DIR/builtin-type-shadow.rs:4:8
- |
-LL | fn foo<u32>(a: u32) -> u32 {
- | ^^^
- |
- = note: `-D clippy::builtin-type-shadow` implied by `-D warnings`
-
-error[E0308]: mismatched types
- --> $DIR/builtin-type-shadow.rs:5:5
- |
-LL | fn foo<u32>(a: u32) -> u32 {
- | --- --- expected `u32` because of return type
- | |
- | this type parameter
-LL | 42
- | ^^ expected type parameter `u32`, found integer
- |
- = note: expected type parameter `u32`
- found type `{integer}`
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0308`.
--- /dev/null
+#![warn(clippy::builtin_type_shadow)]
+#![allow(non_camel_case_types)]
+
+fn foo<u32>(a: u32) -> u32 {
+ 42
+ // ^ rustc's type error
+}
+
+fn main() {}
--- /dev/null
+error: this generic shadows the built-in type `u32`
+ --> $DIR/builtin_type_shadow.rs:4:8
+ |
+LL | fn foo<u32>(a: u32) -> u32 {
+ | ^^^
+ |
+ = note: `-D clippy::builtin-type-shadow` implied by `-D warnings`
+
+error[E0308]: mismatched types
+ --> $DIR/builtin_type_shadow.rs:5:5
+ |
+LL | fn foo<u32>(a: u32) -> u32 {
+ | --- --- expected `u32` because of return type
+ | |
+ | this type parameter
+LL | 42
+ | ^^ expected type parameter `u32`, found integer
+ |
+ = note: expected type parameter `u32`
+ found type `{integer}`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0308`.
}
}
+enum Sign {
+ Negative,
+ Positive,
+ Zero,
+}
+
+impl Sign {
+ const fn sign_i8(n: i8) -> Self {
+ if n == 0 {
+ Sign::Zero
+ } else if n > 0 {
+ Sign::Positive
+ } else {
+ Sign::Negative
+ }
+ }
+}
+
+const fn sign_i8(n: i8) -> Sign {
+ if n == 0 {
+ Sign::Zero
+ } else if n > 0 {
+ Sign::Positive
+ } else {
+ Sign::Negative
+ }
+}
+
fn main() {}
fn main() {
let my_iterator = Countdown(5);
- let a: Vec<_> = my_iterator.take(1).collect();
- assert_eq!(a.len(), 1);
- let b: Vec<_> = my_iterator.collect();
- assert_eq!(b.len(), 5);
+ assert_eq!(my_iterator.take(1).count(), 1);
+ assert_eq!(my_iterator.count(), 5);
}
--- /dev/null
+#[derive(Default)]
+struct A<T> {
+ a: Vec<A<T>>,
+ b: T,
+}
+
+fn main() {
+ if let Ok(_) = Ok::<_, ()>(A::<String>::default()) {}
+}
--- /dev/null
+error: redundant pattern matching, consider using `is_ok()`
+ --> $DIR/ice-7169.rs:8:12
+ |
+LL | if let Ok(_) = Ok::<_, ()>(A::<String>::default()) {}
+ | -------^^^^^-------------------------------------- help: try this: `if Ok::<_, ()>(A::<String>::default()).is_ok()`
+ |
+ = note: `-D clippy::redundant-pattern-matching` implied by `-D warnings`
+
+error: aborting due to previous error
+
LL | #[warn(clippy::filter_map)]
| ^^^^^^^^^^^^^^^^^^
-error: lint `clippy::unstable_as_slice` has been removed: `Vec::as_slice` has been stabilized in 1.7
- --> $DIR/deprecated.rs:1:8
- |
-LL | #[warn(clippy::unstable_as_slice)]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error: aborting due to 15 previous errors
+error: aborting due to 14 previous errors
LL | #[warn(misaligned_transmute)]
| ^^^^^^^^^^^^^^^^^^^^
-error: lint `unstable_as_slice` has been removed: `Vec::as_slice` has been stabilized in 1.7
- --> $DIR/deprecated_old.rs:1:8
- |
-LL | #[warn(unstable_as_slice)]
- | ^^^^^^^^^^^^^^^^^
-
-error: aborting due to 4 previous errors
+error: aborting due to 3 previous errors
+// edition:2018
+
#[warn(clippy::eval_order_dependence)]
#[allow(
unused_assignments,
},
);
}
+
+async fn issue_6925() {
+ let _ = vec![async { true }.await, async { false }.await];
+}
-error: unsequenced read of a variable
- --> $DIR/eval_order_dependence.rs:15:9
+error: unsequenced read of `x`
+ --> $DIR/eval_order_dependence.rs:17:9
|
LL | } + x;
| ^
|
= note: `-D clippy::eval-order-dependence` implied by `-D warnings`
note: whether read occurs before this write depends on evaluation order
- --> $DIR/eval_order_dependence.rs:13:9
+ --> $DIR/eval_order_dependence.rs:15:9
|
LL | x = 1;
| ^^^^^
-error: unsequenced read of a variable
- --> $DIR/eval_order_dependence.rs:18:5
+error: unsequenced read of `x`
+ --> $DIR/eval_order_dependence.rs:20:5
|
LL | x += {
| ^
|
note: whether read occurs before this write depends on evaluation order
- --> $DIR/eval_order_dependence.rs:19:9
+ --> $DIR/eval_order_dependence.rs:21:9
|
LL | x = 20;
| ^^^^^^
-error: unsequenced read of a variable
- --> $DIR/eval_order_dependence.rs:31:12
+error: unsequenced read of `x`
+ --> $DIR/eval_order_dependence.rs:33:12
|
LL | a: x,
| ^
|
note: whether read occurs before this write depends on evaluation order
- --> $DIR/eval_order_dependence.rs:33:13
+ --> $DIR/eval_order_dependence.rs:35:13
|
LL | x = 6;
| ^^^^^
-error: unsequenced read of a variable
- --> $DIR/eval_order_dependence.rs:40:9
+error: unsequenced read of `x`
+ --> $DIR/eval_order_dependence.rs:42:9
|
LL | x += {
| ^
|
note: whether read occurs before this write depends on evaluation order
- --> $DIR/eval_order_dependence.rs:41:13
+ --> $DIR/eval_order_dependence.rs:43:13
|
LL | x = 20;
| ^^^^^^
+// edition:2018
// run-rustfix
#![warn(clippy::implicit_return)]
-#![allow(clippy::needless_return, unused)]
+#![allow(clippy::needless_return, clippy::needless_bool, unused, clippy::never_loop)]
fn test_end_of_fn() -> bool {
if true {
return true
}
-#[allow(clippy::needless_bool)]
fn test_if_block() -> bool {
if true { return true } else { return false }
}
}
}
-#[allow(clippy::needless_return)]
fn test_match_with_unreachable(x: bool) -> bool {
match x {
true => return false,
}
}
-#[allow(clippy::never_loop)]
fn test_loop() -> bool {
loop {
return true;
}
}
-#[allow(clippy::never_loop)]
fn test_loop_with_block() -> bool {
loop {
{
}
}
-#[allow(clippy::never_loop)]
fn test_loop_with_nests() -> bool {
loop {
if true {
return format!("test {}", "test")
}
-fn main() {
- let _ = test_end_of_fn();
- let _ = test_if_block();
- let _ = test_match(true);
- let _ = test_match_with_unreachable(true);
- let _ = test_loop();
- let _ = test_loop_with_block();
- let _ = test_loop_with_nests();
- let _ = test_loop_with_if_let();
- test_closure();
- let _ = test_return_macro();
+fn macro_branch_test() -> bool {
+ macro_rules! m {
+ ($t:expr, $f:expr) => {
+ if true { $t } else { $f }
+ };
+ }
+ return m!(true, false)
+}
+
+fn loop_test() -> bool {
+ 'outer: loop {
+ if true {
+ return true;
+ }
+
+ let _ = loop {
+ if false {
+ return false;
+ }
+ if true {
+ break true;
+ }
+ };
+ }
}
+
+fn loop_macro_test() -> bool {
+ macro_rules! m {
+ ($e:expr) => {
+ break $e
+ };
+ }
+ return loop {
+ m!(true);
+ }
+}
+
+fn divergent_test() -> bool {
+ fn diverge() -> ! {
+ panic!()
+ }
+ diverge()
+}
+
+// issue #6940
+async fn foo() -> bool {
+ return true
+}
+
+fn main() {}
+// edition:2018
// run-rustfix
#![warn(clippy::implicit_return)]
-#![allow(clippy::needless_return, unused)]
+#![allow(clippy::needless_return, clippy::needless_bool, unused, clippy::never_loop)]
fn test_end_of_fn() -> bool {
if true {
true
}
-#[allow(clippy::needless_bool)]
fn test_if_block() -> bool {
if true { true } else { false }
}
}
}
-#[allow(clippy::needless_return)]
fn test_match_with_unreachable(x: bool) -> bool {
match x {
true => return false,
}
}
-#[allow(clippy::never_loop)]
fn test_loop() -> bool {
loop {
break true;
}
}
-#[allow(clippy::never_loop)]
fn test_loop_with_block() -> bool {
loop {
{
}
}
-#[allow(clippy::never_loop)]
fn test_loop_with_nests() -> bool {
loop {
if true {
format!("test {}", "test")
}
-fn main() {
- let _ = test_end_of_fn();
- let _ = test_if_block();
- let _ = test_match(true);
- let _ = test_match_with_unreachable(true);
- let _ = test_loop();
- let _ = test_loop_with_block();
- let _ = test_loop_with_nests();
- let _ = test_loop_with_if_let();
- test_closure();
- let _ = test_return_macro();
+fn macro_branch_test() -> bool {
+ macro_rules! m {
+ ($t:expr, $f:expr) => {
+ if true { $t } else { $f }
+ };
+ }
+ m!(true, false)
+}
+
+fn loop_test() -> bool {
+ 'outer: loop {
+ if true {
+ break true;
+ }
+
+ let _ = loop {
+ if false {
+ break 'outer false;
+ }
+ if true {
+ break true;
+ }
+ };
+ }
+}
+
+fn loop_macro_test() -> bool {
+ macro_rules! m {
+ ($e:expr) => {
+ break $e
+ };
+ }
+ loop {
+ m!(true);
+ }
+}
+
+fn divergent_test() -> bool {
+ fn diverge() -> ! {
+ panic!()
+ }
+ diverge()
}
+
+// issue #6940
+async fn foo() -> bool {
+ true
+}
+
+fn main() {}
error: missing `return` statement
- --> $DIR/implicit_return.rs:12:5
+ --> $DIR/implicit_return.rs:13:5
|
LL | true
| ^^^^ help: add `return` as shown: `return true`
| ^^^^ help: add `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:39:9
+ --> $DIR/implicit_return.rs:37:9
|
LL | break true;
| ^^^^^^^^^^ help: change `break` to `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:47:13
+ --> $DIR/implicit_return.rs:44:13
|
LL | break true;
| ^^^^^^^^^^ help: change `break` to `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:56:13
+ --> $DIR/implicit_return.rs:52:13
|
LL | break true;
| ^^^^^^^^^^ help: change `break` to `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:74:18
+ --> $DIR/implicit_return.rs:70:18
|
LL | let _ = || { true };
| ^^^^ help: add `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:75:16
+ --> $DIR/implicit_return.rs:71:16
|
LL | let _ = || true;
| ^^^^ help: add `return` as shown: `return true`
error: missing `return` statement
- --> $DIR/implicit_return.rs:83:5
+ --> $DIR/implicit_return.rs:79:5
|
LL | format!("test {}", "test")
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ help: add `return` as shown: `return format!("test {}", "test")`
-error: aborting due to 11 previous errors
+error: missing `return` statement
+ --> $DIR/implicit_return.rs:88:5
+ |
+LL | m!(true, false)
+ | ^^^^^^^^^^^^^^^ help: add `return` as shown: `return m!(true, false)`
+
+error: missing `return` statement
+ --> $DIR/implicit_return.rs:94:13
+ |
+LL | break true;
+ | ^^^^^^^^^^ help: change `break` to `return` as shown: `return true`
+
+error: missing `return` statement
+ --> $DIR/implicit_return.rs:99:17
+ |
+LL | break 'outer false;
+ | ^^^^^^^^^^^^^^^^^^ help: change `break` to `return` as shown: `return false`
+
+error: missing `return` statement
+ --> $DIR/implicit_return.rs:114:5
+ |
+LL | / loop {
+LL | | m!(true);
+LL | | }
+ | |_____^
+ |
+help: add `return` as shown
+ |
+LL | return loop {
+LL | m!(true);
+LL | }
+ |
+
+error: missing `return` statement
+ --> $DIR/implicit_return.rs:128:5
+ |
+LL | true
+ | ^^^^ help: add `return` as shown: `return true`
+
+error: aborting due to 16 previous errors
}
}
+fn immutable_condition_false_positive(mut n: u64) -> u32 {
+ let mut count = 0;
+ while {
+ n >>= 1;
+ n != 0
+ } {
+ count += 1;
+ }
+ count
+}
+
fn main() {
immutable_condition();
unused_var();
used_immutable();
internally_mutable();
+ immutable_condition_false_positive(5);
let mut c = Counter { count: 0 };
c.inc_n(5);
-use std::collections::{HashMap, VecDeque};
+use std::collections::{BinaryHeap, HashMap, LinkedList, VecDeque};
fn main() {
let sample = [1; 5];
.collect::<Vec<_>>();
}
}
+
+mod issue7110 {
+ // #7110 - lint for type annotation cases
+ use super::*;
+
+ fn lint_vec(string: &str) -> usize {
+ let buffer: Vec<&str> = string.split('/').collect();
+ buffer.len()
+ }
+ fn lint_vec_deque() -> usize {
+ let sample = [1; 5];
+ let indirect_len: VecDeque<_> = sample.iter().collect();
+ indirect_len.len()
+ }
+ fn lint_linked_list() -> usize {
+ let sample = [1; 5];
+ let indirect_len: LinkedList<_> = sample.iter().collect();
+ indirect_len.len()
+ }
+ fn lint_binary_heap() -> usize {
+ let sample = [1; 5];
+ let indirect_len: BinaryHeap<_> = sample.iter().collect();
+ indirect_len.len()
+ }
+ fn dont_lint(string: &str) -> usize {
+ let buffer: Vec<&str> = string.split('/').collect();
+ for buff in &buffer {
+ println!("{}", buff);
+ }
+ buffer.len()
+ }
+}
LL | sample.into_iter().any(|x| x == a);
|
-error: aborting due to 5 previous errors
+error: avoid using `collect()` when not needed
+ --> $DIR/needless_collect_indirect.rs:52:51
+ |
+LL | let buffer: Vec<&str> = string.split('/').collect();
+ | ^^^^^^^
+LL | buffer.len()
+ | ------------ the iterator could be used here instead
+ |
+help: take the original Iterator's count instead of collecting it and finding the length
+ |
+LL |
+LL | string.split('/').count()
+ |
+
+error: avoid using `collect()` when not needed
+ --> $DIR/needless_collect_indirect.rs:57:55
+ |
+LL | let indirect_len: VecDeque<_> = sample.iter().collect();
+ | ^^^^^^^
+LL | indirect_len.len()
+ | ------------------ the iterator could be used here instead
+ |
+help: take the original Iterator's count instead of collecting it and finding the length
+ |
+LL |
+LL | sample.iter().count()
+ |
+
+error: avoid using `collect()` when not needed
+ --> $DIR/needless_collect_indirect.rs:62:57
+ |
+LL | let indirect_len: LinkedList<_> = sample.iter().collect();
+ | ^^^^^^^
+LL | indirect_len.len()
+ | ------------------ the iterator could be used here instead
+ |
+help: take the original Iterator's count instead of collecting it and finding the length
+ |
+LL |
+LL | sample.iter().count()
+ |
+
+error: avoid using `collect()` when not needed
+ --> $DIR/needless_collect_indirect.rs:67:57
+ |
+LL | let indirect_len: BinaryHeap<_> = sample.iter().collect();
+ | ^^^^^^^
+LL | indirect_len.len()
+ | ------------------ the iterator could be used here instead
+ |
+help: take the original Iterator's count instead of collecting it and finding the length
+ |
+LL |
+LL | sample.iter().count()
+ |
+
+error: aborting due to 9 previous errors
-#![feature(const_fn)]
#![allow(dead_code, clippy::missing_safety_doc)]
#![warn(clippy::new_without_default)]
error: you should consider adding a `Default` implementation for `Foo`
- --> $DIR/new_without_default.rs:8:5
+ --> $DIR/new_without_default.rs:7:5
|
LL | / pub fn new() -> Foo {
LL | | Foo
|
error: you should consider adding a `Default` implementation for `Bar`
- --> $DIR/new_without_default.rs:16:5
+ --> $DIR/new_without_default.rs:15:5
|
LL | / pub fn new() -> Self {
LL | | Bar
|
error: you should consider adding a `Default` implementation for `LtKo<'c>`
- --> $DIR/new_without_default.rs:80:5
+ --> $DIR/new_without_default.rs:79:5
|
LL | / pub fn new() -> LtKo<'c> {
LL | | unimplemented!()
|
error: you should consider adding a `Default` implementation for `NewNotEqualToDerive`
- --> $DIR/new_without_default.rs:157:5
+ --> $DIR/new_without_default.rs:156:5
|
LL | / pub fn new() -> Self {
LL | | NewNotEqualToDerive { foo: 1 }
|
error: you should consider adding a `Default` implementation for `FooGenerics<T>`
- --> $DIR/new_without_default.rs:165:5
+ --> $DIR/new_without_default.rs:164:5
|
LL | / pub fn new() -> Self {
LL | | Self(Default::default())
|
error: you should consider adding a `Default` implementation for `BarGenerics<T>`
- --> $DIR/new_without_default.rs:172:5
+ --> $DIR/new_without_default.rs:171:5
|
LL | / pub fn new() -> Self {
LL | | Self(Default::default())
LL | #[warn(clippy::const_static_lifetime)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use the new name: `clippy::redundant_static_lifetimes`
-error: lint `clippy::cyclomatic_complexity` has been renamed to `clippy::cognitive_complexity`
- --> $DIR/rename.rs:10:9
- |
-LL | #![warn(clippy::cyclomatic_complexity)]
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use the new name: `clippy::cognitive_complexity`
-
-error: aborting due to 5 previous errors
+error: aborting due to 4 previous errors
x.rmatch_indices('x');
x.trim_start_matches('x');
x.trim_end_matches('x');
+ x.strip_prefix('x');
+ x.strip_suffix('x');
// Make sure we escape characters correctly.
x.split('\n');
x.split('\'');
x.rmatch_indices("x");
x.trim_start_matches("x");
x.trim_end_matches("x");
+ x.strip_prefix("x");
+ x.strip_suffix("x");
// Make sure we escape characters correctly.
x.split("\n");
x.split("'");
| ^^^ help: try using a `char` instead: `'x'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:37:13
+ --> $DIR/single_char_pattern.rs:36:20
+ |
+LL | x.strip_prefix("x");
+ | ^^^ help: try using a `char` instead: `'x'`
+
+error: single-character string constant used as pattern
+ --> $DIR/single_char_pattern.rs:37:20
+ |
+LL | x.strip_suffix("x");
+ | ^^^ help: try using a `char` instead: `'x'`
+
+error: single-character string constant used as pattern
+ --> $DIR/single_char_pattern.rs:39:13
|
LL | x.split("/n");
| ^^^^ help: try using a `char` instead: `'/n'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:38:13
+ --> $DIR/single_char_pattern.rs:40:13
|
LL | x.split("'");
| ^^^ help: try using a `char` instead: `'/''`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:39:13
+ --> $DIR/single_char_pattern.rs:41:13
|
LL | x.split("/'");
| ^^^^ help: try using a `char` instead: `'/''`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:44:31
+ --> $DIR/single_char_pattern.rs:46:31
|
LL | x.replace(";", ",").split(","); // issue #2978
| ^^^ help: try using a `char` instead: `','`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:45:19
+ --> $DIR/single_char_pattern.rs:47:19
|
LL | x.starts_with("/x03"); // issue #2996
| ^^^^^^ help: try using a `char` instead: `'/x03'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:52:13
+ --> $DIR/single_char_pattern.rs:54:13
|
LL | x.split(r"a");
| ^^^^ help: try using a `char` instead: `'a'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:53:13
+ --> $DIR/single_char_pattern.rs:55:13
|
LL | x.split(r#"a"#);
| ^^^^^^ help: try using a `char` instead: `'a'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:54:13
+ --> $DIR/single_char_pattern.rs:56:13
|
LL | x.split(r###"a"###);
| ^^^^^^^^^^ help: try using a `char` instead: `'a'`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:55:13
+ --> $DIR/single_char_pattern.rs:57:13
|
LL | x.split(r###"'"###);
| ^^^^^^^^^^ help: try using a `char` instead: `'/''`
error: single-character string constant used as pattern
- --> $DIR/single_char_pattern.rs:56:13
+ --> $DIR/single_char_pattern.rs:58:13
|
LL | x.split(r###"#"###);
| ^^^^^^^^^^ help: try using a `char` instead: `'#'`
-error: aborting due to 30 previous errors
+error: aborting due to 32 previous errors
LL | #[warn(clippy::const_static_lifetim)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: did you mean: `clippy::redundant_static_lifetimes`
-error: unknown lint: `clippy::All`
- --> $DIR/unknown_clippy_lints.rs:5:10
- |
-LL | #![allow(clippy::All)]
- | ^^^^^^^^^^^ help: did you mean: `clippy::all`
-
-error: aborting due to 9 previous errors
+error: aborting due to 8 previous errors
let _ = (0..4).filter_map(i32::checked_abs);
}
+
+fn filter_map_none_changes_item_type() -> impl Iterator<Item = bool> {
+ "".chars().filter_map(|_| None)
+}
#[rustfmt::skip]
fn test3(){}
+
+fn macro_expr() {
+ macro_rules! e {
+ () => (());
+ }
+ e!()
+}
#[rustfmt::skip]
fn test3()-> (){}
+
+fn macro_expr() {
+ macro_rules! e {
+ () => (());
+ }
+ e!()
+}
group_re = re.compile(r'''\s*([a-z_][a-z_0-9]+)''')
conf_re = re.compile(r'''define_Conf! {\n([^}]*)\n}''', re.MULTILINE)
confvar_re = re.compile(
- r'''/// Lint: ([\w,\s]+)\. (.*)\n\s*\([^,]+,\s+"([^"]+)":\s+([^,]+),\s+([^\.\)]+).*\),''', re.MULTILINE)
+ r'''/// Lint: ([\w,\s]+)\. (.*)\n\s*\(([^:]+):\s*([^\s=]+)\s*=\s*([^\.\)]+).*\),''', re.MULTILINE)
comment_re = re.compile(r'''\s*/// ?(.*)''')
lint_levels = {
}
}
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum PanicStrategy {
+ Unwind,
+ Abort,
+}
+
/// Configuration for compiletest
#[derive(Debug, Clone)]
pub struct Config {
/// Force the pass mode of a check/build/run-pass test to this mode.
pub force_pass_mode: Option<PassMode>,
+ /// Explicitly enable or disable running.
+ pub run: Option<bool>,
+
/// Write out a parseable log of tests that were run
pub logfile: Option<PathBuf>,
/// Flags to pass to the compiler when building for the target
pub target_rustcflags: Option<String>,
+ /// What panic strategy the target is built with. Unwind supports Abort, but
+ /// not vice versa.
+ pub target_panic: PanicStrategy,
+
/// Target system to be tested
pub target: String,
pub npm: Option<String>,
}
+impl Config {
+ pub fn run_enabled(&self) -> bool {
+ self.run.unwrap_or_else(|| {
+ // Auto-detect whether to run based on the platform.
+ !self.target.ends_with("-fuchsia")
+ })
+ }
+}
+
#[derive(Debug, Clone)]
pub struct TestPaths {
pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
use tracing::*;
-use crate::common::{CompareMode, Config, Debugger, FailMode, Mode, PassMode};
+use crate::common::{CompareMode, Config, Debugger, FailMode, Mode, PanicStrategy, PassMode};
use crate::util;
use crate::{extract_cdb_version, extract_gdb_version};
props.ignore = true;
}
+ if !config.run_enabled() && config.parse_name_directive(ln, "needs-run-enabled") {
+ props.ignore = true;
+ }
+
if !rustc_has_sanitizer_support
&& config.parse_name_directive(ln, "needs-sanitizer-support")
{
props.ignore = true;
}
+ if config.target_panic == PanicStrategy::Abort
+ && config.parse_name_directive(ln, "needs-unwind")
+ {
+ props.ignore = true;
+ }
+
if config.target == "wasm32-unknown-unknown" && config.parse_check_run_results(ln) {
props.ignore = true;
}
extern crate test;
-use crate::common::{expected_output_path, output_base_dir, output_relative_path, UI_EXTENSIONS};
+use crate::common::{
+ expected_output_path, output_base_dir, output_relative_path, PanicStrategy, UI_EXTENSIONS,
+};
use crate::common::{CompareMode, Config, Debugger, Mode, PassMode, Pretty, TestPaths};
use crate::util::logv;
use getopts::Options;
"force {check,build,run}-pass tests to this mode.",
"check | build | run",
)
+ .optopt("", "run", "whether to execute run-* tests", "auto | always | never")
.optflag("", "ignored", "run tests marked as ignored")
.optflag("", "exact", "filters match exactly")
.optopt(
(eg. emulator, valgrind)",
"PROGRAM",
)
- .optopt("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS")
- .optopt("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS")
+ .optmulti("", "host-rustcflags", "flags to pass to rustc for host", "FLAGS")
+ .optmulti("", "target-rustcflags", "flags to pass to rustc for target", "FLAGS")
+ .optopt("", "target-panic", "what panic strategy the target supports", "unwind | abort")
.optflag("", "verbose", "run tests verbosely, showing all output")
.optflag(
"",
mode.parse::<PassMode>()
.unwrap_or_else(|_| panic!("unknown `--pass` option `{}` given", mode))
}),
+ run: matches.opt_str("run").and_then(|mode| match mode.as_str() {
+ "auto" => None,
+ "always" => Some(true),
+ "never" => Some(false),
+ _ => panic!("unknown `--run` option `{}` given", mode),
+ }),
logfile: matches.opt_str("logfile").map(|s| PathBuf::from(&s)),
runtool: matches.opt_str("runtool"),
- host_rustcflags: matches.opt_str("host-rustcflags"),
- target_rustcflags: matches.opt_str("target-rustcflags"),
+ host_rustcflags: Some(matches.opt_strs("host-rustcflags").join(" ")),
+ target_rustcflags: Some(matches.opt_strs("target-rustcflags").join(" ")),
+ target_panic: match matches.opt_str("target-panic").as_deref() {
+ Some("unwind") | None => PanicStrategy::Unwind,
+ Some("abort") => PanicStrategy::Abort,
+ _ => panic!("unknown `--target-panic` option `{}` given", mode),
+ },
target,
host: opt_str2(matches.opt_str("host")),
cdb,
pub fn compute_stamp_hash(config: &Config) -> String {
let mut hash = DefaultHasher::new();
config.stage_id.hash(&mut hash);
+ config.run.hash(&mut hash);
match config.debugger {
Some(Debugger::Cdb) => {
enum WillExecute {
Yes,
No,
+ Disabled,
}
/// Should `--emit metadata` be used?
}
fn should_run(&self, pm: Option<PassMode>) -> WillExecute {
- match self.config.mode {
- Ui if pm == Some(PassMode::Run) || self.props.fail_mode == Some(FailMode::Run) => {
- WillExecute::Yes
- }
- MirOpt if pm == Some(PassMode::Run) => WillExecute::Yes,
- Ui | MirOpt => WillExecute::No,
+ let test_should_run = match self.config.mode {
+ Ui if pm == Some(PassMode::Run) || self.props.fail_mode == Some(FailMode::Run) => true,
+ MirOpt if pm == Some(PassMode::Run) => true,
+ Ui | MirOpt => false,
mode => panic!("unimplemented for mode {:?}", mode),
- }
+ };
+ if test_should_run { self.run_if_enabled() } else { WillExecute::No }
+ }
+
+ fn run_if_enabled(&self) -> WillExecute {
+ if self.config.run_enabled() { WillExecute::Yes } else { WillExecute::Disabled }
}
fn should_run_successfully(&self, pm: Option<PassMode>) -> bool {
fn run_rfail_test(&self) {
let pm = self.pass_mode();
- let proc_res = self.compile_test(WillExecute::Yes, self.should_emit_metadata(pm));
+ let should_run = self.run_if_enabled();
+ let proc_res = self.compile_test(should_run, self.should_emit_metadata(pm));
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
+
let proc_res = self.exec_compiled_test();
// The value our Makefile configures valgrind to return on failure
fn run_rpass_test(&self) {
let emit_metadata = self.should_emit_metadata(self.pass_mode());
- let proc_res = self.compile_test(WillExecute::Yes, emit_metadata);
+ let should_run = self.run_if_enabled();
+ let proc_res = self.compile_test(should_run, emit_metadata);
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
+
// FIXME(#41968): Move this check to tidy?
let expected_errors = errors::load_errors(&self.testpaths.file, self.revision);
assert!(
return self.run_rpass_test();
}
- let mut proc_res = self.compile_test(WillExecute::Yes, EmitMetadata::No);
+ let should_run = self.run_if_enabled();
+ let mut proc_res = self.compile_test(should_run, EmitMetadata::No);
if !proc_res.status.success() {
self.fatal_proc_rec("compilation failed!", &proc_res);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
+
let mut new_config = self.config.clone();
new_config.runtool = new_config.valgrind_path.clone();
let new_cx = TestCx { config: &new_config, ..*self };
fn run_debuginfo_cdb_test_no_opt(&self) {
// compile test file (it should have 'compile-flags:-g' in the header)
- let compile_result = self.compile_test(WillExecute::Yes, EmitMetadata::No);
+ let should_run = self.run_if_enabled();
+ let compile_result = self.compile_test(should_run, EmitMetadata::No);
if !compile_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compile_result);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
let exe_file = self.make_exe_name();
let mut cmds = commands.join("\n");
// compile test file (it should have 'compile-flags:-g' in the header)
- let compiler_run_result = self.compile_test(WillExecute::Yes, EmitMetadata::No);
+ let should_run = self.run_if_enabled();
+ let compiler_run_result = self.compile_test(should_run, EmitMetadata::No);
if !compiler_run_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compiler_run_result);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
let exe_file = self.make_exe_name();
fn run_debuginfo_lldb_test_no_opt(&self) {
// compile test file (it should have 'compile-flags:-g' in the header)
- let compile_result = self.compile_test(WillExecute::Yes, EmitMetadata::No);
+ let should_run = self.run_if_enabled();
+ let compile_result = self.compile_test(should_run, EmitMetadata::No);
if !compile_result.status.success() {
self.fatal_proc_rec("compilation failed!", &compile_result);
}
+ if let WillExecute::Disabled = should_run {
+ return;
+ }
let exe_file = self.make_exe_name();
// Only use `make_exe_name` when the test ends up being executed.
let output_file = match will_execute {
WillExecute::Yes => TargetLocation::ThisFile(self.make_exe_name()),
- WillExecute::No => TargetLocation::ThisDirectory(self.output_base_dir()),
+ WillExecute::No | WillExecute::Disabled => {
+ TargetLocation::ThisDirectory(self.output_base_dir())
+ }
};
let allow_unused = match self.config.mode {
-Subproject commit 67c04afc251ad7d80ea22e2056c93349e7e9df58
+Subproject commit 38b5f236d2c62ff0b1017efd183b193f5db33123
-Subproject commit 359513ce678efba186972e4f280dbc7046cac15f
+Subproject commit e33f4e68496b296dedb100e297dc4451f169b2b3
-Subproject commit 617535393bb5ccc7adf0bac8a3b9a9c306454e79
+Subproject commit fd109fb587904cfecc1149e068814bfd38feb83c
// ```
// npm install browser-ui-test
// ```
-const path = require('path');
+const fs = require("fs");
+const path = require("path");
const {Options, runTest} = require('browser-ui-test');
function showHelp() {
console.log("rustdoc-js options:");
console.log(" --doc-folder [PATH] : location of the generated doc folder");
console.log(" --help : show this message then quit");
- console.log(" --test-file [PATH] : location of the JS test file");
+ console.log(" --tests-folder [PATH] : location of the .GOML tests folder");
}
function parseOptions(args) {
var opts = {
"doc_folder": "",
- "test_file": "",
+ "tests_folder": "",
};
var correspondances = {
"--doc-folder": "doc_folder",
- "--test-file": "test_file",
+ "--tests-folder": "tests_folder",
};
for (var i = 0; i < args.length; ++i) {
if (args[i] === "--doc-folder"
- || args[i] === "--test-file") {
+ || args[i] === "--tests-folder") {
i += 1;
if (i >= args.length) {
console.log("Missing argument after `" + args[i - 1] + "` option.");
return null;
}
}
- if (opts["test_file"].length < 1) {
- console.log("Missing `--test-file` option.");
+ if (opts["tests_folder"].length < 1) {
+ console.log("Missing `--tests-folder` option.");
} else if (opts["doc_folder"].length < 1) {
console.log("Missing `--doc-folder` option.");
} else {
return null;
}
-function checkFile(test_file, opts, loaded, index) {
- const test_name = path.basename(test_file, ".js");
-
- process.stdout.write('Checking "' + test_name + '" ... ');
- return runChecks(test_file, loaded, index);
-}
-
-function main(argv) {
- var opts = parseOptions(argv.slice(2));
+async function main(argv) {
+ let opts = parseOptions(argv.slice(2));
if (opts === null) {
process.exit(1);
}
try {
// This is more convenient that setting fields one by one.
options.parseArguments([
- '--no-screenshot',
+ "--no-screenshot",
"--variable", "DOC_PATH", opts["doc_folder"],
]);
} catch (error) {
process.exit(1);
}
- runTest(opts["test_file"], options).then(out => {
- const [output, nb_failures] = out;
- console.log(output);
- process.exit(nb_failures);
- }).catch(err => {
- console.error(err);
+ let failed = false;
+ let files = fs.readdirSync(opts["tests_folder"]).filter(file => path.extname(file) == ".goml");
+
+ files.sort();
+ for (var i = 0; i < files.length; ++i) {
+ const testPath = path.join(opts["tests_folder"], files[i]);
+ await runTest(testPath, options).then(out => {
+ const [output, nb_failures] = out;
+ console.log(output);
+ if (nb_failures > 0) {
+ failed = true;
+ }
+ }).catch(err => {
+ console.error(err);
+ failed = true;
+ });
+ }
+ if (failed) {
process.exit(1);
- });
+ }
}
main(process.argv);
("fortanix-sgx-abi", "MPL-2.0"), // libstd but only for `sgx` target
];
+const EXCEPTIONS_CRANELIFT: &[(&str, &str)] = &[
+ ("cranelift-bforest", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen-meta", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-codegen-shared", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-entity", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-frontend", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-jit", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-module", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-native", "Apache-2.0 WITH LLVM-exception"),
+ ("cranelift-object", "Apache-2.0 WITH LLVM-exception"),
+ ("libloading", "ISC"),
+ ("mach", "BSD-2-Clause"),
+ ("regalloc", "Apache-2.0 WITH LLVM-exception"),
+ ("target-lexicon", "Apache-2.0 WITH LLVM-exception"),
+];
+
/// These are the root crates that are part of the runtime. The licenses for
/// these and all their dependencies *must not* be in the exception list.
const RUNTIME_CRATES: &[&str] = &["std", "core", "alloc", "test", "panic_abort", "panic_unwind"];
/// Crates whose dependencies must be explicitly permitted.
-const RESTRICTED_DEPENDENCY_CRATES: &[&str] = &["rustc_middle", "rustc_codegen_llvm"];
+const RESTRICTED_DEPENDENCY_CRATES: &[&str] = &["rustc_driver", "rustc_codegen_llvm"];
/// Crates rustc is allowed to depend on. Avoid adding to the list if possible.
///
"cc",
"cfg-if",
"chalk-derive",
+ "chalk-engine",
"chalk-ir",
+ "chalk-solve",
+ "chrono",
"cmake",
"compiler_builtins",
"cpuid-bool",
"expect-test",
"fake-simd",
"filetime",
+ "fixedbitset",
"flate2",
"fortanix-sgx-abi",
"fuchsia-zircon",
"indexmap",
"instant",
"itertools",
+ "itoa",
"jobserver",
"kernel32-sys",
"lazy_static",
"libz-sys",
"lock_api",
"log",
+ "matchers",
"maybe-uninit",
"md-5",
"measureme",
"memoffset",
"miniz_oxide",
"num_cpus",
+ "num-integer",
+ "num-traits",
"object",
"once_cell",
"opaque-debug",
"parking_lot_core",
"pathdiff",
"perf-event-open-sys",
+ "petgraph",
"pin-project-lite",
"pkg-config",
"polonius-engine",
"rand_xorshift",
"redox_syscall",
"regex",
+ "regex-automata",
"regex-syntax",
"remove_dir_all",
+ "rls-data",
+ "rls-span",
"rustc-demangle",
"rustc-hash",
"rustc-rayon",
"rustc-rayon-core",
"rustc_version",
+ "ryu",
"scoped-tls",
"scopeguard",
"semver",
"semver-parser",
"serde",
"serde_derive",
+ "serde_json",
"sha-1",
"sha2",
"smallvec",
+ "sharded-slab",
"snap",
"stable_deref_trait",
"stacker",
"termcolor",
"termize",
"thread_local",
+ "time",
+ "tinyvec",
"tracing",
"tracing-attributes",
"tracing-core",
+ "tracing-log",
+ "tracing-serde",
+ "tracing-subscriber",
+ "tracing-tree",
"typenum",
"unicode-normalization",
"unicode-script",
"yansi-term",
];
+const PERMITTED_CRANELIFT_DEPENDENCIES: &[&str] = &[
+ "anyhow",
+ "ar",
+ "autocfg",
+ "bitflags",
+ "byteorder",
+ "cfg-if",
+ "cranelift-bforest",
+ "cranelift-codegen",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "crc32fast",
+ "errno",
+ "errno-dragonfly",
+ "gcc",
+ "gimli",
+ "hashbrown",
+ "indexmap",
+ "libc",
+ "libloading",
+ "log",
+ "mach",
+ "object",
+ "proc-macro2",
+ "quote",
+ "regalloc",
+ "region",
+ "rustc-hash",
+ "smallvec",
+ "syn",
+ "target-lexicon",
+ "thiserror",
+ "thiserror-impl",
+ "unicode-xid",
+ "winapi",
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+];
+
+const FORBIDDEN_TO_HAVE_DUPLICATES: &[&str] = &[
+ // These two crates take quite a long time to build, so don't allow two versions of them
+ // to accidentally sneak into our dependency graph, in order to ensure we keep our CI times
+ // under control.
+ "cargo",
+ "rustc-ap-rustc_ast",
+];
+
/// Dependency checks.
///
/// `root` is path to the directory with the root `Cargo.toml` (for the workspace). `cargo` is path
.manifest_path(root.join("Cargo.toml"))
.features(cargo_metadata::CargoOpt::AllFeatures);
let metadata = t!(cmd.exec());
- check_exceptions(&metadata, bad);
- check_dependencies(&metadata, bad);
- check_crate_duplicate(&metadata, bad);
+ let runtime_ids = compute_runtime_crates(&metadata);
+ check_exceptions(&metadata, EXCEPTIONS, runtime_ids, bad);
+ check_dependencies(&metadata, PERMITTED_DEPENDENCIES, RESTRICTED_DEPENDENCY_CRATES, bad);
+ check_crate_duplicate(&metadata, FORBIDDEN_TO_HAVE_DUPLICATES, bad);
+
+ // Check rustc_codegen_cranelift independently as it has its own workspace.
+ let mut cmd = cargo_metadata::MetadataCommand::new();
+ cmd.cargo_path(cargo)
+ .manifest_path(root.join("compiler/rustc_codegen_cranelift/Cargo.toml"))
+ .features(cargo_metadata::CargoOpt::AllFeatures);
+ let metadata = t!(cmd.exec());
+ let runtime_ids = HashSet::new();
+ check_exceptions(&metadata, EXCEPTIONS_CRANELIFT, runtime_ids, bad);
+ check_dependencies(
+ &metadata,
+ PERMITTED_CRANELIFT_DEPENDENCIES,
+ &["rustc_codegen_cranelift"],
+ bad,
+ );
+ check_crate_duplicate(&metadata, &[], bad);
}
/// Check that all licenses are in the valid list in `LICENSES`.
///
/// Packages listed in `EXCEPTIONS` are allowed for tools.
-fn check_exceptions(metadata: &Metadata, bad: &mut bool) {
+fn check_exceptions(
+ metadata: &Metadata,
+ exceptions: &[(&str, &str)],
+ runtime_ids: HashSet<&PackageId>,
+ bad: &mut bool,
+) {
// Validate the EXCEPTIONS list hasn't changed.
- for (name, license) in EXCEPTIONS {
+ for (name, license) in exceptions {
// Check that the package actually exists.
if !metadata.packages.iter().any(|p| p.name == *name) {
tidy_error!(
}
// Check that the license hasn't changed.
for pkg in metadata.packages.iter().filter(|p| p.name == *name) {
- if pkg.name == "fuchsia-cprng" {
- // This package doesn't declare a license expression. Manual
- // inspection of the license file is necessary, which appears
- // to be BSD-3-Clause.
- assert!(pkg.license.is_none());
- continue;
- }
match &pkg.license {
None => {
tidy_error!(
}
Some(pkg_license) => {
if pkg_license.as_str() != *license {
- if *name == "crossbeam-queue"
- && *license == "MIT/Apache-2.0 AND BSD-2-Clause"
- {
- // We have two versions of crossbeam-queue and both
- // are fine.
- continue;
- }
-
println!("dependency exception `{}` license has changed", name);
println!(" previously `{}` now `{}`", license, pkg_license);
println!(" update EXCEPTIONS for the new license");
}
}
- let exception_names: Vec<_> = EXCEPTIONS.iter().map(|(name, _license)| *name).collect();
- let runtime_ids = compute_runtime_crates(metadata);
+ let exception_names: Vec<_> = exceptions.iter().map(|(name, _license)| *name).collect();
// Check if any package does not have a valid license.
for pkg in &metadata.packages {
/// `true` if a check failed.
///
/// Specifically, this checks that the dependencies are on the `PERMITTED_DEPENDENCIES`.
-fn check_dependencies(metadata: &Metadata, bad: &mut bool) {
+fn check_dependencies(
+ metadata: &Metadata,
+ permitted_dependencies: &[&'static str],
+ restricted_dependency_crates: &[&'static str],
+ bad: &mut bool,
+) {
// Check that the PERMITTED_DEPENDENCIES does not have unused entries.
- for name in PERMITTED_DEPENDENCIES {
+ for name in permitted_dependencies {
if !metadata.packages.iter().any(|p| p.name == *name) {
tidy_error!(
bad,
}
}
// Get the list in a convenient form.
- let permitted_dependencies: HashSet<_> = PERMITTED_DEPENDENCIES.iter().cloned().collect();
+ let permitted_dependencies: HashSet<_> = permitted_dependencies.iter().cloned().collect();
// Check dependencies.
let mut visited = BTreeSet::new();
let mut unapproved = BTreeSet::new();
- for &krate in RESTRICTED_DEPENDENCY_CRATES.iter() {
+ for &krate in restricted_dependency_crates.iter() {
let pkg = pkg_from_name(metadata, krate);
let mut bad =
check_crate_dependencies(&permitted_dependencies, metadata, &mut visited, pkg);
}
/// Prevents multiple versions of some expensive crates.
-fn check_crate_duplicate(metadata: &Metadata, bad: &mut bool) {
- const FORBIDDEN_TO_HAVE_DUPLICATES: &[&str] = &[
- // These two crates take quite a long time to build, so don't allow two versions of them
- // to accidentally sneak into our dependency graph, in order to ensure we keep our CI times
- // under control.
- "cargo",
- "rustc-ap-rustc_ast",
- ];
-
- for &name in FORBIDDEN_TO_HAVE_DUPLICATES {
+fn check_crate_duplicate(
+ metadata: &Metadata,
+ forbidden_to_have_duplicates: &[&str],
+ bad: &mut bool,
+) {
+ for &name in forbidden_to_have_duplicates {
let matches: Vec<_> = metadata.packages.iter().filter(|pkg| pkg.name == name).collect();
match matches.len() {
0 => {
.iter()
.find(|n| &n.id == pkg_id)
.unwrap_or_else(|| panic!("could not find `{}` in resolve", pkg_id));
- // Don't care about dev-dependencies.
- // Build dependencies *shouldn't* matter unless they do some kind of
- // codegen. For now we'll assume they don't.
- let deps = node.deps.iter().filter(|node_dep| {
- node_dep
- .dep_kinds
- .iter()
- .any(|kind_info| kind_info.kind == cargo_metadata::DependencyKind::Normal)
- });
- for dep in deps {
+ for dep in &node.deps {
normal_deps_of_r(resolve, &dep.pkg, result);
}
}
pub has_gate_test: bool,
pub tracking_issue: Option<NonZeroU32>,
}
+impl Feature {
+ fn tracking_issue_display(&self) -> impl fmt::Display {
+ match self.tracking_issue {
+ None => "none".to_string(),
+ Some(x) => x.to_string(),
+ }
+ }
+}
pub type Features = HashMap<String, Feature>;
if f.tracking_issue != s.tracking_issue && f.level != Status::Stable {
tidy_error!(
bad,
- "{}:{}: mismatches the `issue` in {}",
+ "{}:{}: `issue` \"{}\" mismatches the {} `issue` of \"{}\"",
file.display(),
line,
- display
+ f.tracking_issue_display(),
+ display,
+ s.tracking_issue_display(),
);
}
}
//! - libunwind may have platform-specific code.
//! - other crates in the std facade may not.
//! - std may have platform-specific code in the following places:
-//! - `sys/unix/`
-//! - `sys/windows/`
+//! - `sys/`
//! - `os/`
//!
//! `std/sys_common` should _not_ contain platform-specific code.
// Paths that may contain platform-specific code.
const EXCEPTION_PATHS: &[&str] = &[
- // std crates
"library/panic_abort",
"library/panic_unwind",
"library/unwind",
- "library/std/src/sys/", // Platform-specific code for std lives here.
- // This has the trailing slash so that sys_common is not excepted.
- "library/std/src/os", // Platform-specific public interfaces
- "library/rtstartup", // Not sure what to do about this. magic stuff for mingw
- // Integration test for platform-specific run-time feature detection:
- "library/std/tests/run-time-detect.rs",
- "library/std/src/net/test.rs",
- "library/std/src/net/addr",
- "library/std/src/net/udp",
- "library/std/src/sys_common/remutex.rs",
- "library/std/src/sync/mutex.rs",
- "library/std/src/sync/rwlock.rs",
- "library/term", // Not sure how to make this crate portable, but test crate needs it.
- "library/test", // Probably should defer to unstable `std::sys` APIs.
- // std testing crates, okay for now at least
- "library/core/tests",
- "library/alloc/tests/lib.rs",
- "library/alloc/benches/lib.rs",
+ "library/rtstartup", // Not sure what to do about this. magic stuff for mingw
+ "library/term", // Not sure how to make this crate portable, but test crate needs it.
+ "library/test", // Probably should defer to unstable `std::sys` APIs.
// The `VaList` implementation must have platform specific code.
// The Windows implementation of a `va_list` is always a character
// pointer regardless of the target architecture. As a result,
// we must use `#[cfg(windows)]` to conditionally compile the
// correct `VaList` structure for windows.
"library/core/src/ffi.rs",
+ "library/std/src/sys/", // Platform-specific code for std lives here.
+ "library/std/src/os", // Platform-specific public interfaces
+ // Temporary `std` exceptions
+ // FIXME: platform-specific code should be moved to `sys`
+ "library/std/src/io/copy.rs",
+ "library/std/src/io/stdio.rs",
+ "library/std/src/f32.rs",
+ "library/std/src/f64.rs",
+ "library/std/src/path.rs",
+ "library/std/src/thread/available_concurrency.rs",
+ "library/std/src/sys_common", // Should only contain abstractions over platforms
+ "library/std/src/net/test.rs", // Utility helpers for tests
];
pub fn check(path: &Path, bad: &mut bool) {
return;
}
+ // exclude tests and benchmarks as some platforms do not support all tests
+ if filestr.contains("tests") || filestr.contains("benches") {
+ return;
+ }
+
check_cfgs(contents, &file, bad, &mut saw_target_arch, &mut saw_cfg_bang);
});
saw_target_arch: &mut bool,
saw_cfg_bang: &mut bool,
) {
- // For now it's ok to have platform-specific code after 'mod tests'.
- let mod_tests_idx = find_test_mod(contents);
- let contents = &contents[..mod_tests_idx];
// Pull out all `cfg(...)` and `cfg!(...)` strings.
let cfgs = parse_cfgs(contents);
continue;
}
- err(idx, cfg);
- }
-}
-
-fn find_test_mod(contents: &str) -> usize {
- if let Some(mod_tests_idx) = contents.find("mod tests") {
- // Also capture a previous line indicating that "mod tests" is cfg'd out.
- let prev_newline_idx = contents[..mod_tests_idx].rfind('\n').unwrap_or(mod_tests_idx);
- let prev_newline_idx = contents[..prev_newline_idx].rfind('\n');
- if let Some(nl) = prev_newline_idx {
- let prev_line = &contents[nl + 1..mod_tests_idx];
- if prev_line.contains("cfg(all(test, not(target_os")
- || prev_line.contains("cfg(all(test, not(any(target_os")
- {
- nl
- } else {
- mod_tests_idx
- }
- } else {
- mod_tests_idx
+ // exclude tests as some platforms do not support all tests
+ if cfg.contains("test") {
+ continue;
}
- } else {
- contents.len()
+
+ err(idx, cfg);
}
}
-fn parse_cfgs<'a>(contents: &'a str) -> Vec<(usize, &'a str)> {
+fn parse_cfgs(contents: &str) -> Vec<(usize, &str)> {
let candidate_cfgs = contents.match_indices("cfg");
let candidate_cfg_idxs = candidate_cfgs.map(|(i, _)| i);
// This is puling out the indexes of all "cfg" strings
// that appear to be tokens followed by a parenthesis.
let cfgs = candidate_cfg_idxs.filter(|i| {
- let pre_idx = i.saturating_sub(*i);
+ let pre_idx = i.saturating_sub(1);
let succeeds_non_ident = !contents
.as_bytes()
.get(pre_idx)