[submodule "src/llvm-project"]
path = src/llvm-project
url = https://github.com/rust-lang/llvm-project.git
- branch = rustc/14.0-2022-02-09
+ branch = rustc/14.0-2022-03-22
[submodule "src/doc/embedded-book"]
path = src/doc/embedded-book
url = https://github.com/rust-embedded/book.git
[[package]]
name = "chalk-derive"
-version = "0.76.0"
+version = "0.80.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58c24b8052ea1e3adbb6f9ab7ba5fcc18b9d12591c042de4c833f709ce81e0e0"
+checksum = "d0001adf0cf12361e08b65e1898ea138f8f77d8f5177cbf29b6b3b3532252bd6"
dependencies = [
"proc-macro2",
"quote",
[[package]]
name = "chalk-engine"
-version = "0.76.0"
+version = "0.80.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eca186b6ea9af798312f4b568fd094c82e7946ac08be5dc5fea22decc6d2ed8"
+checksum = "c44ee96f2d67cb5193d1503f185db1abad9933a1c6e6b4169c176f90baecd393"
dependencies = [
"chalk-derive",
"chalk-ir",
[[package]]
name = "chalk-ir"
-version = "0.76.0"
+version = "0.80.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3cad5c3f1edd4b4a2c9bda24ae558ceb4f88336f88f944c2e35d0bfeb13c818"
+checksum = "92d8a95548f23618fda86426e4304e563ec2bb7ba0216139f0748d63c107b5f1"
dependencies = [
"bitflags",
"chalk-derive",
[[package]]
name = "chalk-solve"
-version = "0.76.0"
+version = "0.80.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94533188d3452bc72cbd5618d166f45fc7646b674ad3fe9667d557bc25236dee"
+checksum = "f37f492dacfafe2e21319b80827da2779932909bb392f0cc86b2bd5c07c1b4e1"
dependencies = [
"chalk-derive",
"chalk-ir",
"futures 0.3.19",
"if_chain",
"itertools",
- "num_cpus",
"parking_lot",
"quote",
"regex",
+ "rustc-semver",
"rustc-workspace-hack",
"rustc_tools_util 0.2.0",
"semver",
[[package]]
name = "git2"
-version = "0.14.1"
+version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e7d3b96ec1fcaa8431cf04a4f1ef5caafe58d5cf7bcc31f09c1626adddb0ffe"
+checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c"
dependencies = [
"bitflags",
"libc",
[[package]]
name = "libgit2-sys"
-version = "0.13.1+1.4.2"
+version = "0.13.2+1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43e598aa7a4faedf1ea1b4608f582b06f0f40211eec551b7ef36019ae3f62def"
+checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b"
dependencies = [
"cc",
"libc",
"compiletest_rs",
"env_logger 0.9.0",
"getrandom 0.2.0",
- "hex 0.4.2",
"libc",
"log",
"measureme 9.1.2",
jemalloc = ['tikv-jemalloc-sys']
llvm = ['rustc_driver/llvm']
max_level_info = ['rustc_driver/max_level_info']
+rustc_use_parallel_compiler = ['rustc_driver/rustc_use_parallel_compiler']
pub struct Crate {
pub attrs: Vec<Attribute>,
pub items: Vec<P<Item>>,
- pub span: Span,
+ pub spans: ModSpans,
/// Must be equal to `CRATE_NODE_ID` after the crate root is expanded, but may hold
/// expansion placeholders or an unassigned value (`DUMMY_NODE_ID`) before that.
pub id: NodeId,
/// or with definition outlined to a separate file `mod foo;` and already loaded from it.
/// The inner span is from the first token past `{` to the last token until `}`,
/// or from the first to the last token in the loaded file.
- Loaded(Vec<P<Item>>, Inline, Span),
+ Loaded(Vec<P<Item>>, Inline, ModSpans),
/// Module with definition outlined to a separate file `mod foo;` but not yet loaded from it.
Unloaded,
}
+#[derive(Copy, Clone, Encodable, Decodable, Debug)]
+pub struct ModSpans {
+ /// `inner_span` covers the body of the module; for a file module, it's the whole file.
+ /// For an inline module, it's the span inside the `{ ... }`, not including the curly braces.
+ pub inner_span: Span,
+ pub inject_use_span: Span,
+}
+
+impl Default for ModSpans {
+ fn default() -> ModSpans {
+ ModSpans { inner_span: Default::default(), inject_use_span: Default::default() }
+ }
+}
+
/// Foreign module declaration.
///
/// E.g., `extern { .. }` or `extern "C" { .. }`.
| Nonterminal::NtMeta(_)
| Nonterminal::NtPath(_)
| Nonterminal::NtVis(_)
- | Nonterminal::NtTT(_)
| Nonterminal::NtBlock(_)
| Nonterminal::NtIdent(..)
| Nonterminal::NtLifetime(_) => &[],
| Nonterminal::NtMeta(_)
| Nonterminal::NtPath(_)
| Nonterminal::NtVis(_)
- | Nonterminal::NtTT(_)
| Nonterminal::NtBlock(_)
| Nonterminal::NtIdent(..)
| Nonterminal::NtLifetime(_) => {}
Nonterminal::NtPath(path) => path.tokens_mut(),
Nonterminal::NtVis(vis) => vis.tokens_mut(),
Nonterminal::NtBlock(block) => block.tokens_mut(),
- Nonterminal::NtIdent(..) | Nonterminal::NtLifetime(..) | Nonterminal::NtTT(..) => None,
+ Nonterminal::NtIdent(..) | Nonterminal::NtLifetime(..) => None,
}
}
}
visit_lazy_tts(tokens, vis);
}
token::NtPath(path) => vis.visit_path(path),
- token::NtTT(tt) => visit_tt(tt, vis),
token::NtVis(visib) => vis.visit_vis(visib),
}
}
ItemKind::Mod(unsafety, mod_kind) => {
visit_unsafety(unsafety, vis);
match mod_kind {
- ModKind::Loaded(items, _inline, inner_span) => {
+ ModKind::Loaded(items, _inline, ModSpans { inner_span, inject_use_span }) => {
vis.visit_span(inner_span);
+ vis.visit_span(inject_use_span);
items.flat_map_in_place(|item| vis.flat_map_item(item));
}
ModKind::Unloaded => {}
}
pub fn noop_visit_crate<T: MutVisitor>(krate: &mut Crate, vis: &mut T) {
- let Crate { attrs, items, span, id, is_placeholder: _ } = krate;
+ let Crate { attrs, items, spans, id, is_placeholder: _ } = krate;
vis.visit_id(id);
visit_attrs(attrs, vis);
items.flat_map_in_place(|item| vis.flat_map_item(item));
- vis.visit_span(span);
+ let ModSpans { inner_span, inject_use_span } = spans;
+ vis.visit_span(inner_span);
+ vis.visit_span(inject_use_span);
}
// Mutates one item into possibly many items.
Crate {
attrs: Default::default(),
items: Default::default(),
- span: Default::default(),
+ spans: Default::default(),
id: DUMMY_NODE_ID,
is_placeholder: Default::default(),
}
use crate::ast;
use crate::ptr::P;
-use crate::tokenstream::TokenTree;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::Lrc;
NtMeta(P<ast::AttrItem>),
NtPath(ast::Path),
NtVis(ast::Visibility),
- NtTT(TokenTree),
}
// `Nonterminal` is used a lot. Make sure it doesn't unintentionally get bigger.
NtMeta(attr_item) => attr_item.span(),
NtPath(path) => path.span,
NtVis(vis) => vis.span,
- NtTT(tt) => tt.span(),
}
}
}
ident_lhs == ident_rhs && is_raw_lhs == is_raw_rhs
}
(NtLifetime(ident_lhs), NtLifetime(ident_rhs)) => ident_lhs == ident_rhs,
- (NtTT(tt_lhs), NtTT(tt_rhs)) => tt_lhs == tt_rhs,
// FIXME: Assume that all "complex" nonterminal are not equal, we can't compare them
// correctly based on data from AST. This will prevent them from matching each other
// in macros. The comparison will become possible only when each nonterminal has an
NtLiteral(..) => f.pad("NtLiteral(..)"),
NtMeta(..) => f.pad("NtMeta(..)"),
NtPath(..) => f.pad("NtPath(..)"),
- NtTT(..) => f.pad("NtTT(..)"),
NtVis(..) => f.pad("NtVis(..)"),
NtLifetime(..) => f.pad("NtLifetime(..)"),
}
}
/// When we have a ty alias we *may* have two where clauses. To give the best diagnostics, we set the span
-/// to the where clause that is prefered, if it exists. Otherwise, it sets the span to the other where
+/// to the where clause that is preferred, if it exists. Otherwise, it sets the span to the other where
/// clause if it exists.
fn add_ty_alias_where_clause(
generics: &mut ast::Generics,
}
fn visit_assoc_item(&mut self, item: &'a AssocItem, ctxt: AssocCtxt) {
+ debug!(in_scope_lifetimes = ?self.lctx.in_scope_lifetimes);
self.lctx.with_hir_id_owner(item.id, |lctx| match ctxt {
AssocCtxt::Trait => hir::OwnerNode::TraitItem(lctx.lower_trait_item(item)),
AssocCtxt::Impl => hir::OwnerNode::ImplItem(lctx.lower_impl_item(item)),
// This should only be used with generics that have already had their
// in-band lifetimes added. In practice, this means that this function is
// only used when lowering a child item of a trait or impl.
+ #[tracing::instrument(level = "debug", skip(self, f))]
fn with_parent_item_lifetime_defs<T>(
&mut self,
parent_hir_id: LocalDefId,
f: impl FnOnce(&mut Self) -> T,
) -> T {
- let old_len = self.in_scope_lifetimes.len();
-
let parent_generics = match self.owners[parent_hir_id].unwrap().node().expect_item().kind {
hir::ItemKind::Impl(hir::Impl { ref generics, .. })
| hir::ItemKind::Trait(_, _, ref generics, ..) => generics.params,
_ => &[],
};
- let lt_def_names = parent_generics.iter().filter_map(|param| match param.kind {
- hir::GenericParamKind::Lifetime { .. } => Some(param.name.normalize_to_macros_2_0()),
- _ => None,
- });
- self.in_scope_lifetimes.extend(lt_def_names);
+ let lt_def_names = parent_generics
+ .iter()
+ .filter_map(|param| match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {
+ Some(param.name.normalize_to_macros_2_0())
+ }
+ _ => None,
+ })
+ .collect();
+ let old_in_scope_lifetimes = mem::replace(&mut self.in_scope_lifetimes, lt_def_names);
+ debug!(in_scope_lifetimes = ?self.in_scope_lifetimes);
let res = f(self);
- self.in_scope_lifetimes.truncate(old_len);
+ self.in_scope_lifetimes = old_in_scope_lifetimes;
res
}
// Clears (and restores) the `in_scope_lifetimes` field. Used when
// visiting nested items, which never inherit in-scope lifetimes
// from their surrounding environment.
+ #[tracing::instrument(level = "debug", skip(self, f))]
fn without_in_scope_lifetime_defs<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
let old_in_scope_lifetimes = mem::replace(&mut self.in_scope_lifetimes, vec![]);
+ debug!(?old_in_scope_lifetimes);
// this vector is only used when walking over impl headers,
// input types, and the like, and should not be non-empty in
})
}
ItemKind::Mod(_, ref mod_kind) => match mod_kind {
- ModKind::Loaded(items, _, inner_span) => {
+ ModKind::Loaded(items, _, ModSpans { inner_span, inject_use_span: _ }) => {
hir::ItemKind::Mod(self.lower_mod(items, *inner_span))
}
ModKind::Unloaded => panic!("`mod` items should have been loaded by now"),
/// written at all (e.g., `&T` or `std::cell::Ref<T>`).
anonymous_lifetime_mode: AnonymousLifetimeMode,
- /// Used to create lifetime definitions from in-band lifetime usages.
- /// e.g., `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8`
- /// When a named lifetime is encountered in a function or impl header and
- /// has not been defined
- /// (i.e., it doesn't appear in the in_scope_lifetimes list), it is added
+ /// Used to create lifetime definitions for anonymous lifetimes.
+ /// When an anonymous lifetime is encountered in a function or impl header and
+ /// requires creating a fresh lifetime parameter, it is added
/// to this list. The results of this list are then added to the list of
/// lifetime definitions in the corresponding impl or function generics.
- lifetimes_to_define: Vec<(Span, ParamName)>,
+ lifetimes_to_define: Vec<(Span, NodeId)>,
- /// `true` if in-band lifetimes are being collected. This is used to
- /// indicate whether or not we're in a place where new lifetimes will result
- /// in in-band lifetime definitions, such a function or an impl header,
- /// including implicit lifetimes from `impl_header_lifetime_elision`.
- is_collecting_anonymous_lifetimes: bool,
+ /// If anonymous lifetimes are being collected, this field holds the parent
+ /// `LocalDefId` to create the fresh lifetime parameters' `LocalDefId`.
+ is_collecting_anonymous_lifetimes: Option<LocalDefId>,
/// Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB.
/// We always store a `normalize_to_macros_2_0()` version of the param-name in this
task_context: None,
current_item: None,
lifetimes_to_define: Vec::new(),
- is_collecting_anonymous_lifetimes: false,
+ is_collecting_anonymous_lifetimes: None,
in_scope_lifetimes: Vec::new(),
allow_try_trait: Some([sym::try_trait_v2][..].into()),
allow_gen_future: Some([sym::gen_future][..].into()),
visit::walk_crate(&mut item::ItemLowerer { lctx: &mut self }, c);
self.with_hir_id_owner(CRATE_NODE_ID, |lctx| {
- let module = lctx.lower_mod(&c.items, c.span);
+ let module = lctx.lower_mod(&c.items, c.spans.inner_span);
lctx.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
hir::OwnerNode::Crate(lctx.arena.alloc(module))
});
/// parameter while `f` is running (and restored afterwards).
fn collect_in_band_defs<T>(
&mut self,
+ parent_def_id: LocalDefId,
f: impl FnOnce(&mut Self) -> T,
- ) -> (Vec<(Span, ParamName)>, T) {
- let was_collecting = std::mem::replace(&mut self.is_collecting_anonymous_lifetimes, true);
+ ) -> (Vec<(Span, NodeId)>, T) {
+ let was_collecting =
+ std::mem::replace(&mut self.is_collecting_anonymous_lifetimes, Some(parent_def_id));
let len = self.lifetimes_to_define.len();
let res = f(self);
}
/// Converts a lifetime into a new generic parameter.
- fn lifetime_to_generic_param(
+ fn fresh_lifetime_to_generic_param(
&mut self,
span: Span,
- hir_name: ParamName,
- parent_def_id: LocalDefId,
+ node_id: NodeId,
) -> hir::GenericParam<'hir> {
- let node_id = self.resolver.next_node_id();
-
- // Get the name we'll use to make the def-path. Note
- // that collisions are ok here and this shouldn't
- // really show up for end-user.
- let (str_name, kind) = match hir_name {
- ParamName::Plain(ident) => (ident.name, hir::LifetimeParamKind::Explicit),
- ParamName::Fresh(_) => (kw::UnderscoreLifetime, hir::LifetimeParamKind::Elided),
- ParamName::Error => (kw::UnderscoreLifetime, hir::LifetimeParamKind::Error),
- };
-
- // Add a definition for the in-band lifetime def.
- self.resolver.create_def(
- parent_def_id,
- node_id,
- DefPathData::LifetimeNs(str_name),
- ExpnId::root(),
- span.with_parent(None),
- );
-
+ let hir_id = self.lower_node_id(node_id);
+ let def_id = self.resolver.local_def_id(node_id);
hir::GenericParam {
- hir_id: self.lower_node_id(node_id),
- name: hir_name,
+ hir_id,
+ name: hir::ParamName::Fresh(def_id),
bounds: &[],
span: self.lower_span(span),
pure_wrt_drop: false,
- kind: hir::GenericParamKind::Lifetime { kind },
+ kind: hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Elided },
}
}
/// When we have either an elided or `'_` lifetime in an impl
/// header, we convert it to an in-band lifetime.
fn collect_fresh_anonymous_lifetime(&mut self, span: Span) -> ParamName {
- assert!(self.is_collecting_anonymous_lifetimes);
- let index = self.lifetimes_to_define.len() + self.in_scope_lifetimes.len();
- let hir_name = ParamName::Fresh(index);
- self.lifetimes_to_define.push((span, hir_name));
+ let Some(parent_def_id) = self.is_collecting_anonymous_lifetimes else { panic!() };
+
+ let node_id = self.resolver.next_node_id();
+
+ // Add a definition for the in-band lifetime def.
+ let param_def_id = self.resolver.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::LifetimeNs(kw::UnderscoreLifetime),
+ ExpnId::root(),
+ span.with_parent(None),
+ );
+
+ let hir_name = ParamName::Fresh(param_def_id);
+ self.lifetimes_to_define.push((span, node_id));
hir_name
}
f: impl FnOnce(&mut Self, &mut Vec<hir::GenericParam<'hir>>) -> T,
) -> (hir::Generics<'hir>, T) {
let (lifetimes_to_define, (mut lowered_generics, impl_trait_defs, res)) = self
- .collect_in_band_defs(|this| {
+ .collect_in_band_defs(parent_def_id, |this| {
this.with_anonymous_lifetime_mode(anonymous_lifetime_mode, |this| {
this.with_in_scope_lifetime_defs(&generics.params, |this| {
let mut impl_trait_defs = Vec::new();
lowered_generics.params.extend(
lifetimes_to_define
.into_iter()
- .map(|(span, hir_name)| {
- self.lifetime_to_generic_param(span, hir_name, parent_def_id)
- })
+ .map(|(span, node_id)| self.fresh_lifetime_to_generic_param(span, node_id))
.chain(impl_trait_defs),
);
sess.diagnostic().delay_span_bug(
span,
"unexpected delimiter in key-value attribute's value",
- )
+ );
}
unwrap_single_token(sess, tokens, span)
}
.in_scope_lifetimes
.iter()
.cloned()
- .map(|name| (name.ident().span, name, hir::LifetimeName::Param(name)))
- .chain(
- self.lifetimes_to_define
- .iter()
- .map(|&(span, name)| (span, name, hir::LifetimeName::Param(name))),
- )
+ .map(|name| (name.ident().span, hir::LifetimeName::Param(name)))
+ .chain(self.lifetimes_to_define.iter().map(|&(span, node_id)| {
+ let def_id = self.resolver.local_def_id(node_id);
+ let name = hir::ParamName::Fresh(def_id);
+ (span, hir::LifetimeName::Param(name))
+ }))
.collect();
self.with_hir_id_owner(opaque_ty_node_id, |this| {
+ let mut generic_params: Vec<_> = lifetime_params
+ .iter()
+ .map(|&(span, name)| {
+ // We can only get lifetime names from the outside.
+ let hir::LifetimeName::Param(hir_name) = name else { panic!() };
+
+ let node_id = this.resolver.next_node_id();
+
+ // Add a definition for the in-band lifetime def.
+ let def_id = this.resolver.create_def(
+ opaque_ty_def_id,
+ node_id,
+ DefPathData::LifetimeNs(hir_name.ident().name),
+ ExpnId::root(),
+ span.with_parent(None),
+ );
+
+ let (kind, name) = match hir_name {
+ ParamName::Plain(ident) => {
+ (hir::LifetimeParamKind::Explicit, hir::ParamName::Plain(ident))
+ }
+ ParamName::Fresh(_) => {
+ (hir::LifetimeParamKind::Elided, hir::ParamName::Fresh(def_id))
+ }
+ ParamName::Error => (hir::LifetimeParamKind::Error, hir::ParamName::Error),
+ };
+
+ hir::GenericParam {
+ hir_id: this.lower_node_id(node_id),
+ name,
+ bounds: &[],
+ span: this.lower_span(span),
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ }
+ })
+ .collect();
+
// We have to be careful to get elision right here. The
// idea is that we create a lifetime parameter for each
// lifetime in the return type. So, given a return type
// hence the elision takes place at the fn site.
let (lifetimes_to_define, future_bound) =
this.with_anonymous_lifetime_mode(AnonymousLifetimeMode::CreateParameter, |this| {
- this.collect_in_band_defs(|this| {
+ this.collect_in_band_defs(opaque_ty_def_id, |this| {
this.lower_async_fn_output_type_to_future_bound(output, fn_def_id, span)
})
});
debug!("lower_async_fn_ret_ty: future_bound={:#?}", future_bound);
debug!("lower_async_fn_ret_ty: lifetimes_to_define={:#?}", lifetimes_to_define);
- lifetime_params.extend(
- // Output lifetime like `'_`:
- lifetimes_to_define
- .into_iter()
- .map(|(span, name)| (span, name, hir::LifetimeName::Implicit(false))),
- );
+ // Output lifetime like `'_`:
+ for (span, node_id) in lifetimes_to_define {
+ let param = this.fresh_lifetime_to_generic_param(span, node_id);
+ lifetime_params.push((span, hir::LifetimeName::Implicit(false)));
+ generic_params.push(param);
+ }
+ let generic_params = this.arena.alloc_from_iter(generic_params);
debug!("lower_async_fn_ret_ty: lifetime_params={:#?}", lifetime_params);
-
- let generic_params =
- this.arena.alloc_from_iter(lifetime_params.iter().map(|&(span, hir_name, _)| {
- this.lifetime_to_generic_param(span, hir_name, opaque_ty_def_id)
- }));
+ debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
let opaque_ty_item = hir::OpaqueTy {
generics: hir::Generics {
// For the "output" lifetime parameters, we just want to
// generate `'_`.
let generic_args =
- self.arena.alloc_from_iter(lifetime_params.into_iter().map(|(span, _, name)| {
+ self.arena.alloc_from_iter(lifetime_params.into_iter().map(|(span, name)| {
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span: self.lower_span(span),
let (name, kind) = match param.kind {
GenericParamKind::Lifetime => {
let was_collecting_in_band = self.is_collecting_anonymous_lifetimes;
- self.is_collecting_anonymous_lifetimes = false;
+ self.is_collecting_anonymous_lifetimes = None;
let lt = self
.with_anonymous_lifetime_mode(AnonymousLifetimeMode::ReportError, |this| {
diag.note("only supported directly in conditions of `if` and `while` expressions");
diag.note("as well as when nested within `&&` and parentheses in those conditions");
if let ForbiddenLetReason::ForbiddenWithOr(span) = forbidden_let_reason {
- diag.span_note(span, "`||` operators are not allowed in let chain expressions");
+ diag.span_note(
+ span,
+ "`||` operators are not currently supported in let chain expressions",
+ );
}
diag.emit();
} else {
attr.span,
"allow, cfg, cfg_attr, deny, \
forbid, and warn are the only allowed built-in attributes in function parameters",
- )
+ );
}
});
}
}
fn error_item_without_body(&self, sp: Span, ctx: &str, msg: &str, sugg: &str) {
+ let source_map = self.session.source_map();
+ let end = source_map.end_point(sp);
+ let replace_span = if source_map.span_to_snippet(end).map(|s| s == ";").unwrap_or(false) {
+ end
+ } else {
+ sp.shrink_to_hi()
+ };
self.err_handler()
.struct_span_err(sp, msg)
.span_suggestion(
- self.session.source_map().end_point(sp),
+ replace_span,
&format!("provide a definition for the {}", ctx),
sugg.to_string(),
Applicability::HasPlaceholders,
"wasm ABI is experimental and subject to change"
);
}
- abi => self
- .sess
- .parse_sess
- .span_diagnostic
- .delay_span_bug(span, &format!("unrecognized ABI not caught in lowering: {}", abi)),
+ abi => {
+ self.sess.parse_sess.span_diagnostic.delay_span_bug(
+ span,
+ &format!("unrecognized ABI not caught in lowering: {}", abi),
+ );
+ }
}
}
token::NtIdent(e, is_raw) => IdentPrinter::for_ast_ident(e, is_raw).to_string(),
token::NtLifetime(e) => e.to_string(),
token::NtLiteral(ref e) => self.expr_to_string(e),
- token::NtTT(ref tree) => self.tt_to_string(tree),
token::NtVis(ref e) => self.vis_to_string(e),
}
}
match cfg.name_or_empty() {
sym::any => mis
.iter()
- .any(|mi| eval_condition(mi.meta_item().unwrap(), sess, features, eval)),
+ // We don't use any() here, because we want to evaluate all cfg conditions
+ // as eval_condition can (and does) perform extra checks
+ .fold(false, |res, mi| {
+ res | eval_condition(mi.meta_item().unwrap(), sess, features, eval)
+ }),
sym::all => mis
.iter()
- .all(|mi| eval_condition(mi.meta_item().unwrap(), sess, features, eval)),
+ // We don't use all() here, because we want to evaluate all cfg conditions
+ // as eval_condition can (and does) perform extra checks
+ .fold(true, |res, mi| {
+ res & eval_condition(mi.meta_item().unwrap(), sess, features, eval)
+ }),
sym::not => {
if mis.len() != 1 {
struct_span_err!(
if sess.is_nightly_build() {
diag.help("add `#![feature(deprecated_suggestion)]` to the crate root");
}
- // FIXME(jhpratt) change this to an actual tracking issue
- diag.note("see #XXX for more details").emit();
+ diag.note("see #94785 for more details").emit();
}
if !get(mi, &mut suggestion) {
meta.span(),
AttrError::UnknownMetaItem(
pprust::path_to_string(&mi.path),
- if attr.has_name(sym::deprecated) {
- &["since", "note"]
- } else {
+ if sess.features_untracked().deprecated_suggestion {
&["since", "note", "suggestion"]
+ } else {
+ &["since", "note"]
},
),
);
FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, Operand, Place, PlaceRef,
ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm,
};
-use rustc_middle::ty::{
- self, suggest_constraining_type_param, suggest_constraining_type_params, PredicateKind, Ty,
-};
+use rustc_middle::ty::{self, subst::Subst, suggest_constraining_type_params, PredicateKind, Ty};
use rustc_mir_dataflow::move_paths::{InitKind, MoveOutIndex, MovePathIndex};
use rustc_span::symbol::sym;
use rustc_span::{BytePos, MultiSpan, Span};
.args_or_use()
})
.collect::<Vec<Span>>();
+
let reinits = maybe_reinitialized_locations.len();
if reinits == 1 {
err.span_label(reinit_spans[0], "this reinitialization might get skipped");
}
}
- if needs_note {
- let opt_name =
- self.describe_place_with_options(place.as_ref(), IncludingDowncast(true));
- let note_msg = match opt_name {
- Some(ref name) => format!("`{}`", name),
- None => "value".to_owned(),
- };
- if let ty::Param(param_ty) = ty.kind() {
- let tcx = self.infcx.tcx;
- let generics = tcx.generics_of(self.mir_def_id());
- let param = generics.type_param(¶m_ty, tcx);
- if let Some(generics) = tcx
- .typeck_root_def_id(self.mir_def_id().to_def_id())
- .as_local()
- .and_then(|def_id| tcx.hir().get_generics(def_id))
- {
- suggest_constraining_type_param(
- tcx,
- generics,
- &mut err,
- param.name.as_str(),
- "Copy",
- None,
- );
- }
- } else {
- // Try to find predicates on *generic params* that would allow copying `ty`
-
- let tcx = self.infcx.tcx;
- let generics = tcx.generics_of(self.mir_def_id());
- if let Some(hir_generics) = tcx
- .typeck_root_def_id(self.mir_def_id().to_def_id())
- .as_local()
- .and_then(|def_id| tcx.hir().get_generics(def_id))
- {
- let predicates: Result<Vec<_>, _> = tcx.infer_ctxt().enter(|infcx| {
- let mut fulfill_cx =
- <dyn rustc_infer::traits::TraitEngine<'_>>::new(infcx.tcx);
-
- let copy_did = infcx.tcx.lang_items().copy_trait().unwrap();
- let cause = ObligationCause::new(
- span,
- self.mir_hir_id(),
- rustc_infer::traits::ObligationCauseCode::MiscObligation,
- );
- fulfill_cx.register_bound(
- &infcx,
- self.param_env,
- // Erase any region vids from the type, which may not be resolved
- infcx.tcx.erase_regions(ty),
- copy_did,
- cause,
- );
- // Select all, including ambiguous predicates
- let errors = fulfill_cx.select_all_or_error(&infcx);
-
- // Only emit suggestion if all required predicates are on generic
- errors
- .into_iter()
- .map(|err| match err.obligation.predicate.kind().skip_binder() {
- PredicateKind::Trait(predicate) => {
- match predicate.self_ty().kind() {
- ty::Param(param_ty) => Ok((
- generics.type_param(param_ty, tcx),
- predicate
- .trait_ref
- .print_only_trait_path()
- .to_string(),
- )),
- _ => Err(()),
- }
- }
- _ => Err(()),
- })
- .collect()
- });
-
- if let Ok(predicates) = predicates {
- suggest_constraining_type_params(
- tcx,
- hir_generics,
- &mut err,
- predicates.iter().map(|(param, constraint)| {
- (param.name.as_str(), &**constraint, None)
- }),
- );
- }
- }
- }
+ let opt_name =
+ self.describe_place_with_options(place.as_ref(), IncludingDowncast(true));
+ let note_msg = match opt_name {
+ Some(ref name) => format!("`{}`", name),
+ None => "value".to_owned(),
+ };
+ if self.suggest_borrow_fn_like(&mut err, ty, &move_site_vec, ¬e_msg) {
+ // Suppress the next suggestion since we don't want to put more bounds onto
+ // something that already has `Fn`-like bounds (or is a closure), so we can't
+ // restrict anyways.
+ } else {
+ self.suggest_adding_copy_bounds(&mut err, ty, span);
+ }
+ if needs_note {
let span = if let Some(local) = place.as_local() {
- let decl = &self.body.local_decls[local];
- Some(decl.source_info.span)
+ Some(self.body.local_decls[local].source_info.span)
} else {
None
};
}
}
+ fn suggest_borrow_fn_like(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ty: Ty<'tcx>,
+ move_sites: &[MoveSite],
+ value_name: &str,
+ ) -> bool {
+ let tcx = self.infcx.tcx;
+
+ // Find out if the predicates show that the type is a Fn or FnMut
+ let find_fn_kind_from_did = |predicates: &[(ty::Predicate<'tcx>, Span)], substs| {
+ predicates.iter().find_map(|(pred, _)| {
+ let pred = if let Some(substs) = substs {
+ pred.subst(tcx, substs).kind().skip_binder()
+ } else {
+ pred.kind().skip_binder()
+ };
+ if let ty::PredicateKind::Trait(pred) = pred && pred.self_ty() == ty {
+ if Some(pred.def_id()) == tcx.lang_items().fn_trait() {
+ return Some(hir::Mutability::Not);
+ } else if Some(pred.def_id()) == tcx.lang_items().fn_mut_trait() {
+ return Some(hir::Mutability::Mut);
+ }
+ }
+ None
+ })
+ };
+
+ // If the type is opaque/param/closure, and it is Fn or FnMut, let's suggest (mutably)
+ // borrowing the type, since `&mut F: FnMut` iff `F: FnMut` and similarly for `Fn`.
+ // These types seem reasonably opaque enough that they could be substituted with their
+ // borrowed variants in a function body when we see a move error.
+ let borrow_level = match ty.kind() {
+ ty::Param(_) => find_fn_kind_from_did(
+ tcx.explicit_predicates_of(self.mir_def_id().to_def_id()).predicates,
+ None,
+ ),
+ ty::Opaque(did, substs) => {
+ find_fn_kind_from_did(tcx.explicit_item_bounds(*did), Some(*substs))
+ }
+ ty::Closure(_, substs) => match substs.as_closure().kind() {
+ ty::ClosureKind::Fn => Some(hir::Mutability::Not),
+ ty::ClosureKind::FnMut => Some(hir::Mutability::Mut),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let Some(borrow_level) = borrow_level else { return false; };
+ let sugg = move_sites
+ .iter()
+ .map(|move_site| {
+ let move_out = self.move_data.moves[(*move_site).moi];
+ let moved_place = &self.move_data.move_paths[move_out.path].place;
+ let move_spans = self.move_spans(moved_place.as_ref(), move_out.source);
+ let move_span = move_spans.args_or_use();
+ let suggestion = if borrow_level == hir::Mutability::Mut {
+ "&mut ".to_string()
+ } else {
+ "&".to_string()
+ };
+ (move_span.shrink_to_lo(), suggestion)
+ })
+ .collect();
+ err.multipart_suggestion_verbose(
+ &format!(
+ "consider {}borrowing {value_name}",
+ if borrow_level == hir::Mutability::Mut { "mutably " } else { "" }
+ ),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ true
+ }
+
+ fn suggest_adding_copy_bounds(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) {
+ let tcx = self.infcx.tcx;
+ let generics = tcx.generics_of(self.mir_def_id());
+
+ let Some(hir_generics) = tcx
+ .typeck_root_def_id(self.mir_def_id().to_def_id())
+ .as_local()
+ .and_then(|def_id| tcx.hir().get_generics(def_id))
+ else { return; };
+ // Try to find predicates on *generic params* that would allow copying `ty`
+ let predicates: Result<Vec<_>, _> = tcx.infer_ctxt().enter(|infcx| {
+ let mut fulfill_cx = <dyn rustc_infer::traits::TraitEngine<'_>>::new(infcx.tcx);
+
+ let copy_did = infcx.tcx.lang_items().copy_trait().unwrap();
+ let cause = ObligationCause::new(
+ span,
+ self.mir_hir_id(),
+ rustc_infer::traits::ObligationCauseCode::MiscObligation,
+ );
+ fulfill_cx.register_bound(
+ &infcx,
+ self.param_env,
+ // Erase any region vids from the type, which may not be resolved
+ infcx.tcx.erase_regions(ty),
+ copy_did,
+ cause,
+ );
+ // Select all, including ambiguous predicates
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+
+ // Only emit suggestion if all required predicates are on generic
+ errors
+ .into_iter()
+ .map(|err| match err.obligation.predicate.kind().skip_binder() {
+ PredicateKind::Trait(predicate) => match predicate.self_ty().kind() {
+ ty::Param(param_ty) => Ok((
+ generics.type_param(param_ty, tcx),
+ predicate.trait_ref.print_only_trait_path().to_string(),
+ )),
+ _ => Err(()),
+ },
+ _ => Err(()),
+ })
+ .collect()
+ });
+
+ if let Ok(predicates) = predicates {
+ suggest_constraining_type_params(
+ tcx,
+ hir_generics,
+ err,
+ predicates
+ .iter()
+ .map(|(param, constraint)| (param.name.as_str(), &**constraint, None)),
+ );
+ }
+ }
+
pub(crate) fn report_move_out_while_borrowed(
&mut self,
location: Location,
let mut ty = local_decl.ty;
if local_decl.source_info.span.desugaring_kind() == Some(DesugaringKind::ForLoop) {
if let ty::Adt(adt, substs) = local_decl.ty.kind() {
- if tcx.is_diagnostic_item(sym::Option, adt.did) {
+ if tcx.is_diagnostic_item(sym::Option, adt.did()) {
// in for loop desugaring, only look at the `Some(..)` inner type
ty = substs.type_at(0);
}
// If type is an ADT that implements Drop, then
// simplify output by reporting just the ADT name.
ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => {
- ("`Drop` code", format!("type `{}`", tcx.def_path_str(adt.did)))
+ ("`Drop` code", format!("type `{}`", tcx.def_path_str(adt.did())))
}
// Otherwise, just report the whole type (and use
ty::Adt(def, _) => {
let variant = if let Some(idx) = variant_index {
assert!(def.is_enum());
- &def.variants[idx]
+ &def.variant(idx)
} else {
def.non_enum_variant()
};
BorrowedContentSource::DerefMutableRef => "a mutable reference".to_string(),
BorrowedContentSource::OverloadedDeref(ty) => ty
.ty_adt_def()
- .and_then(|adt| match tcx.get_diagnostic_name(adt.did)? {
+ .and_then(|adt| match tcx.get_diagnostic_name(adt.did())? {
name @ (sym::Rc | sym::Arc) => Some(format!("an `{}`", name)),
_ => None,
})
}
BorrowedContentSource::OverloadedDeref(ty) => ty
.ty_adt_def()
- .and_then(|adt| match tcx.get_diagnostic_name(adt.did)? {
+ .and_then(|adt| match tcx.get_diagnostic_name(adt.did())? {
name @ (sym::Rc | sym::Arc) => Some(format!("an `{}`", name)),
_ => None,
})
fn report(&mut self, error: GroupedMoveError<'tcx>) {
let (mut err, err_span) = {
- let (span, use_spans, original_path, kind): (
+ let (span, use_spans, original_path, kind, has_complex_bindings): (
Span,
Option<UseSpans<'tcx>>,
Place<'tcx>,
&IllegalMoveOriginKind<'_>,
+ bool,
) = match error {
- GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. }
- | GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. } => {
- (span, None, original_path, kind)
+ GroupedMoveError::MovesFromPlace {
+ span,
+ original_path,
+ ref kind,
+ ref binds_to,
+ ..
}
+ | GroupedMoveError::MovesFromValue {
+ span,
+ original_path,
+ ref kind,
+ ref binds_to,
+ ..
+ } => (span, None, original_path, kind, !binds_to.is_empty()),
GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => {
- (use_spans.args_or_use(), Some(use_spans), original_path, kind)
+ (use_spans.args_or_use(), Some(use_spans), original_path, kind, false)
}
};
debug!(
target_place,
span,
use_spans,
+ has_complex_bindings,
),
&IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => {
self.cannot_move_out_of_interior_of_drop(span, ty)
deref_target_place: Place<'tcx>,
span: Span,
use_spans: Option<UseSpans<'tcx>>,
+ has_complex_bindings: bool,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
// Inspect the type of the content behind the
// borrow to provide feedback about why this
};
let ty = move_place.ty(self.body, self.infcx.tcx).ty;
let def_id = match *ty.kind() {
- ty::Adt(self_def, _) => self_def.did,
+ ty::Adt(self_def, _) => self_def.did(),
ty::Foreign(def_id)
| ty::FnDef(def_id, _)
| ty::Closure(def_id, _)
let diag_name = self.infcx.tcx.get_diagnostic_name(def_id);
if matches!(diag_name, Some(sym::Option | sym::Result))
&& use_spans.map_or(true, |v| !v.for_closure())
+ && !has_complex_bindings
{
err.span_suggestion_verbose(
span.shrink_to_hi(),
ty::Adt(adt, substs) => {
let generic_arg = substs[param_index as usize];
let identity_substs =
- InternalSubsts::identity_for_item(self.infcx.tcx, adt.did);
- let base_ty = self.infcx.tcx.mk_adt(adt, identity_substs);
+ InternalSubsts::identity_for_item(self.infcx.tcx, adt.did());
+ let base_ty = self.infcx.tcx.mk_adt(*adt, identity_substs);
let base_generic_arg = identity_substs[param_index as usize];
let adt_desc = adt.descr();
"returns a closure that contains a reference to a captured variable, which then \
escapes the closure body"
}
- ty::Adt(def, _) if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did) => {
+ ty::Adt(def, _) if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did()) => {
"returns an `async` block that contains a reference to a captured variable, which then \
escapes the closure body"
}
// Gather the upvars of a closure, if any.
let tables = tcx.typeck_opt_const_arg(def);
- if let Some(ErrorGuaranteed) = tables.tainted_by_errors {
+ if let Some(ErrorGuaranteed { .. }) = tables.tainted_by_errors {
infcx.set_tainted_by_errors();
errors.set_tainted_by_errors();
}
}
mod error {
+ use rustc_errors::ErrorGuaranteed;
+
use super::*;
pub struct BorrowckErrors<'tcx> {
// FIXME(eddyb) this is a suboptimal API because `tainted_by_errors` is
// set before any emission actually happens (weakening the guarantee).
pub fn buffer_error(&mut self, t: DiagnosticBuilder<'_, ErrorGuaranteed>) {
- self.tainted_by_errors = Some(ErrorGuaranteed {});
+ self.tainted_by_errors = Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
t.buffer(&mut self.buffered);
}
}
pub fn set_tainted_by_errors(&mut self) {
- self.tainted_by_errors = Some(ErrorGuaranteed {});
+ self.tainted_by_errors = Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
}
}
if !self.errors.buffered.is_empty() {
self.errors.buffered.sort_by_key(|diag| diag.sort_span);
- for diag in self.errors.buffered.drain(..) {
- self.infcx.tcx.sess.diagnostic().emit_diagnostic(&diag);
+ for mut diag in self.errors.buffered.drain(..) {
+ self.infcx.tcx.sess.diagnostic().emit_diagnostic(&mut diag);
}
}
}
ProjectionElem::Downcast(maybe_name, index) => match base_ty.kind() {
ty::Adt(adt_def, _substs) if adt_def.is_enum() => {
- if index.as_usize() >= adt_def.variants.len() {
+ if index.as_usize() >= adt_def.variants().len() {
PlaceTy::from_ty(span_mirbug_and_err!(
self,
place,
"cast to variant #{:?} but enum only has {:?}",
index,
- adt_def.variants.len()
+ adt_def.variants().len()
))
} else {
PlaceTy { ty: base_ty, variant_index: Some(index) }
let (variant, substs) = match base_ty {
PlaceTy { ty, variant_index: Some(variant_index) } => match *ty.kind() {
- ty::Adt(adt_def, substs) => (&adt_def.variants[variant_index], substs),
+ ty::Adt(adt_def, substs) => (adt_def.variant(variant_index), substs),
ty::Generator(def_id, substs, _) => {
let mut variants = substs.as_generator().state_tys(def_id, tcx);
let Some(mut variant) = variants.nth(variant_index.into()) else {
},
PlaceTy { ty, variant_index: None } => match *ty.kind() {
ty::Adt(adt_def, substs) if !adt_def.is_enum() => {
- (&adt_def.variants[VariantIdx::new(0)], substs)
+ (adt_def.variant(VariantIdx::new(0)), substs)
}
ty::Closure(_, substs) => {
return match substs
);
}
};
- if variant_index.as_usize() >= adt.variants.len() {
+ if variant_index.as_usize() >= adt.variants().len() {
span_bug!(
stmt.source_info.span,
"bad set discriminant ({:?} = {:?}): value of of range",
match *ak {
AggregateKind::Adt(adt_did, variant_index, substs, _, active_field_index) => {
let def = tcx.adt_def(adt_did);
- let variant = &def.variants[variant_index];
+ let variant = &def.variant(variant_index);
let adj_field_index = active_field_index.unwrap_or(field_index);
if let Some(field) = variant.fields.get(adj_field_index) {
Ok(self.normalize(field.ty(tcx, substs), location))
use rustc_ast::token;
use rustc_ast::tokenstream::TokenStream;
use rustc_ast_pretty::pprust;
+use rustc_errors::PResult;
use rustc_expand::base::{self, *};
use rustc_expand::module::DirOwnership;
use rustc_parse::parser::{ForceCollect, Parser};
use rustc_parse::{self, new_parser_from_file};
use rustc_session::lint::builtin::INCOMPLETE_INCLUDE;
use rustc_span::symbol::Symbol;
-use rustc_span::{self, Pos, Span};
+use rustc_span::{self, FileName, Pos, Span};
use smallvec::SmallVec;
+use std::path::PathBuf;
use std::rc::Rc;
// These macros all relate to the file system; they either return
return DummyResult::any(sp);
};
// The file will be added to the code map by the parser
- let file = match cx.resolve_path(file, sp) {
+ let file = match resolve_path(cx, file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
let Some(file) = get_single_str_from_tts(cx, sp, tts, "include_str!") else {
return DummyResult::any(sp);
};
- let file = match cx.resolve_path(file, sp) {
+ let file = match resolve_path(cx, file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
let Some(file) = get_single_str_from_tts(cx, sp, tts, "include_bytes!") else {
return DummyResult::any(sp);
};
- let file = match cx.resolve_path(file, sp) {
+ let file = match resolve_path(cx, file, sp) {
Ok(f) => f,
Err(mut err) => {
err.emit();
}
}
}
+
+/// Resolves a `path` mentioned inside Rust code, returning an absolute path.
+///
+/// This unifies the logic used for resolving `include_X!`.
+fn resolve_path<'a>(
+ cx: &mut ExtCtxt<'a>,
+ path: impl Into<PathBuf>,
+ span: Span,
+) -> PResult<'a, PathBuf> {
+ let path = path.into();
+
+ // Relative paths are resolved relative to the file in which they are found
+ // after macro expansion (that is, they are unhygienic).
+ if !path.is_absolute() {
+ let callsite = span.source_callsite();
+ let mut result = match cx.source_map().span_to_filename(callsite) {
+ FileName::Real(name) => name
+ .into_local_path()
+ .expect("attempting to resolve a file path in an external file"),
+ FileName::DocTest(path, _) => path,
+ other => {
+ return Err(cx.struct_span_err(
+ span,
+ &format!(
+ "cannot resolve relative path in non-file source `{}`",
+ cx.source_map().filename_for_diagnostics(&other)
+ ),
+ ));
+ }
+ };
+ result.pop();
+ result.push(path);
+ Ok(result)
+ } else {
+ Ok(path)
+ }
+}
fn visit_crate(&mut self, c: &mut ast::Crate) {
let prev_tests = mem::take(&mut self.tests);
noop_visit_crate(c, self);
- self.add_test_cases(ast::CRATE_NODE_ID, c.span, prev_tests);
+ self.add_test_cases(ast::CRATE_NODE_ID, c.spans.inner_span, prev_tests);
// Create a main function to run our tests
c.items.push(mk_main(&mut self.cx));
// We don't want to recurse into anything other than mods, since
// mods or tests inside of functions will break things
- if let ast::ItemKind::Mod(_, ModKind::Loaded(.., span)) = item.kind {
+ if let ast::ItemKind::Mod(_, ModKind::Loaded(.., ref spans)) = item.kind {
+ let ast::ModSpans { inner_span: span, inject_use_span: _ } = *spans;
let prev_tests = mem::take(&mut self.tests);
noop_visit_item_kind(&mut item.kind, self);
self.add_test_cases(item.id, span, prev_tests);
[[package]]
name = "anyhow"
-version = "1.0.53"
+version = "1.0.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0"
+checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"
[[package]]
name = "ar"
[[package]]
name = "autocfg"
-version = "1.0.1"
+version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
[[package]]
name = "cranelift-bforest"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71447555acc6c875c52c407d572fc1327dc5c34cba72b4b2e7ad048aa4e4fd19"
+checksum = "d16922317bd7dd104d509a373887822caa0242fc1def00de66abb538db221db4"
dependencies = [
"cranelift-entity",
]
[[package]]
name = "cranelift-codegen"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec9a10261891a7a919b0d4f6aa73582e88441d9a8f6173c88efbe4a5a362ea67"
+checksum = "8b80bf40380256307b68a3dcbe1b91cac92a533e212b5b635abc3e4525781a0a"
dependencies = [
"cranelift-bforest",
"cranelift-codegen-meta",
[[package]]
name = "cranelift-codegen-meta"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "815755d76fcbcf6e17ab888545b28ab775f917cb12ce0797e60cd41a2288692c"
+checksum = "703d0ed7d3bc6c7a814ca12858175bf4e93167a3584127858c686e4b5dd6e432"
dependencies = [
"cranelift-codegen-shared",
]
[[package]]
name = "cranelift-codegen-shared"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23ea92f2a67335a2e4d3c9c65624c3b14ae287d595b0650822c41824febab66b"
+checksum = "80f52311e1c90de12dcf8c4b9999c6ebfd1ed360373e88c357160936844511f6"
[[package]]
name = "cranelift-entity"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd25847875e388c500ad3624b4d2e14067955c93185194a7222246a25b91c975"
+checksum = "66bc82ef522c1f643baf7d4d40b7c52643ee4549d8960b0e6a047daacb83f897"
[[package]]
name = "cranelift-frontend"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "308bcfb7eb47bdf5ff6e1ace262af4ed39ec19f204c751fffb037e0e82a0c8bf"
+checksum = "3cc35e4251864b17515845ba47447bca88fec9ca1a4186b19fe42526e36140e8"
dependencies = [
"cranelift-codegen",
"log",
[[package]]
name = "cranelift-jit"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f560b3a314b8d15facf411e5d29b917c3e787a2bbc3fcdc5183bc0c5b7d4fe01"
+checksum = "93c66d594ad3bfe4e58b1fbd8d17877a7c6564a5f2d6f78cbbf4b0182af1927f"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "cranelift-module"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3a57aba9e603d694d1430ff38bd914bae23ef9c2e44b25a65e318905807e654c"
+checksum = "bf356697c40232aa09e1e3fb8a350ee894e849ccecc4eac56ff0570a4575c325"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "cranelift-native"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12cdc799aee673be2317e631d4569a1ba0a7e77a07a7ce45557086d2e02e9514"
+checksum = "b882b2251c9845d509d92aebfdb6c8bb3b3b48e207ac951f21fbd20cfe7f90b3"
dependencies = [
"cranelift-codegen",
"libc",
[[package]]
name = "cranelift-object"
-version = "0.81.0"
+version = "0.82.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "502a7333836052fcdf4425d7f7a21264d99f862d32b9c3a0e47cd920487a9b60"
+checksum = "2d3f1a88e654e567d2591169239ed157ab290811a729a6468f53999c01001263"
dependencies = [
"anyhow",
"cranelift-codegen",
[[package]]
name = "crc32fast"
-version = "1.3.1"
+version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
"cfg-if",
]
[[package]]
name = "libc"
-version = "0.2.116"
+version = "0.2.119"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "565dbd88872dbe4cc8a46e527f26483c1d1f7afa6b884a3bd6cd893d4f98da74"
+checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
[[package]]
name = "libloading"
"memchr",
]
+[[package]]
+name = "once_cell"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
+
[[package]]
name = "regalloc"
version = "0.0.34"
"indexmap",
"libloading",
"object",
+ "once_cell",
"smallvec",
"target-lexicon",
]
[[package]]
name = "target-lexicon"
-version = "0.12.2"
+version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff"
+checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1"
[[package]]
name = "winapi"
[dependencies]
# These have to be in sync with each other
-cranelift-codegen = { version = "0.81.0", features = ["unwind", "all-arch"] }
-cranelift-frontend = "0.81.0"
-cranelift-module = "0.81.0"
-cranelift-native = "0.81.0"
-cranelift-jit = { version = "0.81.0", optional = true }
-cranelift-object = "0.81.0"
+cranelift-codegen = { version = "0.82.1", features = ["unwind", "all-arch"] }
+cranelift-frontend = "0.82.1"
+cranelift-module = "0.82.1"
+cranelift-native = "0.82.1"
+cranelift-jit = { version = "0.82.1", optional = true }
+cranelift-object = "0.82.1"
target-lexicon = "0.12.0"
gimli = { version = "0.26.0", default-features = false, features = ["write"]}
object = { version = "0.27.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
indexmap = "1.8.0"
libloading = { version = "0.6.0", optional = true }
+once_cell = "1.10.0"
smallvec = "1.6.1"
[patch.crates-io]
[[package]]
name = "compiler_builtins"
-version = "0.1.70"
+version = "0.1.71"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "163437f05ca8f29d7e9128ea728dedf5eb620e445fbca273641d3a3050305f23"
dependencies = [
"rustc-std-workspace-core",
]
[[package]]
name = "libc"
-version = "0.2.119"
+version = "0.2.121"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4"
+checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f"
dependencies = [
"rustc-std-workspace-core",
]
rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
-compiler_builtins = { path = "./compiler-builtins" }
[profile.dev]
lto = "off"
debug = true
incremental = true
lto = "off"
+
+# Mandatory for correctly compiling compiler-builtins
+[profile.dev.package.compiler_builtins]
+debug-assertions = false
+overflow-checks = false
+codegen-units = 10000
+
+[profile.release.package.compiler_builtins]
+debug-assertions = false
+overflow-checks = false
+codegen-units = 10000
eprintln!("[INSTALL] hyperfine");
Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
- clone_repo(
+ clone_repo_shallow_github(
+ "rand",
+ "rust-random",
"rand",
- "https://github.com/rust-random/rand.git",
"0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
);
apply_patches("rand", Path::new("rand"));
- clone_repo(
+ clone_repo_shallow_github(
+ "regex",
+ "rust-lang",
"regex",
- "https://github.com/rust-lang/regex.git",
"341f207c1071f7290e3f228c710817c280c8dca1",
);
- clone_repo(
+ clone_repo_shallow_github(
+ "portable-simd",
+ "rust-lang",
"portable-simd",
- "https://github.com/rust-lang/portable-simd",
"b8d6b6844602f80af79cd96401339ec594d472d8",
);
apply_patches("portable-simd", Path::new("portable-simd"));
- clone_repo(
+ clone_repo_shallow_github(
+ "simple-raytracer",
+ "ebobby",
"simple-raytracer",
- "https://github.com/ebobby/simple-raytracer",
"804a7a21b9e673a482797aa289a18ed480e4d813",
);
git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
spawn_and_wait(git_init_cmd);
- let mut git_add_cmd = Command::new("git");
- git_add_cmd.arg("add").arg(".").current_dir(&sysroot_src);
- spawn_and_wait(git_add_cmd);
-
- let mut git_commit_cmd = Command::new("git");
- git_commit_cmd
- .arg("commit")
- .arg("-m")
- .arg("Initial commit")
- .arg("-q")
- .current_dir(&sysroot_src);
- spawn_and_wait(git_commit_cmd);
+ init_git_repo(&sysroot_src);
apply_patches("sysroot", &sysroot_src);
-
- clone_repo(
- "build_sysroot/compiler-builtins",
- "https://github.com/rust-lang/compiler-builtins.git",
- "0.1.70",
- );
- apply_patches("compiler-builtins", Path::new("build_sysroot/compiler-builtins"));
}
+#[allow(dead_code)]
fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
eprintln!("[CLONE] {}", repo);
// Ignore exit code as the repo may already have been checked out
spawn_and_wait(checkout_cmd);
}
+fn clone_repo_shallow_github(target_dir: &str, username: &str, repo: &str, rev: &str) {
+ if cfg!(windows) {
+ // Older windows doesn't have tar or curl by default. Fall back to using git.
+ clone_repo(target_dir, &format!("https://github.com/{}/{}.git", username, repo), rev);
+ return;
+ }
+
+ let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", username, repo, rev);
+ let archive_file = format!("{}.tar.gz", rev);
+ let archive_dir = format!("{}-{}", repo, rev);
+
+ eprintln!("[DOWNLOAD] {}/{} from {}", username, repo, archive_url);
+
+    // Remove previous results if they exist
+ let _ = std::fs::remove_file(&archive_file);
+ let _ = std::fs::remove_dir_all(&archive_dir);
+ let _ = std::fs::remove_dir_all(target_dir);
+
+    // Download tar archive
+ let mut download_cmd = Command::new("curl");
+ download_cmd.arg("--location").arg("--output").arg(&archive_file).arg(archive_url);
+ spawn_and_wait(download_cmd);
+
+ // Unpack tar archive
+ let mut unpack_cmd = Command::new("tar");
+ unpack_cmd.arg("xf").arg(&archive_file);
+ spawn_and_wait(unpack_cmd);
+
+ // Rename unpacked dir to the expected name
+ std::fs::rename(archive_dir, target_dir).unwrap();
+
+ init_git_repo(Path::new(target_dir));
+
+ // Cleanup
+ std::fs::remove_file(archive_file).unwrap();
+}
+
+fn init_git_repo(repo_dir: &Path) {
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_init_cmd);
+
+ let mut git_add_cmd = Command::new("git");
+ git_add_cmd.arg("add").arg(".").current_dir(repo_dir);
+ spawn_and_wait(git_add_cmd);
+
+ let mut git_commit_cmd = Command::new("git");
+ git_commit_cmd.arg("commit").arg("-m").arg("Initial commit").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_commit_cmd);
+}
+
fn get_patches(crate_name: &str) -> Vec<OsString> {
let mut patches: Vec<_> = fs::read_dir("patches")
.unwrap()
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![no_std]
-#![feature(allocator_api, rustc_private)]
-#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
-
-// The minimum alignment guaranteed by the architecture. This value is used to
-// add fast paths for low alignment values.
-#[cfg(all(any(target_arch = "x86",
- target_arch = "arm",
- target_arch = "mips",
- target_arch = "powerpc",
- target_arch = "powerpc64")))]
-const MIN_ALIGN: usize = 8;
-#[cfg(all(any(target_arch = "x86_64",
- target_arch = "aarch64",
- target_arch = "mips64",
- target_arch = "s390x",
- target_arch = "sparc64")))]
-const MIN_ALIGN: usize = 16;
pub struct System;
+
#[cfg(any(windows, unix, target_os = "redox"))]
mod realloc_fallback {
use core::alloc::{GlobalAlloc, Layout};
use core::cmp;
use core::ptr;
impl super::System {
- pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
- new_size: usize) -> *mut u8 {
+ pub(crate) unsafe fn realloc_fallback(
+ &self,
+ ptr: *mut u8,
+ old_layout: Layout,
+ new_size: usize,
+ ) -> *mut u8 {
// Docs for GlobalAlloc::realloc require this to be valid:
let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
let new_ptr = GlobalAlloc::alloc(self, new_layout);
}
#[cfg(any(unix, target_os = "redox"))]
mod platform {
- extern crate libc;
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::ffi::c_void;
use core::ptr;
- use MIN_ALIGN;
use System;
- use core::alloc::{GlobalAlloc, Layout};
+ extern "C" {
+ fn posix_memalign(memptr: *mut *mut c_void, align: usize, size: usize) -> i32;
+ fn free(p: *mut c_void);
+ }
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
- libc::malloc(layout.size()) as *mut u8
- } else {
- #[cfg(target_os = "macos")]
- {
- if layout.align() > (1 << 31) {
- return ptr::null_mut()
- }
- }
- aligned_malloc(&layout)
- }
+ aligned_malloc(&layout)
}
#[inline]
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
- libc::calloc(layout.size(), 1) as *mut u8
- } else {
- let ptr = self.alloc(layout.clone());
- if !ptr.is_null() {
- ptr::write_bytes(ptr, 0, layout.size());
- }
- ptr
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
}
+ ptr
}
#[inline]
unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
- libc::free(ptr as *mut libc::c_void)
+ free(ptr as *mut c_void)
}
#[inline]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
- libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
- } else {
- self.realloc_fallback(ptr, layout, new_size)
- }
+ self.realloc_fallback(ptr, layout, new_size)
}
}
- #[cfg(any(target_os = "android",
- target_os = "hermit",
- target_os = "redox",
- target_os = "solaris"))]
- #[inline]
- unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
- // On android we currently target API level 9 which unfortunately
- // doesn't have the `posix_memalign` API used below. Instead we use
- // `memalign`, but this unfortunately has the property on some systems
- // where the memory returned cannot be deallocated by `free`!
- //
- // Upon closer inspection, however, this appears to work just fine with
- // Android, so for this platform we should be fine to call `memalign`
- // (which is present in API level 9). Some helpful references could
- // possibly be chromium using memalign [1], attempts at documenting that
- // memalign + free is ok [2] [3], or the current source of chromium
- // which still uses memalign on android [4].
- //
- // [1]: https://codereview.chromium.org/10796020/
- // [2]: https://code.google.com/p/android/issues/detail?id=35391
- // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
- // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
- // /memory/aligned_memory.cc
- libc::memalign(layout.align(), layout.size()) as *mut u8
- }
- #[cfg(not(any(target_os = "android",
- target_os = "hermit",
- target_os = "redox",
- target_os = "solaris")))]
- #[inline]
unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
let mut out = ptr::null_mut();
- let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
- if ret != 0 {
- ptr::null_mut()
- } else {
- out as *mut u8
- }
+ let ret = posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
}
}
#[cfg(windows)]
#[allow(nonstandard_style)]
mod platform {
- use MIN_ALIGN;
- use System;
use core::alloc::{GlobalAlloc, Layout};
+ use System;
type LPVOID = *mut u8;
type HANDLE = LPVOID;
type SIZE_T = usize;
}
#[inline]
unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
- let ptr = if layout.align() <= MIN_ALIGN {
- HeapAlloc(GetProcessHeap(), flags, layout.size())
- } else {
- let size = layout.size() + layout.align();
- let ptr = HeapAlloc(GetProcessHeap(), flags, size);
- if ptr.is_null() {
- ptr
- } else {
- align_ptr(ptr, layout.align())
- }
- };
- ptr as *mut u8
+ let size = layout.size() + layout.align();
+ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+ (if ptr.is_null() { ptr } else { align_ptr(ptr, layout.align()) }) as *mut u8
}
unsafe impl GlobalAlloc for System {
#[inline]
}
#[inline]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- if layout.align() <= MIN_ALIGN {
- let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
- debug_assert!(err != 0, "Failed to free heap memory: {}",
- GetLastError());
- } else {
- let header = get_header(ptr);
- let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
- debug_assert!(err != 0, "Failed to free heap memory: {}",
- GetLastError());
- }
+ let header = get_header(ptr);
+ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
}
#[inline]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- if layout.align() <= MIN_ALIGN {
- HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
- } else {
- self.realloc_fallback(ptr, layout, new_size)
- }
+ self.realloc_fallback(ptr, layout, new_size)
}
}
}
// Adapted from rustc run-pass test suite
#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
-#![feature(rustc_attrs)]
use std::{
ops::{Deref, CoerceUnsized, DispatchFromDyn},
#![feature(
- no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
- untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
- thread_local,
+ no_core,
+ lang_items,
+ intrinsics,
+ unboxed_closures,
+ extern_types,
+ decl_macro,
+ rustc_attrs,
+ transparent_unions,
+ auto_traits,
+ thread_local
)]
#![no_core]
#![allow(dead_code)]
unsafe impl Copy for i32 {}
unsafe impl Copy for isize {}
unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
unsafe impl Copy for char {}
unsafe impl<'a, T: ?Sized> Copy for &'a T {}
unsafe impl<T: ?Sized> Copy for *const T {}
fn deref(&self) -> &Self::Target;
}
+pub struct Unique<T: ?Sized> {
+ pub pointer: *const T,
+ pub _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
#[lang = "owned_box"]
-pub struct Box<T: ?Sized>(*mut T);
+pub struct Box<T: ?Sized>(Unique<T>, ());
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
}
#[lang = "box_free"]
-unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
- libc::free(ptr as *mut u8);
+unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, alloc: ()) {
+ libc::free(ptr.pointer as *mut u8);
}
#[lang = "drop"]
use mini_core::*;
use mini_core::libc::*;
-unsafe extern "C" fn my_puts(s: *const i8) {
- puts(s);
-}
-
macro_rules! assert {
($e:expr) => {
if !$e {
static mut NUM: u8 = 6 * 7;
static NUM_REF: &'static u8 = unsafe { &NUM };
-struct Unique<T: ?Sized> {
- pointer: *const T,
- _marker: PhantomData<T>,
-}
-
-impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
unsafe fn zeroed<T>() -> T {
let mut uninit = MaybeUninit { uninit: () };
+++ /dev/null
-From 1d574bf5e32d51641dcacaf8ef777e95b44f6f2a Mon Sep 17 00:00:00 2001
-From: bjorn3 <bjorn3@users.noreply.github.com>
-Date: Thu, 18 Feb 2021 18:30:55 +0100
-Subject: [PATCH] Disable 128bit atomic operations
-
-Cranelift doesn't support them yet
----
- src/mem/mod.rs | 12 ------------
- 1 file changed, 12 deletions(-)
-
-diff --git a/src/mem/mod.rs b/src/mem/mod.rs
-index 107762c..2d1ae10 100644
---- a/src/mem/mod.rs
-+++ b/src/mem/mod.rs
-@@ -137,10 +137,6 @@ intrinsics! {
- pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
- memcpy_element_unordered_atomic(dest, src, bytes);
- }
-- #[cfg(target_has_atomic_load_store = "128")]
-- pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
-- memcpy_element_unordered_atomic(dest, src, bytes);
-- }
-
- #[cfg(target_has_atomic_load_store = "8")]
- pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
-@@ -158,10 +154,6 @@ intrinsics! {
- pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
- memmove_element_unordered_atomic(dest, src, bytes);
- }
-- #[cfg(target_has_atomic_load_store = "128")]
-- pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
-- memmove_element_unordered_atomic(dest, src, bytes);
-- }
-
- #[cfg(target_has_atomic_load_store = "8")]
- pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
-@@ -179,8 +171,4 @@ intrinsics! {
- pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
- memset_element_unordered_atomic(s, c, bytes);
- }
-- #[cfg(target_has_atomic_load_store = "128")]
-- pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
-- memset_element_unordered_atomic(s, c, bytes);
-- }
- }
---
-2.26.2.7.g19db9cfb68
-
+++ /dev/null
-From 9c5663e36391fa20becf84f3af2e82afa5bb720b Mon Sep 17 00:00:00 2001
-From: bjorn3 <bjorn3@users.noreply.github.com>
-Date: Sat, 15 Aug 2020 19:56:03 +0200
-Subject: [PATCH] [rand] Enable c2-chacha simd feature
-
----
- rand_chacha/Cargo.toml | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/rand_chacha/Cargo.toml b/rand_chacha/Cargo.toml
-index 9190b7f..872cca2 100644
---- a/rand_chacha/Cargo.toml
-+++ b/rand_chacha/Cargo.toml
-@@ -24,5 +24,5 @@ ppv-lite86 = { version = "0.2.8", default-features = false }
-
- [features]
- default = ["std"]
--std = ["ppv-lite86/std"]
-+std = ["ppv-lite86/std", "ppv-lite86/simd"]
- simd = [] # deprecated
---
-2.20.1
-
Subject: [PATCH] [rand] Disable failing test
---
- src/distributions/uniform.rs | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
+ src/distributions/uniform.rs | 1 +
+ 1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
index 480b859..c80bb6f 100644
--- a/src/distributions/uniform.rs
+++ b/src/distributions/uniform.rs
-@@ -1085,7 +1085,7 @@ mod tests {
- _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
- }
- }
--
-+
- #[test]
- #[cfg(feature = "serde1")]
- fn test_uniform_serialization() {
@@ -1314,6 +1314,7 @@ mod tests {
not(target_arch = "wasm32"),
not(target_arch = "asmjs")
))]
-+ #[ignore] // FIXME
++ #[ignore] // Requires unwinding
fn test_float_assertions() {
use super::SampleUniform;
use std::panic::catch_unwind;
index 0000000..46fd999
--- /dev/null
+++ b/library/core/tests/Cargo.toml
-@@ -0,0 +1,8 @@
+@@ -0,0 +1,11 @@
+[package]
+name = "core"
+version = "0.0.0"
+[lib]
+name = "coretests"
+path = "lib.rs"
-diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
-index a35897e..f0bf645 100644
---- a/library/core/tests/num/flt2dec/mod.rs
-+++ b/library/core/tests/num/flt2dec/mod.rs
-@@ -13,7 +13,6 @@ mod strategy {
- mod dragon;
- mod grisu;
- }
--mod random;
-
- pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
- match decode(v).1 {
-diff --git a/library/core/tests/ptr.rs b/library/core/tests/ptr.rs
-index 1a6be3a..42dbd59 100644
---- a/library/core/tests/ptr.rs
-+++ b/library/core/tests/ptr.rs
-@@ -250,6 +250,7 @@ fn test_unsized_nonnull() {
- };
- }
-
-+/*
- #[test]
- #[allow(warnings)]
- // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
-@@ -277,6 +277,7 @@ pub fn test_variadic_fnptr() {
- let mut s = SipHasher::new();
- assert_eq!(p.hash(&mut s), q.hash(&mut s));
- }
-+*/
-
- #[test]
- fn write_unaligned_drop() {
-diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
-index 6609bc3..241b497 100644
---- a/library/core/tests/slice.rs
-+++ b/library/core/tests/slice.rs
-@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
- }
- }
-
-+/*
- #[test]
- #[cfg(not(target_arch = "wasm32"))]
- fn sort_unstable() {
-@@ -1394,6 +1395,7 @@ fn partition_at_index() {
- v.select_nth_unstable(0);
- assert!(v == [0xDEADBEEF]);
- }
-+*/
-
- #[test]
- #[should_panic(expected = "index 0 greater than length of slice")]
++
++[dependencies]
++rand = "0.7"
--
2.21.0 (Apple Git-122)
-From 0ffdd8eda8df364391c8ac6e1ce92c73ba9254d4 Mon Sep 17 00:00:00 2001
+From eb703e627e7a84f1cd8d0d87f0f69da1f0acf765 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Fri, 3 Dec 2021 12:16:30 +0100
Subject: [PATCH] Disable long running tests
---
- library/core/tests/slice.rs | 3 +++
- 1 file changed, 3 insertions(+)
+ library/core/tests/slice.rs | 2 ++
+ 1 file changed, 2 insertions(+)
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
-index 2c8f00a..44847ee 100644
+index 8402833..84592e0 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
-@@ -2332,7 +2332,8 @@ macro_rules! empty_max_mut {
- };
+@@ -1809,6 +1809,7 @@ fn sort_unstable() {
+ assert!(v == [0xDEADBEEF]);
}
+/*
- #[cfg(not(miri))] // Comparing usize::MAX many elements takes forever in Miri (and in rustc without optimizations)
- take_tests! {
- slice: &[(); usize::MAX], method: take,
- (take_in_bounds_max_range_to, (..usize::MAX), Some(EMPTY_MAX), &[(); 0]),
-@@ -2345,3 +2347,4 @@ take_tests! {
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ #[cfg_attr(miri, ignore)] // Miri is too slow
+@@ -1914,6 +1915,7 @@ fn select_nth_unstable() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+@@ -2462,6 +2462,7 @@ take_tests! {
+ #[cfg(not(miri))] // unused in Miri
+ const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+
++/*
+ // can't be a constant due to const mutability rules
+ #[cfg(not(miri))] // unused in Miri
+ macro_rules! empty_max_mut {
+@@ -2485,6 +2486,7 @@ take_tests! {
(take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
(take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
}
+*/
+
+ #[test]
+ fn test_slice_from_ptr_range() {
--
2.26.2.7.g19db9cfb68
[toolchain]
-channel = "nightly-2022-02-23"
+channel = "nightly-2022-03-19"
components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
git apply - <<EOF
-diff --git a/Cargo.toml b/Cargo.toml
-index 5bd1147cad5..10d68a2ff14 100644
---- a/Cargo.toml
-+++ b/Cargo.toml
-@@ -111,5 +111,7 @@ rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
- rustc-std-workspace-alloc = { path = 'library/rustc-std-workspace-alloc' }
- rustc-std-workspace-std = { path = 'library/rustc-std-workspace-std' }
-
-+compiler_builtins = { path = "../build_sysroot/compiler-builtins" }
-+
- [patch."https://github.com/rust-lang/rust-clippy"]
- clippy_lints = { path = "src/tools/clippy/clippy_lints" }
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index d95b5b7f17f..00b6f0e3635 100644
--- a/library/alloc/Cargo.toml
[dev-dependencies]
rand = "0.7"
rand_xorshift = "0.2"
+diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs
+index 887d27fd6dca4..2c2239f2b83d1 100644
+--- a/src/tools/compiletest/src/header.rs
++++ b/src/tools/compiletest/src/header.rs
+@@ -806,8 +806,8 @@ pub fn make_test_description<R: Read>(
+ cfg: Option<&str>,
+ ) -> test::TestDesc {
+ let mut ignore = false;
+ #[cfg(not(bootstrap))]
+- let ignore_message: Option<String> = None;
++ let ignore_message: Option<&str> = None;
+ let mut should_fail = false;
+
+ let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some();
+
+diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
+index 8431aa7b818..a3ff7e68ce5 100644
+--- a/src/tools/compiletest/src/runtest.rs
++++ b/src/tools/compiletest/src/runtest.rs
+@@ -3489,11 +3489,7 @@ fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> S
+ .join("library");
+ normalize_path(&src_dir, "$(echo '$SRC_DIR')");
+
+- if let Some(virtual_rust_source_base_dir) =
+- option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
+- {
+- normalize_path(&virtual_rust_source_base_dir.join("library"), "$(echo '$SRC_DIR')");
+- }
++ normalize_path(&Path::new("$(cd ../build_sysroot/sysroot_src/library; pwd)"), "$(echo '$SRC_DIR')");
+
+ // Paths into the build directory
+ let test_build_dir = &self.config.build_base;
EOF
cat > config.toml <<EOF
+changelog-seen = 2
+
[llvm]
ninja = false
command -v rg >/dev/null 2>&1 || cargo install ripgrep
-rm -r src/test/ui/{extern/,panics/,unsized-locals/,lto/,simd*,linkage*,unwind-*.rs} || true
-for test in $(rg --files-with-matches "asm!|catch_unwind|should_panic|lto|// needs-asm-support" src/test/ui); do
+rm -r src/test/ui/{extern/,unsized-locals/,lto/,linkage*} || true
+for test in $(rg --files-with-matches "asm!|lto|// needs-asm-support|// needs-unwind" src/test/{ui,incremental}); do
rm $test
done
-for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~[^\|]*\s*ERR|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
rm $test
done
git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
-# these all depend on unwinding support
+# missing features
+# ================
+
+# requires stack unwinding
rm src/test/ui/backtrace.rs
-rm src/test/ui/array-slice-vec/box-of-array-of-drop-*.rs
-rm src/test/ui/array-slice-vec/slice-panic-*.rs
-rm src/test/ui/array-slice-vec/nested-vec-3.rs
-rm src/test/ui/cleanup-rvalue-temp-during-incomplete-alloc.rs
-rm src/test/ui/issues/issue-26655.rs
-rm src/test/ui/issues/issue-29485.rs
-rm src/test/ui/issues/issue-30018-panic.rs
rm src/test/ui/process/multi-panic.rs
-rm src/test/ui/sepcomp/sepcomp-unwind.rs
-rm src/test/ui/structs-enums/unit-like-struct-drop-run.rs
-rm src/test/ui/drop/terminate-in-initializer.rs
-rm src/test/ui/threads-sendsync/task-stderr.rs
-rm src/test/ui/numbers-arithmetic/int-abs-overflow.rs
-rm src/test/ui/drop/drop-trait-enum.rs
rm src/test/ui/numbers-arithmetic/issue-8460.rs
-rm src/test/ui/runtime/rt-explody-panic-payloads.rs
rm src/test/incremental/change_crate_dep_kind.rs
-rm src/test/ui/threads-sendsync/unwind-resource.rs
+rm src/test/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
+rm src/test/ui/panic-while-printing.rs
+rm src/test/ui/test-attrs/test-panic-while-printing.rs
+rm src/test/ui/test-attrs/test-type.rs
-rm src/test/ui/issues/issue-28950.rs # depends on stack size optimizations
-rm src/test/ui/codegen/init-large-type.rs # same
+# requires compiling with -Cpanic=unwind
+rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
+rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
+rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
+rm src/test/ui/generator/size-moved-locals.rs # same
+
+# vendor intrinsics
rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # requires x86_64 vendor intrinsics
+rm src/test/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
+rm src/test/ui/simd/intrinsic/generic-bitmask-pass.rs # simd_bitmask unimplemented
+rm src/test/ui/simd/intrinsic/generic-as.rs # simd_as unimplemented
+rm src/test/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs # simd_saturating_add unimplemented
+rm src/test/ui/simd/intrinsic/float-math-pass.rs # simd_fcos unimplemented
+rm src/test/ui/simd/intrinsic/generic-gather-pass.rs # simd_gather unimplemented
+rm src/test/ui/simd/intrinsic/generic-select-pass.rs # simd_select_bitmask unimplemented
+rm src/test/ui/simd/issue-85915-simd-ptrs.rs # simd_gather unimplemented
+rm src/test/ui/simd/issue-89193.rs # simd_gather unimplemented
+rm src/test/ui/simd/simd-bitmask.rs # simd_bitmask unimplemented
+
+# exotic linkages
rm src/test/ui/issues/issue-33992.rs # unsupported linkages
-rm src/test/ui/issues/issue-51947.rs # same
rm src/test/incremental/hashes/function_interfaces.rs # same
rm src/test/incremental/hashes/statics.rs # same
+
+# variadic arguments
+rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+# unsized locals
+rm -r src/test/run-pass-valgrind/unsized-locals
+
+# misc unimplemented things
+rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+rm src/test/ui/target-feature/missing-plusminus.rs # error not implemented
+rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm -r src/test/run-make/emit-named-files # requires full --emit support
+
+# optimization tests
+# ==================
+rm src/test/ui/issues/issue-28950.rs # depends on stack size optimizations
+rm src/test/ui/codegen/init-large-type.rs # same
+rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
+
+# backend specific tests
+# ======================
+rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+rm src/test/ui/abi/stack-protector.rs # requires stack protector support
+
+# giving different but possibly correct results
+# =============================================
rm src/test/ui/numbers-arithmetic/saturating-float-casts.rs # intrinsic gives different but valid result
+rm src/test/ui/simd/intrinsic/float-minmax-pass.rs # same
rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
rm src/test/ui/consts/issue-33537.rs # same
-rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
-rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
-rm src/test/ui/generator/size-moved-locals.rs # same
-rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
-rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
-rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
-
-rm src/test/incremental/hashes/inline_asm.rs # inline asm
-rm src/test/incremental/issue-72386.rs # same
-rm src/test/incremental/lto.rs # requires lto
-rm src/test/incremental/dirty_clean.rs # TODO
+# doesn't work due to the way the rustc test suite is invoked.
+# should work when using ./x.py test the way it is intended
+# ============================================================
rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
rm -r src/test/run-make/unstable-flag-required # same
rm -r src/test/run-make/rustdoc-* # same
-rm -r src/test/run-make/emit-named-files # requires full --emit support
-
-rm -r src/test/run-pass-valgrind/unsized-locals
-
-rm src/test/ui/json-bom-plus-crlf-multifile.rs # differing warning
-rm src/test/ui/json-bom-plus-crlf.rs # same
-rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # same
-rm src/test/ui/match/issue-82392.rs # differing error
-rm src/test/ui/consts/min_const_fn/address_of_const.rs # same
-rm src/test/ui/consts/issue-miri-1910.rs # same
-rm src/test/ui/generic-associated-types/bugs/issue-80626.rs # same
-rm src/test/ui/generic-associated-types/bugs/issue-89008.rs # same
-rm src/test/ui/type-alias-impl-trait/cross_crate_ice*.rs # requires removed aux dep
+# genuine bugs
+# ============
rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
-rm src/test/ui/cfg/cfg-panic.rs
-rm -r src/test/ui/hygiene/
rm -r src/test/ui/polymorphization/ # polymorphization not yet supported
rm src/test/codegen-units/polymorphization/unused_type_parameters.rs # same
-rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
-rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
-rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
-
-rm src/test/ui/command/command-current-dir.rs # can't find libstd.so
-
-rm src/test/ui/abi/stack-protector.rs # requires stack protector support
-
-rm src/test/incremental/issue-80691-bad-eval-cache.rs # wrong exit code
rm src/test/incremental/spike-neg1.rs # errors out for some reason
rm src/test/incremental/spike-neg2.rs # same
rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs
+rm src/test/ui/mir/ssa-analysis-regression-50041.rs # produces ICE
-rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+rm src/test/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors
+
+# bugs in the test suite
+# ======================
+rm src/test/ui/unsafe/union.rs # has UB caught by cg_clif. see rust-lang/rust#95075
echo "[TEST] rustc test suite"
RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
fi
echo "[AOT] dst_field_align"
- # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
$MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", name));
+ }
let call_inst = self.bcx.ins().call(func_ref, args);
if self.clif_comments.enabled() {
self.add_comment(call_inst, format!("easy_call {}", name));
.map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
if is_cold {
- // FIXME Mark current_block block as cold once Cranelift supports it
+ fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
+ if let Some((_place, destination_block)) = destination {
+ fx.bcx.set_cold_block(fx.get_block(destination_block));
+ }
}
// Unpack arguments tuple for closures
let ret_block = fx.get_block(dest);
fx.bcx.ins().jump(ret_block, &[]);
} else {
- trap_unreachable(fx, "[corruption] Diverging function returned");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
}
use crate::prelude::*;
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_session::config::OomStrategy;
/// Returns whether an allocator shim was created
pub(crate) fn codegen(
if any_dynamic_crate {
false
} else if let Some(kind) = tcx.allocator_kind(()) {
- codegen_inner(module, unwind_context, kind, tcx.lang_items().oom().is_some());
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.lang_items().oom().is_some(),
+ tcx.sess.opts.debugging_opts.oom,
+ );
true
} else {
false
unwind_context: &mut UnwindContext,
kind: AllocatorKind,
has_alloc_error_handler: bool,
+ oom_strategy: OomStrategy,
) {
let usize_ty = module.target_config().pointer_type();
}
module.define_function(func_id, &mut ctx).unwrap();
unwind_context.add_function(func_id, &ctx, module.isa());
+
+ let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(1);
+ let val = oom_strategy.should_panic();
+ data_ctx.define(Box::new([val]));
+ module.define_data(data_id, &data_ctx).unwrap();
}
use rustc_index::vec::IndexVec;
use rustc_middle::ty::adjustment::PointerCast;
use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
use indexmap::IndexSet;
let mir = tcx.instance_mir(instance.def);
let _mir_guard = crate::PrintOnPanic(|| {
let mut buf = Vec::new();
- rustc_middle::mir::write_mir_pretty(tcx, Some(instance.def_id()), &mut buf).unwrap();
+ with_no_trimmed_paths!({
+ rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
+ .unwrap();
+ });
String::from_utf8_lossy(&buf).into_owned()
});
} else if arg_uninhabited {
fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
- crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
} else {
tcx.sess.time("codegen clif ir", || {
tcx.sess
if fx.clif_comments.enabled() {
let mut terminator_head = "\n".to_string();
- bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ with_no_trimmed_paths!({
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ });
let inst = fx.bcx.func.layout.last_inst(block).unwrap();
fx.add_comment(inst, terminator_head);
}
let target = fx.get_block(*target);
let failure = fx.bcx.create_block();
- // FIXME Mark failure block as cold once Cranelift supports it
+ fx.bcx.set_cold_block(failure);
if *expected {
fx.bcx.ins().brz(cond, failure, &[]);
fx.bcx.ins().jump(destination_block, &[]);
}
None => {
- crate::trap::trap_unreachable(
- fx,
- "[corruption] Returned from noreturn inline asm",
- );
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
}
}
TerminatorKind::Resume | TerminatorKind::Abort => {
- trap_unreachable(fx, "[corruption] Unwinding bb reached.");
+ // FIXME implement unwinding
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
TerminatorKind::Unreachable => {
- trap_unreachable(fx, "[corruption] Hit unreachable code.");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
TerminatorKind::Yield { .. }
| TerminatorKind::FalseEdge { .. }
for elem in place.projection {
match elem {
PlaceElem::Deref => {
- cplace = cplace.place_deref(fx);
+ if cplace.layout().ty.is_box() {
+ cplace = cplace
+ .place_field(fx, Field::new(0)) // Box<T> -> Unique<T>
+ .place_field(fx, Field::new(0)) // Unique<T> -> *const T
+ .place_deref(fx);
+ } else {
+ cplace = cplace.place_deref(fx);
+ }
}
PlaceElem::Field(field, _ty) => {
cplace = cplace.place_field(fx, field);
args,
);
- crate::trap::trap_unreachable(fx, "panic lang item returned");
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
-#![feature(rustc_private, once_cell)]
+#![feature(rustc_private)]
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![warn(unreachable_pub)]
extern crate rustc_session;
extern crate rustc_target;
-use std::lazy::SyncLazy;
use std::panic;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
use rustc_interface::interface;
-use rustc_session::config::ErrorOutputType;
+use rustc_session::config::{ErrorOutputType, TrimmedDefPaths};
use rustc_session::early_error;
use rustc_target::spec::PanicStrategy;
+// FIXME use std::lazy::SyncLazy once it stabilizes
+use once_cell::sync::Lazy;
+
const BUG_REPORT_URL: &str = "https://github.com/bjorn3/rustc_codegen_cranelift/issues/new";
-static DEFAULT_HOOK: SyncLazy<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
- SyncLazy::new(|| {
+static DEFAULT_HOOK: Lazy<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
+ Lazy::new(|| {
let hook = panic::take_hook();
panic::set_hook(Box::new(|info| {
// Invoke the default handler, which prints the actual panic message and optionally a backtrace
config.opts.maybe_sysroot = Some(config.opts.maybe_sysroot.clone().unwrap_or_else(|| {
std::env::current_exe().unwrap().parent().unwrap().parent().unwrap().to_owned()
}));
+
+ config.opts.trimmed_def_paths = TrimmedDefPaths::GoodPath;
}
}
let start_rss = get_resident_set_size();
rustc_driver::init_rustc_env_logger();
let mut callbacks = CraneliftPassesCallbacks::default();
- SyncLazy::force(&DEFAULT_HOOK); // Install ice hook
+ Lazy::force(&DEFAULT_HOOK); // Install ice hook
let exit_code = rustc_driver::catch_with_exit_code(|| {
let args = std::env::args_os()
.enumerate()
pointer_ty(tcx)
}
}
- ty::Adt(adt_def, _) if adt_def.repr.simd() => {
+ ty::Adt(adt_def, _) if adt_def.repr().simd() => {
let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
{
Abi::Vector { element, count } => (element.clone(), *count),
-macro builtin_functions($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) {
- #[cfg(feature = "jit")]
- #[allow(improper_ctypes)]
- extern "C" {
- $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
- }
+macro_rules! builtin_functions {
+ ($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) => {
+ #[cfg(feature = "jit")]
+ #[allow(improper_ctypes)]
+ extern "C" {
+ $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
+ }
- #[cfg(feature = "jit")]
- pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
- for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
- builder.symbol(name, val);
+ #[cfg(feature = "jit")]
+ pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
+ for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
+ builder.symbol(name, val);
+ }
}
- }
+ };
}
builtin_functions! {
//! Handling of `static`s, `const`s and promoted allocations
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_errors::ErrorGuaranteed;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
{
all_constants_ok = false;
match err {
- ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted => {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
- // FIXME: probably omit this
entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
}
#[cfg(all(feature = "jit", not(windows)))]
pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) {
+ use std::mem::ManuallyDrop;
+
let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
// FIXME support unregistering unwind tables once cranelift-jit supports deallocating
// individual functions
- #[allow(unused_variables)]
- let (eh_frame, eh_frame_len, _) = Vec::into_raw_parts(eh_frame);
+ let eh_frame = ManuallyDrop::new(eh_frame);
// =======================================================================
- // Everything after this line up to the end of the file is loosly based on
+ // Everything after this line up to the end of the file is loosely based on
// https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
#[cfg(target_os = "macos")]
{
// On macOS, `__register_frame` takes a pointer to a single FDE
- let start = eh_frame;
- let end = start.add(eh_frame_len);
+ let start = eh_frame.as_ptr();
+ let end = start.add(eh_frame.len());
let mut current = start;
// Walk all of the entries in the frame table and register them
#[cfg(not(target_os = "macos"))]
{
// On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
- __register_frame(eh_frame);
+ __register_frame(eh_frame.as_ptr());
}
}
}
let layout = value.layout();
if layout.abi == Abi::Uninhabited {
- return trap_unreachable_ret_value(
- fx,
- dest_layout,
- "[panic] Tried to get discriminant for uninhabited type.",
- );
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+ // Return a dummy value
+ return CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout);
}
let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
use std::cell::RefCell;
use std::ffi::CString;
-use std::lazy::SyncOnceCell;
use std::os::raw::{c_char, c_int};
use std::sync::{mpsc, Mutex};
use cranelift_jit::{JITBuilder, JITModule};
+// FIXME use std::lazy::SyncOnceCell once it stabilizes
+use once_cell::sync::OnceCell;
+
use crate::{prelude::*, BackendConfig};
use crate::{CodegenCx, CodegenMode};
}
/// The Sender owned by the rustc thread
-static GLOBAL_MESSAGE_SENDER: SyncOnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> =
- SyncOnceCell::new();
+static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
/// A message that is sent from the jitted runtime to the rustc thread.
/// Senders are responsible for upholding `Send` semantics.
};
}
- if let Some((_, dest)) = destination {
- let ret_block = fx.get_block(dest);
- fx.bcx.ins().jump(ret_block, &[]);
- } else {
- trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
- }
+ let dest = destination.expect("all llvm intrinsics used by stdlib should return").1;
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
}
// llvm.x86.avx2.vperm2i128
//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.
-mod cpuid;
-mod llvm;
-mod simd;
-
-pub(crate) use cpuid::codegen_cpuid_call;
-pub(crate) use llvm::codegen_llvm_intrinsic_call;
-
-use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_middle::ty::subst::SubstsRef;
-use rustc_span::symbol::{kw, sym, Symbol};
-
-use crate::prelude::*;
-use cranelift_codegen::ir::AtomicRmwOp;
-
-macro intrinsic_pat {
+macro_rules! intrinsic_pat {
(_) => {
_
- },
+ };
($name:ident) => {
sym::$name
- },
+ };
(kw.$name:ident) => {
kw::$name
- },
+ };
($name:literal) => {
$name
- },
+ };
}
-macro intrinsic_arg {
- (o $fx:expr, $arg:ident) => {},
+macro_rules! intrinsic_arg {
+ (o $fx:expr, $arg:ident) => {};
(c $fx:expr, $arg:ident) => {
let $arg = codegen_operand($fx, $arg);
- },
+ };
(v $fx:expr, $arg:ident) => {
let $arg = codegen_operand($fx, $arg).load_scalar($fx);
- }
+ };
}
-macro intrinsic_match {
+macro_rules! intrinsic_match {
($fx:expr, $intrinsic:expr, $args:expr,
_ => $unknown:block;
$(
}
}
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
fn report_atomic_type_validation_error<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
intrinsic: Symbol,
// Insert non returning intrinsics here
match intrinsic {
sym::abort => {
- trap_abort(fx, "Called intrinsic::abort.");
+ fx.bcx.ins().trap(TrapCode::User(0));
}
sym::transmute => {
crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
_ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
let ty = substs.type_at(0);
match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap().1);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx.sess.span_fatal(span, "128bit atomics not yet supported");
+ }
+ }
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, span, ty);
_ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
let ty = substs.type_at(0);
match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap().1);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx.sess.span_fatal(span, "128bit atomics not yet supported");
+ }
+ }
ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
_ => {
report_atomic_type_validation_error(fx, intrinsic, span, ty);
};
}
- if let Some((_, dest)) = destination {
- let ret_block = fx.get_block(dest);
- fx.bcx.ins().jump(ret_block, &[]);
- } else {
- trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
- }
+ let ret_block = fx.get_block(destination.unwrap().1);
+ fx.bcx.ins().jump(ret_block, &[]);
}
};
simd_reduce_add_ordered | simd_reduce_add_unordered, (c v, v acc) {
+ // FIXME there must be no acc param for integer vectors
if !v.layout().ty.is_simd() {
report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
return;
};
simd_reduce_mul_ordered | simd_reduce_mul_unordered, (c v, v acc) {
+ // FIXME there must be no acc param for integer vectors
if !v.layout().ty.is_simd() {
report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
return;
-#![feature(rustc_private, decl_macro)]
-#![cfg_attr(feature = "jit", feature(never_type, vec_into_raw_parts, once_cell))]
+#![feature(rustc_private)]
+// Note: please avoid adding other feature gates where possible
#![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)]
#![warn(unreachable_pub)]
pub(crate) use crate::common::*;
pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
pub(crate) use crate::pointer::Pointer;
- pub(crate) use crate::trap::*;
pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
}
CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
CodegenMode::Jit | CodegenMode::JitLazy => {
#[cfg(feature = "jit")]
- let _: ! = driver::jit::run_jit(tcx, config);
+ driver::jit::run_jit(tcx, config);
#[cfg(not(feature = "jit"))]
tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
}
};
- isa_builder.finish(flags)
+ match isa_builder.finish(flags) {
+ Ok(target_isa) => target_isa,
+ Err(err) => sess.fatal(&format!("failed to build TargetIsa: {}", err)),
+ }
}
/// This is the entrypoint for a hot plugged rustc_codegen_cranelift
fx.bcx.ins().call(puts, &[msg_ptr]);
}
-/// Trap code: user1
-pub(crate) fn trap_abort(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
- codegen_print(fx, msg.as_ref());
- fx.bcx.ins().trap(TrapCode::User(1));
-}
-
/// Use this for example when a function call should never return. This will fill the current block,
/// so you can **not** add instructions to it afterwards.
///
codegen_print(fx, msg.as_ref());
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
-
-/// Like `trap_unreachable` but returns a fake value of the specified type.
-///
-/// Trap code: user65535
-pub(crate) fn trap_unreachable_ret_value<'tcx>(
- fx: &mut FunctionCx<'_, '_, 'tcx>,
- dest_layout: TyAndLayout<'tcx>,
- msg: impl AsRef<str>,
-) -> CValue<'tcx> {
- codegen_print(fx, msg.as_ref());
- let true_ = fx.bcx.ins().iconst(types::I32, 1);
- fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
- CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
-}
-
/// Use this when something is unimplemented, but `libcore` or `libstd` requires it to codegen.
/// Unlike `trap_unreachable` this will not fill the current block, so you **must** add instructions
/// to it afterwards.
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
- for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
let src_f = src.value_field(fx, mir::Field::new(i));
let dst_f = dst.place_field(fx, mir::Field::new(i));
// Packed types ignore the alignment of their fields.
if let ty::Adt(def, _) = layout.ty.kind() {
- if def.repr.packed() {
+ if def.repr().packed() {
unsized_align = sized_align;
}
}
}
match field_layout.ty.kind() {
ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
- ty::Adt(def, _) if def.repr.packed() => {
+ ty::Adt(def, _) if def.repr().packed() => {
assert_eq!(layout.align.abi.bytes(), 1);
simple(fx)
}
// dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
}
(&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
- if adt_def_a.did == adt_def_b.did =>
+ if adt_def_a.did() == adt_def_b.did() =>
{
let mut types_a = substs_a.types();
let mut types_b = substs_b.types();
#!/usr/bin/env bash
-#![allow()] /*This line is ignored by bash
+#![deny(unsafe_code)] /*This line is ignored by bash
# This block is ignored by rustc
set -e
echo "[BUILD] y.rs" 1>&2
-rustc $0 -o ${0/.rs/.bin} -g
+rustc $0 -o ${0/.rs/.bin} -Cdebuginfo=1
exec ${0/.rs/.bin} $@
*/
--- /dev/null
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so"]
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/checkout@v2
+ with:
+ repository: llvm/llvm-project
+ path: llvm
+
+ - name: Install packages
+ run: sudo apt-get install ninja-build ripgrep
+
+ - name: Download artifact
+ uses: dawidd6/action-download-artifact@v2
+ with:
+ workflow: main.yml
+ name: ${{ matrix.libgccjit_version }}
+ path: gcc-build
+ repo: antoyo/gcc
+      search_artifacts: true # Because otherwise the action only checks the last job that ran, which won't work since we want multiple artifacts.
+
+ - name: Setup path to libgccjit
+ run: |
+ echo $(readlink -f gcc-build) > gcc_path
+ # NOTE: the filename is still libgccjit.so even when the artifact name is different.
+ ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
+
+ - name: Set env
+ run: |
+ echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
+
+ - name: Set RUST_COMPILER_RT_ROOT
+ run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV
+
+ # https://github.com/actions/cache/issues/133
+ - name: Fixup owner of ~/.cargo/
+ # Don't remove the trailing /. It is necessary to follow the symlink.
+ run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v1.1.2
+ with:
+ path: ~/.cargo/bin
+ key: cargo-installed-crates2-ubuntu-latest
+
+ - name: Cache cargo registry
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/registry
+ key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo index
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/git
+ key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v1.1.2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
+
+ - name: Build
+ run: |
+ ./prepare_build.sh
+ ./build.sh
+ cargo test
+ ./clean_all.sh
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./prepare.sh
+
+ # Compile is a separate step, as the actions-rs/cargo action supports error annotations
+ - name: Compile
+ uses: actions-rs/cargo@v1.0.3
+ with:
+ command: build
+ args: --release
+
+ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ ./test.sh --release
+++ /dev/null
-name: CI
-
-on:
- - push
- - pull_request
-
-jobs:
- build:
- runs-on: ubuntu-latest
-
- strategy:
- fail-fast: false
-
- steps:
- - uses: actions/checkout@v2
-
- - name: Install packages
- run: sudo apt-get install ninja-build ripgrep
-
- - name: Download artifact
- uses: dawidd6/action-download-artifact@v2
- with:
- workflow: main.yml
- name: libgccjit.so
- path: gcc-build
- repo: antoyo/gcc
-
- - name: Setup path to libgccjit
- run: |
- echo $(readlink -f gcc-build) > gcc_path
- ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
-
- - name: Set LIBRARY_PATH
- run: |
- echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
- echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
-
- # https://github.com/actions/cache/issues/133
- - name: Fixup owner of ~/.cargo/
- # Don't remove the trailing /. It is necessary to follow the symlink.
- run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
-
- - name: Cache cargo installed crates
- uses: actions/cache@v1.1.2
- with:
- path: ~/.cargo/bin
- key: cargo-installed-crates2-ubuntu-latest
-
- - name: Cache cargo registry
- uses: actions/cache@v1
- with:
- path: ~/.cargo/registry
- key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
-
- - name: Cache cargo index
- uses: actions/cache@v1
- with:
- path: ~/.cargo/git
- key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
-
- - name: Cache cargo target dir
- uses: actions/cache@v1.1.2
- with:
- path: target
- key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
-
- - name: Build
- run: |
- ./prepare_build.sh
- ./build.sh
- cargo test
- ./clean_all.sh
-
- - name: Prepare dependencies
- run: |
- git config --global user.email "user@example.com"
- git config --global user.name "User"
- ./prepare.sh
-
- # Compile is a separate step, as the actions-rs/cargo action supports error annotations
- - name: Compile
- uses: actions-rs/cargo@v1.0.3
- with:
- command: build
- args: --release
-
- - name: Test
- run: |
- # Enable backtraces for easier debugging
- export RUST_BACKTRACE=1
-
- # Reduce amount of benchmark runs as they are slow
- export COMPILE_RUNS=2
- export RUN_RUNS=2
-
- ./test.sh --release
res
test-backend
gcc_path
+benchmarks
[[package]]
name = "gccjit"
version = "1.0.0"
-source = "git+https://github.com/antoyo/gccjit.rs#0672b78d162d65b6f36ea4062947253affe9fdef"
+source = "git+https://github.com/antoyo/gccjit.rs#bdecdecfb8a02ec861a39a350f990faa33bd31c3"
dependencies = [
"gccjit_sys",
]
[[package]]
name = "gccjit_sys"
version = "0.0.1"
-source = "git+https://github.com/antoyo/gccjit.rs#0672b78d162d65b6f36ea4062947253affe9fdef"
+source = "git+https://github.com/antoyo/gccjit.rs#bdecdecfb8a02ec861a39a350f990faa33bd31c3"
dependencies = [
"libc 0.1.12",
]
```bash
$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
$ cd rustc_codegen_gcc
+$ git clone https://github.com/llvm/llvm-project llvm --depth 1 --single-branch
+$ export RUST_COMPILER_RT_ROOT="$PWD/llvm/compiler-rt"
$ ./prepare_build.sh # download and patch sysroot src
$ ./build.sh --release
```
```
p loc->m_line
+p loc->m_filename->m_buffer
+```
+
+To print a debug representation of a tree:
+
+```c
+debug_tree(expr);
```
To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
* Set `linker='-Clinker=m68k-linux-gcc'`.
* Set the path to the cross-compiling libgccjit in `gcc_path`.
* Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::<i64>();` in `context.rs` (same for u128_type).
+ * Comment the line: `context.add_command_line_option("-masm=intel");` in src/base.rs.
* (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
#set -x
set -e
-if [ -f ./gcc_path ]; then
+codegen_channel=debug
+sysroot_channel=debug
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --release)
+ codegen_channel=release
+ shift
+ ;;
+ --release-sysroot)
+ sysroot_channel=release
+ shift
+ ;;
+ *)
+ echo "Unknown option $1"
+ exit 1
+ ;;
+ esac
+done
+
+if [ -f ./gcc_path ]; then
export GCC_PATH=$(cat gcc_path)
else
echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
export LD_LIBRARY_PATH="$GCC_PATH"
export LIBRARY_PATH="$GCC_PATH"
-if [[ "$1" == "--release" ]]; then
+features=
+
+if [[ "$1" == "--features" ]]; then
+ shift
+ features="--features $1"
+ shift
+fi
+
+if [[ "$codegen_channel" == "release" ]]; then
export CHANNEL='release'
- CARGO_INCREMENTAL=1 cargo rustc --release
+ CARGO_INCREMENTAL=1 cargo rustc --release $features
else
echo $LD_LIBRARY_PATH
export CHANNEL='debug'
- cargo rustc
+ cargo rustc $features
fi
source config.sh
mkdir -p target/out/gccjit
echo "[BUILD] sysroot"
-time ./build_sysroot/build_sysroot.sh $CHANNEL
+if [[ "$sysroot_channel" == "release" ]]; then
+ time ./build_sysroot/build_sysroot.sh --release
+else
+ time ./build_sysroot/build_sysroot.sh
+fi
+
RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
else
sysroot_channel='debug'
- cargo build --target $TARGET_TRIPLE
+ cargo build --target $TARGET_TRIPLE --features compiler_builtins/c
fi
# Copy files to sysroot
source config.sh
# read nightly compiler from rust-toolchain file
-TOOLCHAIN=$(cat rust-toolchain)
+TOOLCHAIN=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
popd >/dev/null
#[lang = "sized"]
pub trait Sized {}
+#[lang = "destruct"]
+pub trait Destruct {}
+
#[lang = "unsize"]
pub trait Unsize<T: ?Sized> {}
unsafe impl Copy for i32 {}
unsafe impl Copy for isize {}
unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
unsafe impl Copy for char {}
unsafe impl<'a, T: ?Sized> Copy for &'a T {}
unsafe impl<T: ?Sized> Copy for *const T {}
fn deref(&self) -> &Self::Target;
}
+pub trait Allocator {
+}
+
+pub struct Global;
+
+impl Allocator for Global {}
+
#[lang = "owned_box"]
-pub struct Box<T: ?Sized>(*mut T);
+pub struct Box<
+ T: ?Sized,
+ A: Allocator = Global,
+>(*mut T, A);
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
-impl<T: ?Sized> Drop for Box<T> {
+impl<T: ?Sized, A: Allocator> Drop for Box<T, A> {
fn drop(&mut self) {
// drop is currently performed by compiler.
}
}
#[lang = "box_free"]
-unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: *mut T, alloc: A) {
libc::free(ptr as *mut u8);
}
+[package]
+name = "core"
+version = "0.0.0"
-+edition = "2018"
++edition = "2021"
+
+[lib]
+name = "coretests"
#[test]
fn cell_allows_array_cycle() {
-diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
-index 3e00e0a..8e5663b 100644
---- a/library/core/tests/slice.rs
-+++ b/library/core/tests/slice.rs
-@@ -2108,6 +2108,7 @@ fn test_copy_within_panics_src_out_of_bounds() {
- bytes.copy_within(usize::MAX..=usize::MAX, 0);
- }
-
-+/*
- #[test]
- fn test_is_sorted() {
- let empty: [i32; 0] = [];
-@@ -2122,6 +2123,7 @@ fn test_is_sorted() {
- assert!(!["c", "bb", "aaa"].is_sorted());
- assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len()));
- }
-+*/
-
- #[test]
- fn test_slice_run_destructors() {
-- 2.21.0 (Apple Git-122)
library/core/tests/lib.rs | 1 -
1 file changed, 1 deletion(-)
+diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
+index aa1ad93..95fbf55 100644
+--- a/library/core/src/lib.rs
++++ b/library/core/src/lib.rs
+@@ -398,23 +398,4 @@ pub mod arch {
+ }
+ }
+
+-// Pull in the `core_simd` crate directly into libcore. The contents of
+-// `core_simd` are in a different repository: rust-lang/portable-simd.
+-//
+-// `core_simd` depends on libcore, but the contents of this module are
+-// set up in such a way that directly pulling it here works such that the
+-// crate uses this crate as its libcore.
+-#[path = "../../portable-simd/crates/core_simd/src/mod.rs"]
+-#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+-#[allow(rustdoc::bare_urls)]
+-#[unstable(feature = "portable_simd", issue = "86656")]
+-mod core_simd;
+-
+-#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+-#[unstable(feature = "portable_simd", issue = "86656")]
+-pub mod simd {
+- #[unstable(feature = "portable_simd", issue = "86656")]
+- pub use crate::core_simd::simd::*;
+-}
+-
+ include!("primitive_docs.rs");
+diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
+index cd38c3a..ad632dc 100644
+--- a/library/core/src/slice/mod.rs
++++ b/library/core/src/slice/mod.rs
+@@ -17,6 +17,5 @@ use crate::ptr;
+ use crate::result::Result;
+ use crate::result::Result::{Err, Ok};
+-use crate::simd::{self, Simd};
+ use crate::slice;
+
+ #[unstable(
+@@ -3475,121 +3474,6 @@ impl<T> [T] {
+ }
+ }
+
+- /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+- ///
+- /// This is a safe wrapper around [`slice::align_to`], so has the same weak
+- /// postconditions as that method. You're only assured that
+- /// `self.len() == prefix.len() + middle.len() * LANES + suffix.len()`.
+- ///
+- /// Notably, all of the following are possible:
+- /// - `prefix.len() >= LANES`.
+- /// - `middle.is_empty()` despite `self.len() >= 3 * LANES`.
+- /// - `suffix.len() >= LANES`.
+- ///
+- /// That said, this is a safe method, so if you're only writing safe code,
+- /// then this can at most cause incorrect logic, not unsoundness.
+- ///
+- /// # Panics
+- ///
+- /// This will panic if the size of the SIMD type is different from
+- /// `LANES` times that of the scalar.
+- ///
+- /// At the time of writing, the trait restrictions on `Simd<T, LANES>` keeps
+- /// that from ever happening, as only power-of-two numbers of lanes are
+- /// supported. It's possible that, in the future, those restrictions might
+- /// be lifted in a way that would make it possible to see panics from this
+- /// method for something like `LANES == 3`.
+- ///
+- /// # Examples
+- ///
+- /// ```
+- /// #![feature(portable_simd)]
+- ///
+- /// let short = &[1, 2, 3];
+- /// let (prefix, middle, suffix) = short.as_simd::<4>();
+- /// assert_eq!(middle, []); // Not enough elements for anything in the middle
+- ///
+- /// // They might be split in any possible way between prefix and suffix
+- /// let it = prefix.iter().chain(suffix).copied();
+- /// assert_eq!(it.collect::<Vec<_>>(), vec![1, 2, 3]);
+- ///
+- /// fn basic_simd_sum(x: &[f32]) -> f32 {
+- /// use std::ops::Add;
+- /// use std::simd::f32x4;
+- /// let (prefix, middle, suffix) = x.as_simd();
+- /// let sums = f32x4::from_array([
+- /// prefix.iter().copied().sum(),
+- /// 0.0,
+- /// 0.0,
+- /// suffix.iter().copied().sum(),
+- /// ]);
+- /// let sums = middle.iter().copied().fold(sums, f32x4::add);
+- /// sums.reduce_sum()
+- /// }
+- ///
+- /// let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
+- /// assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
+- /// ```
+- #[unstable(feature = "portable_simd", issue = "86656")]
+- pub fn as_simd<const LANES: usize>(&self) -> (&[T], &[Simd<T, LANES>], &[T])
+- where
+- Simd<T, LANES>: AsRef<[T; LANES]>,
+- T: simd::SimdElement,
+- simd::LaneCount<LANES>: simd::SupportedLaneCount,
+- {
+- // These are expected to always match, as vector types are laid out like
+- // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
+- // might as well double-check since it'll optimize away anyhow.
+- assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+-
+- // SAFETY: The simd types have the same layout as arrays, just with
+- // potentially-higher alignment, so the de-facto transmutes are sound.
+- unsafe { self.align_to() }
+- }
+-
+- /// Split a slice into a prefix, a middle of aligned SIMD types, and a suffix.
+- ///
+- /// This is a safe wrapper around [`slice::align_to_mut`], so has the same weak
+- /// postconditions as that method. You're only assured that
+- /// `self.len() == prefix.len() + middle.len() * LANES + suffix.len()`.
+- ///
+- /// Notably, all of the following are possible:
+- /// - `prefix.len() >= LANES`.
+- /// - `middle.is_empty()` despite `self.len() >= 3 * LANES`.
+- /// - `suffix.len() >= LANES`.
+- ///
+- /// That said, this is a safe method, so if you're only writing safe code,
+- /// then this can at most cause incorrect logic, not unsoundness.
+- ///
+- /// This is the mutable version of [`slice::as_simd`]; see that for examples.
+- ///
+- /// # Panics
+- ///
+- /// This will panic if the size of the SIMD type is different from
+- /// `LANES` times that of the scalar.
+- ///
+- /// At the time of writing, the trait restrictions on `Simd<T, LANES>` keeps
+- /// that from ever happening, as only power-of-two numbers of lanes are
+- /// supported. It's possible that, in the future, those restrictions might
+- /// be lifted in a way that would make it possible to see panics from this
+- /// method for something like `LANES == 3`.
+- #[unstable(feature = "portable_simd", issue = "86656")]
+- pub fn as_simd_mut<const LANES: usize>(&mut self) -> (&mut [T], &mut [Simd<T, LANES>], &mut [T])
+- where
+- Simd<T, LANES>: AsMut<[T; LANES]>,
+- T: simd::SimdElement,
+- simd::LaneCount<LANES>: simd::SupportedLaneCount,
+- {
+- // These are expected to always match, as vector types are laid out like
+- // arrays per <https://llvm.org/docs/LangRef.html#vector-type>, but we
+- // might as well double-check since it'll optimize away anyhow.
+- assert_eq!(mem::size_of::<Simd<T, LANES>>(), mem::size_of::<[T; LANES]>());
+-
+- // SAFETY: The simd types have the same layout as arrays, just with
+- // potentially-higher alignment, so the de-facto transmutes are sound.
+- unsafe { self.align_to_mut() }
+- }
+-
+ /// Checks if the elements of this slice are sorted.
+ ///
+ /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the
diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
-index ec70034..7cd9e21 100644
+index 06c7be0..359e2e7 100644
--- a/library/core/tests/lib.rs
+++ b/library/core/tests/lib.rs
-@@ -121,7 +121,6 @@ mod pattern;
- mod pin;
+@@ -75,7 +75,6 @@
+ #![feature(never_type)]
+ #![feature(unwrap_infallible)]
+ #![feature(result_into_ok_or_err)]
+-#![feature(portable_simd)]
+ #![feature(ptr_metadata)]
+ #![feature(once_cell)]
+ #![feature(option_result_contains)]
+@@ -127,7 +126,6 @@ mod pin;
+ mod pin_macro;
mod ptr;
mod result;
-mod simd;
mod slice;
mod str;
mod str_lossy;
---
+diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
+index 5dc586d..b6fc48f 100644
+--- a/library/std/src/lib.rs
++++ b/library/std/src/lib.rs
+@@ -312,6 +312,5 @@
+ #![feature(panic_can_unwind)]
+ #![feature(panic_unwind)]
+ #![feature(platform_intrinsics)]
+-#![feature(portable_simd)]
+ #![feature(prelude_import)]
+ #![feature(ptr_as_uninit)]
+@@ -508,23 +508,6 @@ pub mod time;
+ #[unstable(feature = "once_cell", issue = "74465")]
+ pub mod lazy;
+
+-// Pull in `std_float` crate into libstd. The contents of
+-// `std_float` are in a different repository: rust-lang/portable-simd.
+-#[path = "../../portable-simd/crates/std_float/src/lib.rs"]
+-#[allow(missing_debug_implementations, dead_code, unsafe_op_in_unsafe_fn, unused_unsafe)]
+-#[allow(rustdoc::bare_urls)]
+-#[unstable(feature = "portable_simd", issue = "86656")]
+-mod std_float;
+-
+-#[doc = include_str!("../../portable-simd/crates/core_simd/src/core_simd_docs.md")]
+-#[unstable(feature = "portable_simd", issue = "86656")]
+-pub mod simd {
+- #[doc(inline)]
+- pub use crate::std_float::StdFloat;
+- #[doc(inline)]
+- pub use core::simd::*;
+-}
+-
+ #[stable(feature = "futures_api", since = "1.36.0")]
+ pub mod task {
+ //! Types and Traits for working with asynchronous tasks.
+--
2.26.2.7.g19db9cfb68
-From 0ffdd8eda8df364391c8ac6e1ce92c73ba9254d4 Mon Sep 17 00:00:00 2001
+From eb703e627e7a84f1cd8d0d87f0f69da1f0acf765 Mon Sep 17 00:00:00 2001
From: bjorn3 <bjorn3@users.noreply.github.com>
Date: Fri, 3 Dec 2021 12:16:30 +0100
Subject: [PATCH] Disable long running tests
---
- library/core/tests/slice.rs | 3 +++
- 1 file changed, 3 insertions(+)
+ library/core/tests/slice.rs | 2 ++
+ 1 file changed, 2 insertions(+)
diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
-index 2c8f00a..44847ee 100644
+index 8402833..84592e0 100644
--- a/library/core/tests/slice.rs
+++ b/library/core/tests/slice.rs
-@@ -2332,7 +2332,8 @@ macro_rules! empty_max_mut {
- };
- }
+@@ -2462,6 +2462,7 @@ take_tests! {
+ #[cfg(not(miri))] // unused in Miri
+ const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+/*
- #[cfg(not(miri))] // Comparing usize::MAX many elements takes forever in Miri (and in rustc without optimizations)
- take_tests! {
- slice: &[(); usize::MAX], method: take,
- (take_in_bounds_max_range_to, (..usize::MAX), Some(EMPTY_MAX), &[(); 0]),
-@@ -2345,3 +2347,4 @@ take_tests! {
+ // can't be a constant due to const mutability rules
+ #[cfg(not(miri))] // unused in Miri
+ macro_rules! empty_max_mut {
+@@ -2485,6 +2486,7 @@ take_tests! {
(take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
(take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
}
+*/
+
+ #[test]
+ fn test_slice_from_ptr_range() {
--
2.26.2.7.g19db9cfb68
#!/bin/bash --verbose
set -e
-rustup component add rust-src rustc-dev llvm-tools-preview
./build_sysroot/prepare_sysroot_src.sh
-nightly-2021-12-30
+[toolchain]
+channel = "nightly-2022-03-26"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
-use gccjit::{ToRValue, Type};
+use gccjit::{ToLValue, ToRValue, Type};
use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_data_structures::stable_set::FxHashSet;
use rustc_middle::bug;
use rustc_middle::ty::Ty;
use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
}
fn get_param(&mut self, index: usize) -> Self::Value {
- self.cx.current_func.borrow().expect("current func")
- .get_param(index as i32)
- .to_rvalue()
+ let func = self.current_func();
+ let param = func.get_param(index as i32);
+ let on_stack =
+ if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
+ on_stack_param_indices.contains(&index)
+ }
+ else {
+ false
+ };
+ if on_stack {
+ param.to_lvalue().get_address(None)
+ }
+ else {
+ param.to_rvalue()
+ }
}
}
pub trait FnAbiGccExt<'gcc, 'tcx> {
// TODO(antoyo): return a function pointer type instead?
- fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
}
impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
- fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) {
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+ let mut on_stack_param_indices = FxHashSet::default();
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() {
1
unimplemented!();
}
PassMode::Cast(cast) => cast.gcc_type(cx),
- PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+ on_stack_param_indices.insert(argument_tys.len());
+ arg.memory_ty(cx)
+ },
+ PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
};
argument_tys.push(arg_ty);
}
- (return_ty, argument_tys, self.c_variadic)
+ (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
}
fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
- let (return_type, params, variadic) = self.gcc_type(cx);
+ let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
let pointer_type = cx.context.new_function_pointer_type(None, return_type, ¶ms, variadic);
+ cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
pointer_type
}
}
-use gccjit::{FunctionType, ToRValue};
+use gccjit::{FunctionType, GlobalKind, ToRValue};
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OomStrategy;
use rustc_span::symbol::sym;
use crate::GccContext;
let _ret = context.new_call(None, callee, &args);
//llvm::LLVMSetTailCall(ret, True);
block.end_with_void_return(None);
+
+ let name = OomStrategy::SYMBOL.to_string();
+ let global = context.new_global(None, GlobalKind::Exported, i8, name);
+ let value = tcx.sess.opts.debugging_opts.oom.should_panic();
+ let value = context.new_rvalue_from_int(i8, value as i32);
+ global.global_set_initializer_rvalue(value);
}
if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
println!("Module {}", module.name);
}
- if env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+ if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
println!("Dumping reproducer {}", module.name);
let _ = fs::create_dir("/tmp/reproducers");
// FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
println!("Dumped reproducer {}", module.name);
}
+ if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
+ let _ = fs::create_dir("/tmp/gccjit_dumps");
+ let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
+ context.dump_to_file(path, true);
+ }
context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
}
}
}
-pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
let prof_timer = tcx.prof.generic_activity("codegen_module");
let start_time = Instant::now();
let (module, _) = tcx.dep_graph.with_task(
dep_node,
tcx,
- cgu_name,
+ (cgu_name, supports_128bit_integers),
module_codegen,
Some(dep_graph::hash_result),
);
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
- fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
+ fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
//let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
context.add_command_line_option("-fno-semantic-interposition");
// NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
context.add_command_line_option("-fno-strict-aliasing");
+
+ if tcx.sess.opts.debugging_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+ context.add_command_line_option("-ffunction-sections");
+ context.add_command_line_option("-fdata-sections");
+ }
+
if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
context.set_dump_code_on_compile(true);
}
context.set_keep_intermediates(true);
}
+ // TODO(bjorn3): Remove once unwinding is properly implemented
+ context.set_allow_unreachable_blocks(true);
+
{
- let cx = CodegenCx::new(&context, cgu, tcx);
+ let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
let mono_items = cgu.items_in_deterministic_order(tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
OverflowOp,
StaticBuilderMethods,
};
+use rustc_data_structures::stable_set::FxHashSet;
use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_span::Span;
pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
pub cx: &'a CodegenCx<'gcc, 'tcx>,
- pub block: Option<Block<'gcc>>,
+ pub block: Block<'gcc>,
stack_var_count: Cell<usize>,
}
impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
- fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
+ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
Builder {
cx,
- block: None,
+ block,
stack_var_count: Cell::new(0),
}
}
fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
- let size = self.cx.int_width(src.get_type()) / 8;
+ let size = src.get_type().get_size();
let func = self.current_func();
let after_block = func.new_block("after_while");
self.llbb().end_with_jump(None, while_block);
- // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
+ // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
// state need to be updated.
- self.block = Some(while_block);
- *self.cx.current_block.borrow_mut() = Some(while_block);
+ self.switch_to_block(while_block);
let comparison_operator =
match operation {
while_block.end_with_conditional(None, cond, while_block, after_block);
- // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
// state need to be updated.
- self.block = Some(after_block);
- *self.cx.current_block.borrow_mut() = Some(after_block);
+ self.switch_to_block(after_block);
return_value.to_rvalue()
}
fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
- let size = self.cx.int_width(src.get_type());
- let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+ let size = src.get_type().get_size();
+ let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
param_types.push(param);
}
+ let mut on_stack_param_indices = FxHashSet::default();
+ if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
+ on_stack_param_indices = indices.clone();
+ }
+
if all_args_match {
return Cow::Borrowed(args);
}
.into_iter()
.zip(args.iter())
.enumerate()
- .map(|(_i, (expected_ty, &actual_val))| {
+ .map(|(index, (expected_ty, &actual_val))| {
let actual_ty = actual_val.get_type();
if expected_ty != actual_ty {
- self.bitcast(actual_val, expected_ty)
+ if on_stack_param_indices.contains(&index) {
+ actual_val.dereference(None).to_rvalue()
+ }
+ else {
+ self.bitcast(actual_val, expected_ty)
+ }
}
else {
actual_val
}
pub fn current_func(&self) -> Function<'gcc> {
- self.block.expect("block").get_function()
+ self.block.get_function()
}
fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
// gccjit requires to use the result of functions, even when it's not used.
// That's why we assign the result to a local or call add_eval().
let return_type = func.get_return_type();
- let current_block = self.current_block.borrow().expect("block");
let void_type = self.context.new_type::<()>();
- let current_func = current_block.get_function();
+ let current_func = self.block.get_function();
if return_type != void_type {
unsafe { RETURN_VALUE_COUNT += 1 };
let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
- current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
result.to_rvalue()
}
else {
- current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
+ self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
// Return dummy value when not having return value.
self.context.new_rvalue_from_long(self.isize_type, 0)
}
// That's why we assign the result to a local or call add_eval().
let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
let mut return_type = gcc_func.get_return_type();
- let current_block = self.current_block.borrow().expect("block");
let void_type = self.context.new_type::<()>();
- let current_func = current_block.get_function();
+ let current_func = self.block.get_function();
// FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
if return_type != void_type {
unsafe { RETURN_VALUE_COUNT += 1 };
- let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
- current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
result.to_rvalue()
}
else {
if gcc_func.get_param_count() == 0 {
// FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
- current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
}
else {
- current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
}
// Return dummy value when not having return value.
let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
- current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+ self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
result.to_rvalue()
}
}
- pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
// gccjit requires to use the result of functions, even when it's not used.
// That's why we assign the result to a local.
let return_type = self.context.new_type::<bool>();
- let current_block = self.current_block.borrow().expect("block");
- let current_func = current_block.get_function();
+ let current_func = self.block.get_function();
// TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
unsafe { RETURN_VALUE_COUNT += 1 };
- let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
- current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
result.to_rvalue()
}
}
impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
- let mut bx = Builder::with_cx(cx);
- *cx.current_block.borrow_mut() = Some(block);
- bx.block = Some(block);
- bx
+ Builder::with_cx(cx, block)
}
fn llbb(&self) -> Block<'gcc> {
- self.block.expect("block")
+ self.block
}
fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
}
fn switch_to_block(&mut self, block: Self::BasicBlock) {
- *self.cx.current_block.borrow_mut() = Some(block);
- self.block = Some(block);
+ self.block = block;
}
fn ret_void(&mut self) {
let on_val = self.const_uint_big(typ, on_val);
gcc_cases.push(self.context.new_case(on_val, on_val, dest));
}
- self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
+ self.block.end_with_switch(None, value, default_block, &gcc_cases);
}
- fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
- let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
+ fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // TODO(bjorn3): Properly implement unwinding.
+ let call_site = self.call(typ, func, args, None);
+ let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
self.llbb().end_with_conditional(None, condition, then, catch);
- self.context.new_rvalue_from_int(self.int_type, 0)
-
- // TODO(antoyo)
+ call_site
}
fn unreachable(&mut self) {
let func = self.context.get_builtin_function("__builtin_unreachable");
- let block = self.block.expect("block");
- block.add_eval(None, self.context.new_call(None, func, &[]));
- let return_type = block.get_function().get_return_type();
+ self.block.add_eval(None, self.context.new_call(None, func, &[]));
+ let return_type = self.block.get_function().get_return_type();
let void_type = self.context.new_type::<()>();
if return_type == void_type {
- block.end_with_void_return(None)
+ self.block.end_with_void_return(None)
}
else {
let return_value = self.current_func()
.new_local(None, return_type, "unreachableReturn");
- block.end_with_return(None, return_value)
+ self.block.end_with_return(None, return_value)
}
}
- fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME(antoyo): this should not be required.
- if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
- b = self.context.new_cast(None, b, a.get_type());
- }
- a + b
+ fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_add(a, b)
}
fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
a + b
}
- fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- if a.get_type() != b.get_type() {
- b = self.context.new_cast(None, b, a.get_type());
- }
- a - b
+ fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_sub(a, b)
}
fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a * b
+ self.gcc_mul(a, b)
}
fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO(antoyo): convert the arguments to unsigned?
- a / b
+ self.gcc_udiv(a, b)
}
fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO(antoyo): convert the arguments to signed?
- a / b
+ self.gcc_sdiv(a, b)
}
fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a % b
+ self.gcc_urem(a, b)
}
fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a % b
+ self.gcc_srem(a, b)
}
fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
- let a_type = a.get_type();
- let b_type = b.get_type();
- if a_type.is_unsigned(self) && b_type.is_signed(self) {
- let a = self.context.new_cast(None, a, b_type);
- let result = a << b;
- self.context.new_cast(None, result, a_type)
- }
- else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- let b = self.context.new_cast(None, b, a_type);
- a << b
- }
- else {
- a << b
- }
+ self.gcc_shl(a, b)
}
fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
- // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
- let a_type = a.get_type();
- let b_type = b.get_type();
- if a_type.is_unsigned(self) && b_type.is_signed(self) {
- let a = self.context.new_cast(None, a, b_type);
- let result = a >> b;
- self.context.new_cast(None, result, a_type)
- }
- else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- let b = self.context.new_cast(None, b, a_type);
- a >> b
- }
- else {
- a >> b
- }
+ self.gcc_lshr(a, b)
}
fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check whether behavior is an arithmetic shift for >> .
- // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
- let a_type = a.get_type();
- let b_type = b.get_type();
- if a_type.is_unsigned(self) && b_type.is_signed(self) {
- let a = self.context.new_cast(None, a, b_type);
- let result = a >> b;
- self.context.new_cast(None, result, a_type)
- }
- else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- let b = self.context.new_cast(None, b, a_type);
- a >> b
- }
- else {
- a >> b
- }
+ // It seems to be if the value is signed.
+ self.gcc_lshr(a, b)
}
- fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- if a.get_type() != b.get_type() {
- b = self.context.new_cast(None, b, a.get_type());
- }
- a & b
+ fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_and(a, b)
}
- fn or(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- if a.get_type() != b.get_type() {
- b = self.context.new_cast(None, b, a.get_type());
- }
- a | b
+ fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.gcc_or(a, b)
}
fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a ^ b
+ self.gcc_xor(a, b)
}
fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
- self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ self.gcc_neg(a)
}
fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
}
fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
- let operation =
- if a.get_type().is_bool() {
- UnaryOp::LogicalNegate
- }
- else {
- UnaryOp::BitwiseNegate
- };
- self.cx.context.new_unary_op(None, operation, a.get_type(), a)
+ self.gcc_not(a)
}
fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- a + b
+ self.gcc_add(a, b)
}
fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): should generate poison value?
- a - b
+ self.gcc_sub(a, b)
}
fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
}
fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
- use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
-
- let new_kind =
- match typ.kind() {
- Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
- Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
- t @ (Uint(_) | Int(_)) => t.clone(),
- _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
- };
-
- // TODO(antoyo): remove duplication with intrinsic?
- let name =
- match oop {
- OverflowOp::Add =>
- match new_kind {
- Int(I8) => "__builtin_add_overflow",
- Int(I16) => "__builtin_add_overflow",
- Int(I32) => "__builtin_sadd_overflow",
- Int(I64) => "__builtin_saddll_overflow",
- Int(I128) => "__builtin_add_overflow",
-
- Uint(U8) => "__builtin_add_overflow",
- Uint(U16) => "__builtin_add_overflow",
- Uint(U32) => "__builtin_uadd_overflow",
- Uint(U64) => "__builtin_uaddll_overflow",
- Uint(U128) => "__builtin_add_overflow",
-
- _ => unreachable!(),
- },
- OverflowOp::Sub =>
- match new_kind {
- Int(I8) => "__builtin_sub_overflow",
- Int(I16) => "__builtin_sub_overflow",
- Int(I32) => "__builtin_ssub_overflow",
- Int(I64) => "__builtin_ssubll_overflow",
- Int(I128) => "__builtin_sub_overflow",
-
- Uint(U8) => "__builtin_sub_overflow",
- Uint(U16) => "__builtin_sub_overflow",
- Uint(U32) => "__builtin_usub_overflow",
- Uint(U64) => "__builtin_usubll_overflow",
- Uint(U128) => "__builtin_sub_overflow",
-
- _ => unreachable!(),
- },
- OverflowOp::Mul =>
- match new_kind {
- Int(I8) => "__builtin_mul_overflow",
- Int(I16) => "__builtin_mul_overflow",
- Int(I32) => "__builtin_smul_overflow",
- Int(I64) => "__builtin_smulll_overflow",
- Int(I128) => "__builtin_mul_overflow",
-
- Uint(U8) => "__builtin_mul_overflow",
- Uint(U16) => "__builtin_mul_overflow",
- Uint(U32) => "__builtin_umul_overflow",
- Uint(U64) => "__builtin_umulll_overflow",
- Uint(U128) => "__builtin_mul_overflow",
-
- _ => unreachable!(),
- },
- };
-
- let intrinsic = self.context.get_builtin_function(&name);
- let res = self.current_func()
- // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
- .new_local(None, rhs.get_type(), "binopResult")
- .get_address(None);
- let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
- (res.dereference(None).to_rvalue(), overflow)
+ self.gcc_checked_binop(oop, typ, lhs, rhs)
}
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
/* Casts */
fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): check that it indeed truncate the value.
- self.context.new_cast(None, value, dest_ty)
+ self.gcc_int_cast(value, dest_ty)
}
fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.context.new_cast(None, value, dest_ty)
+ self.gcc_float_to_uint_cast(value, dest_ty)
}
fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.context.new_cast(None, value, dest_ty)
+ self.gcc_float_to_int_cast(value, dest_ty)
}
fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.context.new_cast(None, value, dest_ty)
+ self.gcc_uint_to_float_cast(value, dest_ty)
}
fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.context.new_cast(None, value, dest_ty)
+ self.gcc_int_to_float_cast(value, dest_ty)
}
fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
+ let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
+ self.intcast(usize_value, dest_ty, false)
}
fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
+ let usize_value = self.intcast(value, self.cx.type_isize(), false);
+ self.cx.const_bitcast(usize_value, dest_ty)
}
fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
// NOTE: is_signed is for value, not dest_typ.
- self.cx.context.new_cast(None, value, dest_typ)
+ self.gcc_int_cast(value, dest_typ)
}
fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
}
/* Comparisons */
- fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
- let left_type = lhs.get_type();
- let right_type = rhs.get_type();
- if left_type != right_type {
- // NOTE: because libgccjit cannot compare function pointers.
- if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
- lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
- rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
- }
- // NOTE: hack because we try to cast a vector type to the same vector type.
- else if format!("{:?}", left_type) != format!("{:?}", right_type) {
- rhs = self.context.new_cast(None, rhs, left_type);
- }
- }
- self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_icmp(op, lhs, rhs)
}
fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
}
/* Miscellaneous instructions */
- fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
- if flags.contains(MemFlags::NONTEMPORAL) {
- // HACK(nox): This is inefficient but there is no nontemporal memcpy.
- let val = self.load(src.get_type(), src, src_align);
- let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
- self.store_with_flags(val, ptr, dst_align, flags);
- return;
- }
+ fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
let size = self.intcast(size, self.type_size_t(), false);
let _is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.type_i8p());
let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memcpy = self.context.get_builtin_function("memcpy");
- let block = self.block.expect("block");
// TODO(antoyo): handle aligns and is_volatile.
- block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+ self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
}
fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memmove = self.context.get_builtin_function("memmove");
- let block = self.block.expect("block");
// TODO(antoyo): handle is_volatile.
- block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+ self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
}
fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
let _is_volatile = flags.contains(MemFlags::VOLATILE);
let ptr = self.pointercast(ptr, self.type_i8p());
let memset = self.context.get_builtin_function("memset");
- let block = self.block.expect("block");
// TODO(antoyo): handle align and is_volatile.
let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
let size = self.intcast(size, self.type_size_t(), false);
- block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+ self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
}
fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
then_block.add_assignment(None, variable, then_val);
then_block.end_with_jump(None, after_block);
- if then_val.get_type() != else_val.get_type() {
+ if !then_val.get_type().is_compatible_with(else_val.get_type()) {
else_val = self.context.new_cast(None, else_val, then_val.get_type());
}
else_block.add_assignment(None, variable, else_val);
else_block.end_with_jump(None, after_block);
- // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
// state need to be updated.
- self.block = Some(after_block);
- *self.cx.current_block.borrow_mut() = Some(after_block);
+ self.switch_to_block(after_block);
variable.to_rvalue()
}
}
fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
- let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
+ let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1");
let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
}
fn resume(&mut self, _exn: RValue<'gcc>) {
- unimplemented!();
+ // TODO(bjorn3): Properly implement unwinding.
+ self.unreachable();
}
fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
}
fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
- let size = self.cx.int_width(src.get_type()) / 8;
+ let size = src.get_type().get_size();
let name =
match op {
AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
// Fix the code in codegen_ssa::base::from_immediate.
return value;
}
- self.context.new_cast(None, value, dest_typ)
+ self.gcc_int_cast(value, dest_typ)
}
fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
}
fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
- unimplemented!();
+ // FIXME(bjorn3): implement
}
fn set_span(&mut self, _span: Span) {}
}
}
-trait ToGccComp {
+pub trait ToGccComp {
fn to_gcc_comparison(&self) -> ComparisonOp;
}
-use std::convert::TryFrom;
-
use gccjit::LValue;
-use gccjit::{Block, CType, RValue, Type, ToRValue};
+use gccjit::{RValue, Type, ToRValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
BaseTypeMethods,
global
// TODO(antoyo): set linkage.
}
-
- pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- let func = block.get_function();
- let local = func.new_local(None, value.get_type(), "intLocal");
- block.add_assignment(None, local, value);
- let value_address = local.get_address(None);
-
- let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
- ptr.dereference(None).to_rvalue()
- }
-
- pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO(antoyo): when libgccjit allow casting from pointer to int, remove this.
- let func = block.get_function();
- let local = func.new_local(None, value.get_type(), "ptrLocal");
- block.add_assignment(None, local, value);
- let ptr_address = local.get_address(None);
-
- let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
- ptr.dereference(None).to_rvalue()
- }
}
pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
}
fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
- self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+ self.gcc_int(typ, int)
}
fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
- self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+ self.gcc_uint(typ, int)
}
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
- if num >> 64 != 0 {
- // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
- let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
- let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
-
- let sixty_four = self.context.new_rvalue_from_long(typ, 64);
- (high << sixty_four) | self.context.new_cast(None, low, typ)
- }
- else if typ.is_i128(self) {
- let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
- self.context.new_cast(None, num, typ)
- }
- else {
- self.context.new_rvalue_from_long(typ, num as u64 as i64)
- }
+ self.gcc_uint_big(typ, num)
}
fn const_bool(&self, val: bool) -> RValue<'gcc> {
}
let value = self.const_uint_big(self.type_ix(bitsize), data);
- if layout.value == Pointer {
- self.inttoptr(self.current_block.borrow().expect("block"), value, ty)
- } else {
- self.const_bitcast(value, ty)
- }
+ // TODO(bjorn3): assert size is correct
+ self.const_bitcast(value, ty)
}
Scalar::Ptr(ptr, _size) => {
let (alloc_id, offset) = ptr.into_parts();
}
fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.context.new_c_type(CType::Int128t)
+ self.unqualified() == cx.i128_type.unqualified()
}
fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
- self.unqualified() == cx.context.new_c_type(CType::UInt128t)
+ self.unqualified() == cx.u128_type.unqualified()
}
fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
-use gccjit::{LValue, RValue, ToRValue, Type};
+use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type};
use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
use rustc_hir as hir;
use rustc_hir::Node;
// following:
for (value, variable) in &*self.const_globals.borrow() {
if format!("{:?}", value) == format!("{:?}", cv) {
- // TODO(antoyo): upgrade alignment.
+ if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+ let alignment = align.bits() as i32;
+ if alignment > global_variable.get_alignment() {
+ global_variable.set_alignment(alignment);
+ }
+ }
return *variable;
}
}
match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
- // TODO(antoyo): check if it's okay that TLS is off here.
- // TODO(antoyo): check if it's okay that link_section is None here.
+ // TODO(antoyo): check if it's okay that no link_section is set.
// TODO(antoyo): set alignment here as well.
- let global = self.define_global(&name[..], self.val_ty(cv), false, None);
- // TODO(antoyo): set linkage.
+ let global = self.declare_private_global(&name[..], self.val_ty(cv));
global
}
_ => {
global
},
};
- // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
- // globally.
global.global_set_initializer_rvalue(cv);
// TODO(antoyo): set unnamed address.
- global.get_address(None)
+ let rvalue = global.get_address(None);
+ self.global_lvalues.borrow_mut().insert(rvalue, global);
+ rvalue
}
pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
}
let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
- let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
+ let global = self.declare_global(
+ &sym,
+ llty,
+ GlobalKind::Exported,
+ is_tls,
+ fn_attrs.link_section,
+ );
if !self.tcx.is_reachable_non_generic(def_id) {
// TODO(antoyo): set visibility.
// don't do this then linker errors can be generated where the linker
// complains that one object files has a thread local version of the
// symbol and another one doesn't.
- cx.declare_global(&sym, llty, is_tls, attrs.link_section)
+ cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section)
}
}
use std::cell::{Cell, RefCell};
-use gccjit::{Block, CType, Context, Function, FunctionType, LValue, RValue, Struct, Type};
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type};
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::traits::{
BackendTypes,
use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
use crate::callee::get_fn;
-use crate::declare::mangle_name;
#[derive(Clone)]
pub struct FuncSig<'gcc> {
pub codegen_unit: &'tcx CodegenUnit<'tcx>,
pub context: &'gcc Context<'gcc>,
- // TODO(antoyo): First set it to a dummy block to avoid using Option?
- pub current_block: RefCell<Option<Block<'gcc>>>,
+ // TODO(bjorn3): Can this field be removed?
pub current_func: RefCell<Option<Function<'gcc>>>,
pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
pub ulonglong_type: Type<'gcc>,
pub sizet_type: Type<'gcc>,
+ pub supports_128bit_integers: bool,
+
pub float_type: Type<'gcc>,
pub double_type: Type<'gcc>,
/// Cache generated vtables
pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+ // TODO(antoyo): improve the SSA API to not require those.
+ // Mapping from function pointer type to indexes of on stack parameters.
+ pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+ // Mapping from function to indexes of on stack parameters.
+ pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+ /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+ /// TODO(antoyo): remove when the rustc API is fixed.
+ pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
/// Cache of constant strings,
pub const_str_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
- pub global_gen_sym_counter: Cell<usize>,
eh_personality: Cell<Option<RValue<'gcc>>>,
}
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
- pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
let check_overflow = tcx.sess.overflow_checks();
- // TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type().
- let isize_type = context.new_c_type(CType::LongLong);
- let usize_type = context.new_c_type(CType::ULongLong);
- let bool_type = context.new_type::<bool>();
- let i8_type = context.new_type::<i8>();
- let i16_type = context.new_type::<i16>();
- let i32_type = context.new_type::<i32>();
- let i64_type = context.new_c_type(CType::LongLong);
- let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
- let u8_type = context.new_type::<u8>();
- let u16_type = context.new_type::<u16>();
- let u32_type = context.new_type::<u32>();
- let u64_type = context.new_c_type(CType::ULongLong);
- let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+
+ let i8_type = context.new_c_type(CType::Int8t);
+ let i16_type = context.new_c_type(CType::Int16t);
+ let i32_type = context.new_c_type(CType::Int32t);
+ let i64_type = context.new_c_type(CType::Int64t);
+ let u8_type = context.new_c_type(CType::UInt8t);
+ let u16_type = context.new_c_type(CType::UInt16t);
+ let u32_type = context.new_c_type(CType::UInt32t);
+ let u64_type = context.new_c_type(CType::UInt64t);
+
+ let (i128_type, u128_type) =
+ if supports_128bit_integers {
+ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ (i128_type, u128_type)
+ }
+ else {
+ let i128_type = context.new_array_type(None, i64_type, 2);
+ let u128_type = context.new_array_type(None, u64_type, 2);
+ (i128_type, u128_type)
+ };
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
let ulonglong_type = context.new_c_type(CType::ULongLong);
let sizet_type = context.new_c_type(CType::SizeT);
- assert_eq!(isize_type, i64_type);
- assert_eq!(usize_type, u64_type);
+ let isize_type = context.new_c_type(CType::LongLong);
+ let usize_type = context.new_c_type(CType::ULongLong);
+ let bool_type = context.new_type::<bool>();
+
+ // TODO(antoyo): only have those assertions on x86_64.
+ assert_eq!(isize_type.get_size(), i64_type.get_size());
+ assert_eq!(usize_type.get_size(), u64_type.get_size());
let mut functions = FxHashMap::default();
let builtins = [
check_overflow,
codegen_unit,
context,
- current_block: RefCell::new(None),
current_func: RefCell::new(None),
normal_function_addresses: Default::default(),
functions: RefCell::new(functions),
ulonglong_type,
sizet_type,
+ supports_128bit_integers,
+
float_type,
double_type,
linkage: Cell::new(FunctionType::Internal),
instances: Default::default(),
function_instances: Default::default(),
+ on_stack_params: Default::default(),
+ on_stack_function_params: Default::default(),
vtables: Default::default(),
const_globals: Default::default(),
+ global_lvalues: Default::default(),
const_str_cache: Default::default(),
globals: Default::default(),
scalar_types: Default::default(),
struct_types: Default::default(),
types_with_fields_to_set: Default::default(),
local_gen_sym_counter: Cell::new(0),
- global_gen_sym_counter: Cell::new(0),
eh_personality: Cell::new(None),
pointee_infos: Default::default(),
structs_as_pointer: Default::default(),
function
}
+ pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ let types = [
+ self.u8_type,
+ self.u16_type,
+ self.u32_type,
+ self.u64_type,
+ self.i8_type,
+ self.i16_type,
+ self.i32_type,
+ self.i64_type,
+ ];
+
+ for native_type in types {
+ if native_type.is_compatible_with(typ) {
+ return true;
+ }
+ }
+
+ self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
+
+ pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ !self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
+
+ pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || typ == self.bool_type
+ }
+
+ pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ == self.bool_type
+ }
+
pub fn sess(&self) -> &Session {
&self.tcx.sess
}
}
}
-pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
- let name = &codegen_unit.name().to_string();
- mangle_name(&name.replace('-', "_"))
-}
-
fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
match tls_model {
TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
}
impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
- fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _trait_ref: Option<PolyExistentialTraitRef<'tcx>>, _vtable: Self::Value) {
+ fn create_vtable_debuginfo(&self, _ty: Ty<'tcx>, _trait_ref: Option<PolyExistentialTraitRef<'tcx>>, _vtable: Self::Value) {
// TODO(antoyo)
}
use rustc_target::abi::call::FnAbi;
use crate::abi::FnAbiGccExt;
-use crate::context::{CodegenCx, unit_name};
+use crate::context::CodegenCx;
use crate::intrinsic::llvm;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
global
}
else {
- self.declare_global(name, ty, is_tls, link_section)
+ self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
}
}
pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
- let index = self.global_gen_sym_counter.get();
- self.global_gen_sym_counter.set(index + 1);
- let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
- self.context.new_global(None, GlobalKind::Exported, ty, &name)
+ let name = self.generate_local_symbol_name("global");
+ self.context.new_global(None, GlobalKind::Internal, ty, &name)
}
pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
unsafe { std::mem::transmute(func) }
}*/
- pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
- let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
+ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ let global = self.context.new_global(None, global_kind, ty, name);
if is_tls {
global.set_tls_model(self.tls_model);
}
}
pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
- let (return_type, params, variadic) = fn_abi.gcc_type(self);
+ let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, ¶ms, variadic);
+ self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
// FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) }
}
--- /dev/null
+//! Module to handle integer operations.
+//! This module exists because some integer types are not supported on some gcc platforms, e.g.
+//! 128-bit integers on 32-bit platforms and thus require to be handled manually.
+
+use std::convert::TryFrom;
+
+use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
+use rustc_middle::ty::Ty;
+
+use crate::builder::ToGccComp;
+use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit unsigned %: __umodti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
+ }
+
+ pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit signed %: __modti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
+ }
+
+ pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let typ = a.get_type();
+ if self.is_native_int_type_or_bool(typ) {
+ let operation =
+ if typ.is_bool() {
+ UnaryOp::LogicalNegate
+ }
+ else {
+ UnaryOp::BitwiseNegate
+ };
+ self.cx.context.new_unary_op(None, operation, typ, a)
+ }
+ else {
+ // TODO(antoyo): use __negdi2 and __negti2 instead?
+ let element_type = typ.dyncast_array().expect("element type");
+ let values = [
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
+ ];
+ self.cx.context.new_array_constructor(None, typ, &values)
+ }
+ }
+
+ pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ if self.is_native_int_type(a_type) {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+ else {
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
+ self.context.new_call(None, func, &[a])
+ }
+ }
+
+ pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
+ }
+
+ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
+ // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+ if a_type.is_signed(self) != b_type.is_signed(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a >> b
+ }
+ else {
+ a >> b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+ // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
+ // significant half of the number) which uses lshr.
+
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let sixty_three = self.gcc_int(native_int_type, 63);
+ let zero = self.gcc_zero(native_int_type);
+ let b = self.gcc_int_cast(b, native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
+ let shift_value = self.gcc_sub(b, sixty_four);
+ let high = self.high(a);
+ let sign =
+ if a_type.is_signed(self) {
+ high >> sixty_three
+ }
+ else {
+ zero
+ };
+ let values = [
+ high >> shift_value,
+ sign,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ let shift_value = self.gcc_sub(sixty_four, b);
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
+ let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
+ let values = [
+ (high << shift_value) | shifted_low,
+ high >> b,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ if a.get_type() != b.get_type() {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let func_name =
+ match (operation, signed) {
+ (BinaryOp::Plus, true) => "__rust_i128_add",
+ (BinaryOp::Plus, false) => "__rust_u128_add",
+ (BinaryOp::Minus, true) => "__rust_i128_sub",
+ (BinaryOp::Minus, false) => "__rust_u128_sub",
+ _ => unreachable!("unexpected additive operation {:?}", operation),
+ };
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Plus, a, b)
+ }
+
+ pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
+ }
+
+ pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Minus, a, b)
+ }
+
+ fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
+ let func_name = format!("__{}{}ti3", sign, operation_name);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check if the types are signed?
+ // 128-bit, signed: __divti3
+ // TODO(antoyo): convert the arguments to signed?
+ self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
+ }
+
+ pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit, unsigned: __udivti3
+ self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
+ }
+
+ pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
+ use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
+
+ let new_kind =
+ match typ.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
+ // TODO(antoyo): remove duplication with intrinsic?
+ let name =
+ if self.is_native_int_type(lhs.get_type()) {
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I8) => "__builtin_add_overflow",
+ Int(I16) => "__builtin_add_overflow",
+ Int(I32) => "__builtin_sadd_overflow",
+ Int(I64) => "__builtin_saddll_overflow",
+ Int(I128) => "__builtin_add_overflow",
+
+ Uint(U8) => "__builtin_add_overflow",
+ Uint(U16) => "__builtin_add_overflow",
+ Uint(U32) => "__builtin_uadd_overflow",
+ Uint(U64) => "__builtin_uaddll_overflow",
+ Uint(U128) => "__builtin_add_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I8) => "__builtin_sub_overflow",
+ Int(I16) => "__builtin_sub_overflow",
+ Int(I32) => "__builtin_ssub_overflow",
+ Int(I64) => "__builtin_ssubll_overflow",
+ Int(I128) => "__builtin_sub_overflow",
+
+ Uint(U8) => "__builtin_sub_overflow",
+ Uint(U16) => "__builtin_sub_overflow",
+ Uint(U32) => "__builtin_usub_overflow",
+ Uint(U64) => "__builtin_usubll_overflow",
+ Uint(U128) => "__builtin_sub_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I8) => "__builtin_mul_overflow",
+ Int(I16) => "__builtin_mul_overflow",
+ Int(I32) => "__builtin_smul_overflow",
+ Int(I64) => "__builtin_smulll_overflow",
+ Int(I128) => "__builtin_mul_overflow",
+
+ Uint(U8) => "__builtin_mul_overflow",
+ Uint(U16) => "__builtin_mul_overflow",
+ Uint(U32) => "__builtin_umul_overflow",
+ Uint(U64) => "__builtin_umulll_overflow",
+ Uint(U128) => "__builtin_mul_overflow",
+
+ _ => unreachable!(),
+ },
+ }
+ }
+ else {
+ match new_kind {
+ Int(I128) | Uint(U128) => {
+ let func_name =
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I128) => "__rust_i128_addo",
+ Uint(U128) => "__rust_u128_addo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I128) => "__rust_i128_subo",
+ Uint(U128) => "__rust_u128_subo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
+ Uint(U128) => "__rust_u128_mulo",
+ _ => unreachable!(),
+ },
+ };
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let result_field = self.context.new_field(None, a_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ return (int_result, overflow);
+ },
+ _ => {
+ match oop {
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I32) => "__mulosi4",
+ Int(I64) => "__mulodi4",
+ _ => unreachable!(),
+ },
+ _ => unimplemented!("overflow operation for {:?}", new_kind),
+ }
+ }
+ }
+ };
+
+ let intrinsic = self.context.get_builtin_function(&name);
+ let res = self.current_func()
+ // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+ .new_local(None, rhs.get_type(), "binopResult")
+ .get_address(None);
+ let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+ (res.dereference(None).to_rvalue(), overflow)
+ }
+
+ pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
+ let func_name = format!("__{}cmpti2", sign);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
+ let cmp = self.context.new_call(None, func, &[lhs, rhs]);
+ let (op, limit) =
+ match op {
+ IntPredicate::IntEQ => {
+ return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntNE => {
+ return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntULT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
+ IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
+ };
+ self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
+ }
+ else {
+ let left_type = lhs.get_type();
+ let right_type = rhs.get_type();
+ if left_type != right_type {
+ // NOTE: because libgccjit cannot compare function pointers.
+ if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
+ lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
+ rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+ }
+ // NOTE: hack because we try to cast a vector type to the same vector type.
+ else if format!("{:?}", left_type) != format!("{:?}", right_type) {
+ rhs = self.context.new_cast(None, rhs, left_type);
+ }
+ }
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+ }
+
+ pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ a ^ b
+ }
+ else {
+ let values = [
+ self.low(a) ^ self.low(b),
+ self.high(a) ^ self.high(b),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a << b;
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a << b
+ }
+ else {
+ a << b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_shl(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+ // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let b = self.gcc_int_cast(b, native_int_type);
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let zero = self.gcc_zero(native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
+ let values = [
+ zero,
+ self.low(a) << (b - sixty_four),
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
+ let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
+ let values = [
+ self.low(a) << b,
+ (self.high(a) << b) | high_low,
+ ];
+
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let arg_type = arg.get_type();
+ if !self.is_native_int_type(arg_type) {
+ let native_int_type = arg_type.dyncast_array().expect("get element type");
+ let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
+ let swapped_lsb = self.gcc_bswap(lsb, width / 2);
+ let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
+ let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
+ let swapped_msb = self.gcc_bswap(msb, width / 2);
+ let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
+
+ // NOTE: we also need to swap the two elements here, in addition to swapping inside
+ // the elements themselves like done above.
+ return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
+ }
+
+ // TODO(antoyo): check if it's faster to use string literals and a
+ // match instead of format!.
+ let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
+ // FIXME(antoyo): this cast should not be necessary. Remove
+ // when having proper sized integer types.
+ let param_type = bswap.get_param(0).to_rvalue().get_type();
+ if param_type != arg_type {
+ arg = self.bitcast(arg, param_type);
+ }
+ self.cx.context.new_call(None, bswap, &[arg])
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+ }
+ else {
+ // NOTE: set the sign in high.
+ self.from_low_high(typ, int, -(int.is_negative() as i64))
+ }
+ }
+
+ pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+ }
+ else {
+ self.from_low_high(typ, int as i64, 0)
+ }
+ }
+
+ pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ let low = num as u64;
+ let high = (num >> 64) as u64;
+ if num >> 64 != 0 {
+ // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+ if self.is_native_int_type(typ) {
+ let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
+ let high = self.context.new_rvalue_from_long(typ, high as i64);
+
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ let shift = high << sixty_four;
+ shift | self.context.new_cast(None, low, typ)
+ }
+ else {
+ self.from_low_high(typ, low as i64, high as i64)
+ }
+ }
+ else if typ.is_i128(self) {
+ let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+ self.gcc_int_cast(num, typ)
+ }
+ else {
+ self.gcc_uint(typ, num as u64)
+ }
+ }
+
+ pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_zero(typ)
+ }
+ else {
+ self.from_low_high(typ, 0, 0)
+ }
+ }
+
+ pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
+ if self.is_native_int_type_or_bool(typ) {
+ typ.get_size() as u64 * 8
+ }
+ else {
+ // NOTE: the only unsupported types are u128 and i128.
+ 128
+ }
+ }
+
+ fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type_or_bool(a_type);
+ let b_native = self.is_native_int_type_or_bool(b_type);
+ if a_native && b_native {
+ if a_type != b_type {
+ b = self.context.new_cast(None, b, a_type);
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
+ self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
+ }
+
+ // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
+ pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
+ self.context.new_cast(None, value, dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(dest_typ) {
+ self.context.new_cast(None, self.low(value), dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(value_type) {
+ let dest_element_type = dest_typ.dyncast_array().expect("get element type");
+
+ // NOTE: set the sign of the value.
+ let zero = self.context.new_rvalue_zero(value_type);
+ let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
+ let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
+ let values = [
+ self.context.new_cast(None, value, dest_element_type),
+ self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
+ ];
+ self.context.new_array_constructor(None, dest_typ, &values)
+ }
+ else {
+ // Since u128 and i128 are the only types that can be unsupported, we know the type of
+ // value and the destination type have the same size, so a bitcast is fine.
+ self.context.new_bitcast(None, value, dest_typ)
+ }
+ }
+
+ fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(value_type) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(dest_typ) {
+ TypeKind::Float => "tisf",
+ TypeKind::Double => "tidf",
+ kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "un"
+ };
+ let func_name = format!("__float{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(false, value, dest_typ)
+ }
+
+ fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(value_type) {
+ TypeKind::Float => "sfti",
+ TypeKind::Double => "dfti",
+ kind => panic!("cannot cast a {:?} to non-native integer", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "uns"
+ };
+ let func_name = format!("__fix{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(false, value, dest_typ)
+ }
+
+ fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
+ .to_rvalue()
+ }
+
+ fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
+ .to_rvalue()
+ }
+
+ fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+ let native_int_type = typ.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_rvalue_from_long(native_int_type, low),
+ self.context.new_rvalue_from_long(native_int_type, high),
+ ];
+ self.context.new_array_constructor(None, typ, &values)
+ }
+}
pub mod llvm;
mod simd;
-use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
let arg = args[0].immediate();
let result = func.new_local(None, arg.get_type(), "zeros");
- let zero = self.cx.context.new_rvalue_zero(arg.get_type());
- let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
+ let zero = self.cx.gcc_zero(arg.get_type());
+ let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
self.llbb().end_with_conditional(None, cond, then_block, else_block);
- let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
+ let zero_result = self.cx.gcc_uint(arg.get_type(), width);
then_block.add_assignment(None, result, zero_result);
then_block.end_with_jump(None, after_block);
// NOTE: since jumps were added in a place
- // count_leading_zeroes() does not expect, the current blocks
+ // count_leading_zeroes() does not expect, the current block
// in the state need to be updated.
- *self.current_block.borrow_mut() = Some(else_block);
- self.block = Some(else_block);
+ self.switch_to_block(else_block);
let zeros =
match name {
sym::cttz => self.count_trailing_zeroes(width, arg),
_ => unreachable!(),
};
- else_block.add_assignment(None, result, zeros);
- else_block.end_with_jump(None, after_block);
+ self.llbb().add_assignment(None, result, zeros);
+ self.llbb().end_with_jump(None, after_block);
// NOTE: since jumps were added in a place rustc does not
- // expect, the current blocks in the state need to be updated.
- *self.current_block.borrow_mut() = Some(after_block);
- self.block = Some(after_block);
+ // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
result.to_rvalue()
}
args[0].immediate() // byte swap a u8/i8 is just a no-op
}
else {
- // TODO(antoyo): check if it's faster to use string literals and a
- // match instead of format!.
- let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
- let mut arg = args[0].immediate();
- // FIXME(antoyo): this cast should not be necessary. Remove
- // when having proper sized integer types.
- let param_type = bswap.get_param(0).to_rvalue().get_type();
- if param_type != arg.get_type() {
- arg = self.bitcast(arg, param_type);
- }
- self.cx.context.new_call(None, bswap, &[arg])
+ self.gcc_bswap(args[0].immediate(), width)
}
},
sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
val.to_rvalue()
};
match self.mode {
- PassMode::Ignore => {}
+ PassMode::Ignore => {},
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
- }
+ },
PassMode::Indirect { extra_attrs: Some(_), .. } => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
- }
+ },
PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
let next_arg = next();
- self.store(bx, next_arg.to_rvalue(), dst);
- }
+ self.store(bx, next_arg, dst);
+ },
}
}
}
let value =
if result_type.is_signed(self.cx) {
- self.context.new_cast(None, value, typ)
+ self.gcc_int_cast(value, typ)
}
else {
value
},
128 => {
// TODO(antoyo): find a more efficient implementation?
- let sixty_four = self.context.new_rvalue_from_long(typ, 64);
- let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
- let low = self.context.new_cast(None, value, self.u64_type);
+ let sixty_four = self.gcc_int(typ, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.u64_type);
+ let low = self.gcc_int_cast(value, self.u64_type);
let reversed_high = self.bit_reverse(64, high);
let reversed_low = self.bit_reverse(64, low);
- let new_low = self.context.new_cast(None, reversed_high, typ);
- let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
+ let new_low = self.gcc_int_cast(reversed_high, typ);
+ let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
- new_low | new_high
+ self.gcc_or(new_low, new_high)
},
_ => {
panic!("cannot bit reverse with width = {}", width);
},
};
- self.context.new_cast(None, result, result_type)
+ self.gcc_int_cast(result, result_type)
}
- fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): use width?
let arg_type = arg.get_type();
let count_leading_zeroes =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
if arg_type.is_uint(&self.cx) {
"__builtin_clz"
}
let result = self.current_func()
.new_local(None, array_type, "count_loading_zeroes_results");
- let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
- let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
- let low = self.context.new_cast(None, arg, self.u64_type);
+ let sixty_four = self.const_uint(arg_type, 64);
+ let shift = self.lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
let zero = self.context.new_rvalue_zero(self.usize_type);
let one = self.context.new_rvalue_one(self.usize_type);
let clzll = self.context.get_builtin_function("__builtin_clzll");
let first_elem = self.context.new_array_access(None, result, zero);
- let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
self.llbb()
.add_assignment(None, first_elem, first_value);
let second_elem = self.context.new_array_access(None, result, one);
- let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
+ let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+ let second_value = self.add(cast, sixty_four);
self.llbb()
.add_assignment(None, second_elem, second_value);
let third_elem = self.context.new_array_access(None, result, two);
- let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ let third_value = self.const_uint(arg_type, 128);
self.llbb()
.add_assignment(None, third_elem, third_value);
let res = self.context.new_array_access(None, result, index);
- return self.context.new_cast(None, res, arg_type);
+ return self.gcc_int_cast(res.to_rvalue(), arg_type);
}
else {
- let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
- let arg = self.context.new_cast(None, arg, self.uint_type);
- let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
- let diff = self.context.new_rvalue_from_long(self.int_type, diff);
+ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
+ let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
return self.context.new_cast(None, res, arg_type);
};
self.context.new_cast(None, res, arg_type)
}
- fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
let result_type = arg.get_type();
let arg =
if result_type.is_signed(self.cx) {
let new_type = result_type.to_unsigned(self.cx);
- self.context.new_cast(None, arg, new_type)
+ self.gcc_int_cast(arg, new_type)
}
else {
arg
};
let arg_type = arg.get_type();
let (count_trailing_zeroes, expected_type) =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
// NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
("__builtin_ctz", self.cx.uint_type)
let result = self.current_func()
.new_local(None, array_type, "count_loading_zeroes_results");
- let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
- let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
- let low = self.context.new_cast(None, arg, self.u64_type);
+ let sixty_four = self.gcc_int(arg_type, 64);
+ let shift = self.gcc_lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
let zero = self.context.new_rvalue_zero(self.usize_type);
let one = self.context.new_rvalue_one(self.usize_type);
let ctzll = self.context.get_builtin_function("__builtin_ctzll");
let first_elem = self.context.new_array_access(None, result, zero);
- let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
self.llbb()
.add_assignment(None, first_elem, first_value);
let second_elem = self.context.new_array_access(None, result, one);
- let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
+ let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
self.llbb()
.add_assignment(None, second_elem, second_value);
let third_elem = self.context.new_array_access(None, result, two);
- let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ let third_value = self.gcc_int(arg_type, 128);
self.llbb()
.add_assignment(None, third_elem, third_value);
let res = self.context.new_array_access(None, result, index);
- return self.context.new_cast(None, res, result_type);
+ return self.gcc_int_cast(res.to_rvalue(), result_type);
}
else {
- unimplemented!("count_trailing_zeroes for {:?}", arg_type);
+ let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
+ let arg_size = arg_type.get_size();
+ let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
+ let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
+ let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
+ let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
+ let diff = diff * self.context.new_cast(None, cond, self.int_type);
+ let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
+ return self.context.new_cast(None, res, result_type);
};
let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
let arg =
self.context.new_cast(None, res, result_type)
}
- fn int_width(&self, typ: Type<'gcc>) -> i64 {
- self.cx.int_width(typ) as i64
- }
-
- fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
// TODO(antoyo): use the optimized version with fewer operations.
let result_type = value.get_type();
let value_type = result_type.to_unsigned(self.cx);
let value =
if result_type.is_signed(self.cx) {
- self.context.new_cast(None, value, value_type)
+ self.gcc_int_cast(value, value_type)
}
else {
value
// TODO(antoyo): implement in the normal algorithm below to have a more efficient
// implementation (that does not require a call to __popcountdi2).
let popcount = self.context.get_builtin_function("__builtin_popcountll");
- let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
- let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
+ let sixty_four = self.gcc_int(value_type, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
let high = self.context.new_call(None, popcount, &[high]);
- let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
+ let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
let low = self.context.new_call(None, popcount, &[low]);
let res = high + low;
- return self.context.new_cast(None, res, result_type);
+ return self.gcc_int_cast(res, result_type);
}
// First step.
// Algorithm from: https://blog.regehr.org/archives/1063
fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
- let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
- let shift = shift % max;
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
let lhs = self.shl(value, shift);
+ let result_neg = self.neg(shift);
let result_and =
self.and(
- self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
- self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
);
let rhs = self.lshr(value, result_and);
self.or(lhs, rhs)
// Algorithm from: https://blog.regehr.org/archives/1063
fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
- let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
- let shift = shift % max;
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
let lhs = self.lshr(value, shift);
+ let result_neg = self.neg(shift);
let result_and =
self.and(
- self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
- self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
);
let rhs = self.shl(value, result_and);
self.or(lhs, rhs)
self.llbb().end_with_conditional(None, overflow, then_block, after_block);
// NOTE: since jumps were added in a place rustc does not
- // expect, the current blocks in the state need to be updated.
- *self.current_block.borrow_mut() = Some(after_block);
- self.block = Some(after_block);
+ // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
res.to_rvalue()
}
fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
if signed {
// Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
- let func_name =
- match width {
- 8 => "__builtin_sub_overflow",
- 16 => "__builtin_sub_overflow",
- 32 => "__builtin_ssub_overflow",
- 64 => "__builtin_ssubll_overflow",
- 128 => "__builtin_sub_overflow",
- _ => unreachable!(),
- };
- let overflow_func = self.context.get_builtin_function(func_name);
let result_type = lhs.get_type();
let func = self.current_func.borrow().expect("func");
let res = func.new_local(None, result_type, "saturating_diff");
- let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_sub_overflow",
+ 16 => "__builtin_sub_overflow",
+ 32 => "__builtin_ssub_overflow",
+ 64 => "__builtin_ssubll_overflow",
+ 128 => "__builtin_sub_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_subo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
let then_block = func.new_block("then");
let after_block = func.new_block("after");
- let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
- let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
- let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
- self.context.new_rvalue_from_int(unsigned_type, 0)
- );
- let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
- then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
+ // NOTE: convert the type to unsigned to have an unsigned shift.
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
then_block.end_with_jump(None, after_block);
self.llbb().end_with_conditional(None, overflow, then_block, after_block);
// NOTE: since jumps were added in a place rustc does not
- // expect, the current blocks in the state need to be updated.
- *self.current_block.borrow_mut() = Some(after_block);
- self.block = Some(after_block);
+ // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
res.to_rvalue()
}
}
fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
- if bx.sess().panic_strategy() == PanicStrategy::Abort {
+ // NOTE: the `|| true` here is to use the panic=abort strategy with panic=unwind too
+ if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
+ // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
bx.call(bx.type_void(), try_func, &[data], None);
// Return 0 unconditionally from the intrinsic call;
// we can never unwind.
simd_xor: Uint, Int => xor;
}
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
+
unimplemented!("simd {}", name);
}
/*
+ * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
* TODO(antoyo): support #[inline] attributes.
- * TODO(antoyo): support LTO.
+ * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto).
*
* TODO(antoyo): remove the patches.
*/
extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
+extern crate tempfile;
// This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)]
mod coverageinfo;
mod debuginfo;
mod declare;
+mod int;
mod intrinsic;
mod mono_item;
mod type_;
mod type_of;
use std::any::Any;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};
-use gccjit::{Context, OptimizationLevel};
+use gccjit::{Context, OptimizationLevel, CType};
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
use rustc_codegen_ssa::base::codegen_crate;
use rustc_metadata::EncodedMetadata;
use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::query::Providers;
use rustc_session::config::{Lto, OptLevel, OutputFilenames};
use rustc_session::Session;
use rustc_span::Symbol;
use rustc_span::fatal_error::FatalError;
+use tempfile::TempDir;
pub struct PrintOnPanic<F: Fn() -> String>(pub F);
}
#[derive(Clone)]
-pub struct GccCodegenBackend;
+pub struct GccCodegenBackend {
+ supports_128bit_integers: Arc<Mutex<bool>>,
+}
impl CodegenBackend for GccCodegenBackend {
fn init(&self, sess: &Session) {
if sess.lto() != Lto::No {
sess.warn("LTO is not supported. You may get a linker error.");
}
+
+ let temp_dir = TempDir::new().expect("cannot create temporary directory");
+ let temp_file = temp_dir.into_path().join("result.asm");
+ let check_context = Context::default();
+ check_context.set_print_errors_to_stderr(false);
+ let _int128_ty = check_context.new_c_type(CType::UInt128t);
+ // NOTE: we cannot just call compile() as this would require other files than libgccjit.so.
+ check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
+ *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
+ }
+
+ fn provide(&self, providers: &mut Providers) {
+ // FIXME(antoyo) compute list of enabled features from cli flags
+ providers.global_backend_features = |_tcx, ()| vec![];
}
fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
}
fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
- base::compile_codegen_unit(tcx, cgu_name)
+ base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
}
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
/// This is the entrypoint for a hot plugged rustc_codegen_gccjit
#[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
- Box::new(GccCodegenBackend)
+ Box::new(GccCodegenBackend {
+ supports_128bit_integers: Arc::new(Mutex::new(false)),
+ })
}
fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
use rustc_middle::ty::layout::TyAndLayout;
use rustc_target::abi::{AddressSpace, Align, Integer, Size};
-use crate::common::TypeReflection;
use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
}
fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
- if typ.is_integral() {
+ if self.is_int_type_or_bool(typ) {
TypeKind::Integer
}
+ else if typ.is_compatible_with(self.float_type) {
+ TypeKind::Float
+ }
+ else if typ.is_compatible_with(self.double_type) {
+ TypeKind::Double
+ }
else if typ.dyncast_vector().is_some() {
TypeKind::Vector
}
}
fn int_width(&self, typ: Type<'gcc>) -> u64 {
- if typ.is_i8(self) || typ.is_u8(self) {
- 8
- }
- else if typ.is_i16(self) || typ.is_u16(self) {
- 16
- }
- else if typ.is_i32(self) || typ.is_u32(self) {
- 32
- }
- else if typ.is_i64(self) || typ.is_u64(self) {
- 64
- }
- else if typ.is_i128(self) || typ.is_u128(self) {
- 128
- }
- else {
- panic!("Cannot get width of int type {:?}", typ);
- }
+ self.gcc_int_width(typ)
}
fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
if let (&ty::Adt(def, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- if def.is_enum() && !def.variants.is_empty() {
- write!(&mut name, "::{}", def.variants[index].name).unwrap();
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
}
}
if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
ty::Ref(..) | ty::RawPtr(_) => {
return self.field(cx, index).gcc_type(cx, true);
}
- ty::Adt(def, _) if def.is_box() => {
+ // only wide pointer boxes are handled as pointers
+ // thin pointer boxes with scalar allocators are handled by the general logic below
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
}
set -e
-if [ -f ./gcc_path ]; then
+if [ -f ./gcc_path ]; then
export GCC_PATH=$(cat gcc_path)
else
echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
export LD_LIBRARY_PATH="$GCC_PATH"
export LIBRARY_PATH="$GCC_PATH"
+features=
+
+if [[ "$1" == "--features" ]]; then
+ shift
+ features="--features $1"
+ shift
+fi
+
if [[ "$1" == "--release" ]]; then
export CHANNEL='release'
- CARGO_INCREMENTAL=1 cargo rustc --release
+ CARGO_INCREMENTAL=1 cargo rustc --release $features
shift
else
echo $LD_LIBRARY_PATH
export CHANNEL='debug'
- cargo rustc
+ cargo rustc $features
+fi
+
+if [[ "$1" == "--build" ]]; then
+ exit
fi
source config.sh
echo
echo "[TEST] rust-lang/rust"
- rust_toolchain=$(cat rust-toolchain)
+ rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
git clone https://github.com/rust-lang/rust.git || true
cd rust
git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
export RUSTFLAGS=
+ git apply - <<EOF
+diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs
+index 887d27fd6dca4..2c2239f2b83d1 100644
+--- a/src/tools/compiletest/src/header.rs
++++ b/src/tools/compiletest/src/header.rs
+@@ -806,8 +806,8 @@ pub fn make_test_description<R: Read>(
+ cfg: Option<&str>,
+ ) -> test::TestDesc {
+ let mut ignore = false;
+ #[cfg(not(bootstrap))]
+- let ignore_message: Option<String> = None;
++ let ignore_message: Option<&str> = None;
+ let mut should_fail = false;
+
+ let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some();
+
+EOF
+
rm config.toml || true
cat > config.toml <<EOF
git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
- rm -r src/test/ui/{abi*,extern/,llvm-asm/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true
+ rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true
for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
rm $test
done
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
- rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Enable back this test if I ever implement the llvm_asm! macro.
RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
clean_ui_tests
;;
+ "--std-tests")
+ std_tests
+ ;;
+
+ "--build-sysroot")
+ build_sysroot
+ ;;
+
*)
clean
mini_tests
pub fn fflush(stream: *mut i32) -> i32;
pub fn printf(format: *const i8, ...) -> i32;
- pub static STDOUT: *mut i32;
+ pub static stdout: *mut i32;
}
}
pub fn panic(_msg: &str) -> ! {
unsafe {
libc::puts("Panicking\0" as *const str as *const u8);
- libc::fflush(libc::STDOUT);
+ libc::fflush(libc::stdout);
intrinsics::abort();
}
}
--- /dev/null
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(arbitrary_self_types, auto_traits, core_intrinsics, lang_items, start, intrinsics)]
+
+#![no_std]
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Core
+ */
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ let var = 134217856_u128;
+ let var2 = 10475372733397991552_u128;
+ let var3 = 193236519889708027473620326106273939584_u128;
+ let var4 = 123236519889708027473620326106273939584_u128;
+ let var5 = 153236519889708027473620326106273939584_u128;
+ let var6 = 18446744073709551616_i128;
+ let var7 = 170141183460469231731687303715884105728_u128;
+
+ // Shifts.
+ assert_eq!(var << (argc as u128 - 1), var);
+ assert_eq!(var << argc as u128, 268435712);
+ assert_eq!(var << (argc + 32) as u128, 1152922604118474752);
+ assert_eq!(var << (argc + 48) as u128, 75557935783508361347072);
+ assert_eq!(var << (argc + 60) as u128, 309485304969250248077606912);
+ assert_eq!(var << (argc + 62) as u128, 1237941219877000992310427648);
+ assert_eq!(var << (argc + 63) as u128, 2475882439754001984620855296);
+ assert_eq!(var << (argc + 80) as u128, 324518863143436548128224745357312);
+
+ assert_eq!(var2 << argc as u128, 20950745466795983104);
+ assert_eq!(var2 << (argc as u128 - 1), var2);
+ assert_eq!(var2 << (argc + 32) as u128, 89982766606709001335848566784);
+ assert_eq!(var2 << (argc + 48) as u128, 5897110592337281111546171672756224);
+ assert_eq!(var2 << (argc + 60) as u128, 24154564986213503432893119171609493504);
+ assert_eq!(var2 << (argc + 62) as u128, 96618259944854013731572476686437974016);
+ assert_eq!(var2 << (argc + 63) as u128, 193236519889708027463144953372875948032);
+
+ assert_eq!(var3 << argc as u128, 46190672858477591483866044780779667712);
+ assert_eq!(var3 << (argc as u128 - 1), var3);
+ assert_eq!(var3 << (argc + 32) as u128, 21267668304951024224840338247585366016);
+ assert_eq!(var3 << (argc + 48) as u128, 1335125106377253154015353231953100800);
+ assert_eq!(var3 << (argc + 60) as u128, 24154564986213503432893119171609493504);
+ assert_eq!(var3 << (argc + 62) as u128, 96618259944854013731572476686437974016);
+ assert_eq!(var3 << (argc + 63) as u128, 193236519889708027463144953372875948032);
+
+ assert_eq!((2220326408_u32 + argc as u32) >> (32 - 6), 33);
+
+ assert_eq!(var >> (argc as u128 - 1), var);
+ assert_eq!(var >> argc as u128, 67108928);
+ assert_eq!(var >> (argc + 32) as u128, 0);
+ assert_eq!(var >> (argc + 48) as u128, 0);
+ assert_eq!(var >> (argc + 60) as u128, 0);
+ assert_eq!(var >> (argc + 62) as u128, 0);
+ assert_eq!(var >> (argc + 63) as u128, 0);
+
+ assert_eq!(var2 >> argc as u128, 5237686366698995776);
+ assert_eq!(var2 >> (argc as u128 - 1), var2);
+ assert_eq!(var2 >> (argc + 32) as u128, 1219493888);
+ assert_eq!(var2 >> (argc + 48) as u128, 18608);
+ assert_eq!(var2 >> (argc + 60) as u128, 4);
+ assert_eq!(var2 >> (argc + 62) as u128, 1);
+ assert_eq!(var2 >> (argc + 63) as u128, 0);
+
+ assert_eq!(var3 >> (argc as u128 - 1), var3);
+ assert_eq!(var3 >> argc as u128, 96618259944854013736810163053136969792);
+ assert_eq!(var3 >> (argc + 32) as u128, 22495691651677250335181635584);
+ assert_eq!(var3 >> (argc + 48) as u128, 343257013727985387194544);
+ assert_eq!(var3 >> (argc + 60) as u128, 83802981867183932420);
+ assert_eq!(var3 >> (argc + 62) as u128, 20950745466795983105);
+ assert_eq!(var3 >> (argc + 63) as u128, 10475372733397991552);
+ assert_eq!(var3 >> (argc + 80) as u128, 79920751444992);
+
+ assert_eq!(var6 >> argc as u128, 9223372036854775808);
+ assert_eq!((var6 - 1) >> argc as u128, 9223372036854775807);
+ assert_eq!(var7 >> argc as u128, 85070591730234615865843651857942052864);
+
+ // Casts
+ assert_eq!((var >> (argc + 32) as u128) as u64, 0);
+ assert_eq!((var >> argc as u128) as u64, 67108928);
+
+ // Addition.
+ assert_eq!(var + argc as u128, 134217857);
+
+ assert_eq!(var2 + argc as u128, 10475372733397991553);
+ assert_eq!(var2 + (var2 + argc as u128) as u128, 20950745466795983105);
+
+ assert_eq!(var3 + argc as u128, 193236519889708027473620326106273939585);
+
+ // Subtraction
+ assert_eq!(var - argc as u128, 134217855);
+
+ assert_eq!(var2 - argc as u128, 10475372733397991551);
+
+ assert_eq!(var3 - argc as u128, 193236519889708027473620326106273939583);
+
+ // Multiplication
+ assert_eq!(var * (argc + 1) as u128, 268435712);
+ assert_eq!(var * (argc as u128 + var2), 1405982069077538020949770368);
+
+ assert_eq!(var2 * (argc + 1) as u128, 20950745466795983104);
+ assert_eq!(var2 * (argc as u128 + var2), 109733433903618109003204073240861360256);
+
+ assert_eq!(var3 * argc as u128, 193236519889708027473620326106273939584);
+
+ assert_eq!(var4 * (argc + 1) as u128, 246473039779416054947240652212547879168);
+
+ assert_eq!(var5 * (argc + 1) as u128, 306473039779416054947240652212547879168);
+
+ // Division.
+ assert_eq!(var / (argc + 1) as u128, 67108928);
+ assert_eq!(var / (argc + 2) as u128, 44739285);
+
+ assert_eq!(var2 / (argc + 1) as u128, 5237686366698995776);
+ assert_eq!(var2 / (argc + 2) as u128, 3491790911132663850);
+
+ assert_eq!(var3 / (argc + 1) as u128, 96618259944854013736810163053136969792);
+ assert_eq!(var3 / (argc + 2) as u128, 64412173296569342491206775368757979861);
+ assert_eq!(var3 / (argc as u128 + var4), 1);
+ assert_eq!(var3 / (argc as u128 + var2), 18446744073709551615);
+
+ assert_eq!(var4 / (argc + 1) as u128, 61618259944854013736810163053136969792);
+ assert_eq!(var4 / (argc + 2) as u128, 41078839963236009157873442035424646528);
+
+ 0
+}
pub fn puts(s: *const u8) -> i32;
pub fn fflush(stream: *mut i32) -> i32;
- pub static STDOUT: *mut i32;
+ pub static stdout: *mut i32;
}
}
pub fn panic(_msg: &str) -> ! {
unsafe {
libc::puts("Panicking\0" as *const str as *const u8);
- libc::fflush(libc::STDOUT);
+ libc::fflush(libc::stdout);
intrinsics::abort();
}
}
pub fn fflush(stream: *mut i32) -> i32;
pub fn printf(format: *const i8, ...) -> i32;
- pub static STDOUT: *mut i32;
+ pub static stdout: *mut i32;
}
}
pub fn panic(_msg: &str) -> ! {
unsafe {
libc::puts("Panicking\0" as *const str as *const u8);
- libc::fflush(libc::STDOUT);
+ libc::fflush(libc::stdout);
intrinsics::abort();
}
}
pub fn puts(s: *const u8) -> i32;
pub fn fflush(stream: *mut i32) -> i32;
- pub static STDOUT: *mut i32;
+ pub static stdout: *mut i32;
}
}
pub fn panic(_msg: &str) -> ! {
unsafe {
libc::puts("Panicking\0" as *const str as *const u8);
- libc::fflush(libc::STDOUT);
+ libc::fflush(libc::stdout);
intrinsics::abort();
}
}
#[lang = "sized"]
pub trait Sized {}
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "drop"]
+pub trait Drop {}
+
#[lang = "copy"]
trait Copy {
}
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
-use rustc_session::config::DebugInfo;
+use rustc_session::config::{DebugInfo, OomStrategy};
use rustc_span::symbol::sym;
use crate::debuginfo;
llvm::LLVMBuildRetVoid(llbuilder);
llvm::LLVMDisposeBuilder(llbuilder);
+ // __rust_alloc_error_handler_should_panic
+ let name = OomStrategy::SYMBOL;
+ let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+ }
+ let val = tcx.sess.opts.debugging_opts.oom.should_panic();
+ let llval = llvm::LLVMConstInt(i8, val as u64, False);
+ llvm::LLVMSetInitializer(ll_g, llval);
+
if tcx.sess.opts.debuginfo != DebugInfo::None {
- let dbg_cx = debuginfo::CrateDebugContext::new(llmod);
- debuginfo::metadata::compile_unit_metadata(tcx, module_name, &dbg_cx);
+ let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
dbg_cx.finalize(tcx.sess);
}
}
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
- debuginfo::create_global_var_metadata(self, def_id, g);
+ debuginfo::build_global_var_di_node(self, def_id, g);
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
llvm::set_thread_local_mode(g, self.tls_model);
pub isize_ty: &'ll Type,
pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
- pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
+ pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
eh_personality: Cell<Option<&'ll Value>>,
eh_catch_typeinfo: Cell<Option<&'ll Value>>,
};
let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
- let dctx = debuginfo::CrateDebugContext::new(llmod);
- debuginfo::metadata::compile_unit_metadata(tcx, codegen_unit.name().as_str(), &dctx);
+ let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(
+ tcx,
+ codegen_unit.name().as_str(),
+ &dctx,
+ );
Some(dctx)
} else {
None
fn_dbg_scope: &'ll DIScope,
debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
) {
- // Find all the scopes with variables defined in them.
- let mut has_variables = BitSet::new_empty(mir.source_scopes.len());
-
- // Only consider variables when they're going to be emitted.
- // FIXME(eddyb) don't even allocate `has_variables` otherwise.
- if cx.sess().opts.debuginfo == DebugInfo::Full {
+ // Find all scopes with variables defined in them.
+ let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+ let mut vars = BitSet::new_empty(mir.source_scopes.len());
// FIXME(eddyb) take into account that arguments always have debuginfo,
// irrespective of their name (assuming full debuginfo is enabled).
// NOTE(eddyb) actually, on second thought, those are always in the
// function scope, which always exists.
for var_debug_info in &mir.var_debug_info {
- has_variables.insert(var_debug_info.source_info.scope);
+ vars.insert(var_debug_info.source_info.scope);
}
- }
+ Some(vars)
+ } else {
+ // Nothing to emit, of course.
+ None
+ };
// Instantiate all scopes.
for idx in 0..mir.source_scopes.len() {
let scope = SourceScope::new(idx);
- make_mir_scope(cx, instance, mir, fn_dbg_scope, &has_variables, debug_context, scope);
+ make_mir_scope(cx, instance, mir, fn_dbg_scope, &variables, debug_context, scope);
}
}
instance: Instance<'tcx>,
mir: &Body<'tcx>,
fn_dbg_scope: &'ll DIScope,
- has_variables: &BitSet<SourceScope>,
+ variables: &Option<BitSet<SourceScope>>,
debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
scope: SourceScope,
) {
let scope_data = &mir.source_scopes[scope];
let parent_scope = if let Some(parent) = scope_data.parent_scope {
- make_mir_scope(cx, instance, mir, fn_dbg_scope, has_variables, debug_context, parent);
+ make_mir_scope(cx, instance, mir, fn_dbg_scope, variables, debug_context, parent);
debug_context.scopes[parent]
} else {
// The root is the function itself.
return;
};
- if !has_variables.contains(scope) && scope_data.inlined.is_none() {
+ if let Some(vars) = variables && !vars.contains(scope) && scope_data.inlined.is_none() {
// Do not create a DIScope if there are no variables defined in this
// MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
debug_context.scopes[scope] = parent_scope;
that exact file path.
All private state used by the module is stored within either the
-CrateDebugContext struct (owned by the CodegenCx) or the
+CodegenUnitDebugContext struct (owned by the CodegenCx) or the
FunctionDebugContext (owned by the FunctionCx).
This file consists of three conceptual sections:
...
```
-To break cycles like these, we use "forward declarations". That is, when
+To break cycles like these, we use "stubs". That is, when
the algorithm encounters a possibly recursive type (any struct or enum), it
immediately creates a type description node and inserts it into the cache
*before* describing the members of the type. This type description is just
allows the algorithm to already refer to the type. After the stub is
inserted into the cache, the algorithm continues as before. If it now
encounters a recursive reference, it will hit the cache and does not try to
-describe the type anew.
-
-This behavior is encapsulated in the 'RecursiveTypeDescription' enum,
-which represents a kind of continuation, storing all state needed to
-continue traversal at the type members after the type has been registered
-with the cache. (This implementation approach might be a tad over-
-engineered and may change in the future)
+describe the type anew. This behavior is encapsulated in the
+`type_map::build_type_with_children()` function.
## Source Locations and Line Information
of linking the `llvm.dbg.declare` instructions to the correct source
locations even while source location emission is still disabled, so there
is no need to do anything special with source location handling here.
-
-## Unique Type Identification
-
-In order for link-time optimization to work properly, LLVM needs a unique
-type identifier that tells it across compilation units which types are the
-same as others. This type identifier is created by
-`TypeMap::get_unique_type_id_of_type()` using the following algorithm:
-
-1. Primitive types have their name as ID
-
-2. Structs, enums and traits have a multipart identifier
-
- 1. The first part is the SVH (strict version hash) of the crate they
- were originally defined in
-
- 2. The second part is the ast::NodeId of the definition in their
- original crate
-
- 3. The final part is a concatenation of the type IDs of their concrete
- type arguments if they are generic types.
-
-3. Tuple-, pointer-, and function types are structurally identified, which
- means that they are equivalent if their component types are equivalent
- (i.e., `(i32, i32)` is the same regardless in which crate it is used).
-
-This algorithm also provides a stable ID for types that are defined in one
-crate but instantiated from metadata within another crate. We just have to
-take care to always map crate and `NodeId`s back to the original crate
-context.
-
-As a side-effect these unique type IDs also help to solve a problem arising
-from lifetime parameters. Since lifetime parameters are completely omitted
-in debuginfo, more than one `Ty` instance may map to the same debuginfo
-type metadata, that is, some struct `Struct<'a>` may have N instantiations
-with different concrete substitutions for `'a`, and thus there will be N
-`Ty` instances for the type `Struct<'a>` even though it is not generic
-otherwise. Unfortunately this means that we cannot use `ty::type_id()` as
-cheap identifier for type metadata -- we have done this in the past, but it
-led to unnecessary metadata duplication in the best case and LLVM
-assertions in the worst. However, the unique type ID as described above
-*can* be used as identifier. Since it is comparatively expensive to
-construct, though, `ty::type_id()` is still used additionally as an
-optimization for cases where the exact same type has been seen before
-(which is most of the time).
-use self::MemberDescriptionFactory::*;
-use self::RecursiveTypeDescription::*;
+use self::type_map::DINodeCreationResult;
+use self::type_map::Stub;
+use self::type_map::UniqueTypeId;
use super::namespace::mangled_name_of_instance;
use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name};
use super::utils::{
create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
};
-use super::CrateDebugContext;
+use super::CodegenUnitDebugContext;
use crate::abi;
use crate::common::CodegenCx;
+use crate::debuginfo::metadata::type_map::build_type_with_children;
use crate::debuginfo::utils::fat_pointer_kind;
use crate::debuginfo::utils::FatPtrKind;
use crate::llvm;
use crate::llvm::debuginfo::{
- DIArray, DICompositeType, DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType,
- DebugEmissionKind,
+ DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind,
};
use crate::value::Value;
use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo;
use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind;
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::fx::FxHashMap;
use rustc_fs_util::path_to_c_string;
use rustc_hir::def::CtorKind;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_index::vec::{Idx, IndexVec};
use rustc_middle::bug;
use rustc_middle::mir::{self, GeneratorLayout};
-use rustc_middle::ty::layout::{self, IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::subst::GenericArgKind;
-use rustc_middle::ty::{
- self, AdtKind, GeneratorSubsts, Instance, ParamEnv, Ty, TyCtxt, COMMON_VTABLE_ENTRIES,
-};
+use rustc_middle::ty::{self, AdtKind, Instance, ParamEnv, Ty, TyCtxt, COMMON_VTABLE_ENTRIES};
use rustc_session::config::{self, DebugInfo};
use rustc_span::symbol::Symbol;
use rustc_span::FileNameDisplayPreference;
use rustc_span::{self, SourceFile, SourceFileHash};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, TagEncoding};
-use rustc_target::abi::{Int, Pointer, F32, F64};
-use rustc_target::abi::{Primitive, Size, VariantIdx, Variants};
-use smallvec::SmallVec;
+use rustc_target::abi::{Align, Size};
+use smallvec::smallvec;
use tracing::debug;
use libc::{c_longlong, c_uint};
-use std::cell::RefCell;
+use std::borrow::Cow;
use std::collections::hash_map::Entry;
use std::fmt::{self, Write};
use std::hash::{Hash, Hasher};
#[allow(non_upper_case_globals)]
const DW_ATE_UTF: c_uint = 0x10;
-pub const UNKNOWN_LINE_NUMBER: c_uint = 0;
-pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
-
-pub const NO_SCOPE_METADATA: Option<&DIScope> = None;
-
-mod unique_type_id {
- use rustc_data_structures::{
- fingerprint::Fingerprint,
- stable_hasher::{HashStable, NodeIdHashingMode, StableHasher},
- };
- use rustc_middle::ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
- use rustc_target::abi::VariantIdx;
-
- // This type cannot be constructed outside of this module because
- // it has a private field. We make use of this in order to prevent
- // `UniqueTypeId` from being constructed directly, without asserting
- // the preconditions.
- #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
- pub struct HiddenZst {
- _inaccessible: (),
- }
-
- /// A unique identifier for anything that we create a debuginfo node for.
- /// The types it contains are expected to already be normalized (which
- /// is debug_asserted in the constructors).
- ///
- /// Note that there are some things that only show up in debuginfo, like
- /// the separate type descriptions for each enum variant. These get an ID
- /// too because they have their own debuginfo node in LLVM IR.
- #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
- pub(super) enum UniqueTypeId<'tcx> {
- /// The ID of a regular type as it shows up at the language level.
- Ty(Ty<'tcx>, HiddenZst),
- /// The ID for the artificial struct type describing a single enum variant.
- Variant(Ty<'tcx>, VariantIdx, HiddenZst),
- /// The ID for the single DW_TAG_variant_part nested inside the top-level
- /// DW_TAG_structure_type that describes enums and generators.
- VariantPart(Ty<'tcx>, HiddenZst),
- /// The ID of the artificial type we create for VTables.
- VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, HiddenZst),
- }
-
- impl<'tcx> UniqueTypeId<'tcx> {
- pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
- debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
- UniqueTypeId::Ty(t, HiddenZst { _inaccessible: () })
- }
-
- pub fn for_enum_variant(
- tcx: TyCtxt<'tcx>,
- enum_ty: Ty<'tcx>,
- variant_idx: VariantIdx,
- ) -> Self {
- debug_assert_eq!(
- enum_ty,
- tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty)
- );
- UniqueTypeId::Variant(enum_ty, variant_idx, HiddenZst { _inaccessible: () })
- }
-
- pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
- debug_assert_eq!(
- enum_ty,
- tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty)
- );
- UniqueTypeId::VariantPart(enum_ty, HiddenZst { _inaccessible: () })
- }
-
- pub fn for_vtable_ty(
- tcx: TyCtxt<'tcx>,
- self_type: Ty<'tcx>,
- implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
- ) -> Self {
- debug_assert_eq!(
- self_type,
- tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
- );
- debug_assert_eq!(
- implemented_trait,
- tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
- );
- UniqueTypeId::VTableTy(self_type, implemented_trait, HiddenZst { _inaccessible: () })
- }
-
- /// Generates a string version of this [UniqueTypeId], which can be used as the `UniqueId`
- /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods.
- ///
- /// Right now this takes the form of a hex-encoded opaque hash value.
- pub fn generate_unique_id_string(&self, tcx: TyCtxt<'tcx>) -> String {
- let mut hasher = StableHasher::new();
- let mut hcx = tcx.create_stable_hashing_context();
- hcx.while_hashing_spans(false, |hcx| {
- hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
- self.hash_stable(hcx, &mut hasher);
- });
- });
- hasher.finish::<Fingerprint>().to_hex()
- }
- }
-}
-use unique_type_id::*;
-
-/// The `TypeMap` is where the debug context holds the type metadata nodes
-/// created so far. The metadata nodes are indexed by `UniqueTypeId`.
-#[derive(Default)]
-pub struct TypeMap<'ll, 'tcx> {
- unique_id_to_metadata: RefCell<FxHashMap<UniqueTypeId<'tcx>, &'ll DIType>>,
-}
-
-impl<'ll, 'tcx> TypeMap<'ll, 'tcx> {
- /// Adds a `UniqueTypeId` to metadata mapping to the `TypeMap`. The method will
- /// fail if the mapping already exists.
- fn register_unique_id_with_metadata(
- &self,
- unique_type_id: UniqueTypeId<'tcx>,
- metadata: &'ll DIType,
- ) {
- if self.unique_id_to_metadata.borrow_mut().insert(unique_type_id, metadata).is_some() {
- bug!("type metadata for unique ID '{:?}' is already in the `TypeMap`!", unique_type_id);
- }
- }
-
- fn find_metadata_for_unique_id(
- &self,
- unique_type_id: UniqueTypeId<'tcx>,
- ) -> Option<&'ll DIType> {
- self.unique_id_to_metadata.borrow().get(&unique_type_id).cloned()
- }
-}
-
-/// A description of some recursive type. It can either be already finished (as
-/// with `FinalMetadata`) or it is not yet finished, but contains all information
-/// needed to generate the missing parts of the description. See the
-/// documentation section on Recursive Types at the top of this file for more
-/// information.
-enum RecursiveTypeDescription<'ll, 'tcx> {
- UnfinishedMetadata {
- unfinished_type: Ty<'tcx>,
- unique_type_id: UniqueTypeId<'tcx>,
- metadata_stub: &'ll DICompositeType,
- member_holding_stub: &'ll DICompositeType,
- member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
- },
- FinalMetadata(&'ll DICompositeType),
-}
+pub(super) const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
-fn create_and_register_recursive_type_forward_declaration<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- unfinished_type: Ty<'tcx>,
- unique_type_id: UniqueTypeId<'tcx>,
- metadata_stub: &'ll DICompositeType,
- member_holding_stub: &'ll DICompositeType,
- member_description_factory: MemberDescriptionFactory<'ll, 'tcx>,
-) -> RecursiveTypeDescription<'ll, 'tcx> {
- // Insert the stub into the `TypeMap` in order to allow for recursive references.
- debug_context(cx).type_map.register_unique_id_with_metadata(unique_type_id, metadata_stub);
-
- UnfinishedMetadata {
- unfinished_type,
- unique_type_id,
- metadata_stub,
- member_holding_stub,
- member_description_factory,
- }
-}
+const NO_SCOPE_METADATA: Option<&DIScope> = None;
+/// A function that returns an empty list of generic parameter debuginfo nodes.
+const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = |_| SmallVec::new();
-impl<'ll, 'tcx> RecursiveTypeDescription<'ll, 'tcx> {
- /// Finishes up the description of the type in question (mostly by providing
- /// descriptions of the fields of the given type) and returns the final type
- /// metadata.
- fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> {
- match *self {
- FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false),
- UnfinishedMetadata {
- unfinished_type,
- unique_type_id,
- metadata_stub,
- member_holding_stub,
- ref member_description_factory,
- } => {
- // Make sure that we have a forward declaration of the type in
- // the TypeMap so that recursive references are possible. This
- // will always be the case if the RecursiveTypeDescription has
- // been properly created through the
- // `create_and_register_recursive_type_forward_declaration()`
- // function.
- {
- if debug_context(cx)
- .type_map
- .find_metadata_for_unique_id(unique_type_id)
- .is_none()
- {
- bug!(
- "Forward declaration of potentially recursive type \
- '{:?}' was not found in TypeMap!",
- unfinished_type
- );
- }
- }
+// SmallVec is used quite a bit in this module, so create a shorthand.
+// The actual number of elements is not so important.
+pub type SmallVec<T> = smallvec::SmallVec<[T; 16]>;
- // ... then create the member descriptions ...
- let member_descriptions = member_description_factory.create_member_descriptions(cx);
- let type_params = compute_type_parameters(cx, unfinished_type);
+mod enums;
+mod type_map;
- // ... and attach them to the stub to complete it.
- set_members_of_composite_type(
- cx,
- member_holding_stub,
- member_descriptions,
- None,
- type_params,
- );
- MetadataCreationResult::new(metadata_stub, true)
- }
- }
- }
-}
+pub(crate) use type_map::TypeMap;
-/// Returns from the enclosing function if the type metadata with the given
+/// Returns from the enclosing function if the type debuginfo node with the given
/// unique ID can be found in the type map.
-macro_rules! return_if_metadata_created_in_meantime {
+macro_rules! return_if_di_node_created_in_meantime {
($cx: expr, $unique_type_id: expr) => {
- if let Some(metadata) =
- debug_context($cx).type_map.find_metadata_for_unique_id($unique_type_id)
- {
- return MetadataCreationResult::new(metadata, true);
+ if let Some(di_node) = debug_context($cx).type_map.di_node_for_unique_id($unique_type_id) {
+ return DINodeCreationResult::new(di_node, true);
}
};
}
+/// Extract size and alignment from a TyAndLayout.
+fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
+ (ty_and_layout.size, ty_and_layout.align.abi)
+}
+
/// Creates debuginfo for a fixed size array (e.g. `[u64; 123]`).
-/// For slices (that is, "arrays" of unknown size) use [slice_type_metadata].
-fn fixed_size_array_metadata<'ll, 'tcx>(
+/// For slices (that is, "arrays" of unknown size) use [build_slice_type_di_node].
+fn build_fixed_size_array_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
array_type: Ty<'tcx>,
-) -> MetadataCreationResult<'ll> {
+) -> DINodeCreationResult<'ll> {
let ty::Array(element_type, len) = array_type.kind() else {
- bug!("fixed_size_array_metadata() called with non-ty::Array type `{:?}`", array_type)
+ bug!("build_fixed_size_array_di_node() called with non-ty::Array type `{:?}`", array_type)
};
- let element_type_metadata = type_metadata(cx, *element_type);
+ let element_type_di_node = type_di_node(cx, *element_type);
- return_if_metadata_created_in_meantime!(cx, unique_type_id);
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
let (size, align) = cx.size_and_align_of(array_type);
unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
let subscripts = create_DIArray(DIB(cx), &[subrange]);
- let metadata = unsafe {
+ let di_node = unsafe {
llvm::LLVMRustDIBuilderCreateArrayType(
DIB(cx),
size.bits(),
align.bits() as u32,
- element_type_metadata,
+ element_type_di_node,
subscripts,
)
};
- MetadataCreationResult::new(metadata, false)
+ DINodeCreationResult::new(di_node, false)
}
/// Creates debuginfo for built-in pointer-like things:
///
/// At some point we might want to remove the special handling of Box
/// and treat it the same as other smart pointers (like Rc, Arc, ...).
-fn pointer_or_reference_metadata<'ll, 'tcx>(
+fn build_pointer_or_reference_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
ptr_type: Ty<'tcx>,
pointee_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> MetadataCreationResult<'ll> {
- let pointee_type_metadata = type_metadata(cx, pointee_type);
+) -> DINodeCreationResult<'ll> {
+ // The debuginfo generated by this function is only valid if `ptr_type` is really just
+ // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
+ debug_assert_eq!(
+ cx.size_and_align_of(ptr_type),
+ cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+ );
- return_if_metadata_created_in_meantime!(cx, unique_type_id);
+ let pointee_type_di_node = type_di_node(cx, pointee_type);
+
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
let (thin_pointer_size, thin_pointer_align) =
cx.size_and_align_of(cx.tcx.mk_imm_ptr(cx.tcx.types.unit));
let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
- let pointer_type_metadata = match fat_pointer_kind(cx, pointee_type) {
+ match fat_pointer_kind(cx, pointee_type) {
None => {
// This is a thin pointer. Create a regular pointer type and give it the correct name.
debug_assert_eq!(
pointee_type,
);
- unsafe {
+ let di_node = unsafe {
llvm::LLVMRustDIBuilderCreatePointerType(
DIB(cx),
- pointee_type_metadata,
+ pointee_type_di_node,
thin_pointer_size.bits(),
thin_pointer_align.bits() as u32,
0, // Ignore DWARF address space.
ptr_type_debuginfo_name.as_ptr().cast(),
ptr_type_debuginfo_name.len(),
)
- }
+ };
+
+ DINodeCreationResult { di_node, already_stored_in_typemap: false }
}
Some(fat_pointer_kind) => {
- let layout = cx.layout_of(ptr_type);
-
- let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
- let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
-
- let (addr_field_name, extra_field_name) = match fat_pointer_kind {
- FatPtrKind::Dyn => ("pointer", "vtable"),
- FatPtrKind::Slice => ("data_ptr", "length"),
- };
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &ptr_type_debuginfo_name,
+ cx.size_and_align_of(ptr_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, owner| {
+ // FIXME: If this fat pointer is a `Box` then we don't want to use its
+ // type layout and instead use the layout of the raw pointer inside
+ // of it.
+ // The proper way to handle this is to not treat Box as a pointer
+ // at all and instead emit regular struct debuginfo for it. We just
+ // need to make sure that we don't break existing debuginfo consumers
+ // by doing that (at least not without a warning period).
+ let layout_type =
+ if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+
+ let layout = cx.layout_of(layout_type);
+ let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
+ let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
+
+ let (addr_field_name, extra_field_name) = match fat_pointer_kind {
+ FatPtrKind::Dyn => ("pointer", "vtable"),
+ FatPtrKind::Slice => ("data_ptr", "length"),
+ };
- debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
- debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+ debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
+ debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
- // The data pointer type is a regular, thin pointer, regardless of whether this is a slice
- // or a trait object.
- let data_ptr_type_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreatePointerType(
- DIB(cx),
- pointee_type_metadata,
- addr_field.size.bits(),
- addr_field.align.abi.bits() as u32,
- 0, // Ignore DWARF address space.
- std::ptr::null(),
- 0,
- )
- };
+ // The data pointer type is a regular, thin pointer, regardless of whether this
+ // is a slice or a trait object.
+ let data_ptr_type_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ pointee_type_di_node,
+ addr_field.size.bits(),
+ addr_field.align.abi.bits() as u32,
+ 0, // Ignore DWARF address space.
+ std::ptr::null(),
+ 0,
+ )
+ };
- let member_descriptions = vec![
- MemberDescription {
- name: addr_field_name.into(),
- type_metadata: data_ptr_type_metadata,
- offset: layout.fields.offset(abi::FAT_PTR_ADDR),
- size: addr_field.size,
- align: addr_field.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- },
- MemberDescription {
- name: extra_field_name.into(),
- type_metadata: type_metadata(cx, extra_field.ty),
- offset: layout.fields.offset(abi::FAT_PTR_EXTRA),
- size: extra_field.size,
- align: extra_field.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
+ smallvec![
+ build_field_di_node(
+ cx,
+ owner,
+ addr_field_name,
+ (addr_field.size, addr_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_ADDR),
+ DIFlags::FlagZero,
+ data_ptr_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ owner,
+ extra_field_name,
+ (extra_field.size, extra_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_EXTRA),
+ DIFlags::FlagZero,
+ type_di_node(cx, extra_field.ty),
+ ),
+ ]
},
- ];
-
- composite_type_metadata(
- cx,
- ptr_type,
- &ptr_type_debuginfo_name,
- unique_type_id,
- member_descriptions,
- NO_SCOPE_METADATA,
+ NO_GENERICS,
)
}
- };
-
- MetadataCreationResult { metadata: pointer_type_metadata, already_stored_in_typemap: false }
+ }
}
-fn subroutine_type_metadata<'ll, 'tcx>(
+fn build_subroutine_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> MetadataCreationResult<'ll> {
+) -> DINodeCreationResult<'ll> {
// It's possible to create a self-referential
// type in Rust by using 'impl trait':
//
// Once that is created, we replace the marker in the typemap with the actual type.
debug_context(cx)
.type_map
- .unique_id_to_metadata
+ .unique_id_to_di_node
.borrow_mut()
- .insert(unique_type_id, recursion_marker_type(cx));
-
- let UniqueTypeId::Ty(fn_ty, _) = unique_type_id else {
- bug!("subroutine_type_metadata() called with unexpected input type: {:?}", unique_type_id)
- };
+ .insert(unique_type_id, recursion_marker_type_di_node(cx));
+ let fn_ty = unique_type_id.expect_ty();
let signature = cx
.tcx
.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), fn_ty.fn_sig(cx.tcx));
- let signature_metadata: SmallVec<[_; 32]> = iter::once(
+ let signature_di_nodes: SmallVec<_> = iter::once(
// return type
match signature.output().kind() {
ty::Tuple(tys) if tys.is_empty() => {
// this is a "void" function
None
}
- _ => Some(type_metadata(cx, signature.output())),
+ _ => Some(type_di_node(cx, signature.output())),
},
)
.chain(
// regular arguments
- signature.inputs().iter().map(|&argument_type| Some(type_metadata(cx, argument_type))),
+ signature.inputs().iter().map(|&argument_type| Some(type_di_node(cx, argument_type))),
)
.collect();
- debug_context(cx).type_map.unique_id_to_metadata.borrow_mut().remove(&unique_type_id);
+ debug_context(cx).type_map.unique_id_to_di_node.borrow_mut().remove(&unique_type_id);
- let fn_metadata = unsafe {
+ let fn_di_node = unsafe {
llvm::LLVMRustDIBuilderCreateSubroutineType(
DIB(cx),
- create_DIArray(DIB(cx), &signature_metadata[..]),
+ create_DIArray(DIB(cx), &signature_di_nodes[..]),
)
};
// This is actually a function pointer, so wrap it in pointer DI.
let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
- let metadata = unsafe {
+ let di_node = unsafe {
llvm::LLVMRustDIBuilderCreatePointerType(
DIB(cx),
- fn_metadata,
+ fn_di_node,
cx.tcx.data_layout.pointer_size.bits(),
cx.tcx.data_layout.pointer_align.abi.bits() as u32,
0, // Ignore DWARF address space.
)
};
- MetadataCreationResult::new(metadata, false)
+ DINodeCreationResult::new(di_node, false)
}
/// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs
/// we with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync").
-fn dyn_type_metadata<'ll, 'tcx>(
+fn build_dyn_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
dyn_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> &'ll DIType {
+) -> DINodeCreationResult<'ll> {
if let ty::Dynamic(..) = dyn_type.kind() {
let type_name = compute_debuginfo_type_name(cx.tcx, dyn_type, true);
- composite_type_metadata(cx, dyn_type, &type_name, unique_type_id, vec![], NO_SCOPE_METADATA)
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(dyn_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
+ )
} else {
- bug!("Only ty::Dynamic is valid for dyn_type_metadata(). Found {:?} instead.", dyn_type)
+ bug!(
+ "Only ty::Dynamic is valid for build_dyn_type_di_node(). Found {:?} instead.",
+ dyn_type
+ )
}
}
/// `struct Foo { unsized_field: u8 }` in debuginfo. If the length of the
/// slice is zero, then accessing `unsized_field` in the debugger would
/// result in an out-of-bounds access.
-fn slice_type_metadata<'ll, 'tcx>(
+fn build_slice_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
slice_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> MetadataCreationResult<'ll> {
+) -> DINodeCreationResult<'ll> {
let element_type = match slice_type.kind() {
ty::Slice(element_type) => *element_type,
ty::Str => cx.tcx.types.u8,
_ => {
bug!(
- "Only ty::Slice is valid for slice_type_metadata(). Found {:?} instead.",
+ "Only ty::Slice is valid for build_slice_type_di_node(). Found {:?} instead.",
slice_type
)
}
};
- let element_type_metadata = type_metadata(cx, element_type);
- return_if_metadata_created_in_meantime!(cx, unique_type_id);
- MetadataCreationResult { metadata: element_type_metadata, already_stored_in_typemap: false }
+ let element_type_di_node = type_di_node(cx, element_type);
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+ DINodeCreationResult { di_node: element_type_di_node, already_stored_in_typemap: false }
}
-pub fn type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+/// Get the debuginfo node for the given type.
+///
+/// This function will look up the debuginfo node in the TypeMap. If it can't find it, it
+/// will create the node by dispatching to the corresponding `build_*_di_node()` function.
+pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
let unique_type_id = UniqueTypeId::for_ty(cx.tcx, t);
- if let Some(metadata) = debug_context(cx).type_map.find_metadata_for_unique_id(unique_type_id) {
- return metadata;
+ if let Some(existing_di_node) = debug_context(cx).type_map.di_node_for_unique_id(unique_type_id)
+ {
+ return existing_di_node;
}
- debug!("type_metadata: {:?}", t);
+ debug!("type_di_node: {:?}", t);
- let MetadataCreationResult { metadata, already_stored_in_typemap } = match *t.kind() {
+ let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
- MetadataCreationResult::new(basic_type_metadata(cx, t), false)
+ DINodeCreationResult::new(build_basic_type_di_node(cx, t), false)
}
ty::Tuple(elements) if elements.is_empty() => {
- MetadataCreationResult::new(basic_type_metadata(cx, t), false)
- }
- ty::Array(..) => fixed_size_array_metadata(cx, unique_type_id, t),
- ty::Slice(_) | ty::Str => slice_type_metadata(cx, t, unique_type_id),
- ty::Dynamic(..) => {
- MetadataCreationResult::new(dyn_type_metadata(cx, t, unique_type_id), false)
- }
- ty::Foreign(..) => {
- MetadataCreationResult::new(foreign_type_metadata(cx, t, unique_type_id), false)
+ DINodeCreationResult::new(build_basic_type_di_node(cx, t), false)
}
+ ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t),
+ ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id),
+ ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id),
+ ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id),
ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
- pointer_or_reference_metadata(cx, t, pointee_type, unique_type_id)
+ build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
}
// Box<T, A> may have a non-ZST allocator A. In that case, we
// cannot treat Box<T, A> as just an owned alias of `*mut T`.
ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
- pointer_or_reference_metadata(cx, t, t.boxed_ty(), unique_type_id)
- }
- ty::FnDef(..) | ty::FnPtr(_) => subroutine_type_metadata(cx, unique_type_id),
- ty::Closure(def_id, substs) => {
- let upvar_tys: Vec<_> = substs.as_closure().upvar_tys().collect();
- let containing_scope = get_namespace_for_item(cx, def_id);
- prepare_tuple_metadata(cx, t, &upvar_tys, unique_type_id, Some(containing_scope))
- .finalize(cx)
- }
- ty::Generator(def_id, substs, _) => {
- let upvar_tys: Vec<_> = substs
- .as_generator()
- .prefix_tys()
- .map(|t| cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
- .collect();
- prepare_enum_metadata(cx, t, def_id, unique_type_id, upvar_tys).finalize(cx)
+ build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
}
+ ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
+ ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id),
+ ty::Generator(..) => enums::build_generator_di_node(cx, unique_type_id),
ty::Adt(def, ..) => match def.adt_kind() {
- AdtKind::Struct => prepare_struct_metadata(cx, t, unique_type_id).finalize(cx),
- AdtKind::Union => prepare_union_metadata(cx, t, unique_type_id).finalize(cx),
- AdtKind::Enum => {
- prepare_enum_metadata(cx, t, def.did, unique_type_id, vec![]).finalize(cx)
- }
+ AdtKind::Struct => build_struct_type_di_node(cx, unique_type_id),
+ AdtKind::Union => build_union_type_di_node(cx, unique_type_id),
+ AdtKind::Enum => enums::build_enum_type_di_node(cx, unique_type_id),
},
- ty::Tuple(tys) => {
- prepare_tuple_metadata(cx, t, tys, unique_type_id, NO_SCOPE_METADATA).finalize(cx)
- }
+ ty::Tuple(_) => build_tuple_type_di_node(cx, unique_type_id),
// Type parameters from polymorphized functions.
- ty::Param(_) => MetadataCreationResult::new(param_type_metadata(cx, t), false),
- _ => bug!("debuginfo: unexpected type in type_metadata: {:?}", t),
+ ty::Param(_) => build_param_type_di_node(cx, t),
+ _ => bug!("debuginfo: unexpected type in type_di_node(): {:?}", t),
};
{
if already_stored_in_typemap {
// Make sure that we really do have a `TypeMap` entry for the unique type ID.
- let metadata_for_uid =
- match debug_context(cx).type_map.find_metadata_for_unique_id(unique_type_id) {
- Some(metadata) => metadata,
+ let di_node_for_uid =
+ match debug_context(cx).type_map.di_node_for_unique_id(unique_type_id) {
+ Some(di_node) => di_node,
None => {
bug!(
- "expected type metadata for unique \
+ "expected type debuginfo node for unique \
type ID '{:?}' to already be in \
the `debuginfo::TypeMap` but it \
was not.",
}
};
- debug_assert_eq!(metadata_for_uid as *const _, metadata as *const _);
+ debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
} else {
- debug_context(cx).type_map.register_unique_id_with_metadata(unique_type_id, metadata);
+ debug_context(cx).type_map.insert(unique_type_id, di_node);
}
}
- metadata
+ di_node
}
-fn recursion_marker_type<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType {
+// FIXME(mw): Cache this via a regular UniqueTypeId instead of an extra field in the debug context.
+fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType {
*debug_context(cx).recursion_marker_type.get_or_init(move || {
unsafe {
// The choice of type here is pretty arbitrary -
}
}
-fn basic_type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
- debug!("basic_type_metadata: {:?}", t);
+fn build_basic_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+ debug!("build_basic_type_di_node: {:?}", t);
// When targeting MSVC, emit MSVC style type names for compatibility with
// .natvis visualizers (and perhaps other existing native debuggers?)
ty::Int(int_ty) => (int_ty.name_str(), DW_ATE_signed),
ty::Uint(uint_ty) => (uint_ty.name_str(), DW_ATE_unsigned),
ty::Float(float_ty) => (float_ty.name_str(), DW_ATE_float),
- _ => bug!("debuginfo::basic_type_metadata - `t` is invalid type"),
+ _ => bug!("debuginfo::build_basic_type_di_node - `t` is invalid type"),
};
- let ty_metadata = unsafe {
+ let ty_di_node = unsafe {
llvm::LLVMRustDIBuilderCreateBasicType(
DIB(cx),
name.as_ptr().cast(),
};
if !cpp_like_debuginfo {
- return ty_metadata;
+ return ty_di_node;
}
let typedef_name = match t.kind() {
ty::Int(int_ty) => int_ty.name_str(),
ty::Uint(uint_ty) => uint_ty.name_str(),
ty::Float(float_ty) => float_ty.name_str(),
- _ => return ty_metadata,
+ _ => return ty_di_node,
};
- let typedef_metadata = unsafe {
+ let typedef_di_node = unsafe {
llvm::LLVMRustDIBuilderCreateTypedef(
DIB(cx),
- ty_metadata,
+ ty_di_node,
typedef_name.as_ptr().cast(),
typedef_name.len(),
unknown_file_metadata(cx),
)
};
- typedef_metadata
+ typedef_di_node
}
-fn foreign_type_metadata<'ll, 'tcx>(
+fn build_foreign_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
t: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> &'ll DIType {
- debug!("foreign_type_metadata: {:?}", t);
+) -> DINodeCreationResult<'ll> {
+ debug!("build_foreign_type_di_node: {:?}", t);
- let name = compute_debuginfo_type_name(cx.tcx, t, false);
- let (size, align) = cx.size_and_align_of(t);
- create_struct_stub(
+ let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
+ bug!("build_foreign_type_di_node() called with unexpected type: {:?}", unique_type_id.expect_ty());
+ };
+
+ build_type_with_children(
cx,
- size,
- align,
- &name,
- unique_type_id,
- NO_SCOPE_METADATA,
- DIFlags::FlagZero,
- None,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, t, false),
+ cx.size_and_align_of(t),
+ Some(get_namespace_for_item(cx, def_id)),
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
)
}
-fn param_type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
- debug!("param_type_metadata: {:?}", t);
+fn build_param_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_param_type_di_node: {:?}", t);
let name = format!("{:?}", t);
- unsafe {
- llvm::LLVMRustDIBuilderCreateBasicType(
- DIB(cx),
- name.as_ptr().cast(),
- name.len(),
- Size::ZERO.bits(),
- DW_ATE_unsigned,
- )
+ DINodeCreationResult {
+ di_node: unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ Size::ZERO.bits(),
+ DW_ATE_unsigned,
+ )
+ },
+ already_stored_in_typemap: false,
}
}
-pub fn compile_unit_metadata<'ll, 'tcx>(
+pub fn build_compile_unit_di_node<'ll, 'tcx>(
tcx: TyCtxt<'tcx>,
codegen_unit_name: &str,
- debug_context: &CrateDebugContext<'ll, 'tcx>,
+ debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
) -> &'ll DIDescriptor {
let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
Some(ref path) => path.clone(),
name_in_debuginfo.push("@");
name_in_debuginfo.push(codegen_unit_name);
- debug!("compile_unit_metadata: {:?}", name_in_debuginfo);
+ debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
let rustc_producer =
format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"),);
// FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
}
}
-struct MetadataCreationResult<'ll> {
- metadata: &'ll DIType,
- already_stored_in_typemap: bool,
-}
-
-impl<'ll> MetadataCreationResult<'ll> {
- fn new(metadata: &'ll DIType, already_stored_in_typemap: bool) -> Self {
- MetadataCreationResult { metadata, already_stored_in_typemap }
- }
-}
-
-#[derive(Debug)]
-struct SourceInfo<'ll> {
- file: &'ll DIFile,
- line: u32,
-}
-
-/// Description of a type member, which can either be a regular field (as in
-/// structs or tuples) or an enum variant.
-#[derive(Debug)]
-struct MemberDescription<'ll> {
- name: String,
- type_metadata: &'ll DIType,
+/// Creates a `DW_TAG_member` entry inside the DIE represented by the given `owner`,
+/// with `type_di_node` as the type of the member.
+fn build_field_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ owner: &'ll DIScope,
+ name: &str,
+ size_and_align: (Size, Align),
offset: Size,
- size: Size,
- align: Align,
flags: DIFlags,
- discriminant: Option<u64>,
- source_info: Option<SourceInfo<'ll>>,
-}
-
-impl<'ll> MemberDescription<'ll> {
- fn into_metadata(
- self,
- cx: &CodegenCx<'ll, '_>,
- composite_type_metadata: &'ll DIScope,
- ) -> &'ll DIType {
- let (file, line) = self
- .source_info
- .map(|info| (info.file, info.line))
- .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
- unsafe {
- llvm::LLVMRustDIBuilderCreateVariantMemberType(
- DIB(cx),
- composite_type_metadata,
- self.name.as_ptr().cast(),
- self.name.len(),
- file,
- line,
- self.size.bits(),
- self.align.bits() as u32,
- self.offset.bits(),
- self.discriminant.map(|v| cx.const_u64(v)),
- self.flags,
- self.type_metadata,
- )
- }
- }
-}
-
-/// A factory for `MemberDescription`s. It produces a list of member descriptions
-/// for some record-like type. `MemberDescriptionFactory`s are used to defer the
-/// creation of type member descriptions in order to break cycles arising from
-/// recursive type definitions.
-enum MemberDescriptionFactory<'ll, 'tcx> {
- StructMDF(StructMemberDescriptionFactory<'tcx>),
- TupleMDF(TupleMemberDescriptionFactory<'tcx>),
- EnumMDF(EnumMemberDescriptionFactory<'ll, 'tcx>),
- UnionMDF(UnionMemberDescriptionFactory<'tcx>),
- VariantMDF(VariantMemberDescriptionFactory<'tcx>),
-}
-
-impl<'ll, 'tcx> MemberDescriptionFactory<'ll, 'tcx> {
- fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
- match *self {
- StructMDF(ref this) => this.create_member_descriptions(cx),
- TupleMDF(ref this) => this.create_member_descriptions(cx),
- EnumMDF(ref this) => this.create_member_descriptions(cx),
- UnionMDF(ref this) => this.create_member_descriptions(cx),
- VariantMDF(ref this) => this.create_member_descriptions(cx),
- }
- }
-}
-
-//=-----------------------------------------------------------------------------
-// Structs
-//=-----------------------------------------------------------------------------
-
-/// Creates `MemberDescription`s for the fields of a struct.
-struct StructMemberDescriptionFactory<'tcx> {
- ty: Ty<'tcx>,
- variant: &'tcx ty::VariantDef,
-}
-
-impl<'tcx> StructMemberDescriptionFactory<'tcx> {
- fn create_member_descriptions<'ll>(
- &self,
- cx: &CodegenCx<'ll, 'tcx>,
- ) -> Vec<MemberDescription<'ll>> {
- let layout = cx.layout_of(self.ty);
- self.variant
- .fields
- .iter()
- .enumerate()
- .map(|(i, f)| {
- let name = if self.variant.ctor_kind == CtorKind::Fn {
- format!("__{}", i)
- } else {
- f.name.to_string()
- };
- let field = layout.field(cx, i);
- MemberDescription {
- name,
- type_metadata: type_metadata(cx, field.ty),
- offset: layout.fields.offset(i),
- size: field.size,
- align: field.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- }
- })
- .collect()
+ type_di_node: &'ll DIType,
+) -> &'ll DIType {
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ owner,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size_and_align.0.bits(),
+ size_and_align.1.bits() as u32,
+ offset.bits(),
+ flags,
+ type_di_node,
+ )
}
}
-fn prepare_struct_metadata<'ll, 'tcx>(
+/// Creates the debuginfo node for a Rust struct type. May be a regular struct or a tuple-struct.
+fn build_struct_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- struct_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> RecursiveTypeDescription<'ll, 'tcx> {
- let struct_name = compute_debuginfo_type_name(cx.tcx, struct_type, false);
-
- let (struct_def_id, variant) = match struct_type.kind() {
- ty::Adt(def, _) => (def.did, def.non_enum_variant()),
- _ => bug!("prepare_struct_metadata on a non-ADT"),
+) -> DINodeCreationResult<'ll> {
+ let struct_type = unique_type_id.expect_ty();
+ let ty::Adt(adt_def, _) = struct_type.kind() else {
+ bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
};
+ debug_assert!(adt_def.is_struct());
+ let containing_scope = get_namespace_for_item(cx, adt_def.did());
+ let struct_type_and_layout = cx.layout_of(struct_type);
+ let variant_def = adt_def.non_enum_variant();
- let containing_scope = get_namespace_for_item(cx, struct_def_id);
- let (size, align) = cx.size_and_align_of(struct_type);
-
- let struct_metadata_stub = create_struct_stub(
- cx,
- size,
- align,
- &struct_name,
- unique_type_id,
- Some(containing_scope),
- DIFlags::FlagZero,
- None,
- );
-
- create_and_register_recursive_type_forward_declaration(
+ type_map::build_type_with_children(
cx,
- struct_type,
- unique_type_id,
- struct_metadata_stub,
- struct_metadata_stub,
- StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant }),
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, struct_type, false),
+ size_and_align_of(struct_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_name = if variant_def.ctor_kind == CtorKind::Fn {
+ // This is a tuple struct
+ tuple_field_name(i)
+ } else {
+ // This is a struct with named fields
+ Cow::Borrowed(f.name.as_str())
+ };
+ let field_layout = struct_type_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ &field_name[..],
+ (field_layout.size, field_layout.align.abi),
+ struct_type_and_layout.fields.offset(i),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, struct_type),
)
}
/// Here are some examples:
/// - `name__field1__field2` when the upvar is captured by value.
/// - `_ref__name__field` when the upvar is captured by reference.
-fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> Vec<String> {
+///
+/// For generators this only contains upvars that are shared by all states.
+fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> SmallVec<String> {
let body = tcx.optimized_mir(def_id);
body.var_debug_info
let prefix = if is_ref { "_ref__" } else { "" };
Some(prefix.to_owned() + var.name.as_str())
})
- .collect::<Vec<_>>()
+ .collect()
}
-/// Creates `MemberDescription`s for the fields of a tuple.
-struct TupleMemberDescriptionFactory<'tcx> {
- ty: Ty<'tcx>,
- component_types: Vec<Ty<'tcx>>,
-}
+/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
+/// For a generator, this will handle upvars shared by all states.
+fn build_upvar_field_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ closure_or_generator_ty: Ty<'tcx>,
+ closure_or_generator_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
+ ty::Generator(def_id, substs, _) => {
+ let upvar_tys: SmallVec<_> = substs.as_generator().prefix_tys().collect();
+ (def_id, upvar_tys)
+ }
+ ty::Closure(def_id, substs) => {
+ let upvar_tys: SmallVec<_> = substs.as_closure().upvar_tys().collect();
+ (def_id, upvar_tys)
+ }
+ _ => {
+ bug!(
+ "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
+ closure_or_generator_ty
+ )
+ }
+ };
-impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
- fn create_member_descriptions<'ll>(
- &self,
- cx: &CodegenCx<'ll, 'tcx>,
- ) -> Vec<MemberDescription<'ll>> {
- let mut capture_names = match *self.ty.kind() {
- ty::Generator(def_id, ..) | ty::Closure(def_id, ..) => {
- Some(closure_saved_names_of_captured_variables(cx.tcx, def_id).into_iter())
- }
- _ => None,
- };
- let layout = cx.layout_of(self.ty);
- self.component_types
+ debug_assert!(
+ up_var_tys
.iter()
- .enumerate()
- .map(|(i, &component_type)| {
- let (size, align) = cx.size_and_align_of(component_type);
- let name = if let Some(names) = capture_names.as_mut() {
- names.next().unwrap()
- } else {
- format!("__{}", i)
- };
- MemberDescription {
- name,
- type_metadata: type_metadata(cx, component_type),
- offset: layout.fields.offset(i),
- size,
- align,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- }
- })
- .collect()
- }
+ .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+ );
+
+ let capture_names = closure_saved_names_of_captured_variables(cx.tcx, def_id);
+ let layout = cx.layout_of(closure_or_generator_ty);
+
+ up_var_tys
+ .into_iter()
+ .zip(capture_names.iter())
+ .enumerate()
+ .map(|(index, (up_var_ty, capture_name))| {
+ build_field_di_node(
+ cx,
+ closure_or_generator_di_node,
+ capture_name,
+ cx.size_and_align_of(up_var_ty),
+ layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, up_var_ty),
+ )
+ })
+ .collect()
}
-fn prepare_tuple_metadata<'ll, 'tcx>(
+/// Builds the DW_TAG_structure_type debuginfo node for a Rust tuple type.
+fn build_tuple_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- tuple_type: Ty<'tcx>,
- component_types: &[Ty<'tcx>],
unique_type_id: UniqueTypeId<'tcx>,
- containing_scope: Option<&'ll DIScope>,
-) -> RecursiveTypeDescription<'ll, 'tcx> {
- let (size, align) = cx.size_and_align_of(tuple_type);
- let tuple_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);
+) -> DINodeCreationResult<'ll> {
+ let tuple_type = unique_type_id.expect_ty();
+ let &ty::Tuple(component_types) = tuple_type.kind() else {
+ bug!("build_tuple_type_di_node() called with non-tuple-type: {:?}", tuple_type)
+ };
- let struct_stub = create_struct_stub(
- cx,
- size,
- align,
- &tuple_name[..],
- unique_type_id,
- containing_scope,
- DIFlags::FlagZero,
- None,
- );
+ let tuple_type_and_layout = cx.layout_of(tuple_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);
- create_and_register_recursive_type_forward_declaration(
+ type_map::build_type_with_children(
cx,
- tuple_type,
- unique_type_id,
- struct_stub,
- struct_stub,
- TupleMDF(TupleMemberDescriptionFactory {
- ty: tuple_type,
- component_types: component_types.to_vec(),
- }),
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(tuple_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, tuple_di_node| {
+ component_types
+ .into_iter()
+ .enumerate()
+ .map(|(index, component_type)| {
+ build_field_di_node(
+ cx,
+ tuple_di_node,
+ &tuple_field_name(index),
+ cx.size_and_align_of(component_type),
+ tuple_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, component_type),
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
)
}
-//=-----------------------------------------------------------------------------
-// Unions
-//=-----------------------------------------------------------------------------
-
-struct UnionMemberDescriptionFactory<'tcx> {
- layout: TyAndLayout<'tcx>,
- variant: &'tcx ty::VariantDef,
-}
+/// Builds the debuginfo node for a closure environment.
+fn build_closure_env_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let closure_env_type = unique_type_id.expect_ty();
+ let &ty::Closure(def_id, _substs) = closure_env_type.kind() else {
+ bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
+ };
+ let containing_scope = get_namespace_for_item(cx, def_id);
+ let type_name = compute_debuginfo_type_name(cx.tcx, closure_env_type, false);
-impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
- fn create_member_descriptions<'ll>(
- &self,
- cx: &CodegenCx<'ll, 'tcx>,
- ) -> Vec<MemberDescription<'ll>> {
- self.variant
- .fields
- .iter()
- .enumerate()
- .map(|(i, f)| {
- let field = self.layout.field(cx, i);
- MemberDescription {
- name: f.name.to_string(),
- type_metadata: type_metadata(cx, field.ty),
- offset: Size::ZERO,
- size: field.size,
- align: field.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- }
- })
- .collect()
- }
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(closure_env_type),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| build_upvar_field_di_nodes(cx, closure_env_type, owner),
+ NO_GENERICS,
+ )
}
-fn prepare_union_metadata<'ll, 'tcx>(
+/// Builds the debuginfo node for a Rust `union` type.
+fn build_union_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
- union_type: Ty<'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
-) -> RecursiveTypeDescription<'ll, 'tcx> {
- let union_name = compute_debuginfo_type_name(cx.tcx, union_type, false);
-
- let (union_def_id, variant) = match union_type.kind() {
- ty::Adt(def, _) => (def.did, def.non_enum_variant()),
- _ => bug!("prepare_union_metadata on a non-ADT"),
+) -> DINodeCreationResult<'ll> {
+ let union_type = unique_type_id.expect_ty();
+ let (union_def_id, variant_def) = match union_type.kind() {
+ ty::Adt(def, _) => (def.did(), def.non_enum_variant()),
+ _ => bug!("build_union_type_di_node on a non-ADT"),
};
-
let containing_scope = get_namespace_for_item(cx, union_def_id);
+ let union_ty_and_layout = cx.layout_of(union_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, union_type, false);
- let union_metadata_stub =
- create_union_stub(cx, union_type, &union_name, unique_type_id, containing_scope);
-
- create_and_register_recursive_type_forward_declaration(
+ type_map::build_type_with_children(
cx,
- union_type,
- unique_type_id,
- union_metadata_stub,
- union_metadata_stub,
- UnionMDF(UnionMemberDescriptionFactory { layout: cx.layout_of(union_type), variant }),
+ type_map::stub(
+ cx,
+ Stub::Union,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(union_ty_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_layout = union_ty_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ f.name.as_str(),
+ size_and_align_of(field_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ // Generics:
+ |cx| build_generic_type_param_di_nodes(cx, union_type),
)
}
-//=-----------------------------------------------------------------------------
-// Enums
-//=-----------------------------------------------------------------------------
-
// FIXME(eddyb) maybe precompute this? Right now it's computed once
// per generator monomorphization, but it doesn't depend on substs.
fn generator_layout_and_saved_local_names<'tcx>(
(generator_layout, generator_saved_local_names)
}
-/// Describes the members of an enum value; an enum is described as a union of
-/// structs in DWARF. This `MemberDescriptionFactory` provides the description for
-/// the members of this union; so for every variant of the given enum, this
-/// factory will produce one `MemberDescription` (all with no name and a fixed
-/// offset of zero bytes).
-struct EnumMemberDescriptionFactory<'ll, 'tcx> {
- enum_type: Ty<'tcx>,
- layout: TyAndLayout<'tcx>,
- tag_type_metadata: Option<&'ll DIType>,
- common_members: Vec<Option<&'ll DIType>>,
-}
-
-impl<'ll, 'tcx> EnumMemberDescriptionFactory<'ll, 'tcx> {
- fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> {
- let generator_variant_info_data = match *self.enum_type.kind() {
- ty::Generator(def_id, ..) => {
- Some(generator_layout_and_saved_local_names(cx.tcx, def_id))
- }
- _ => None,
- };
-
- let variant_info_for = |index: VariantIdx| match *self.enum_type.kind() {
- ty::Adt(adt, _) => VariantInfo::Adt(&adt.variants[index], index),
- ty::Generator(def_id, _, _) => {
- let (generator_layout, generator_saved_local_names) =
- generator_variant_info_data.as_ref().unwrap();
- VariantInfo::Generator {
- def_id,
- generator_layout: *generator_layout,
- generator_saved_local_names,
- variant_index: index,
- }
- }
- _ => bug!(),
- };
-
- // While LLVM supports generating debuginfo for variant types (enums), it doesn't support
- // lowering that debuginfo to CodeView records for msvc targets. So if we are targeting
- // msvc, then we need to use a different, fallback encoding of the debuginfo.
- let fallback = cpp_like_debuginfo(cx.tcx);
- // This will always find the metadata in the type map.
- let self_metadata = type_metadata(cx, self.enum_type);
-
- match self.layout.variants {
- Variants::Single { index } => {
- if let ty::Adt(adt, _) = self.enum_type.kind() {
- if adt.variants.is_empty() {
- return vec![];
- }
- }
-
- let variant_info = variant_info_for(index);
- let (variant_type_metadata, member_description_factory) =
- describe_enum_variant(cx, self.layout, variant_info, self_metadata);
-
- let member_descriptions = member_description_factory.create_member_descriptions(cx);
- let type_params = compute_type_parameters(cx, self.enum_type);
-
- set_members_of_composite_type(
- cx,
- variant_type_metadata,
- member_descriptions,
- Some(&self.common_members),
- type_params,
- );
- vec![MemberDescription {
- name: variant_info.variant_name(),
- type_metadata: variant_type_metadata,
- offset: Size::ZERO,
- size: self.layout.size,
- align: self.layout.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: variant_info.source_info(cx),
- }]
- }
- Variants::Multiple {
- tag_encoding: TagEncoding::Direct,
- tag_field,
- ref variants,
- ..
- } => {
- let fallback_discr_variant = if fallback {
- // For MSVC, we generate a union of structs for each variant and an
- // explicit discriminant field roughly equivalent to the following C:
- // ```c
- // union enum$<{name}> {
- // struct {variant 0 name} {
- // <variant 0 fields>
- // } variant0;
- // <other variant structs>
- // {name} discriminant;
- // }
- // ```
- // The natvis in `intrinsic.natvis` then matches on `this.discriminant` to
- // determine which variant is active and then displays it.
- let enum_layout = self.layout;
- let offset = enum_layout.fields.offset(tag_field);
- let discr_ty = enum_layout.field(cx, tag_field).ty;
- let (size, align) = cx.size_and_align_of(discr_ty);
- Some(MemberDescription {
- name: "discriminant".into(),
- type_metadata: self.tag_type_metadata.unwrap(),
- offset,
- size,
- align,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- })
- } else {
- None
- };
-
- variants
- .iter_enumerated()
- .map(|(i, _)| {
- let variant = self.layout.for_variant(cx, i);
- let variant_info = variant_info_for(i);
- let (variant_type_metadata, member_desc_factory) =
- describe_enum_variant(cx, variant, variant_info, self_metadata);
-
- let member_descriptions =
- member_desc_factory.create_member_descriptions(cx);
- let type_params = compute_type_parameters(cx, self.enum_type);
-
- set_members_of_composite_type(
- cx,
- variant_type_metadata,
- member_descriptions,
- Some(&self.common_members),
- type_params,
- );
-
- MemberDescription {
- name: if fallback {
- format!("variant{}", i.as_u32())
- } else {
- variant_info.variant_name()
- },
- type_metadata: variant_type_metadata,
- offset: Size::ZERO,
- size: self.layout.size,
- align: self.layout.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: Some(
- self.layout.ty.discriminant_for_variant(cx.tcx, i).unwrap().val
- as u64,
- ),
- source_info: variant_info.source_info(cx),
- }
- })
- .chain(fallback_discr_variant.into_iter())
- .collect()
- }
- Variants::Multiple {
- tag_encoding:
- TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
- tag,
- ref variants,
- tag_field,
- } => {
- let calculate_niche_value = |i: VariantIdx| {
- if i == dataful_variant {
- None
- } else {
- let value = (i.as_u32() as u128)
- .wrapping_sub(niche_variants.start().as_u32() as u128)
- .wrapping_add(niche_start);
- let value = tag.value.size(cx).truncate(value);
- // NOTE(eddyb) do *NOT* remove this assert, until
- // we pass the full 128-bit value to LLVM, otherwise
- // truncation will be silent and remain undetected.
- assert_eq!(value as u64 as u128, value);
- Some(value as u64)
- }
- };
-
- // For MSVC, we will generate a union of two fields, one for the dataful variant
- // and one that just points to the discriminant. We also create an enum that
- // contains tag values for the non-dataful variants and make the discriminant field
- // that type. We then use natvis to render the enum type correctly in Windbg/VS.
- // This will generate debuginfo roughly equivalent to the following C:
- // ```c
- // union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
- // struct <dataful variant name> {
- // <fields in dataful variant>
- // } dataful_variant;
- // enum Discriminant$ {
- // <non-dataful variants>
- // } discriminant;
- // }
- // ```
- // The natvis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
- // and evaluates `this.discriminant`. If the value is between the min niche and max
- // niche, then the enum is in the dataful variant and `this.dataful_variant` is
- // rendered. Otherwise, the enum is in one of the non-dataful variants. In that
- // case, we just need to render the name of the `this.discriminant` enum.
- if fallback {
- let dataful_variant_layout = self.layout.for_variant(cx, dataful_variant);
-
- let mut discr_enum_ty = tag.value.to_ty(cx.tcx);
- // If the niche is the NULL value of a reference, then `discr_enum_ty` will be a RawPtr.
- // CodeView doesn't know what to do with enums whose base type is a pointer so we fix this up
- // to just be `usize`.
- if let ty::RawPtr(_) = discr_enum_ty.kind() {
- discr_enum_ty = cx.tcx.types.usize;
- }
-
- let tags: Vec<_> = variants
- .iter_enumerated()
- .filter_map(|(variant_idx, _)| {
- calculate_niche_value(variant_idx).map(|tag| {
- let variant = variant_info_for(variant_idx);
- let name = variant.variant_name();
-
- Some(unsafe {
- llvm::LLVMRustDIBuilderCreateEnumerator(
- DIB(cx),
- name.as_ptr().cast(),
- name.len(),
- tag as i64,
- !discr_enum_ty.is_signed(),
- )
- })
- })
- })
- .collect();
-
- let discr_enum = unsafe {
- llvm::LLVMRustDIBuilderCreateEnumerationType(
- DIB(cx),
- self_metadata,
- "Discriminant$".as_ptr().cast(),
- "Discriminant$".len(),
- unknown_file_metadata(cx),
- UNKNOWN_LINE_NUMBER,
- tag.value.size(cx).bits(),
- tag.value.align(cx).abi.bits() as u32,
- create_DIArray(DIB(cx), &tags),
- type_metadata(cx, discr_enum_ty),
- true,
- )
- };
-
- let variant_info = variant_info_for(dataful_variant);
- let (variant_type_metadata, member_desc_factory) = describe_enum_variant(
- cx,
- dataful_variant_layout,
- variant_info,
- self_metadata,
- );
-
- let member_descriptions = member_desc_factory.create_member_descriptions(cx);
- let type_params = compute_type_parameters(cx, self.enum_type);
-
- set_members_of_composite_type(
- cx,
- variant_type_metadata,
- member_descriptions,
- Some(&self.common_members),
- type_params,
- );
-
- let (size, align) =
- cx.size_and_align_of(dataful_variant_layout.field(cx, tag_field).ty);
-
- vec![
- MemberDescription {
- // Name the dataful variant so that we can identify it for natvis
- name: "dataful_variant".to_string(),
- type_metadata: variant_type_metadata,
- offset: Size::ZERO,
- size: self.layout.size,
- align: self.layout.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: variant_info.source_info(cx),
- },
- MemberDescription {
- name: "discriminant".into(),
- type_metadata: discr_enum,
- offset: dataful_variant_layout.fields.offset(tag_field),
- size,
- align,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- },
- ]
- } else {
- variants
- .iter_enumerated()
- .map(|(i, _)| {
- let variant = self.layout.for_variant(cx, i);
- let variant_info = variant_info_for(i);
- let (variant_type_metadata, member_desc_factory) =
- describe_enum_variant(cx, variant, variant_info, self_metadata);
-
- let member_descriptions =
- member_desc_factory.create_member_descriptions(cx);
- let type_params = compute_type_parameters(cx, self.enum_type);
-
- set_members_of_composite_type(
- cx,
- variant_type_metadata,
- member_descriptions,
- Some(&self.common_members),
- type_params,
- );
-
- let niche_value = calculate_niche_value(i);
-
- MemberDescription {
- name: variant_info.variant_name(),
- type_metadata: variant_type_metadata,
- offset: Size::ZERO,
- size: self.layout.size,
- align: self.layout.align.abi,
- flags: DIFlags::FlagZero,
- discriminant: niche_value,
- source_info: variant_info.source_info(cx),
- }
- })
- .collect()
- }
- }
- }
- }
-}
-
-// Creates `MemberDescription`s for the fields of a single enum variant.
-struct VariantMemberDescriptionFactory<'tcx> {
- /// Cloned from the `layout::Struct` describing the variant.
- offsets: Vec<Size>,
- args: Vec<(String, Ty<'tcx>)>,
-}
-
-impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
- fn create_member_descriptions<'ll>(
- &self,
- cx: &CodegenCx<'ll, 'tcx>,
- ) -> Vec<MemberDescription<'ll>> {
- self.args
- .iter()
- .enumerate()
- .map(|(i, &(ref name, ty))| {
- let (size, align) = cx.size_and_align_of(ty);
- MemberDescription {
- name: name.to_string(),
- type_metadata: type_metadata(cx, ty),
- offset: self.offsets[i],
- size,
- align,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- }
- })
- .collect()
- }
-}
-
-#[derive(Copy, Clone)]
-enum VariantInfo<'a, 'tcx> {
- Adt(&'tcx ty::VariantDef, VariantIdx),
- Generator {
- def_id: DefId,
- generator_layout: &'tcx GeneratorLayout<'tcx>,
- generator_saved_local_names: &'a IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>,
- variant_index: VariantIdx,
- },
-}
-
-impl<'tcx> VariantInfo<'_, 'tcx> {
- fn variant_idx(&self) -> VariantIdx {
- match self {
- VariantInfo::Adt(_, variant_index) | VariantInfo::Generator { variant_index, .. } => {
- *variant_index
- }
- }
- }
-
- fn map_struct_name<R>(&self, f: impl FnOnce(&str) -> R) -> R {
- match self {
- VariantInfo::Adt(variant, _) => f(variant.name.as_str()),
- VariantInfo::Generator { variant_index, .. } => {
- f(&GeneratorSubsts::variant_name(*variant_index))
- }
- }
- }
-
- fn variant_name(&self) -> String {
- match self {
- VariantInfo::Adt(variant, _) => variant.name.to_string(),
- VariantInfo::Generator { variant_index, .. } => {
- // Since GDB currently prints out the raw discriminant along
- // with every variant, make each variant name be just the value
- // of the discriminant. The struct name for the variant includes
- // the actual variant description.
- format!("{}", variant_index.as_usize())
- }
- }
- }
-
- fn field_name(&self, i: usize) -> String {
- let field_name = match *self {
- VariantInfo::Adt(variant, _) if variant.ctor_kind != CtorKind::Fn => {
- Some(variant.fields[i].name)
- }
- VariantInfo::Generator {
- generator_layout,
- generator_saved_local_names,
- variant_index,
- ..
- } => {
- generator_saved_local_names
- [generator_layout.variant_fields[variant_index][i.into()]]
- }
- _ => None,
- };
- field_name.map(|name| name.to_string()).unwrap_or_else(|| format!("__{}", i))
- }
-
- fn source_info<'ll>(&self, cx: &CodegenCx<'ll, 'tcx>) -> Option<SourceInfo<'ll>> {
- if let VariantInfo::Generator { def_id, variant_index, .. } = self {
- let span =
- cx.tcx.generator_layout(*def_id).unwrap().variant_source_info[*variant_index].span;
- if !span.is_dummy() {
- let loc = cx.lookup_debug_loc(span.lo());
- return Some(SourceInfo { file: file_metadata(cx, &loc.file), line: loc.line });
- }
- }
- None
- }
-}
-
-/// Returns a tuple of (1) `type_metadata_stub` of the variant, (2) a
-/// `MemberDescriptionFactory` for producing the descriptions of the
-/// fields of the variant. This is a rudimentary version of a full
-/// `RecursiveTypeDescription`.
-fn describe_enum_variant<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- layout: layout::TyAndLayout<'tcx>,
- variant: VariantInfo<'_, 'tcx>,
- containing_scope: &'ll DIScope,
-) -> (&'ll DICompositeType, MemberDescriptionFactory<'ll, 'tcx>) {
- let metadata_stub = variant.map_struct_name(|variant_name| {
- let unique_type_id =
- UniqueTypeId::for_enum_variant(cx.tcx, layout.ty, variant.variant_idx());
-
- let (size, align) = cx.size_and_align_of(layout.ty);
-
- create_struct_stub(
- cx,
- size,
- align,
- variant_name,
- unique_type_id,
- Some(containing_scope),
- DIFlags::FlagZero,
- None,
- )
- });
-
- let offsets = (0..layout.fields.count()).map(|i| layout.fields.offset(i)).collect();
- let args = (0..layout.fields.count())
- .map(|i| (variant.field_name(i), layout.field(cx, i).ty))
- .collect();
-
- let member_description_factory = VariantMDF(VariantMemberDescriptionFactory { offsets, args });
-
- (metadata_stub, member_description_factory)
-}
-
-fn prepare_enum_metadata<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- enum_type: Ty<'tcx>,
- enum_def_id: DefId,
- unique_type_id: UniqueTypeId<'tcx>,
- outer_field_tys: Vec<Ty<'tcx>>,
-) -> RecursiveTypeDescription<'ll, 'tcx> {
- let tcx = cx.tcx;
- let enum_name = compute_debuginfo_type_name(tcx, enum_type, false);
-
- let containing_scope = get_namespace_for_item(cx, enum_def_id);
- // FIXME: This should emit actual file metadata for the enum, but we
- // currently can't get the necessary information when it comes to types
- // imported from other crates. Formerly we violated the ODR when performing
- // LTO because we emitted debuginfo for the same type with varying file
- // metadata, so as a workaround we pretend that the type comes from
- // <unknown>
- let file_metadata = unknown_file_metadata(cx);
-
- let discriminant_type_metadata = |discr: Primitive| {
- let enumerators_metadata: Vec<_> = match enum_type.kind() {
- ty::Adt(def, _) => iter::zip(def.discriminants(tcx), &def.variants)
- .map(|((_, discr), v)| {
- let name = v.name.as_str();
- let is_unsigned = match discr.ty.kind() {
- ty::Int(_) => false,
- ty::Uint(_) => true,
- _ => bug!("non integer discriminant"),
- };
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateEnumerator(
- DIB(cx),
- name.as_ptr().cast(),
- name.len(),
- // FIXME: what if enumeration has i128 discriminant?
- discr.val as i64,
- is_unsigned,
- ))
- }
- })
- .collect(),
- ty::Generator(_, substs, _) => substs
- .as_generator()
- .variant_range(enum_def_id, tcx)
- .map(|variant_index| {
- debug_assert_eq!(tcx.types.u32, substs.as_generator().discr_ty(tcx));
- let name = GeneratorSubsts::variant_name(variant_index);
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateEnumerator(
- DIB(cx),
- name.as_ptr().cast(),
- name.len(),
- // Generators use u32 as discriminant type, verified above.
- variant_index.as_u32().into(),
- true, // IsUnsigned
- ))
- }
- })
- .collect(),
- _ => bug!(),
- };
-
- let disr_type_key = (enum_def_id, discr);
- let cached_discriminant_type_metadata =
- debug_context(cx).created_enum_disr_types.borrow().get(&disr_type_key).cloned();
- match cached_discriminant_type_metadata {
- Some(discriminant_type_metadata) => discriminant_type_metadata,
- None => {
- let (discriminant_size, discriminant_align) = (discr.size(cx), discr.align(cx));
- let discriminant_base_type_metadata = type_metadata(cx, discr.to_ty(tcx));
-
- let item_name;
- let discriminant_name = match enum_type.kind() {
- ty::Adt(..) => {
- item_name = tcx.item_name(enum_def_id);
- item_name.as_str()
- }
- ty::Generator(..) => enum_name.as_str(),
- _ => bug!(),
- };
-
- let discriminant_type_metadata = unsafe {
- llvm::LLVMRustDIBuilderCreateEnumerationType(
- DIB(cx),
- containing_scope,
- discriminant_name.as_ptr().cast(),
- discriminant_name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- discriminant_size.bits(),
- discriminant_align.abi.bits() as u32,
- create_DIArray(DIB(cx), &enumerators_metadata),
- discriminant_base_type_metadata,
- true,
- )
- };
-
- debug_context(cx)
- .created_enum_disr_types
- .borrow_mut()
- .insert(disr_type_key, discriminant_type_metadata);
-
- discriminant_type_metadata
- }
- }
- };
-
- let layout = cx.layout_of(enum_type);
-
- if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
- (layout.abi, &layout.variants)
- {
- return FinalMetadata(discriminant_type_metadata(tag.value));
- }
-
- // While LLVM supports generating debuginfo for variant types (enums), it doesn't support
- // lowering that debuginfo to CodeView records for msvc targets. So if we are targeting
- // msvc, then we need to use a different encoding of the debuginfo.
- if cpp_like_debuginfo(tcx) {
- let discriminant_type_metadata = match layout.variants {
- Variants::Single { .. } => None,
- Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
- | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
- Some(discriminant_type_metadata(tag.value))
- }
- };
-
- let enum_metadata = {
- let unique_type_id_str = unique_type_id.generate_unique_id_string(tcx);
-
- unsafe {
- llvm::LLVMRustDIBuilderCreateUnionType(
- DIB(cx),
- None,
- enum_name.as_ptr().cast(),
- enum_name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- layout.size.bits(),
- layout.align.abi.bits() as u32,
- DIFlags::FlagZero,
- None,
- 0, // RuntimeLang
- unique_type_id_str.as_ptr().cast(),
- unique_type_id_str.len(),
- )
- }
- };
-
- return create_and_register_recursive_type_forward_declaration(
- cx,
- enum_type,
- unique_type_id,
- enum_metadata,
- enum_metadata,
- EnumMDF(EnumMemberDescriptionFactory {
- enum_type,
- layout,
- tag_type_metadata: discriminant_type_metadata,
- common_members: vec![],
- }),
- );
- }
-
- let discriminator_name = match enum_type.kind() {
- ty::Generator(..) => "__state",
- _ => "",
- };
- let discriminator_metadata = match layout.variants {
- // A single-variant enum has no discriminant.
- Variants::Single { .. } => None,
-
- Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
- // Find the integer type of the correct size.
- let size = tag.value.size(cx);
- let align = tag.value.align(cx);
-
- let tag_type = match tag.value {
- Int(t, _) => t,
- F32 => Integer::I32,
- F64 => Integer::I64,
- Pointer => cx.data_layout().ptr_sized_integer(),
- }
- .to_ty(cx.tcx, false);
-
- let tag_metadata = basic_type_metadata(cx, tag_type);
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateMemberType(
- DIB(cx),
- containing_scope,
- discriminator_name.as_ptr().cast(),
- discriminator_name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- size.bits(),
- align.abi.bits() as u32,
- layout.fields.offset(tag_field).bits(),
- DIFlags::FlagArtificial,
- tag_metadata,
- ))
- }
- }
-
- Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
- let discr_type = tag.value.to_ty(cx.tcx);
- let (size, align) = cx.size_and_align_of(discr_type);
-
- let discr_metadata = basic_type_metadata(cx, discr_type);
- unsafe {
- Some(llvm::LLVMRustDIBuilderCreateMemberType(
- DIB(cx),
- containing_scope,
- discriminator_name.as_ptr().cast(),
- discriminator_name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- size.bits(),
- align.bits() as u32,
- layout.fields.offset(tag_field).bits(),
- DIFlags::FlagArtificial,
- discr_metadata,
- ))
- }
- }
- };
-
- let outer_fields = match layout.variants {
- Variants::Single { .. } => vec![],
- Variants::Multiple { .. } => {
- let tuple_mdf =
- TupleMemberDescriptionFactory { ty: enum_type, component_types: outer_field_tys };
- tuple_mdf
- .create_member_descriptions(cx)
- .into_iter()
- .map(|desc| Some(desc.into_metadata(cx, containing_scope)))
- .collect()
- }
- };
-
- let variant_part_unique_type_id_str =
- UniqueTypeId::for_enum_variant_part(tcx, enum_type).generate_unique_id_string(tcx);
-
- let empty_array = create_DIArray(DIB(cx), &[]);
- let name = "";
- let variant_part = unsafe {
- llvm::LLVMRustDIBuilderCreateVariantPart(
- DIB(cx),
- containing_scope,
- name.as_ptr().cast(),
- name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- layout.size.bits(),
- layout.align.abi.bits() as u32,
- DIFlags::FlagZero,
- discriminator_metadata,
- empty_array,
- variant_part_unique_type_id_str.as_ptr().cast(),
- variant_part_unique_type_id_str.len(),
- )
- };
-
- let struct_wrapper = {
- // The variant part must be wrapped in a struct according to DWARF.
- // All fields except the discriminant (including `outer_fields`)
- // should be put into structures inside the variant part, which gives
- // an equivalent layout but offers us much better integration with
- // debuggers.
- let type_array = create_DIArray(DIB(cx), &[Some(variant_part)]);
- let unique_type_id_str = unique_type_id.generate_unique_id_string(tcx);
-
- unsafe {
- llvm::LLVMRustDIBuilderCreateStructType(
- DIB(cx),
- Some(containing_scope),
- enum_name.as_ptr().cast(),
- enum_name.len(),
- file_metadata,
- UNKNOWN_LINE_NUMBER,
- layout.size.bits(),
- layout.align.abi.bits() as u32,
- DIFlags::FlagZero,
- None,
- type_array,
- 0,
- None,
- unique_type_id_str.as_ptr().cast(),
- unique_type_id_str.len(),
- )
- }
- };
-
- create_and_register_recursive_type_forward_declaration(
- cx,
- enum_type,
- unique_type_id,
- struct_wrapper,
- variant_part,
- EnumMDF(EnumMemberDescriptionFactory {
- enum_type,
- layout,
- tag_type_metadata: None,
- common_members: outer_fields,
- }),
- )
-}
-
-/// Creates debug information for a composite type, that is, anything that
-/// results in a LLVM struct.
-///
-/// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
-fn composite_type_metadata<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- composite_type: Ty<'tcx>,
- composite_type_name: &str,
- composite_type_unique_id: UniqueTypeId<'tcx>,
- member_descriptions: Vec<MemberDescription<'ll>>,
- containing_scope: Option<&'ll DIScope>,
-) -> &'ll DICompositeType {
- let (size, align) = cx.size_and_align_of(composite_type);
-
- // Create the (empty) struct metadata node ...
- let composite_type_metadata = create_struct_stub(
- cx,
- size,
- align,
- composite_type_name,
- composite_type_unique_id,
- containing_scope,
- DIFlags::FlagZero,
- None,
- );
-
- // ... and immediately create and add the member descriptions.
- set_members_of_composite_type(
- cx,
- composite_type_metadata,
- member_descriptions,
- None,
- compute_type_parameters(cx, composite_type),
- );
-
- composite_type_metadata
-}
-
-fn set_members_of_composite_type<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- composite_type_metadata: &'ll DICompositeType,
- member_descriptions: Vec<MemberDescription<'ll>>,
- common_members: Option<&Vec<Option<&'ll DIType>>>,
- type_params: &'ll DIArray,
-) {
- // In some rare cases LLVM metadata uniquing would lead to an existing type
- // description being used instead of a new one created in
- // create_struct_stub. This would cause a hard to trace assertion in
- // DICompositeType::SetTypeArray(). The following check makes sure that we
- // get a better error message if this should happen again due to some
- // regression.
- {
- let mut composite_types_completed =
- debug_context(cx).composite_types_completed.borrow_mut();
- if !composite_types_completed.insert(composite_type_metadata) {
- bug!(
- "debuginfo::set_members_of_composite_type() - \
- Already completed forward declaration re-encountered."
- );
- }
- }
-
- let mut member_metadata: Vec<_> = member_descriptions
- .into_iter()
- .map(|desc| Some(desc.into_metadata(cx, composite_type_metadata)))
- .collect();
- if let Some(other_members) = common_members {
- member_metadata.extend(other_members.iter());
- }
-
- unsafe {
- let field_array = create_DIArray(DIB(cx), &member_metadata);
- llvm::LLVMRustDICompositeTypeReplaceArrays(
- DIB(cx),
- composite_type_metadata,
- Some(field_array),
- Some(type_params),
- );
- }
-}
-
/// Computes the type parameters for a type, if any, for the given metadata.
-fn compute_type_parameters<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> &'ll DIArray {
+fn build_generic_type_param_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+) -> SmallVec<&'ll DIType> {
if let ty::Adt(def, substs) = *ty.kind() {
if substs.types().next().is_some() {
- let generics = cx.tcx.generics_of(def.did);
+ let generics = cx.tcx.generics_of(def.did());
let names = get_parameter_names(cx, generics);
- let template_params: Vec<_> = iter::zip(substs, names)
+ let template_params: SmallVec<_> = iter::zip(substs, names)
.filter_map(|(kind, name)| {
if let GenericArgKind::Type(ty) = kind.unpack() {
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
- let actual_type_metadata = type_metadata(cx, actual_type);
+ let actual_type_di_node = type_di_node(cx, actual_type);
let name = name.as_str();
Some(unsafe {
- Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+ llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
DIB(cx),
None,
name.as_ptr().cast(),
name.len(),
- actual_type_metadata,
- ))
+ actual_type_di_node,
+ )
})
} else {
None
})
.collect();
- return create_DIArray(DIB(cx), &template_params);
+ return template_params;
}
}
- return create_DIArray(DIB(cx), &[]);
+
+ return smallvec![];
fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
let mut names = generics
}
}
-/// A convenience wrapper around `LLVMRustDIBuilderCreateStructType()`. Does not do
-/// any caching, does not add any fields to the struct. This can be done later
-/// with `set_members_of_composite_type()`.
-fn create_struct_stub<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- size: Size,
- align: Align,
- type_name: &str,
- unique_type_id: UniqueTypeId<'tcx>,
- containing_scope: Option<&'ll DIScope>,
- flags: DIFlags,
- vtable_holder: Option<&'ll DIType>,
-) -> &'ll DICompositeType {
- let unique_type_id = unique_type_id.generate_unique_id_string(cx.tcx);
-
- let metadata_stub = unsafe {
- // `LLVMRustDIBuilderCreateStructType()` wants an empty array. A null
- // pointer will lead to hard to trace and debug LLVM assertions
- // later on in `llvm/lib/IR/Value.cpp`.
- let empty_array = create_DIArray(DIB(cx), &[]);
-
- llvm::LLVMRustDIBuilderCreateStructType(
- DIB(cx),
- containing_scope,
- type_name.as_ptr().cast(),
- type_name.len(),
- unknown_file_metadata(cx),
- UNKNOWN_LINE_NUMBER,
- size.bits(),
- align.bits() as u32,
- flags,
- None,
- empty_array,
- 0,
- vtable_holder,
- unique_type_id.as_ptr().cast(),
- unique_type_id.len(),
- )
- };
-
- metadata_stub
-}
-
-fn create_union_stub<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- union_type: Ty<'tcx>,
- union_type_name: &str,
- unique_type_id: UniqueTypeId<'tcx>,
- containing_scope: &'ll DIScope,
-) -> &'ll DICompositeType {
- let (union_size, union_align) = cx.size_and_align_of(union_type);
- let unique_type_id = unique_type_id.generate_unique_id_string(cx.tcx);
-
- let metadata_stub = unsafe {
- // `LLVMRustDIBuilderCreateUnionType()` wants an empty array. A null
- // pointer will lead to hard to trace and debug LLVM assertions
- // later on in `llvm/lib/IR/Value.cpp`.
- let empty_array = create_DIArray(DIB(cx), &[]);
-
- llvm::LLVMRustDIBuilderCreateUnionType(
- DIB(cx),
- Some(containing_scope),
- union_type_name.as_ptr().cast(),
- union_type_name.len(),
- unknown_file_metadata(cx),
- UNKNOWN_LINE_NUMBER,
- union_size.bits(),
- union_align.bits() as u32,
- DIFlags::FlagZero,
- Some(empty_array),
- 0, // RuntimeLang
- unique_type_id.as_ptr().cast(),
- unique_type_id.len(),
- )
- };
-
- metadata_stub
-}
-
/// Creates debug information for the given global variable.
///
-/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_global_var_metadata<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
+/// Adds the created debuginfo nodes directly to the crate's IR.
+pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
if cx.dbg_cx.is_none() {
return;
}
let is_local_to_unit = is_node_local_to_unit(cx, def_id);
let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all());
- let type_metadata = type_metadata(cx, variable_type);
+ let type_di_node = type_di_node(cx, variable_type);
let var_name = tcx.item_name(def_id);
let var_name = var_name.as_str();
let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)).name;
linkage_name.len(),
file_metadata,
line_number,
- type_metadata,
+ type_di_node,
is_local_to_unit,
global,
None,
/// the name of the method they implement. This can be implemented in the future once there
/// is a proper disambiguation scheme for dealing with methods from different traits that have
/// the same name.
-fn vtable_type_metadata<'ll, 'tcx>(
+fn build_vtable_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
// All function pointers are described as opaque pointers. This could be improved in the future
// by describing them as actual function pointers.
let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
- let void_pointer_type_debuginfo = type_metadata(cx, void_pointer_ty);
- let usize_debuginfo = type_metadata(cx, tcx.types.usize);
+ let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
+ let usize_di_node = type_di_node(cx, tcx.types.usize);
let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
// If `usize` is not pointer-sized and -aligned then the size and alignment computations
// for the vtable as a whole would be wrong. Let's make sure this holds even on weird
// This gets mapped to a DW_AT_containing_type attribute which allows GDB to correlate
// the vtable to the type it is for.
- let vtable_holder = type_metadata(cx, ty);
+ let vtable_holder = type_di_node(cx, ty);
- let vtable_type_metadata = create_struct_stub(
+ build_type_with_children(
cx,
- size,
- pointer_align,
- &vtable_type_name,
- unique_type_id,
- NO_SCOPE_METADATA,
- DIFlags::FlagArtificial,
- Some(vtable_holder),
- );
-
- // Create a field for each entry in the vtable.
- let fields: Vec<_> = vtable_entries
- .iter()
- .enumerate()
- .filter_map(|(index, vtable_entry)| {
- let (field_name, field_type) = match vtable_entry {
- ty::VtblEntry::MetadataDropInPlace => {
- ("drop_in_place".to_string(), void_pointer_type_debuginfo)
- }
- ty::VtblEntry::Method(_) => {
- // Note: This code does not try to give a proper name to each method
- // because there might be multiple methods with the same name
- // (coming from different traits).
- (format!("__method{}", index), void_pointer_type_debuginfo)
- }
- ty::VtblEntry::TraitVPtr(_) => {
- // Note: In the future we could try to set the type of this pointer
- // to the type that we generate for the corresponding vtable.
- (format!("__super_trait_ptr{}", index), void_pointer_type_debuginfo)
- }
- ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_debuginfo),
- ty::VtblEntry::MetadataSize => ("size".to_string(), usize_debuginfo),
- ty::VtblEntry::Vacant => return None,
- };
+ type_map::stub(
+ cx,
+ Stub::VtableTy { vtable_holder },
+ unique_type_id,
+ &vtable_type_name,
+ (size, pointer_align),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagArtificial,
+ ),
+ |cx, vtable_type_di_node| {
+ vtable_entries
+ .iter()
+ .enumerate()
+ .filter_map(|(index, vtable_entry)| {
+ let (field_name, field_type_di_node) = match vtable_entry {
+ ty::VtblEntry::MetadataDropInPlace => {
+ ("drop_in_place".to_string(), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::Method(_) => {
+ // Note: This code does not try to give a proper name to each method
+                            // because there might be multiple methods with the same name
+ // (coming from different traits).
+ (format!("__method{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::TraitVPtr(_) => {
+ (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
+ ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
+ ty::VtblEntry::Vacant => return None,
+ };
- Some(MemberDescription {
- name: field_name,
- type_metadata: field_type,
- offset: pointer_size * index as u64,
- size: pointer_size,
- align: pointer_align,
- flags: DIFlags::FlagZero,
- discriminant: None,
- source_info: None,
- })
- })
- .collect();
+ let field_offset = pointer_size * index as u64;
- let type_params = create_DIArray(DIB(cx), &[]);
- set_members_of_composite_type(cx, vtable_type_metadata, fields, None, type_params);
- vtable_type_metadata
+ Some(build_field_di_node(
+ cx,
+ vtable_type_di_node,
+ &field_name,
+ (pointer_size, pointer_align),
+ field_offset,
+ DIFlags::FlagZero,
+ field_type_di_node,
+ ))
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
}
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
-pub fn create_vtable_metadata<'ll, 'tcx>(
+pub fn create_vtable_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
ty: Ty<'tcx>,
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
let vtable_name =
compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
- let vtable_type = vtable_type_metadata(cx, ty, poly_trait_ref);
+ let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
let linkage_name = "";
unsafe {
linkage_name.len(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
- vtable_type,
+ vtable_type_di_node,
true,
vtable,
None,
let file_metadata = file_metadata(cx, file);
unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
}
+
+pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
+ const TUPLE_FIELD_NAMES: [&'static str; 16] = [
+ "__0", "__1", "__2", "__3", "__4", "__5", "__6", "__7", "__8", "__9", "__10", "__11",
+ "__12", "__13", "__14", "__15",
+ ];
+ TUPLE_FIELD_NAMES
+ .get(field_index)
+ .map(|s| Cow::from(*s))
+ .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+}
--- /dev/null
+use std::borrow::Cow;
+
+use libc::c_uint;
+use rustc_codegen_ssa::debuginfo::{
+ type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts,
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of,
+ type_map::{self, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::DIB,
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+
+/// In CPP-like mode, we generate a union of structs for each variant and an
+/// explicit discriminant field roughly equivalent to the following C/C++ code:
+///
+/// ```c
+/// union enum$<{fully-qualified-name}> {
+/// struct {variant 0 name} {
+/// <variant 0 fields>
+/// } variant0;
+/// <other variant structs>
+/// {name} discriminant;
+/// }
+/// ```
+///
+/// As you can see, the type name is wrapped in `enum$`. This way we can have a
+/// single NatVis rule for handling all enums.
+///
+/// At the LLVM IR level this looks like
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The above encoding applies for enums with a direct tag. For niche-tag we have to do things
+/// differently in order to allow a NatVis visualizer to extract all the information needed:
+/// We generate a union of two fields, one for the dataful variant
+/// and one that just points to the discriminant (which is some field within the dataful variant).
+/// We also create a DW_TAG_enumeration_type DIE that contains tag values for the non-dataful
+/// variants and make the discriminant field that type. We then use NatVis to render the enum type
+/// correctly in Windbg/VS. This will generate debuginfo roughly equivalent to the following C:
+///
+/// ```c
+/// union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
+/// struct <dataful variant name> {
+/// <fields in dataful variant>
+/// } dataful_variant;
+/// enum Discriminant$ {
+/// <non-dataful variants>
+/// } discriminant;
+/// }
+/// ```
+///
+/// The NatVis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
+/// and evaluates `this.discriminant`. If the value is between the min niche and max
+/// niche, then the enum is in the dataful variant and `this.dataful_variant` is
+/// rendered. Otherwise, the enum is in one of the non-dataful variants. In that
+/// case, we just need to render the name of the `this.discriminant` enum.
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &enum_type_name,
+ cx.size_and_align_of(enum_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ match enum_type_and_layout.variants {
+ Variants::Single { index: variant_index } => {
+ if enum_adt_def.variants().is_empty() {
+ // Uninhabited enums have Variants::Single. We don't generate
+ // any members for them.
+ return smallvec![];
+ }
+
+ build_single_variant_union_fields(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ variant_index,
+ )
+ }
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Direct,
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_direct_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_niche_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ dataful_variant,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+/// A generator debuginfo node looks the same as that of an enum type.
+///
+/// See [build_enum_type_di_node] for more information.
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let generator_type_and_layout = cx.layout_of(generator_type);
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| match generator_type_and_layout.variants {
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => {
+ build_union_fields_for_direct_tag_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ )
+ }
+ Variants::Single { .. }
+ | Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+fn build_single_variant_union_fields<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+) -> SmallVec<&'ll DIType> {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+ let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ );
+
+ // NOTE: The field name of the union is the same as the variant name, not "variant0".
+ let variant_name = enum_adt_def.variant(variant_index).name.as_str();
+
+ smallvec![build_field_di_node(
+ cx,
+ enum_type_di_node,
+ variant_name,
+ // NOTE: We use the size and align of the entire type, not from variant_layout
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ variant_struct_type_di_node,
+ )]
+}
+
+fn build_union_fields_for_direct_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices
+ .map(|variant_index| {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+
+ VariantFieldInfo {
+ variant_index,
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ ),
+ source_info: None,
+ }
+ })
+ .collect();
+
+ let discr_type_name = cx.tcx.item_name(enum_adt_def.did());
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name.as_str(),
+ tag_base_type,
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ enum_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_field_infos,
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+fn build_union_fields_for_niche_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ dataful_variant_index: VariantIdx,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let dataful_variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ dataful_variant_index,
+ &enum_adt_def.variant(dataful_variant_index),
+ enum_type_and_layout.for_variant(cx, dataful_variant_index),
+ );
+
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ // Create a DW_TAG_enumerator for each variant except the dataful one.
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ "Discriminant$",
+ tag_base_type,
+ &mut variant_indices.filter_map(|variant_index| {
+ if let Some(discr_val) =
+ super::compute_discriminant_value(cx, enum_type_and_layout, variant_index)
+ {
+ let discr = Discr { val: discr_val as u128, ty: tag_base_type };
+ let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+ Some((discr, variant_name))
+ } else {
+ debug_assert_eq!(variant_index, dataful_variant_index);
+ None
+ }
+ }),
+ enum_type_di_node,
+ );
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "dataful_variant",
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ dataful_variant_struct_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(tag_base_type),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ),
+ ]
+}
+
+fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } = generator_type_and_layout.variants else {
+ bug!("This function only supports layouts with directly encoded tags.")
+ };
+
+ let (generator_def_id, generator_substs) = match generator_type_and_layout.ty.kind() {
+ &ty::Generator(def_id, substs, _) => (def_id, substs.as_generator()),
+ _ => unreachable!(),
+ };
+
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+ let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+
+ // Build the type node for each field.
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
+ .clone()
+ .map(|variant_index| {
+ let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ );
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line as c_uint))
+ } else {
+ None
+ };
+
+ VariantFieldInfo { variant_index, variant_struct_type_di_node, source_info }
+ })
+ .collect();
+
+ let tag_base_type = tag_base_type(cx, generator_type_and_layout);
+ let discr_type_name = "Discriminant$";
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name,
+ tag_base_type,
+ &mut generator_substs
+ .discriminants(generator_def_id, cx.tcx)
+ .map(|(variant_index, discr)| (discr, GeneratorSubsts::variant_name(variant_index))),
+ generator_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_field_infos[..],
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+/// This is a helper function shared between enums and generators that makes sure fields have the
+/// expected names.
+fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_field_infos: &[VariantFieldInfo<'ll>],
+ discr_type_di_node: &'ll DIType,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1);
+
+ // We create a field in the union for each variant ...
+ unions_fields.extend(variant_field_infos.into_iter().map(|variant_member_info| {
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ let field_name = variant_union_field_name(variant_member_info.variant_index);
+ let (size, align) = size_and_align_of(enum_type_and_layout);
+
+ // We use LLVMRustDIBuilderCreateMemberType() directly because
+ // the build_field_di_node() function does not support specifying a source location,
+ // which is something that we don't do anywhere else.
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ enum_type_di_node,
+ field_name.as_ptr().cast(),
+ field_name.len(),
+ file_di_node,
+ line_number,
+ // NOTE: We use the size and align of the entire type, not from variant_layout
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size.bits(),
+ align.bits() as u32,
+ // Union fields are always at offset zero
+ Size::ZERO.bits(),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+ }));
+
+ debug_assert_eq!(
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
+ );
+
+ // ... and a field for the discriminant.
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ));
+
+ unions_fields
+}
+
+/// Information about a single field of the top-level DW_TAG_union_type.
+struct VariantFieldInfo<'ll> {
+ variant_index: VariantIdx,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
+
+fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
+ const PRE_ALLOCATED: [&str; 16] = [
+ "variant0",
+ "variant1",
+ "variant2",
+ "variant3",
+ "variant4",
+ "variant5",
+ "variant6",
+ "variant7",
+ "variant8",
+ "variant9",
+ "variant10",
+ "variant11",
+ "variant12",
+ "variant13",
+ "variant14",
+ "variant15",
+ ];
+
+ PRE_ALLOCATED
+ .get(variant_index.as_usize())
+ .map(|&s| Cow::from(s))
+ .unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into())
+}
--- /dev/null
+use rustc_codegen_ssa::debuginfo::{
+ type_names::{compute_debuginfo_type_name, cpp_like_debuginfo},
+ wants_c_like_enum_debuginfo,
+};
+use rustc_hir::def::CtorKind;
+use rustc_index::vec::IndexVec;
+use rustc_middle::{
+ bug,
+ mir::{Field, GeneratorLayout, GeneratorSavedLocal},
+ ty::{
+ self,
+ layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts, Ty, VariantDef,
+ },
+};
+use rustc_span::Symbol;
+use rustc_target::abi::{HasDataLayout, Integer, Primitive, TagEncoding, VariantIdx, Variants};
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
+ type_map::{self, Stub},
+ unknown_file_metadata, UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIType},
+ },
+};
+
+use super::{
+ size_and_align_of,
+ type_map::{DINodeCreationResult, UniqueTypeId},
+ SmallVec,
+};
+
+mod cpp_like;
+mod native;
+
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+
+ if wants_c_like_enum_debuginfo(enum_type_and_layout) {
+ return build_c_style_enum_di_node(cx, enum_adt_def, enum_type_and_layout);
+ }
+
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_enum_type_di_node(cx, unique_type_id)
+ } else {
+ native::build_enum_type_di_node(cx, unique_type_id)
+ }
+}
+
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_generator_di_node(cx, unique_type_id)
+ } else {
+ native::build_generator_di_node(cx, unique_type_id)
+ }
+}
+
+/// Build the debuginfo node for a C-style enum, i.e. an enum the variants of which have no fields.
+///
+/// The resulting debuginfo will be a DW_TAG_enumeration_type.
+fn build_c_style_enum_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ DINodeCreationResult {
+ di_node: build_enumeration_type_di_node(
+ cx,
+ &compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false),
+ tag_base_type(cx, enum_type_and_layout),
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ containing_scope,
+ ),
+ already_stored_in_typemap: false,
+ }
+}
+
+/// Extract the type with which we want to describe the tag of the given enum or generator.
+fn tag_base_type<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> Ty<'tcx> {
+ debug_assert!(match enum_type_and_layout.ty.kind() {
+ ty::Generator(..) => true,
+ ty::Adt(adt_def, _) => adt_def.is_enum(),
+ _ => false,
+ });
+
+ match enum_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ Variants::Single { .. } => {
+ bug!("tag_base_type() called for enum without tag: {:?}", enum_type_and_layout)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
+ // Niche tags are always normalized to unsigned integers of the correct size.
+ match tag.value {
+ Primitive::Int(t, _) => t,
+ Primitive::F32 => Integer::I32,
+ Primitive::F64 => Integer::I64,
+ Primitive::Pointer => {
+ // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
+ // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
+ // pointer so we fix this up to just be `usize`.
+ // DWARF might be able to deal with this but with an integer type we are on
+ // the safe side there too.
+ cx.data_layout().ptr_sized_integer()
+ }
+ }
+ .to_ty(cx.tcx, false)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
+ // Direct tags preserve the sign.
+ tag.value.to_ty(cx.tcx)
+ }
+ }
+}
+
+/// Build a DW_TAG_enumeration_type debuginfo node, with the given base type and variants.
+/// This is a helper function and does not register anything in the type map by itself.
+///
+/// `variants` is an iterator of (discr-value, variant-name).
+///
+// NOTE: Handling of discriminant values is somewhat inconsistent. They can appear as u128,
+// u64, and i64. Here everything gets mapped to i64 because that's what LLVM's API expects.
+fn build_enumeration_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ type_name: &str,
+ base_type: Ty<'tcx>,
+ variants: &mut dyn Iterator<Item = (Discr<'tcx>, Cow<'tcx, str>)>,
+ containing_scope: &'ll DIType,
+) -> &'ll DIType {
+ let is_unsigned = match base_type.kind() {
+ ty::Int(_) => false,
+ ty::Uint(_) => true,
+ _ => bug!("build_enumeration_type_di_node() called with non-integer tag type."),
+ };
+
+ let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = variants
+ .map(|(discr, variant_name)| {
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+ DIB(cx),
+ variant_name.as_ptr().cast(),
+ variant_name.len(),
+ // FIXME: what if enumeration has i128 discriminant?
+ discr.val as i64,
+ is_unsigned,
+ ))
+ }
+ })
+ .collect();
+
+ let (size, align) = cx.size_and_align_of(base_type);
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateEnumerationType(
+ DIB(cx),
+ containing_scope,
+ type_name.as_ptr().cast(),
+ type_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ create_DIArray(DIB(cx), &enumerator_di_nodes[..]),
+ type_di_node(cx, base_type),
+ true,
+ )
+ }
+}
+
+/// Build the debuginfo node for the struct type describing a single variant of an enum.
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// In CPP-like mode, we have the exact same descriptions for each variant too:
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The node looks like:
+///
+/// ```txt
+/// DW_TAG_structure_type
+/// DW_AT_name <name-of-variant>
+/// DW_AT_byte_size 0x00000010
+/// DW_AT_alignment 0x00000008
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-0>
+/// DW_AT_type <0x0000018e>
+/// DW_AT_alignment 0x00000004
+/// DW_AT_data_member_location 4
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-1>
+/// DW_AT_type <0x00000195>
+/// DW_AT_alignment 0x00000008
+/// DW_AT_data_member_location 8
+/// ...
+/// ```
+///
+/// The type of a variant is always a struct type with the name of the variant
+/// and a DW_TAG_member for each field (but not the discriminant).
+fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type: Ty<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+ variant_def: &VariantDef,
+ variant_layout: TyAndLayout<'tcx>,
+) -> &'ll DIType {
+ debug_assert_eq!(variant_layout.ty, enum_type);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ UniqueTypeId::for_enum_variant_struct_type(cx.tcx, enum_type, variant_index),
+ variant_def.name.as_str(),
+ // NOTE: We use size and align of enum_type, not from variant_layout:
+ cx.size_and_align_of(enum_type),
+ Some(enum_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, struct_type_di_node| {
+ (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let field_name = if variant_def.ctor_kind != CtorKind::Fn {
+ // Fields have names
+ Cow::from(variant_def.fields[field_index].name.as_str())
+ } else {
+ // Tuple-like
+ super::tuple_field_name(field_index)
+ };
+
+ let field_layout = variant_layout.field(cx, field_index);
+
+ build_field_di_node(
+ cx,
+ struct_type_di_node,
+ &field_name,
+ (field_layout.size, field_layout.align.abi),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, enum_type),
+ )
+ .di_node
+}
+
+/// Build the struct type for describing a single generator state.
+/// See [build_generator_di_node] for more information.
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ variant_index: VariantIdx,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+ generator_layout: &GeneratorLayout<'tcx>,
+ state_specific_upvar_names: &IndexVec<GeneratorSavedLocal, Option<Symbol>>,
+ common_upvar_names: &[String],
+) -> &'ll DIType {
+ let variant_name = GeneratorSubsts::variant_name(variant_index);
+ let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
+ cx.tcx,
+ generator_type_and_layout.ty,
+ variant_index,
+ );
+
+ let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
+
+ let generator_substs = match generator_type_and_layout.ty.kind() {
+ ty::Generator(_, substs, _) => substs.as_generator(),
+ _ => unreachable!(),
+ };
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &variant_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(generator_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, variant_struct_type_di_node| {
+ // Fields that just belong to this variant/state
+ let state_specific_fields: SmallVec<_> = (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let generator_saved_local = generator_layout.variant_fields[variant_index]
+ [Field::from_usize(field_index)];
+ let field_name_maybe = state_specific_upvar_names[generator_saved_local];
+ let field_name = field_name_maybe
+ .as_ref()
+ .map(|s| Cow::from(s.as_str()))
+ .unwrap_or_else(|| super::tuple_field_name(field_index));
+
+ let field_type = variant_layout.field(cx, field_index).ty;
+
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &field_name,
+ cx.size_and_align_of(field_type),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_type),
+ )
+ })
+ .collect();
+
+ // Fields that are common to all states
+ let common_fields: SmallVec<_> = generator_substs
+ .prefix_tys()
+ .enumerate()
+ .map(|(index, upvar_ty)| {
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &common_upvar_names[index],
+ cx.size_and_align_of(upvar_ty),
+ generator_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, upvar_ty),
+ )
+ })
+ .collect();
+
+ state_specific_fields.into_iter().chain(common_fields.into_iter()).collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, generator_type_and_layout.ty),
+ )
+ .di_node
+}
+
+/// Returns the discriminant value corresponding to the variant index.
+///
+/// Will return `None` if there are fewer than two variants (because then the enum won't have
+/// a tag), or if this is the dataful variant of a niche-layout enum (because then there is no
+/// single discriminant value).
+fn compute_discriminant_value<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_index: VariantIdx,
+) -> Option<u64> {
+ match enum_type_and_layout.layout.variants() {
+ &Variants::Single { .. } => None,
+ &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => Some(
+ enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val
+ as u64,
+ ),
+ &Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
+ tag,
+ ..
+ } => {
+ if variant_index == dataful_variant {
+ None
+ } else {
+ let value = (variant_index.as_u32() as u128)
+ .wrapping_sub(niche_variants.start().as_u32() as u128)
+ .wrapping_add(niche_start);
+ let value = tag.value.size(cx).truncate(value);
+ // NOTE(eddyb) do *NOT* remove this assert, until
+ // we pass the full 128-bit value to LLVM, otherwise
+ // truncation will be silent and remain undetected.
+ assert_eq!(value as u64 as u128, value);
+ Some(value as u64)
+ }
+ }
+ }
+}
--- /dev/null
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ type_map::{self, Stub, StubInfo, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+use libc::c_uint;
+use rustc_codegen_ssa::{
+ debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+ traits::ConstMethods,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+/// Build the debuginfo node for an enum type. The listing below shows how such a
+/// type looks like at the LLVM IR/DWARF level. It is a `DW_TAG_structure_type`
+/// with a single `DW_TAG_variant_part` that in turn contains a `DW_TAG_variant`
+/// for each variant of the enum. The variant-part also contains a single member
+/// describing the discriminant, and a nested struct type for each of the variants.
+///
+/// ```txt
+/// ---> DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &enum_type_name,
+ size_and_align_of(enum_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ // Build the struct type for each variant. These will be referenced by the
+ // DW_TAG_variant DIEs inside of the DW_TAG_variant_part DIE.
+ // We also compute the names for the corresponding DW_TAG_variant DIEs here.
+ let variant_member_infos: SmallVec<_> = enum_adt_def
+ .variant_range()
+ .map(|variant_index| VariantMemberInfo {
+ variant_index,
+ variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ enum_type_and_layout.for_variant(cx, variant_index),
+ ),
+ source_info: None,
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_member_infos[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the enum type. Rather
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Build the debuginfo node for a generator environment. It looks the same as the debuginfo for
+/// an enum. See [build_enum_type_di_node] for more information.
+///
+/// ```txt
+///
+/// ---> DW_TAG_structure_type (top-level type for the generator)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let &ty::Generator(generator_def_id, _, _ ) = generator_type.kind() else {
+ bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, generator_def_id);
+ let generator_type_and_layout = cx.layout_of(generator_type);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| {
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ };
+
+ let common_upvar_names =
+ closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+
+ // Build variant struct types
+ let variant_struct_type_di_nodes: SmallVec<_> = variants
+ .indices()
+ .map(|variant_index| {
+ // FIXME: This is problematic because just a number is not a valid identifier.
+ // GeneratorSubsts::variant_name(variant_index), would be consistent
+ // with enums?
+ let variant_name = format!("{}", variant_index.as_usize()).into();
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line))
+ } else {
+ None
+ };
+
+ VariantMemberInfo {
+ variant_index,
+ variant_name,
+ variant_struct_type_di_node:
+ super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ ),
+ source_info,
+ }
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_struct_type_di_nodes[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the generator type. Rather
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Builds the DW_TAG_variant_part of an enum or generator debuginfo node:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// ---> DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+fn build_enum_variant_part_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_member_infos: &[VariantMemberInfo<'_, 'll>],
+) -> &'ll DIType {
+ let tag_member_di_node =
+ build_discr_member_di_node(cx, enum_type_and_layout, enum_type_di_node);
+
+ let variant_part_unique_type_id =
+ UniqueTypeId::for_enum_variant_part(cx.tcx, enum_type_and_layout.ty);
+
+ let stub = StubInfo::new(
+ cx,
+ variant_part_unique_type_id,
+ |cx, variant_part_unique_type_id_str| unsafe {
+ let variant_part_name = "";
+ llvm::LLVMRustDIBuilderCreateVariantPart(
+ DIB(cx),
+ enum_type_di_node,
+ variant_part_name.as_ptr().cast(),
+ variant_part_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ DIFlags::FlagZero,
+ tag_member_di_node,
+ create_DIArray(DIB(cx), &[]),
+ variant_part_unique_type_id_str.as_ptr().cast(),
+ variant_part_unique_type_id_str.len(),
+ )
+ },
+ );
+
+ type_map::build_type_with_children(
+ cx,
+ stub,
+ |cx, variant_part_di_node| {
+ variant_member_infos
+ .iter()
+ .map(|variant_member_info| {
+ build_enum_variant_member_di_node(
+ cx,
+ enum_type_and_layout,
+ variant_part_di_node,
+ variant_member_info,
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
+/// Builds the DW_TAG_member describing where we can find the tag of an enum.
+/// Returns `None` if the enum does not have a tag.
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// ---> DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+fn build_discr_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_generator_type_di_node: &'ll DIType,
+) -> Option<&'ll DIType> {
+ let tag_name = match enum_or_generator_type_and_layout.ty.kind() {
+ ty::Generator(..) => "__state",
+ _ => "",
+ };
+
+ // NOTE: This is actually wrong. This will become a member
+ // of the DW_TAG_variant_part. But, due to LLVM's API, that
+ // can only be constructed with this DW_TAG_member already created.
+ // In LLVM IR the wrong scope will be listed, but when DWARF is
+ // generated from it, the DW_TAG_member will be a child of the
+ // DW_TAG_variant_part.
+ let containing_scope = enum_or_generator_type_di_node;
+
+ match enum_or_generator_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ &Variants::Single { .. } => None,
+
+ &Variants::Multiple { tag_field, .. } => {
+ let tag_base_type = tag_base_type(cx, enum_or_generator_type_and_layout);
+ let (size, align) = cx.size_and_align_of(tag_base_type);
+
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ containing_scope,
+ tag_name.as_ptr().cast(),
+ tag_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ enum_or_generator_type_and_layout.fields.offset(tag_field).bits(),
+ DIFlags::FlagArtificial,
+ type_di_node(cx, tag_base_type),
+ ))
+ }
+ }
+ }
+}
+
+/// Build the debuginfo node for `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// This node looks like:
+///
+/// ```txt
+/// DW_TAG_variant
+/// DW_AT_discr_value 0
+/// DW_TAG_member
+/// DW_AT_name None
+/// DW_AT_type <0x000002a1>
+/// DW_AT_alignment 0x00000002
+/// DW_AT_data_member_location 0
+/// ```
+///
+/// The DW_AT_discr_value is optional, and is omitted if
+/// - This is the only variant of a univariant enum (i.e. there is no discriminant)
+/// - This is the "dataful" variant of a niche-layout enum
+/// (where only the other variants are identified by a single value)
+///
+/// There is only ever a single member, the type of which is a struct that describes the
+/// fields of the variant (excluding the discriminant). The name of the member is the name
+/// of the variant as given in the source code. The DW_AT_data_member_location is always
+/// zero.
+///
+/// Note that the LLVM DIBuilder API is a bit unintuitive here. The DW_TAG_variant subtree
+/// (including the DW_TAG_member) is built by a single call to
+/// `LLVMRustDIBuilderCreateVariantMemberType()`.
+fn build_enum_variant_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_part_di_node: &'ll DIType,
+ variant_member_info: &VariantMemberInfo<'_, 'll>,
+) -> &'ll DIType {
+ let variant_index = variant_member_info.variant_index;
+ let discr_value = super::compute_discriminant_value(cx, enum_type_and_layout, variant_index);
+
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariantMemberType(
+ DIB(cx),
+ variant_part_di_node,
+ variant_member_info.variant_name.as_ptr().cast(),
+ variant_member_info.variant_name.len(),
+ file_di_node,
+ line_number,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ Size::ZERO.bits(),
+ discr_value.map(|v| cx.const_u64(v)),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+}
+
+/// Information needed for building a `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+struct VariantMemberInfo<'a, 'll> {
+ variant_index: VariantIdx,
+ variant_name: Cow<'a, str>,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
--- /dev/null
+use std::cell::RefCell;
+
+use rustc_data_structures::{
+ fingerprint::Fingerprint,
+ fx::FxHashMap,
+ stable_hasher::{HashStable, NodeIdHashingMode, StableHasher},
+};
+use rustc_middle::{
+ bug,
+ ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt},
+};
+use rustc_target::abi::{Align, Size, VariantIdx};
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::utils::{create_DIArray, debug_context, DIB},
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIScope, DIType},
+ },
+};
+
+use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
+
+mod private {
+ // This type cannot be constructed outside of this module because
+ // it has a private field. We make use of this in order to prevent
+ // `UniqueTypeId` from being constructed directly, without asserting
+ // the preconditions.
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+ pub struct HiddenZst;
+}
+
+/// A unique identifier for anything that we create a debuginfo node for.
+/// The types it contains are expected to already be normalized (which
+/// is debug_asserted in the constructors).
+///
+/// Note that there are some things that only show up in debuginfo, like
+/// the separate type descriptions for each enum variant. These get an ID
+/// too because they have their own debuginfo node in LLVM IR.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+pub(super) enum UniqueTypeId<'tcx> {
+ /// The ID of a regular type as it shows up at the language level.
+ Ty(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the single DW_TAG_variant_part nested inside the top-level
+ /// DW_TAG_structure_type that describes enums and generators.
+ VariantPart(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the artificial struct type describing a single enum variant.
+ VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+ /// The ID of the artificial type we create for VTables.
+ VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
+}
+
+impl<'tcx> UniqueTypeId<'tcx> {
+ pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
+ debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+ UniqueTypeId::Ty(t, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_struct_type(
+ tcx: TyCtxt<'tcx>,
+ enum_ty: Ty<'tcx>,
+ variant_idx: VariantIdx,
+ ) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
+ }
+
+ pub fn for_vtable_ty(
+ tcx: TyCtxt<'tcx>,
+ self_type: Ty<'tcx>,
+ implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
+ ) -> Self {
+ debug_assert_eq!(
+ self_type,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
+ );
+ debug_assert_eq!(
+ implemented_trait,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
+ );
+ UniqueTypeId::VTableTy(self_type, implemented_trait, private::HiddenZst)
+ }
+
+ /// Generates a string version of this [UniqueTypeId], which can be used as the `UniqueId`
+ /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods.
+ ///
+ /// Right now this takes the form of a hex-encoded opaque hash value.
+ pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String {
+ let mut hasher = StableHasher::new();
+ let mut hcx = tcx.create_stable_hashing_context();
+ hcx.while_hashing_spans(false, |hcx| {
+ hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
+ self.hash_stable(hcx, &mut hasher);
+ });
+ });
+ hasher.finish::<Fingerprint>().to_hex()
+ }
+
+ pub fn expect_ty(self) -> Ty<'tcx> {
+ match self {
+ UniqueTypeId::Ty(ty, _) => ty,
+ _ => bug!("Expected `UniqueTypeId::Ty` but found `{:?}`", self),
+ }
+ }
+}
+
+/// The `TypeMap` is where the debug context holds the type metadata nodes
+/// created so far. The debuginfo nodes are identified by `UniqueTypeId`.
+#[derive(Default)]
+pub(crate) struct TypeMap<'ll, 'tcx> {
+ pub(super) unique_id_to_di_node: RefCell<FxHashMap<UniqueTypeId<'tcx>, &'ll DIType>>,
+}
+
+impl<'ll, 'tcx> TypeMap<'ll, 'tcx> {
+ /// Adds a `UniqueTypeId` to metadata mapping to the `TypeMap`. The method will
+ /// fail if the mapping already exists.
+ pub(super) fn insert(&self, unique_type_id: UniqueTypeId<'tcx>, metadata: &'ll DIType) {
+ if self.unique_id_to_di_node.borrow_mut().insert(unique_type_id, metadata).is_some() {
+ bug!("type metadata for unique ID '{:?}' is already in the `TypeMap`!", unique_type_id);
+ }
+ }
+
+ pub(super) fn di_node_for_unique_id(
+ &self,
+ unique_type_id: UniqueTypeId<'tcx>,
+ ) -> Option<&'ll DIType> {
+ self.unique_id_to_di_node.borrow().get(&unique_type_id).cloned()
+ }
+}
+
+pub struct DINodeCreationResult<'ll> {
+ pub di_node: &'ll DIType,
+ pub already_stored_in_typemap: bool,
+}
+
+impl<'ll> DINodeCreationResult<'ll> {
+ pub fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self {
+ DINodeCreationResult { di_node, already_stored_in_typemap }
+ }
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum Stub<'ll> {
+ Struct,
+ Union,
+ VtableTy { vtable_holder: &'ll DIType },
+}
+
+pub struct StubInfo<'ll, 'tcx> {
+ metadata: &'ll DIType,
+ unique_type_id: UniqueTypeId<'tcx>,
+}
+
+impl<'ll, 'tcx> StubInfo<'ll, 'tcx> {
+ pub(super) fn new(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ build: impl FnOnce(&CodegenCx<'ll, 'tcx>, /* unique_type_id_str: */ &str) -> &'ll DIType,
+ ) -> StubInfo<'ll, 'tcx> {
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+ let di_node = build(cx, &unique_type_id_str);
+ StubInfo { metadata: di_node, unique_type_id }
+ }
+}
+
+/// Create a stub debuginfo node onto which fields and nested types can be attached.
+pub(super) fn stub<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ kind: Stub<'ll>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ name: &str,
+ (size, align): (Size, Align),
+ containing_scope: Option<&'ll DIScope>,
+ flags: DIFlags,
+) -> StubInfo<'ll, 'tcx> {
+ let empty_array = create_DIArray(DIB(cx), &[]);
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+
+ let metadata = match kind {
+ Stub::Struct | Stub::VtableTy { .. } => {
+ let vtable_holder = match kind {
+ Stub::VtableTy { vtable_holder } => Some(vtable_holder),
+ _ => None,
+ };
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStructType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ None,
+ empty_array,
+ 0,
+ vtable_holder,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ }
+ }
+ Stub::Union => unsafe {
+ llvm::LLVMRustDIBuilderCreateUnionType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ Some(empty_array),
+ 0,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ },
+ };
+ StubInfo { metadata, unique_type_id }
+}
+
+/// This function enables creating debuginfo nodes that can recursively refer to themselves.
+/// It will first insert the given stub into the type map and only then execute the `members`
+/// and `generics` closures passed in. These closures have access to the stub so they can
+/// directly attach fields to them. If the type of a field transitively refers back
+/// to the type currently being built, the stub will already be found in the type map,
+/// which effectively breaks the recursion cycle.
+pub(super) fn build_type_with_children<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ stub_info: StubInfo<'ll, 'tcx>,
+ members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
+ generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
+) -> DINodeCreationResult<'ll> {
+ debug_assert_eq!(
+ debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
+ None
+ );
+
+ debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);
+
+ let members: SmallVec<_> =
+ members(cx, stub_info.metadata).into_iter().map(|node| Some(node)).collect();
+ let generics: SmallVec<Option<&'ll DIType>> =
+ generics(cx).into_iter().map(|node| Some(node)).collect();
+
+ if !(members.is_empty() && generics.is_empty()) {
+ unsafe {
+ let members_array = create_DIArray(DIB(cx), &members[..]);
+ let generics_array = create_DIArray(DIB(cx), &generics[..]);
+ llvm::LLVMRustDICompositeTypeReplaceArrays(
+ DIB(cx),
+ stub_info.metadata,
+ Some(members_array),
+ Some(generics_array),
+ );
+ }
+ }
+
+ DINodeCreationResult { di_node: stub_info.metadata, already_stored_in_typemap: true }
+}
use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
-use self::metadata::{file_metadata, type_metadata, TypeMap};
+use self::metadata::{file_metadata, type_di_node};
use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
use self::namespace::mangled_name_of_instance;
use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
use rustc_codegen_ssa::debuginfo::type_names;
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
use rustc_hir::def_id::{DefId, DefIdMap};
use rustc_index::vec::IndexVec;
use rustc_session::Session;
use rustc_span::symbol::Symbol;
use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, Span};
-use rustc_target::abi::{Primitive, Size};
+use rustc_target::abi::Size;
use libc::c_uint;
use smallvec::SmallVec;
mod utils;
pub use self::create_scope_map::compute_mir_scopes;
-pub use self::metadata::create_global_var_metadata;
+pub use self::metadata::build_global_var_di_node;
pub use self::metadata::extend_scope_to_file;
#[allow(non_upper_case_globals)]
const DW_TAG_arg_variable: c_uint = 0x101;
/// A context object for maintaining all state needed by the debuginfo module.
-pub struct CrateDebugContext<'a, 'tcx> {
- llcontext: &'a llvm::Context,
- llmod: &'a llvm::Module,
- builder: &'a mut DIBuilder<'a>,
- created_files: RefCell<FxHashMap<(Option<String>, Option<String>), &'a DIFile>>,
- created_enum_disr_types: RefCell<FxHashMap<(DefId, Primitive), &'a DIType>>,
-
- type_map: TypeMap<'a, 'tcx>,
- namespace_map: RefCell<DefIdMap<&'a DIScope>>,
-
- recursion_marker_type: OnceCell<&'a DIType>,
-
- // This collection is used to assert that composite types (structs, enums,
- // ...) have their members only set once:
- composite_types_completed: RefCell<FxHashSet<&'a DIType>>,
+pub struct CodegenUnitDebugContext<'ll, 'tcx> {
+ llcontext: &'ll llvm::Context,
+ llmod: &'ll llvm::Module,
+ builder: &'ll mut DIBuilder<'ll>,
+ created_files: RefCell<FxHashMap<(Option<String>, Option<String>), &'ll DIFile>>,
+
+ type_map: metadata::TypeMap<'ll, 'tcx>,
+ namespace_map: RefCell<DefIdMap<&'ll DIScope>>,
+ recursion_marker_type: OnceCell<&'ll DIType>,
}
-impl Drop for CrateDebugContext<'_, '_> {
+impl Drop for CodegenUnitDebugContext<'_, '_> {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
}
}
-impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
- pub fn new(llmod: &'a llvm::Module) -> Self {
- debug!("CrateDebugContext::new");
+impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
+ pub fn new(llmod: &'ll llvm::Module) -> Self {
+ debug!("CodegenUnitDebugContext::new");
let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
// DIBuilder inherits context from the module, so we'd better use the same one
let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
- CrateDebugContext {
+ CodegenUnitDebugContext {
llcontext,
llmod,
builder,
created_files: Default::default(),
- created_enum_disr_types: Default::default(),
type_map: Default::default(),
namespace_map: RefCell::new(Default::default()),
recursion_marker_type: OnceCell::new(),
- composite_types_completed: Default::default(),
}
}
signature.push(if fn_abi.ret.is_ignore() {
None
} else {
- Some(type_metadata(cx, fn_abi.ret.layout.ty))
+ Some(type_di_node(cx, fn_abi.ret.layout.ty))
});
// Arguments types
}
_ => t,
};
- Some(type_metadata(cx, t))
+ Some(type_di_node(cx, t))
}));
} else {
signature
- .extend(fn_abi.args.iter().map(|arg| Some(type_metadata(cx, arg.layout.ty))));
+ .extend(fn_abi.args.iter().map(|arg| Some(type_di_node(cx, arg.layout.ty))));
}
create_DIArray(DIB(cx), &signature[..])
if let GenericArgKind::Type(ty) = kind.unpack() {
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
- let actual_type_metadata = type_metadata(cx, actual_type);
+ let actual_type_metadata = type_di_node(cx, actual_type);
let name = name.as_str();
Some(unsafe {
Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
if cx.sess().opts.debuginfo == DebugInfo::Full
&& !impl_self_ty.needs_subst()
{
- Some(type_metadata(cx, impl_self_ty))
+ Some(type_di_node(cx, impl_self_ty))
} else {
- Some(namespace::item_namespace(cx, def.did))
+ Some(namespace::item_namespace(cx, def.did()))
}
}
_ => None,
unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) }
}
- fn create_vtable_metadata(
+ fn create_vtable_debuginfo(
&self,
ty: Ty<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
vtable: Self::Value,
) {
- metadata::create_vtable_metadata(self, ty, trait_ref, vtable)
+ metadata::create_vtable_di_node(self, ty, trait_ref, vtable)
}
fn extend_scope_to_file(
let loc = self.lookup_debug_loc(span.lo());
let file_metadata = file_metadata(self, &loc.file);
- let type_metadata = type_metadata(self, variable_type);
+ let type_metadata = type_di_node(self, variable_type);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
// Utility Functions.
use super::namespace::item_namespace;
-use super::CrateDebugContext;
+use super::CodegenUnitDebugContext;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
#[inline]
pub fn debug_context<'a, 'll, 'tcx>(
cx: &'a CodegenCx<'ll, 'tcx>,
-) -> &'a CrateDebugContext<'ll, 'tcx> {
+) -> &'a CodegenUnitDebugContext<'ll, 'tcx> {
cx.dbg_cx.as_ref().unwrap()
}
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(bool_to_option)]
#![feature(crate_visibility_modifier)]
+#![feature(let_chains)]
#![feature(let_else)]
#![feature(extern_types)]
#![feature(once_cell)]
("x86", "avx512vaes") => smallvec!["vaes"],
("x86", "avx512gfni") => smallvec!["gfni"],
("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
- ("aarch64", "fp") => smallvec!["fp-armv8"],
- ("aarch64", "fp16") => smallvec!["fullfp16"],
- ("aarch64", "fhm") => smallvec!["fp16fml"],
("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
("aarch64", "dpb") => smallvec!["ccpp"],
("aarch64", "dpb2") => smallvec!["ccdp"],
("aarch64", "pmuv3") => smallvec!["perfmon"],
("aarch64", "paca") => smallvec!["pauth"],
("aarch64", "pacg") => smallvec!["pauth"],
+ // Rust ties fp and neon together. In LLVM neon implicitly enables fp,
+ // but we manually enable neon when a feature only implicitly enables fp
+ ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
+ ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
+ ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
+ ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
+ ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
+ ("aarch64", "sve") => smallvec!["sve", "neon"],
+ ("aarch64", "sve2") => smallvec!["sve2", "neon"],
+ ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
+ ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
+ ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
+ ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
(_, s) => smallvec![s],
}
}
None
}
+// Used to generate cfg variables and apply features
+// Must express features in the way Rust understands them
pub fn target_features(sess: &Session) -> Vec<Symbol> {
let target_machine = create_informational_target_machine(sess);
let mut features: Vec<Symbol> =
if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None }
})
.filter(|feature| {
+ // check that all features in a given smallvec are enabled
for llvm_feature in to_llvm_features(sess, feature) {
let cstr = SmallCStr::new(llvm_feature);
- if unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
- return true;
+ if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+ return false;
}
}
- false
+ true
})
.map(|feature| Symbol::intern(feature))
.collect();
if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
return SmallVec::<[_; 2]>::new();
}
- // ... otherwise though we run through `to_llvm_feature when
+ // ... otherwise though we run through `to_llvm_features` when
// passing requests down to LLVM. This means that all in-language
// features also work on the command line instead of having two
// different names when the LLVM name and the Rust name differ.
if let (&ty::Adt(def, _), &Variants::Single { index }) =
(layout.ty.kind(), &layout.variants)
{
- if def.is_enum() && !def.variants.is_empty() {
- write!(&mut name, "::{}", def.variants[index].name).unwrap();
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
}
}
if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
if let Some(code) = diag.code {
d.code(code);
}
- handler.emit_diagnostic(&d);
+ handler.emit_diagnostic(&mut d);
}
Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => {
let msg = msg.strip_prefix("error: ").unwrap_or(&msg);
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+
+#[cfg(parallel_compiler)]
use rustc_data_structures::sync::{par_iter, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
- for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
let src_f = src.project_field(bx, i);
let dst_f = dst.project_field(bx, i);
// This likely is a temporary measure. Once we don't have to support the
// non-parallel compiler anymore, we can compile CGUs end-to-end in
// parallel and get rid of the complicated scheduling logic.
+ #[cfg(parallel_compiler)]
let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
- if cfg!(parallel_compiler) {
- tcx.sess.time("compile_first_CGU_batch", || {
- // Try to find one CGU to compile per thread.
- let cgus: Vec<_> = cgu_reuse
- .iter()
- .enumerate()
- .filter(|&(_, reuse)| reuse == &CguReuse::No)
- .take(tcx.sess.threads())
- .collect();
-
- // Compile the found CGUs in parallel.
- let start_time = Instant::now();
-
- let pre_compiled_cgus = par_iter(cgus)
- .map(|(i, _)| {
- let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
- (i, module)
- })
- .collect();
-
- (pre_compiled_cgus, start_time.elapsed())
- })
- } else {
- (FxHashMap::default(), Duration::new(0, 0))
- }
+ tcx.sess.time("compile_first_CGU_batch", || {
+ // Try to find one CGU to compile per thread.
+ let cgus: Vec<_> = cgu_reuse
+ .iter()
+ .enumerate()
+ .filter(|&(_, reuse)| reuse == &CguReuse::No)
+ .take(tcx.sess.threads())
+ .collect();
+
+ // Compile the found CGUs in parallel.
+ let start_time = Instant::now();
+
+ let pre_compiled_cgus = par_iter(cgus)
+ .map(|(i, _)| {
+ let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
+ (i, module)
+ })
+ .collect();
+
+ (pre_compiled_cgus, start_time.elapsed())
+ })
};
+ #[cfg(not(parallel_compiler))]
+ let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));
+
let mut cgu_reuse = Vec::new();
let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
let mut total_codegen_time = Duration::new(0, 0);
+use rustc_middle::ty::{self, layout::TyAndLayout};
+use rustc_target::abi::Size;
+
// FIXME(eddyb) find a place for this (or a way to replace it).
pub mod type_names;
+
+/// Returns true if we want to generate a DW_TAG_enumeration_type description for
+/// this instead of a DW_TAG_struct_type with DW_TAG_variant_part.
+///
+/// NOTE: This is somewhat inconsistent right now: For empty enums and enums with a single
+/// fieldless variant, we generate DW_TAG_struct_type, although a
+/// DW_TAG_enumeration_type would be a better fit.
+pub fn wants_c_like_enum_debuginfo<'tcx>(enum_type_and_layout: TyAndLayout<'tcx>) -> bool {
+ match enum_type_and_layout.ty.kind() {
+ ty::Adt(adt_def, _) => {
+ if !adt_def.is_enum() {
+ return false;
+ }
+
+ match adt_def.variants().len() {
+ 0 => false,
+ 1 => {
+ // Univariant enums unless they are zero-sized
+ enum_type_and_layout.size != Size::ZERO && adt_def.all_fields().count() == 0
+ }
+ _ => {
+ // Enums with more than one variant if they have no fields
+ adt_def.all_fields().count() == 0
+ }
+ }
+ }
+ _ => false,
+ }
+}
use rustc_hir::def_id::DefId;
use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
-use rustc_middle::ty::layout::IntegerExt;
+use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
-use rustc_middle::ty::{self, AdtDef, ExistentialProjection, Ty, TyCtxt};
+use rustc_middle::ty::{self, ExistentialProjection, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
use rustc_query_system::ich::NodeIdHashingMode;
use rustc_target::abi::{Integer, TagEncoding, Variants};
use smallvec::SmallVec;
+use std::borrow::Cow;
use std::fmt::Write;
+use crate::debuginfo::wants_c_like_enum_debuginfo;
+
// Compute the name of the type as it should be stored in debuginfo. Does not do
// any caching, i.e., calling the function twice with the same type will also do
// the work twice. The `qualified` parameter only affects the first level of the
ty::Float(float_ty) => output.push_str(float_ty.name_str()),
ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
ty::Adt(def, substs) => {
- if def.is_enum() && cpp_like_debuginfo {
- msvc_enum_fallback(tcx, t, def, substs, output, visited);
+ // `layout_for_cpp_like_fallback` will be `Some` if we want to use the fallback encoding.
+ let layout_for_cpp_like_fallback = if cpp_like_debuginfo && def.is_enum() {
+ match tcx.layout_of(ParamEnv::reveal_all().and(t)) {
+ Ok(layout) => {
+ if !wants_c_like_enum_debuginfo(layout) {
+ Some(layout)
+ } else {
+ // This is a C-like enum so we don't want to use the fallback encoding
+ // for the name.
+ None
+ }
+ }
+ Err(e) => {
+ // Computing the layout can still fail here, e.g. if the target architecture
+ // cannot represent the type. See https://github.com/rust-lang/rust/issues/94961.
+ tcx.sess.fatal(&format!("{}", e));
+ }
+ }
} else {
- push_item_name(tcx, def.did, qualified, output);
+ // We are not emitting cpp-like debuginfo or this isn't even an enum.
+ None
+ };
+
+ if let Some(ty_and_layout) = layout_for_cpp_like_fallback {
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_item_name(tcx, def.did(), true, output);
+ push_generic_params_internal(tcx, substs, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_item_name(tcx, def.did(), qualified, output);
push_generic_params_internal(tcx, substs, output, visited);
}
}
let projection_bounds: SmallVec<[_; 4]> = trait_data
.projection_bounds()
.map(|bound| {
- let ExistentialProjection { item_def_id, term, .. } = bound.skip_binder();
+ let ExistentialProjection { item_def_id, term, .. } =
+ tcx.erase_late_bound_regions(bound);
// FIXME(associated_const_equality): allow for consts here
(item_def_id, term.ty().unwrap())
})
if projection_bounds.len() != 0 {
if principal_has_generic_params {
// push_generic_params_internal() above added a `>` but we actually
- // want to add more items to that list, so remove that again.
+ // want to add more items to that list, so remove that again...
pop_close_angle_bracket(output);
+ // ... and add a comma to separate the regular generic args from the
+ // associated types.
+ push_arg_separator(cpp_like_debuginfo, output);
+ } else {
+ // push_generic_params_internal() did not add `<...>`, so we open
+ // angle brackets here.
+ output.push('<');
}
for (item_def_id, ty) in projection_bounds {
- push_arg_separator(cpp_like_debuginfo, output);
-
if cpp_like_debuginfo {
output.push_str("assoc$<");
push_item_name(tcx, item_def_id, false, output);
output.push('=');
push_debuginfo_type_name(tcx, ty, true, output, visited);
}
+ push_arg_separator(cpp_like_debuginfo, output);
}
+ pop_arg_separator(output);
push_close_angle_bracket(cpp_like_debuginfo, output);
}
ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
// Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
// "{async_fn_env#0}<T1, T2, ...>", etc.
- let def_key = tcx.def_key(def_id);
-
- if qualified {
- let parent_def_id = DefId { index: def_key.parent.unwrap(), ..def_id };
- push_item_name(tcx, parent_def_id, true, output);
- output.push_str("::");
+ // In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
+ // an artificial `enum$<>` type, as defined in msvc_enum_fallback().
+ if cpp_like_debuginfo && t.is_generator() {
+ let ty_and_layout = tcx.layout_of(ParamEnv::reveal_all().and(t)).unwrap();
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_closure_or_generator_name(tcx, def_id, substs, qualified, output, visited);
}
-
- let mut label = String::with_capacity(20);
- write!(&mut label, "{}_env", generator_kind_label(tcx.generator_kind(def_id))).unwrap();
-
- push_disambiguated_special_name(
- &label,
- def_key.disambiguated_data.disambiguator,
- cpp_like_debuginfo,
- output,
- );
-
- // We also need to add the generic arguments of the async fn/generator or
- // the enclosing function (for closures or async blocks), so that we end
- // up with a unique name for every instantiation.
-
- // Find the generics of the enclosing function, as defined in the source code.
- let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
- let generics = tcx.generics_of(enclosing_fn_def_id);
-
- // Truncate the substs to the length of the above generics. This will cut off
- // anything closure- or generator-specific.
- let substs = substs.truncate_to(tcx, generics);
- push_generic_params_internal(tcx, substs, output, visited);
}
// Type parameters from polymorphized functions.
ty::Param(_) => {
- output.push_str(&format!("{:?}", t));
+ write!(output, "{:?}", t).unwrap();
}
ty::Error(_)
| ty::Infer(_)
// `EnumMemberDescriptionFactory::create_member_descriptions`.
fn msvc_enum_fallback<'tcx>(
tcx: TyCtxt<'tcx>,
- ty: Ty<'tcx>,
- def: &AdtDef,
- substs: SubstsRef<'tcx>,
+ ty_and_layout: TyAndLayout<'tcx>,
+ push_inner: &dyn Fn(/*output*/ &mut String, /*visited*/ &mut FxHashSet<Ty<'tcx>>),
output: &mut String,
visited: &mut FxHashSet<Ty<'tcx>>,
) {
- let layout = tcx.layout_of(tcx.param_env(def.did).and(ty)).expect("layout error");
+ debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
+ let ty = ty_and_layout.ty;
output.push_str("enum$<");
- push_item_name(tcx, def.did, true, output);
- push_generic_params_internal(tcx, substs, output, visited);
+ push_inner(output, visited);
+
+ let variant_name = |variant_index| match ty.kind() {
+ ty::Adt(adt_def, _) => {
+ debug_assert!(adt_def.is_enum());
+ Cow::from(adt_def.variant(variant_index).name.as_str())
+ }
+ ty::Generator(..) => GeneratorSubsts::variant_name(variant_index),
+ _ => unreachable!(),
+ };
if let Variants::Multiple {
tag_encoding: TagEncoding::Niche { dataful_variant, .. },
tag,
variants,
..
- } = &layout.variants
+ } = &ty_and_layout.variants
{
let dataful_variant_layout = &variants[*dataful_variant];
let max = dataful_discriminant_range.end;
let max = tag.value.size(&tcx).truncate(max);
- let dataful_variant_name = def.variants[*dataful_variant].name.as_str();
-
- output.push_str(&format!(", {}, {}, {}", min, max, dataful_variant_name));
- } else if let Variants::Single { index: variant_idx } = &layout.variants {
+ let dataful_variant_name = variant_name(*dataful_variant);
+ write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();
+ } else if let Variants::Single { index: variant_idx } = &ty_and_layout.variants {
// Uninhabited enums can't be constructed and should never need to be visualized so
// skip this step for them.
- if def.variants.len() != 0 {
- let variant = def.variants[*variant_idx].name.as_str();
-
- output.push_str(&format!(", {}", variant));
+ if !ty_and_layout.abi.is_uninhabited() {
+ write!(output, ", {}", variant_name(*variant_idx)).unwrap();
}
}
push_close_angle_bracket(true, output);
push_generic_params_internal(tcx, substs, output, &mut visited);
}
+fn push_closure_or_generator_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ qualified: bool,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+ // Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
+ // "{async_fn_env#0}<T1, T2, ...>", etc.
+ let def_key = tcx.def_key(def_id);
+ let generator_kind = tcx.generator_kind(def_id);
+
+ if qualified {
+ let parent_def_id = DefId { index: def_key.parent.unwrap(), ..def_id };
+ push_item_name(tcx, parent_def_id, true, output);
+ output.push_str("::");
+ }
+
+ let mut label = String::with_capacity(20);
+ write!(&mut label, "{}_env", generator_kind_label(generator_kind)).unwrap();
+
+ push_disambiguated_special_name(
+ &label,
+ def_key.disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+
+ // We also need to add the generic arguments of the async fn/generator or
+ // the enclosing function (for closures or async blocks), so that we end
+ // up with a unique name for every instantiation.
+
+ // Find the generics of the enclosing function, as defined in the source code.
+ let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+ let generics = tcx.generics_of(enclosing_fn_def_id);
+
+ // Truncate the substs to the length of the above generics. This will cut off
+ // anything closure- or generator-specific.
+ let substs = substs.truncate_to(tcx, generics);
+ push_generic_params_internal(tcx, substs, output, visited);
+}
+
fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
// MSVC debugger always treats `>>` as a shift, even when parsing templates,
// so add a space to avoid confusion.
// Packed types ignore the alignment of their fields.
if let ty::Adt(def, _) = t.kind() {
- if def.repr.packed() {
+ if def.repr().packed() {
unsized_align = sized_align;
}
}
let align = cx.data_layout().pointer_align.abi;
let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
- cx.create_vtable_metadata(ty, trait_ref, vtable);
+ cx.create_vtable_debuginfo(ty, trait_ref, vtable);
cx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
vtable
}
use crate::traits::*;
-use rustc_errors::ErrorGuaranteed;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
all_consts_ok = false;
match err {
// errored or at least linted
- ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted => {}
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
ErrorHandled::TooGeneric => {
span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
}
.ty;
let (llptr, llextra) = match self.val {
OperandValue::Immediate(llptr) => (llptr, None),
- OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+ OperandValue::Pair(llptr, llextra) => {
+ // if the box's allocator isn't a ZST, then "llextra" is actually the allocator
+ if self.layout.ty.is_box() && !self.layout.field(cx, 1).is_zst() {
+ (llptr, None)
+ } else {
+ (llptr, Some(llextra))
+ }
+ }
OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
};
let layout = cx.layout_of(projected_ty);
_ if !field.is_unsized() => return simple(),
ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
ty::Adt(def, _) => {
- if def.repr.packed() {
+ if def.repr().packed() {
// FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments.
assert_eq!(self.layout.align.abi.bytes(), 1);
.find(|elem| matches!(elem.1, mir::ProjectionElem::Deref))
{
base = elem.0 + 1;
- self.codegen_consume(
+ let cg_base = self.codegen_consume(
bx,
mir::PlaceRef { projection: &place_ref.projection[..elem.0], ..place_ref },
- )
- .deref(bx.cx())
+ );
+
+ // a box with a non-zst allocator should not be directly dereferenced
+ if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
+ let ptr = cg_base.extract_field(bx, 0).extract_field(bx, 0);
+
+ ptr.deref(bx.cx())
+ } else {
+ cg_base.deref(bx.cx())
+ }
} else {
bug!("using operand local {:?} as place", place_ref);
}
for elem in place_ref.projection[base..].iter() {
cg_base = match elem.clone() {
mir::ProjectionElem::Deref => {
- // custom allocators can change box's abi, making it unable to be derefed directly
- if cg_base.layout.ty.is_box()
- && matches!(cg_base.layout.abi, Abi::Aggregate { .. } | Abi::Uninhabited)
- {
+ // a box with a non-zst allocator should not be directly dereferenced
+ if cg_base.layout.ty.is_box() && !cg_base.layout.field(cx, 1).is_zst() {
let ptr = cg_base.project_field(bx, 0).project_field(bx, 0);
bx.load_operand(ptr).deref(bx.cx())
];
const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- // FEAT_AdvSimd
- ("neon", Some(sym::aarch64_target_feature)),
- // FEAT_FP
- ("fp", Some(sym::aarch64_target_feature)),
+ // FEAT_AdvSimd & FEAT_FP
+ ("neon", None),
// FEAT_FP16
- ("fp16", Some(sym::aarch64_target_feature)),
+ ("fp16", None),
// FEAT_SVE
- ("sve", Some(sym::aarch64_target_feature)),
+ ("sve", None),
// FEAT_CRC
- ("crc", Some(sym::aarch64_target_feature)),
+ ("crc", None),
// FEAT_RAS
- ("ras", Some(sym::aarch64_target_feature)),
+ ("ras", None),
// FEAT_LSE
- ("lse", Some(sym::aarch64_target_feature)),
+ ("lse", None),
// FEAT_RDM
- ("rdm", Some(sym::aarch64_target_feature)),
+ ("rdm", None),
// FEAT_RCPC
- ("rcpc", Some(sym::aarch64_target_feature)),
+ ("rcpc", None),
// FEAT_RCPC2
- ("rcpc2", Some(sym::aarch64_target_feature)),
+ ("rcpc2", None),
// FEAT_DotProd
- ("dotprod", Some(sym::aarch64_target_feature)),
+ ("dotprod", None),
// FEAT_TME
- ("tme", Some(sym::aarch64_target_feature)),
+ ("tme", None),
// FEAT_FHM
- ("fhm", Some(sym::aarch64_target_feature)),
+ ("fhm", None),
// FEAT_DIT
- ("dit", Some(sym::aarch64_target_feature)),
+ ("dit", None),
// FEAT_FLAGM
- ("flagm", Some(sym::aarch64_target_feature)),
+ ("flagm", None),
// FEAT_SSBS
- ("ssbs", Some(sym::aarch64_target_feature)),
+ ("ssbs", None),
// FEAT_SB
- ("sb", Some(sym::aarch64_target_feature)),
+ ("sb", None),
// FEAT_PAUTH (address authentication)
- ("paca", Some(sym::aarch64_target_feature)),
+ ("paca", None),
// FEAT_PAUTH (generic authentication)
- ("pacg", Some(sym::aarch64_target_feature)),
+ ("pacg", None),
// FEAT_DPB
- ("dpb", Some(sym::aarch64_target_feature)),
+ ("dpb", None),
// FEAT_DPB2
- ("dpb2", Some(sym::aarch64_target_feature)),
+ ("dpb2", None),
// FEAT_SVE2
- ("sve2", Some(sym::aarch64_target_feature)),
+ ("sve2", None),
// FEAT_SVE2_AES
- ("sve2-aes", Some(sym::aarch64_target_feature)),
+ ("sve2-aes", None),
// FEAT_SVE2_SM4
- ("sve2-sm4", Some(sym::aarch64_target_feature)),
+ ("sve2-sm4", None),
// FEAT_SVE2_SHA3
- ("sve2-sha3", Some(sym::aarch64_target_feature)),
+ ("sve2-sha3", None),
// FEAT_SVE2_BitPerm
- ("sve2-bitperm", Some(sym::aarch64_target_feature)),
+ ("sve2-bitperm", None),
// FEAT_FRINTTS
- ("frintts", Some(sym::aarch64_target_feature)),
+ ("frintts", None),
// FEAT_I8MM
- ("i8mm", Some(sym::aarch64_target_feature)),
+ ("i8mm", None),
// FEAT_F32MM
- ("f32mm", Some(sym::aarch64_target_feature)),
+ ("f32mm", None),
// FEAT_F64MM
- ("f64mm", Some(sym::aarch64_target_feature)),
+ ("f64mm", None),
// FEAT_BF16
- ("bf16", Some(sym::aarch64_target_feature)),
+ ("bf16", None),
// FEAT_RAND
- ("rand", Some(sym::aarch64_target_feature)),
+ ("rand", None),
// FEAT_BTI
- ("bti", Some(sym::aarch64_target_feature)),
+ ("bti", None),
// FEAT_MTE
- ("mte", Some(sym::aarch64_target_feature)),
+ ("mte", None),
// FEAT_JSCVT
- ("jsconv", Some(sym::aarch64_target_feature)),
+ ("jsconv", None),
// FEAT_FCMA
- ("fcma", Some(sym::aarch64_target_feature)),
+ ("fcma", None),
// FEAT_AES
- ("aes", Some(sym::aarch64_target_feature)),
+ ("aes", None),
// FEAT_SHA1 & FEAT_SHA256
- ("sha2", Some(sym::aarch64_target_feature)),
+ ("sha2", None),
// FEAT_SHA512 & FEAT_SHA3
- ("sha3", Some(sym::aarch64_target_feature)),
+ ("sha3", None),
// FEAT_SM3 & FEAT_SM4
- ("sm4", Some(sym::aarch64_target_feature)),
+ ("sm4", None),
// FEAT_PAN
- ("pan", Some(sym::aarch64_target_feature)),
+ ("pan", None),
// FEAT_LOR
- ("lor", Some(sym::aarch64_target_feature)),
+ ("lor", None),
// FEAT_VHE
- ("vh", Some(sym::aarch64_target_feature)),
+ ("vh", None),
// FEAT_PMUv3
- ("pmuv3", Some(sym::aarch64_target_feature)),
+ ("pmuv3", None),
// FEAT_SPE
- ("spe", Some(sym::aarch64_target_feature)),
- ("v8.1a", Some(sym::aarch64_target_feature)),
- ("v8.2a", Some(sym::aarch64_target_feature)),
- ("v8.3a", Some(sym::aarch64_target_feature)),
- ("v8.4a", Some(sym::aarch64_target_feature)),
- ("v8.5a", Some(sym::aarch64_target_feature)),
- ("v8.6a", Some(sym::aarch64_target_feature)),
- ("v8.7a", Some(sym::aarch64_target_feature)),
+ ("spe", None),
+ ("v8.1a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.2a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.3a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.4a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.5a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.6a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.7a", Some(sym::aarch64_ver_target_feature)),
];
-const AARCH64_TIED_FEATURES: &[&[&str]] = &[&["paca", "pacg"]];
+const AARCH64_TIED_FEATURES: &[&[&str]] = &[
+ &["paca", "pacg"], // Together these represent `pauth` in LLVM
+];
const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
- ("adx", Some(sym::adx_target_feature)),
+ ("adx", None),
("aes", None),
("avx", None),
("avx2", None),
("f", Some(sym::riscv_target_feature)),
("d", Some(sym::riscv_target_feature)),
("e", Some(sym::riscv_target_feature)),
+ ("v", Some(sym::riscv_target_feature)),
+ ("zfinx", Some(sym::riscv_target_feature)),
+ ("zdinx", Some(sym::riscv_target_feature)),
+ ("zhinx", Some(sym::riscv_target_feature)),
+ ("zhinxmin", Some(sym::riscv_target_feature)),
+ ("zfh", Some(sym::riscv_target_feature)),
+ ("zfhmin", Some(sym::riscv_target_feature)),
+ ("zbkb", Some(sym::riscv_target_feature)),
+ ("zbkc", Some(sym::riscv_target_feature)),
+ ("zbkx", Some(sym::riscv_target_feature)),
+ ("zknd", Some(sym::riscv_target_feature)),
+ ("zkne", Some(sym::riscv_target_feature)),
+ ("zknh", Some(sym::riscv_target_feature)),
+ ("zksed", Some(sym::riscv_target_feature)),
+ ("zksh", Some(sym::riscv_target_feature)),
+ ("zkr", Some(sym::riscv_target_feature)),
+ ("zkn", Some(sym::riscv_target_feature)),
+ ("zks", Some(sym::riscv_target_feature)),
+ ("zk", Some(sym::riscv_target_feature)),
+ ("zkt", Some(sym::riscv_target_feature)),
];
const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
use rustc_target::abi::Size;
pub trait DebugInfoMethods<'tcx>: BackendTypes {
- fn create_vtable_metadata(
+ fn create_vtable_debuginfo(
&self,
ty: Ty<'tcx>,
trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
-use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::DefKind;
use rustc_middle::mir;
use rustc_middle::ty::{self, Ty};
if ecx.tcx.is_ctfe_mir_available(def.did) {
Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
} else if ecx.tcx.def_kind(def.did) == DefKind::AssocConst {
- ecx.tcx.sess.delay_span_bug(
+ let guar = ecx.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
"This is likely a const item that is missing from its impl",
);
- throw_inval!(AlreadyReported(ErrorGuaranteed {}));
+ throw_inval!(AlreadyReported(guar));
} else {
let path = ecx.tcx.def_path_str(def.did);
Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
ty::Array(_, len) => branches(usize::try_from(len.eval_usize(ecx.tcx.tcx, ecx.param_env)).unwrap(), None),
ty::Adt(def, _) => {
- if def.variants.is_empty() {
+ if def.variants().is_empty() {
bug!("uninhabited types should have errored and never gotten converted to valtree")
}
let variant = ecx.read_discriminant(&place.into()).unwrap().1;
- branches(def.variants[variant].fields.len(), def.is_enum().then_some(variant))
+ branches(def.variant(variant).fields.len(), def.is_enum().then_some(variant))
}
ty::Never
// Checks if we have any variants, to avoid downcasting to a non-existing variant (when
// there are no variants `read_discriminant` successfully returns a non-existing variant
// index).
- ty::Adt(def, _) if def.variants.is_empty() => throw_ub!(Unreachable),
+ ty::Adt(def, _) if def.variants().is_empty() => throw_ub!(Unreachable),
ty::Adt(def, _) => {
let variant = ecx.read_discriminant(&op)?.1;
let down = ecx.operand_downcast(&op, variant)?;
- (def.variants[variant].fields.len(), Some(variant), down)
+ (def.variant(variant).fields.len(), Some(variant), down)
}
ty::Tuple(substs) => (substs.len(), None, op),
_ => bug!("cannot destructure constant {:?}", val),
pub enum LocalValue<Tag: Provenance = AllocId> {
/// This local is not currently alive, and cannot be used at all.
Dead,
- /// This local is alive but not yet initialized. It can be written to
- /// but not read from or its address taken. Locals get initialized on
- /// first write because for unsized locals, we do not know their size
- /// before that.
- Uninitialized,
+ /// This local is alive but not yet allocated. It cannot be read from or have its address taken,
+ /// and will be allocated on the first write. This is to support unsized locals, where we cannot
+ /// know their size in advance.
+ Unallocated,
/// A normal, live local.
/// Mostly for convenience, we re-use the `Operand` type here.
/// This is an optimization over just always having a pointer here;
pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
match self.value {
LocalValue::Dead => throw_ub!(DeadLocal),
- LocalValue::Uninitialized => {
+ LocalValue::Unallocated => {
bug!("The type checker should prevent reading from a never-written local")
}
LocalValue::Live(val) => Ok(val),
match self.value {
LocalValue::Dead => throw_ub!(DeadLocal),
LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
- ref mut
- local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Uninitialized) => {
+ ref mut local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Unallocated) => {
Ok(Ok(local))
}
}
match scalar.try_to_int() {
Ok(int) => int.is_null(),
Err(_) => {
+ // Can only happen during CTFE.
let ptr = self.scalar_to_ptr(scalar);
match self.memory.ptr_try_get_alloc(ptr) {
Ok((alloc_id, offset, _)) => {
// Note that one-past-the-end (offset == size) is still inbounds, and never null.
offset > size
}
- Err(offset) => offset == 0,
+ Err(_offset) => bug!("a non-int scalar is always a pointer"),
}
}
}
})?;
}
- // Locals are initially uninitialized.
- let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
+ // Locals are initially unallocated.
+ let dummy = LocalState { value: LocalValue::Unallocated, layout: Cell::new(None) };
let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
// Now mark those locals as dead that we do not want to initialize
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
trace!("{:?} is now live", local);
- let local_val = LocalValue::Uninitialized;
+ let local_val = LocalValue::Unallocated;
// StorageLive expects the local to be dead, and marks it live.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
if !matches!(old, LocalValue::Dead) {
match self.ecx.stack()[frame].locals[local].value {
LocalValue::Dead => write!(fmt, " is dead")?,
- LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
+ LocalValue::Unallocated => write!(fmt, " is unallocated")?,
LocalValue::Live(Operand::Indirect(mplace)) => {
write!(
fmt,
}
if let Some(def) = mplace.layout.ty.ty_adt_def() {
- if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() {
+ if Some(def.did()) == self.ecx.tcx.lang_items().unsafe_cell_type() {
// We are crossing over an `UnsafeCell`, we can mutate again. This means that
// References we encounter inside here are interned as pointing to mutable
// allocations.
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
// Codegen does not like dangling pointers, and generally `tcx` assumes that
// all allocations referenced anywhere actually exist. So, make sure we error here.
- ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
- return Err(ErrorGuaranteed);
+ let reported = ecx
+ .tcx
+ .sess
+ .span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+ return Err(reported);
} else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
// We have hit an `AllocId` that is neither in local or global memory and isn't
// marked as dangling by local memory. That should be impossible.
}
sym::variant_count => match tp_ty.kind() {
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
- ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
+ ty::Adt(ref adt, _) => {
+ ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
+ }
ty::Projection(_)
| ty::Opaque(_, _)
| ty::Param(_)
self.write_pointer(offset_ptr, dest)?;
}
sym::ptr_offset_from => {
- let a = self.read_immediate(&args[0])?.to_scalar()?;
- let b = self.read_immediate(&args[1])?.to_scalar()?;
+ let a = self.read_pointer(&args[0])?;
+ let b = self.read_pointer(&args[1])?;
// Special case: if both scalars are *equal integers*
// and not null, we pretend there is an allocation of size 0 right there,
// and their offset is 0. (There's never a valid object at null, making it an
// exception from the exception.)
// This is the dual to the special exception for offset-by-0
- // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
- //
- // Control flow is weird because we cannot early-return (to reach the
- // `go_to_block` at the end).
- let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
- let a = a.try_to_machine_usize(*self.tcx).unwrap();
- let b = b.try_to_machine_usize(*self.tcx).unwrap();
- if a == b && a != 0 {
+ // in the inbounds pointer offset operation (see `ptr_offset_inbounds` below).
+ match (self.memory.ptr_try_get_alloc(a), self.memory.ptr_try_get_alloc(b)) {
+ (Err(a), Err(b)) if a == b && a != 0 => {
+ // Both are the same non-null integer.
self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
- true
- } else {
- false
}
- } else {
- false
- };
-
- if !done {
- // General case: we need two pointers.
- let a = self.scalar_to_ptr(a);
- let b = self.scalar_to_ptr(b);
- let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
- let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
- if a_alloc_id != b_alloc_id {
- throw_ub_format!(
- "ptr_offset_from cannot compute offset of pointers into different \
- allocations.",
- );
+ (Err(offset), _) | (_, Err(offset)) => {
+ throw_ub!(DanglingIntPointer(offset, CheckInAllocMsg::OffsetFromTest));
+ }
+ (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
+ // Both are pointers. They must be into the same allocation.
+ if a_alloc_id != b_alloc_id {
+ throw_ub_format!(
+ "ptr_offset_from cannot compute offset of pointers into different \
+ allocations.",
+ );
+ }
+ // And they must both be valid for zero-sized accesses ("in-bounds or one past the end").
+ self.memory.check_ptr_access_align(
+ a,
+ Size::ZERO,
+ Align::ONE,
+ CheckInAllocMsg::OffsetFromTest,
+ )?;
+ self.memory.check_ptr_access_align(
+ b,
+ Size::ZERO,
+ Align::ONE,
+ CheckInAllocMsg::OffsetFromTest,
+ )?;
+
+ // Compute offset.
+ let usize_layout = self.layout_of(self.tcx.types.usize)?;
+ let isize_layout = self.layout_of(self.tcx.types.isize)?;
+ let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
+ let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
+ let (val, _overflowed, _ty) =
+ self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
+ let pointee_layout = self.layout_of(substs.type_at(0))?;
+ let val = ImmTy::from_scalar(val, isize_layout);
+ let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
+ self.exact_div(&val, &size, dest)?;
}
- let usize_layout = self.layout_of(self.tcx.types.usize)?;
- let isize_layout = self.layout_of(self.tcx.types.isize)?;
- let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
- let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
- let (val, _overflowed, _ty) =
- self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
- let pointee_layout = self.layout_of(substs.type_at(0))?;
- let val = ImmTy::from_scalar(val, isize_layout);
- let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
- self.exact_div(&val, &size, dest)?;
}
}
+use rustc_data_structures::intern::Interned;
use rustc_hir::def_id::CrateNum;
use rustc_hir::definitions::DisambiguatedDefPathData;
use rustc_middle::mir::interpret::{Allocation, ConstAllocation};
}
// Types with identity (print the module path).
- ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
| ty::FnDef(def_id, substs)
| ty::Opaque(def_id, substs)
| ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
CheckInAllocMsg::DerefTest | CheckInAllocMsg::MemoryAccessTest => {
AllocCheck::Dereferenceable
}
- CheckInAllocMsg::PointerArithmeticTest | CheckInAllocMsg::InboundsTest => {
- AllocCheck::Live
- }
+ CheckInAllocMsg::PointerArithmeticTest
+ | CheckInAllocMsg::OffsetFromTest
+ | CheckInAllocMsg::InboundsTest => AllocCheck::Live,
};
let (size, align) = self.get_size_and_align(alloc_id, check)?;
Ok((size, align, ()))
}
}
- // Extract from the pointer an `Option<AllocId>` and an offset, which is relative to the
- // allocation or (if that is `None`) an absolute address.
- let ptr_or_addr = if size.bytes() == 0 {
- // Let's see what we can do, but don't throw errors if there's nothing there.
- self.ptr_try_get_alloc(ptr)
- } else {
- // A "real" access, we insist on getting an `AllocId`.
- Ok(self.ptr_get_alloc(ptr)?)
- };
- Ok(match ptr_or_addr {
+ Ok(match self.ptr_try_get_alloc(ptr) {
Err(addr) => {
- // No memory is actually being accessed.
- debug_assert!(size.bytes() == 0);
- // Must be non-null.
- if addr == 0 {
- throw_ub!(DanglingIntPointer(0, msg))
+ // We couldn't get a proper allocation. This is only okay if the access size is 0,
+ // and the address is not null.
+ if size.bytes() > 0 || addr == 0 {
+ throw_ub!(DanglingIntPointer(addr, msg));
}
// Must be aligned.
if let Some(align) = align {
use std::convert::TryFrom;
use std::fmt::Write;
-use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::Namespace;
use rustc_macros::HashStable;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
-use rustc_middle::ty::{ConstInt, Ty};
+use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match val.val() {
ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
- ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorGuaranteed)),
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
+ throw_inval!(AlreadyReported(reported))
+ }
ty::ConstKind::Unevaluated(uv) => {
let instance = self.resolve(uv.def, uv.substs)?;
Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
.ty
.ty_adt_def()
.expect("tagged layout for non adt")
- .variants
+ .variants()
.len();
assert!(usize::try_from(variant_index).unwrap() < variants_len);
VariantIdx::from_u32(variant_index)
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let mut args = args.to_vec();
- // We have to implement all "object safe receivers". Currently we
- // support built-in pointers `(&, &mut, Box)` as well as unsized-self. We do
- // not yet support custom self types.
- // Also see `compiler/rustc_codegen_llvm/src/abi.rs` and `compiler/rustc_codegen_ssa/src/mir/block.rs`.
- let receiver_place = match args[0].layout.ty.builtin_deref(true) {
- Some(_) => {
- // Built-in pointer.
- self.deref_operand(&args[0])?
- }
- None => {
- // Unsized self.
- args[0].assert_mem_place()
+ // We have to implement all "object safe receivers". So we have to go search for a
+ // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
+ // unwrap those newtypes until we are there.
+ let mut receiver = args[0];
+ let receiver_place = loop {
+ match receiver.layout.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
+ ty::Dynamic(..) => break receiver.assert_mem_place(),
+ _ => {
+ // Not there yet, search for the only non-ZST field.
+ let mut non_zst_field = None;
+ for i in 0..receiver.layout.fields.count() {
+ let field = self.operand_field(&receiver, i)?;
+ if !field.layout.is_zst() {
+ assert!(
+ non_zst_field.is_none(),
+ "multiple non-ZST fields in dyn receiver type {}",
+ receiver.layout.ty
+ );
+ non_zst_field = Some(field);
+ }
+ }
+ receiver = non_zst_field.unwrap_or_else(|| {
+ panic!(
+ "no non-ZST fields in dyn receiver type {}",
+ receiver.layout.ty
+ )
+ });
+ }
}
};
- // Find and consult vtable
- let vtable = self.scalar_to_ptr(receiver_place.vtable());
+ // Find and consult vtable. The type now could be something like RcBox<dyn Trait>,
+ // i.e., it is still not necessarily `ty::Dynamic` (so we cannot use
+ // `place.vtable()`), but it should have a `dyn Trait` tail.
+ assert!(matches!(
+ self.tcx
+ .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env)
+ .kind(),
+ ty::Dynamic(..)
+ ));
+ let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta());
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
// `*mut receiver_place.layout.ty` is almost the layout that we
Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
this_receiver_ptr,
));
- trace!("Patched self operand to {:#?}", args[0]);
+ trace!("Patched receiver operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(
fn_val,
use super::{
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
- MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
+ MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
};
macro_rules! throw_validation_failure {
match layout.variants {
Variants::Single { index } => {
// Inside a variant
- PathElem::Field(def.variants[index].fields[field].name)
+ PathElem::Field(def.variant(index).fields[field].name)
}
Variants::Multiple { .. } => bug!("we handled variants above"),
}
if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
- // Skip validation entirely for some external statics
if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
- // not a ZST
+ // Special handling for pointers to statics (irrespective of their type).
let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
assert!(!self.ecx.tcx.is_thread_local_static(did));
// We need to clone the path anyway, make sure it gets created
// with enough space for the additional `Deref`.
let mut new_path = Vec::with_capacity(path.len() + 1);
- new_path.clone_from(path);
+ new_path.extend(path);
new_path.push(PathElem::Deref);
new_path
});
// NOTE: Keep this in sync with the array optimization for int/float
// types below!
if M::enforce_number_validity(self.ecx) {
- // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
- let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
+ // Integers/floats with number validity: Must be scalar bits, pointers are dangerous.
+ // As a special exception we *do* match on a `Scalar` here, since we truly want
+ // to know its underlying representation (and *not* cast it to an integer).
+ let is_bits =
+ value.check_init().map_or(false, |v| matches!(v, Scalar::Int(..)));
if !is_bits {
throw_validation_failure!(self.path,
{ "{:x}", value } expected { "initialized plain (non-pointer) bytes" }
new_op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx> {
let name = match old_op.layout.ty.kind() {
- ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].name),
+ ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
// Generators also have variants
ty::Generator(..) => PathElem::GeneratorState(variant_id),
_ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
// Special check preventing `UnsafeCell` in the inner part of constants
if let Some(def) = op.layout.ty.ty_adt_def() {
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
- && Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type()
+ && Some(def.did()) == self.ecx.tcx.lang_items().unsafe_cell_type()
{
throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
}
// "secondary" errors if they occurred.
let secondary_errors = mem::take(&mut self.secondary_errors);
if self.error_emitted.is_none() {
- for error in secondary_errors {
- self.tcx.sess.diagnostic().emit_diagnostic(&error);
+ for mut error in secondary_errors {
+ self.tcx.sess.diagnostic().emit_diagnostic(&mut error);
}
} else {
- assert!(self.tcx.sess.has_errors());
+ assert!(self.tcx.sess.has_errors().is_some());
}
}
match op.importance() {
ops::DiagnosticImportance::Primary => {
- self.error_emitted = Some(ErrorGuaranteed);
- err.emit();
+ let reported = err.emit();
+ self.error_emitted = Some(reported);
}
ops::DiagnosticImportance::Secondary => err.buffer(&mut self.secondary_errors),
//! See the `Qualif` trait for more info.
use rustc_errors::ErrorGuaranteed;
+use rustc_hir::LangItem;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::TraitEngine;
use rustc_middle::mir::*;
/// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: &'tcx AdtDef,
+ adt: AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
) -> bool;
}
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: &'tcx AdtDef,
+ adt: AdtDef<'tcx>,
_: SubstsRef<'tcx>,
) -> bool {
// Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
// It arises structurally for all other types.
- Some(adt.did) == cx.tcx.lang_items().unsafe_cell_type()
+ Some(adt.did()) == cx.tcx.lang_items().unsafe_cell_type()
}
}
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: &'tcx AdtDef,
+ adt: AdtDef<'tcx>,
_: SubstsRef<'tcx>,
) -> bool {
adt.has_dtor(cx.tcx)
return false;
}
- let Some(drop_trait) = cx.tcx.lang_items().drop_trait() else {
- // there is no way to define a type that needs non-const drop
- // without having the lang item present.
- return false;
- };
+ let destruct = cx.tcx.require_lang_item(LangItem::Destruct, None);
let obligation = Obligation::new(
ObligationCause::dummy(),
cx.param_env,
ty::Binder::dummy(ty::TraitPredicate {
trait_ref: ty::TraitRef {
- def_id: drop_trait,
+ def_id: destruct,
substs: cx.tcx.mk_substs_trait(ty, &[]),
},
constness: ty::BoundConstness::ConstIfConst,
cx.tcx.infer_ctxt().enter(|infcx| {
let mut selcx = SelectionContext::new(&infcx);
let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
- // If we couldn't select a const drop candidate, then it's bad
+ // If we couldn't select a const destruct candidate, then it's bad
return true;
};
if !matches!(
impl_src,
- ImplSource::ConstDrop(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ImplSource::ConstDestruct(_)
+ | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
) {
- // If our const drop candidate is not ConstDrop or implied by the param env,
+ // If our const destruct candidate is not ConstDestruct or implied by the param env,
// then it's bad
return true;
}
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: &'tcx AdtDef,
+ adt: AdtDef<'tcx>,
_: SubstsRef<'tcx>,
) -> bool {
adt.has_non_const_dtor(cx.tcx)
fn in_adt_inherently<'tcx>(
cx: &ConstCx<'_, 'tcx>,
- adt: &'tcx AdtDef,
+ adt: AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
) -> bool {
let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::ConstPromotion)
+ Some(MirPhase::ConstsPromoted)
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
);
}
}
- // The deaggregator currently does not deaggreagate arrays.
- // So for now, we ignore them here.
- Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
- // All other aggregates must be gone after some phases.
- Rvalue::Aggregate(box kind, _) => {
- if self.mir_phase > MirPhase::DropLowering
- && !matches!(kind, AggregateKind::Generator(..))
- {
- // Generators persist until the state machine transformation, but all
- // other aggregates must have been lowered.
- self.fail(
- location,
- format!("{:?} have been lowered to field assignments", rvalue),
- )
- } else if self.mir_phase > MirPhase::GeneratorLowering {
- // No more aggregates after drop and generator lowering.
+ Rvalue::Aggregate(agg_kind, _) => {
+ let disallowed = match **agg_kind {
+ AggregateKind::Array(..) => false,
+ AggregateKind::Generator(..) => {
+ self.mir_phase >= MirPhase::GeneratorsLowered
+ }
+ _ => self.mir_phase >= MirPhase::Deaggregated,
+ };
+ if disallowed {
self.fail(
location,
format!("{:?} have been lowered to field assignments", rvalue),
}
}
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
- if self.mir_phase > MirPhase::DropLowering {
+ if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
"`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
}
}
StatementKind::AscribeUserType(..) => {
- if self.mir_phase > MirPhase::DropLowering {
+ if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
"`AscribeUserType` should have been removed after drop lowering phase",
}
}
StatementKind::FakeRead(..) => {
- if self.mir_phase > MirPhase::DropLowering {
+ if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
"`FakeRead` should have been removed after drop lowering phase",
self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
}
}
- StatementKind::SetDiscriminant { .. }
- | StatementKind::StorageLive(..)
+ StatementKind::SetDiscriminant { .. } => {
+ if self.mir_phase < MirPhase::DropsLowered {
+ self.fail(location, "`SetDiscriminant` is not allowed until drop elaboration");
+ }
+ }
+ StatementKind::Retag(_, _) => {
+ // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+ // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+ // seem to fail to set their `MirPhase` correctly.
+ }
+ StatementKind::StorageLive(..)
| StatementKind::StorageDead(..)
- | StatementKind::Retag(_, _)
| StatementKind::Coverage(_)
| StatementKind::Nop => {}
}
}
}
TerminatorKind::DropAndReplace { target, unwind, .. } => {
- if self.mir_phase > MirPhase::DropLowering {
+ if self.mir_phase >= MirPhase::DropsLowered {
self.fail(
location,
- "`DropAndReplace` is not permitted to exist after drop elaboration",
+ "`DropAndReplace` should have been removed during drop elaboration",
);
}
self.check_edge(location, *target, EdgeKind::Normal);
}
}
TerminatorKind::Yield { resume, drop, .. } => {
- if self.mir_phase > MirPhase::GeneratorLowering {
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
self.fail(location, "`Yield` should have been replaced by generator lowering");
}
self.check_edge(location, *resume, EdgeKind::Normal);
}
}
TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseEdge` should have been removed after drop elaboration",
+ );
+ }
self.check_edge(location, *real_target, EdgeKind::Normal);
self.check_edge(location, *imaginary_target, EdgeKind::Normal);
}
TerminatorKind::FalseUnwind { real_target, unwind } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseUnwind` should have been removed after drop elaboration",
+ );
+ }
self.check_edge(location, *real_target, EdgeKind::Normal);
if let Some(unwind) = unwind {
self.check_edge(location, *unwind, EdgeKind::Unwind);
self.check_edge(location, *cleanup, EdgeKind::Unwind);
}
}
+ TerminatorKind::GeneratorDrop => {
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
+ self.fail(
+ location,
+ "`GeneratorDrop` should have been replaced by generator lowering",
+ );
+ }
+ }
// Nothing to validate for these.
TerminatorKind::Resume
| TerminatorKind::Abort
| TerminatorKind::Return
- | TerminatorKind::Unreachable
- | TerminatorKind::GeneratorDrop => {}
+ | TerminatorKind::Unreachable => {}
}
self.super_terminator(terminator, location);
ProjectionElem::Field(..) => {
let ty = place_base.ty(local_decls, tcx).ty;
match ty.kind() {
- ty::Adt(def, _) => return def.repr.pack,
+ ty::Adt(def, _) => return def.repr().pack,
_ => {}
}
}
.parent(method_did)
.filter(|did| tcx.def_kind(*did) == rustc_hir::def::DefKind::Impl)
.and_then(|did| match tcx.type_of(did).kind() {
- ty::Adt(def, ..) => Some(def.did),
+ ty::Adt(def, ..) => Some(def.did()),
_ => None,
});
let is_option_or_result = parent_self_ty.map_or(false, |def_id| {
[dependencies]
arrayvec = { version = "0.7", default-features = false }
ena = "0.14"
-indexmap = { version = "1.8.0", features = ["rustc-rayon"] }
+indexmap = { version = "1.8.0" }
tracing = "0.1"
jobserver_crate = { version = "0.1.13", package = "jobserver" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_graphviz = { path = "../rustc_graphviz" }
cfg-if = "0.1.2"
stable_deref_trait = "1.0.0"
-rayon = { version = "0.3.2", package = "rustc-rayon" }
-rayon-core = { version = "0.3.2", package = "rustc-rayon-core" }
+rayon = { version = "0.3.2", package = "rustc-rayon", optional = true }
+rayon-core = { version = "0.3.2", package = "rustc-rayon-core", optional = true }
rustc-hash = "1.1.0"
smallvec = { version = "1.6.1", features = ["const_generics", "union", "may_dangle"] }
rustc_index = { path = "../rustc_index", package = "rustc_index" }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
memmap2 = "0.2.1"
+
+[features]
+rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rayon", "rayon-core"]
[features]
llvm = ['rustc_interface/llvm']
max_level_info = ['rustc_log/max_level_info']
+rustc_use_parallel_compiler = ['rustc_data_structures/rustc_use_parallel_compiler', 'rustc_interface/rustc_use_parallel_compiler',
+ 'rustc_middle/rustc_use_parallel_compiler']
};
match make_input(config.opts.error_format, &matches.free) {
- Err(ErrorGuaranteed) => return Err(ErrorGuaranteed),
+ Err(reported) => return Err(reported),
Ok(Some((input, input_file_path))) => {
config.input = input;
config.input_path = input_file_path;
if io::stdin().read_to_string(&mut src).is_err() {
// Immediately stop compilation if there was an issue reading
// the input (for example if the input stream is not UTF-8).
- early_error_no_abort(
+ let reported = early_error_no_abort(
error_format,
"couldn't read from stdin, as it did not contain valid UTF-8",
);
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
if let Ok(path) = env::var("UNSTABLE_RUSTDOC_TEST_PATH") {
let line = env::var("UNSTABLE_RUSTDOC_TEST_LINE").expect(
pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorGuaranteed> {
catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
if value.is::<rustc_errors::FatalErrorMarker>() {
- ErrorGuaranteed
+ ErrorGuaranteed::unchecked_claim_error_was_emitted()
} else {
panic::resume_unwind(value);
}
// a .span_bug or .bug call has already printed what
// it wants to print.
if !info.payload().is::<rustc_errors::ExplicitBug>() {
- let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
- handler.emit_diagnostic(&d);
+ let mut d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
+ handler.emit_diagnostic(&mut d);
}
let mut xs: Vec<Cow<'static, str>> = vec![
E0664: include_str!("./error_codes/E0664.md"),
E0665: include_str!("./error_codes/E0665.md"),
E0666: include_str!("./error_codes/E0666.md"),
+E0667: include_str!("./error_codes/E0667.md"),
E0668: include_str!("./error_codes/E0668.md"),
E0669: include_str!("./error_codes/E0669.md"),
E0670: include_str!("./error_codes/E0670.md"),
// attribute
E0640, // infer outlives requirements
// E0645, // trait aliases not finished
- E0667, // `impl Trait` in projections
// E0694, // an unknown tool name found in scoped attributes
// E0702, // replaced with a generic attribute input check
// E0707, // multiple elided lifetimes used in arguments of `async fn`
--- /dev/null
+`impl Trait` is not allowed in path parameters.
+
+Erroneous code example:
+
+```compile_fail,E0667
+fn some_fn(mut x: impl Iterator) -> <impl Iterator>::Item { // error!
+ x.next().unwrap()
+}
+```
+
+You cannot use `impl Trait` in path parameters. If you want something
+equivalent, you can do this instead:
+
+```
+fn some_fn<T: Iterator>(mut x: T) -> T::Item { // ok!
+ x.next().unwrap()
+}
+```
AnnotationType::Error
}
Level::Warning => AnnotationType::Warning,
- Level::Note => AnnotationType::Note,
+ Level::Note | Level::OnceNote => AnnotationType::Note,
Level::Help => AnnotationType::Help,
// FIXME(#59346): Not sure how to map this level
Level::FailureNote => AnnotationType::Error,
use crate::SubstitutionPart;
use crate::SuggestionStyle;
use crate::ToolMetadata;
-use rustc_lint_defs::Applicability;
+use rustc_data_structures::stable_map::FxHashMap;
+use rustc_lint_defs::{Applicability, LintExpectationId};
use rustc_serialize::json::Json;
use rustc_span::edition::LATEST_STABLE_EDITION;
use rustc_span::{MultiSpan, Span, DUMMY_SP};
| Level::Error { .. }
| Level::FailureNote => true,
- Level::Warning | Level::Note | Level::Help | Level::Allow | Level::Expect(_) => false,
+ Level::Warning
+ | Level::Note
+ | Level::OnceNote
+ | Level::Help
+ | Level::Allow
+ | Level::Expect(_) => false,
+ }
+ }
+
+ pub fn update_unstable_expectation_id(
+ &mut self,
+ unstable_to_stable: &FxHashMap<LintExpectationId, LintExpectationId>,
+ ) {
+ if let Level::Expect(expectation_id) = &mut self.level {
+ if expectation_id.is_stable() {
+ return;
+ }
+
+ // The unstable to stable map only maps the unstable `AttrId` to a stable `HirId` with an attribute index.
+ // The lint index inside the attribute is manually transferred here.
+ let lint_index = expectation_id.get_lint_index();
+ expectation_id.set_lint_index(None);
+ let mut stable_id = *unstable_to_stable
+ .get(&expectation_id)
+ .expect("each unstable `LintExpectationId` must have a matching stable id");
+
+ stable_id.set_lint_index(lint_index);
+ *expectation_id = stable_id;
}
}
self
}
+ /// Adds a note attached to this diagnostic.
+ /// This is like [`Diagnostic::note()`], but it is only printed once.
+ pub fn note_once(&mut self, msg: &str) -> &mut Self {
+ self.sub(Level::OnceNote, msg, MultiSpan::new(), None);
+ self
+ }
+
/// Prints the span with a note above it.
/// This is like [`Diagnostic::note()`], but it gets its own span.
pub fn span_note<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
self
}
+ /// Prints the span with a note above it.
+ /// This is like [`Diagnostic::span_note()`], but it is only printed once.
+ pub fn span_note_once<S: Into<MultiSpan>>(&mut self, sp: S, msg: &str) -> &mut Self {
+ self.sub(Level::OnceNote, msg, sp.into(), None);
+ self
+ }
+
/// Add a warning attached to this diagnostic.
pub fn warn(&mut self, msg: &str) -> &mut Self {
self.sub(Level::Warning, msg, MultiSpan::new(), None);
DiagnosticBuilderState::Emittable(handler) => {
db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
- handler.emit_diagnostic(&db.inner.diagnostic);
+ let guar = handler.emit_diagnostic(&mut db.inner.diagnostic);
// Only allow a guarantee if the `level` wasn't switched to a
// non-error - the field isn't `pub`, but the whole `Diagnostic`
from `DiagnosticBuilder<ErrorGuaranteed>`",
db.inner.diagnostic.level,
);
- ErrorGuaranteed
+ guar.unwrap()
}
// `.emit()` was previously called, disallowed from repeating it,
// but can take advantage of the previous `.emit()`'s guarantee
became non-error ({:?}), after original `.emit()`",
db.inner.diagnostic.level,
);
- ErrorGuaranteed
+ ErrorGuaranteed::unchecked_claim_error_was_emitted()
}
}
}
DiagnosticBuilderState::Emittable(handler) => {
db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
- handler.emit_diagnostic(&db.inner.diagnostic);
+ handler.emit_diagnostic(&mut db.inner.diagnostic);
}
// `.emit()` was previously called, disallowed from repeating it.
DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
}
}
+impl<'a> DiagnosticBuilder<'a, !> {
+ /// Convenience function for internal use, clients should use one of the
+ /// `struct_*` methods on [`Handler`].
+ crate fn new_fatal(handler: &'a Handler, message: &str) -> Self {
+ let diagnostic = Diagnostic::new_with_code(Level::Fatal, None, message);
+ Self::new_diagnostic_fatal(handler, diagnostic)
+ }
+
+ /// Creates a new `DiagnosticBuilder` with an already constructed
+ /// diagnostic.
+ crate fn new_diagnostic_fatal(handler: &'a Handler, diagnostic: Diagnostic) -> Self {
+ debug!("Created new diagnostic");
+ Self {
+ inner: DiagnosticBuilderInner {
+ state: DiagnosticBuilderState::Emittable(handler),
+ diagnostic: Box::new(diagnostic),
+ },
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl EmissionGuarantee for ! {
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self {
+ match db.inner.state {
+ // First `.emit()` call, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => {
+ db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+
+ handler.emit_diagnostic(&mut db.inner.diagnostic);
+ }
+ // `.emit()` was previously called, disallowed from repeating it.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
+ }
+ // Then fatally error, returning `!`
+ crate::FatalError.raise()
+ }
+}
+
/// In general, the `DiagnosticBuilder` uses deref to allow access to
/// the fields and methods of the embedded `diagnostic` in a
/// transparent way. *However,* many of the methods are intended to
) -> &mut Self);
forward!(pub fn note(&mut self, msg: &str) -> &mut Self);
+ forward!(pub fn note_once(&mut self, msg: &str) -> &mut Self);
forward!(pub fn span_note(
&mut self,
sp: impl Into<MultiSpan>,
msg: &str,
) -> &mut Self);
+ forward!(pub fn span_note_once(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: &str,
+ ) -> &mut Self);
forward!(pub fn warn(&mut self, msg: &str) -> &mut Self);
forward!(pub fn span_warn(&mut self, sp: impl Into<MultiSpan>, msg: &str) -> &mut Self);
forward!(pub fn help(&mut self, msg: &str) -> &mut Self);
// No `.emit()` or `.cancel()` calls.
DiagnosticBuilderState::Emittable(handler) => {
if !panicking() {
- handler.emit_diagnostic(&Diagnostic::new(
+ handler.emit_diagnostic(&mut Diagnostic::new(
Level::Bug,
"the following error was constructed but not emitted",
));
- handler.emit_diagnostic(&self.diagnostic);
+ handler.emit_diagnostic(&mut self.diagnostic);
panic!();
}
}
if let Some(ref note) = self.fatal_note {
d.note(note);
}
- self.fatal_handler.emit_diagnostic(&d);
+ self.fatal_handler.emit_diagnostic(&mut d);
}
}
}
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(crate_visibility_modifier)]
+#![feature(drain_filter)]
#![feature(backtrace)]
#![feature(if_let_guard)]
#![feature(let_else)]
+#![feature(never_type)]
#![feature(nll)]
#![feature(adt_const_params)]
#![allow(incomplete_features)]
impl error::Error for ExplicitBug {}
pub use diagnostic::{Diagnostic, DiagnosticId, DiagnosticStyledString, SubDiagnostic};
-pub use diagnostic_builder::DiagnosticBuilder;
+pub use diagnostic_builder::{DiagnosticBuilder, EmissionGuarantee};
use std::backtrace::Backtrace;
/// A handler deals with errors and other compiler output.
"no warnings or errors encountered even though `delayed_good_path_bugs` issued",
);
}
+
+ assert!(
+ self.unstable_expect_diagnostics.is_empty(),
+ "all diagnostics with unstable expectations should have been converted",
+ );
}
}
}
/// Emit all stashed diagnostics.
- pub fn emit_stashed_diagnostics(&self) {
- self.inner.borrow_mut().emit_stashed_diagnostics();
+ pub fn emit_stashed_diagnostics(&self) -> Option<ErrorGuaranteed> {
+ self.inner.borrow_mut().emit_stashed_diagnostics()
}
/// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
&self,
span: impl Into<MultiSpan>,
msg: &str,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'_, !> {
let mut result = self.struct_fatal(msg);
result.set_span(span);
result
span: impl Into<MultiSpan>,
msg: &str,
code: DiagnosticId,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'_, !> {
let mut result = self.struct_span_fatal(span, msg);
result.code(code);
result
}
/// Construct a builder at the `Error` level with the `msg`.
- pub fn struct_fatal(&self, msg: &str) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
- DiagnosticBuilder::new_guaranteeing_error::<{ Level::Fatal }>(self, msg)
+ pub fn struct_fatal(&self, msg: &str) -> DiagnosticBuilder<'_, !> {
+ DiagnosticBuilder::new_fatal(self, msg)
}
/// Construct a builder at the `Help` level with the `msg`.
FatalError.raise()
}
- pub fn span_err(&self, span: impl Into<MultiSpan>, msg: &str) {
- self.emit_diag_at_span(Diagnostic::new(Error { lint: false }, msg), span);
+ pub fn span_err(&self, span: impl Into<MultiSpan>, msg: &str) -> ErrorGuaranteed {
+ self.emit_diag_at_span(Diagnostic::new(Error { lint: false }, msg), span).unwrap()
}
pub fn span_err_with_code(&self, span: impl Into<MultiSpan>, msg: &str, code: DiagnosticId) {
}
#[track_caller]
- pub fn delay_span_bug(&self, span: impl Into<MultiSpan>, msg: &str) {
+ pub fn delay_span_bug(&self, span: impl Into<MultiSpan>, msg: &str) -> ErrorGuaranteed {
self.inner.borrow_mut().delay_span_bug(span, msg)
}
self.inner.borrow_mut().fatal(msg)
}
- pub fn err(&self, msg: &str) {
- self.inner.borrow_mut().err(msg);
+ pub fn err(&self, msg: &str) -> ErrorGuaranteed {
+ self.inner.borrow_mut().err(msg)
}
pub fn warn(&self, msg: &str) {
self.inner.borrow().err_count()
}
- pub fn has_errors(&self) -> bool {
- self.inner.borrow().has_errors()
+ pub fn has_errors(&self) -> Option<ErrorGuaranteed> {
+ if self.inner.borrow().has_errors() { Some(ErrorGuaranteed(())) } else { None }
}
- pub fn has_errors_or_lint_errors(&self) -> bool {
- self.inner.borrow().has_errors_or_lint_errors()
+ pub fn has_errors_or_lint_errors(&self) -> Option<ErrorGuaranteed> {
+ if self.inner.borrow().has_errors_or_lint_errors() {
+ Some(ErrorGuaranteed(()))
+ } else {
+ None
+ }
}
pub fn has_errors_or_delayed_span_bugs(&self) -> bool {
self.inner.borrow().has_errors_or_delayed_span_bugs()
self.inner.borrow_mut().force_print_diagnostic(db)
}
- pub fn emit_diagnostic(&self, diagnostic: &Diagnostic) {
+ pub fn emit_diagnostic(&self, diagnostic: &mut Diagnostic) -> Option<ErrorGuaranteed> {
self.inner.borrow_mut().emit_diagnostic(diagnostic)
}
- fn emit_diag_at_span(&self, mut diag: Diagnostic, sp: impl Into<MultiSpan>) {
+ fn emit_diag_at_span(
+ &self,
+ mut diag: Diagnostic,
+ sp: impl Into<MultiSpan>,
+ ) -> Option<ErrorGuaranteed> {
let mut inner = self.inner.borrow_mut();
- inner.emit_diagnostic(diag.set_span(sp));
+ inner.emit_diagnostic(diag.set_span(sp))
}
pub fn emit_artifact_notification(&self, path: &Path, artifact_type: &str) {
let mut inner = self.inner.borrow_mut();
for mut diag in diags.into_iter() {
- let mut unstable_id = diag
+ diag.update_unstable_expectation_id(unstable_to_stable);
+
+ let stable_id = diag
.level
.get_expectation_id()
.expect("all diagnostics inside `unstable_expect_diagnostics` must have a `LintExpectationId`");
-
- // The unstable to stable map only maps the unstable `AttrId` to a stable `HirId` with an attribute index.
- // The lint index inside the attribute is manually transferred here.
- let lint_index = unstable_id.get_lint_index();
- unstable_id.set_lint_index(None);
- let mut stable_id = *unstable_to_stable
- .get(&unstable_id)
- .expect("each unstable `LintExpectationId` must have a matching stable id");
-
- stable_id.set_lint_index(lint_index);
- diag.level = Level::Expect(stable_id);
inner.fulfilled_expectations.insert(stable_id);
(*TRACK_DIAGNOSTICS)(&diag);
}
+
+ inner
+ .stashed_diagnostics
+ .values_mut()
+ .for_each(|diag| diag.update_unstable_expectation_id(unstable_to_stable));
+ inner
+ .future_breakage_diagnostics
+ .iter_mut()
+ .for_each(|diag| diag.update_unstable_expectation_id(unstable_to_stable));
}
/// This methods steals all [`LintExpectationId`]s that are stored inside
/// [`HandlerInner`] and indicate that the linked expectation has been fulfilled.
+ #[must_use]
pub fn steal_fulfilled_expectation_ids(&self) -> FxHashSet<LintExpectationId> {
assert!(
self.inner.borrow().unstable_expect_diagnostics.is_empty(),
self.taught_diagnostics.insert(code.clone())
}
- fn force_print_diagnostic(&mut self, db: Diagnostic) {
- self.emitter.emit_diagnostic(&db);
+ fn force_print_diagnostic(&mut self, mut db: Diagnostic) {
+ self.emitter.emit_diagnostic(&mut db);
}
/// Emit all stashed diagnostics.
- fn emit_stashed_diagnostics(&mut self) {
+ fn emit_stashed_diagnostics(&mut self) -> Option<ErrorGuaranteed> {
let diags = self.stashed_diagnostics.drain(..).map(|x| x.1).collect::<Vec<_>>();
- diags.iter().for_each(|diag| self.emit_diagnostic(diag));
+ let mut reported = None;
+ for mut diag in diags {
+ if diag.is_error() {
+ reported = Some(ErrorGuaranteed(()));
+ }
+ self.emit_diagnostic(&mut diag);
+ }
+ reported
}
// FIXME(eddyb) this should ideally take `diagnostic` by value.
- fn emit_diagnostic(&mut self, diagnostic: &Diagnostic) {
+ fn emit_diagnostic(&mut self, diagnostic: &mut Diagnostic) -> Option<ErrorGuaranteed> {
if diagnostic.level == Level::DelayedBug {
// FIXME(eddyb) this should check for `has_errors` and stop pushing
// once *any* errors were emitted (and truncate `delayed_span_bugs`
self.delayed_span_bugs.push(diagnostic.clone());
if !self.flags.report_delayed_bugs {
- return;
+ return Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
}
}
if diagnostic.has_future_breakage() {
(*TRACK_DIAGNOSTICS)(diagnostic);
}
- return;
+ return None;
}
// The `LintExpectationId` can be stable or unstable depending on when it was created.
// a stable one by the `LintLevelsBuilder`.
if let Level::Expect(LintExpectationId::Unstable { .. }) = diagnostic.level {
self.unstable_expect_diagnostics.push(diagnostic.clone());
- return;
+ return None;
}
(*TRACK_DIAGNOSTICS)(diagnostic);
if let Level::Expect(expectation_id) = diagnostic.level {
self.fulfilled_expectations.insert(expectation_id);
- return;
+ return None;
} else if diagnostic.level == Allow {
- return;
+ return None;
}
if let Some(ref code) = diagnostic.code {
// Only emit the diagnostic if we've been asked to deduplicate and
// haven't already emitted an equivalent diagnostic.
if !(self.flags.deduplicate_diagnostics && already_emitted(self)) {
- self.emitter.emit_diagnostic(diagnostic);
+ debug!(?diagnostic);
+ debug!(?self.emitted_diagnostics);
+ let already_emitted_sub = |sub: &mut SubDiagnostic| {
+ debug!(?sub);
+ if sub.level != Level::OnceNote {
+ return false;
+ }
+ let mut hasher = StableHasher::new();
+ sub.hash(&mut hasher);
+ let diagnostic_hash = hasher.finish();
+ debug!(?diagnostic_hash);
+ !self.emitted_diagnostics.insert(diagnostic_hash)
+ };
+
+ diagnostic.children.drain_filter(already_emitted_sub).for_each(|_| {});
+
+ self.emitter.emit_diagnostic(&diagnostic);
if diagnostic.is_error() {
self.deduplicated_err_count += 1;
} else if diagnostic.level == Warning {
} else {
self.bump_err_count();
}
+
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted())
} else {
self.bump_warn_count();
+
+ None
}
}
}
#[track_caller]
- fn delay_span_bug(&mut self, sp: impl Into<MultiSpan>, msg: &str) {
+ fn delay_span_bug(&mut self, sp: impl Into<MultiSpan>, msg: &str) -> ErrorGuaranteed {
// This is technically `self.treat_err_as_bug()` but `delay_span_bug` is called before
// incrementing `err_count` by one, so we need to +1 the comparing.
// FIXME: Would be nice to increment err_count in a more coherent way.
let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg);
diagnostic.set_span(sp.into());
diagnostic.note(&format!("delayed at {}", std::panic::Location::caller()));
- self.emit_diagnostic(&diagnostic)
+ self.emit_diagnostic(&mut diagnostic).unwrap()
}
// FIXME(eddyb) note the comment inside `impl Drop for HandlerInner`, that's
// where the explanation of what "good path" is (also, it should be renamed).
fn delay_good_path_bug(&mut self, msg: &str) {
- let diagnostic = Diagnostic::new(Level::DelayedBug, msg);
+ let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg);
if self.flags.report_delayed_bugs {
- self.emit_diagnostic(&diagnostic);
+ self.emit_diagnostic(&mut diagnostic);
}
let backtrace = std::backtrace::Backtrace::force_capture();
self.delayed_good_path_bugs.push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace));
}
fn failure(&mut self, msg: &str) {
- self.emit_diagnostic(&Diagnostic::new(FailureNote, msg));
+ self.emit_diagnostic(&mut Diagnostic::new(FailureNote, msg));
}
fn fatal(&mut self, msg: &str) -> FatalError {
- self.emit_error(Fatal, msg);
+ self.emit(Fatal, msg);
FatalError
}
- fn err(&mut self, msg: &str) {
- self.emit_error(Error { lint: false }, msg);
+ fn err(&mut self, msg: &str) -> ErrorGuaranteed {
+ self.emit(Error { lint: false }, msg)
}
/// Emit an error; level should be `Error` or `Fatal`.
- fn emit_error(&mut self, level: Level, msg: &str) {
+ fn emit(&mut self, level: Level, msg: &str) -> ErrorGuaranteed {
if self.treat_err_as_bug() {
self.bug(msg);
}
- self.emit_diagnostic(&Diagnostic::new(level, msg));
+ self.emit_diagnostic(&mut Diagnostic::new(level, msg)).unwrap()
}
fn bug(&mut self, msg: &str) -> ! {
- self.emit_diagnostic(&Diagnostic::new(Bug, msg));
+ self.emit_diagnostic(&mut Diagnostic::new(Bug, msg));
panic::panic_any(ExplicitBug);
}
if no_bugs {
// Put the overall explanation before the `DelayedBug`s, to
// frame them better (e.g. separate warnings from them).
- self.emit_diagnostic(&Diagnostic::new(Bug, explanation));
+ self.emit_diagnostic(&mut Diagnostic::new(Bug, explanation));
no_bugs = false;
}
}
bug.level = Level::Bug;
- self.emit_diagnostic(&bug);
+ self.emit_diagnostic(&mut bug);
}
// Panic with `ExplicitBug` to avoid "unexpected panic" messages.
},
Warning,
Note,
+ /// A note that is only emitted once.
+ OnceNote,
Help,
FailureNote,
Allow,
Warning => {
spec.set_fg(Some(Color::Yellow)).set_intense(cfg!(windows));
}
- Note => {
+ Note | OnceNote => {
spec.set_fg(Some(Color::Green)).set_intense(true);
}
Help => {
Bug | DelayedBug => "error: internal compiler error",
Fatal | Error { .. } => "error",
Warning => "warning",
- Note => "note",
+ Note | OnceNote => "note",
Help => "help",
FailureNote => "failure-note",
Allow => panic!("Shouldn't call on allowed error"),
);
}
-// Useful type to use with `Result<>` indicate that an error has already
-// been reported to the user, so no need to continue checking.
-#[derive(Clone, Copy, Debug, Encodable, Decodable, Hash, PartialEq, Eq)]
-pub struct ErrorGuaranteed;
+/// Useful type to use with `Result<>` to indicate that an error has already
+/// been reported to the user, so no need to continue checking.
+#[derive(Clone, Copy, Debug, Encodable, Decodable, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub struct ErrorGuaranteed(());
+
+impl ErrorGuaranteed {
+ /// To be used only if you really know what you are doing... ideally, we would find a way to
+ /// eliminate all calls to this method.
+ pub fn unchecked_claim_error_was_emitted() -> Self {
+ ErrorGuaranteed(())
+ }
+}
rustc_data_structures::impl_stable_hash_via_hash!(ErrorGuaranteed);
use rustc_attr::{self as attr, Deprecation, Stability};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::{self, Lrc};
-use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed};
use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
use rustc_lint_defs::BuiltinLintDiagnostics;
use rustc_parse::{self, nt_to_tokenstream, parser, MACRO_ARGUMENTS};
use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId};
use rustc_span::source_map::SourceMap;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::{FileName, MultiSpan, Span, DUMMY_SP};
+use rustc_span::{MultiSpan, Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::default::Default;
Annotatable::Param(ref p) => p.span,
Annotatable::FieldDef(ref sf) => sf.span,
Annotatable::Variant(ref v) => v.span,
- Annotatable::Crate(ref c) => c.span,
+ Annotatable::Crate(ref c) => c.spans.inner_span,
}
}
pub fn check_unused_macros(&mut self) {
self.resolver.check_unused_macros();
}
-
- /// Resolves a `path` mentioned inside Rust code, returning an absolute path.
- ///
- /// This unifies the logic used for resolving `include_X!`.
- ///
- /// FIXME: move this to `rustc_builtin_macros` and make it private.
- pub fn resolve_path(&self, path: impl Into<PathBuf>, span: Span) -> PResult<'a, PathBuf> {
- let path = path.into();
-
- // Relative paths are resolved relative to the file in which they are found
- // after macro expansion (that is, they are unhygienic).
- if !path.is_absolute() {
- let callsite = span.source_callsite();
- let mut result = match self.source_map().span_to_filename(callsite) {
- FileName::Real(name) => name
- .into_local_path()
- .expect("attempting to resolve a file path in an external file"),
- FileName::DocTest(path, _) => path,
- other => {
- return Err(self.struct_span_err(
- span,
- &format!(
- "cannot resolve relative path in non-file source `{}`",
- self.source_map().filename_for_diagnostics(&other)
- ),
- ));
- }
- };
- result.pop();
- result.push(path);
- Ok(result)
- } else {
- Ok(path)
- }
- }
}
/// Extracts a string literal from the macro expanded version of `expr`,
let attributes_attr = list.get(1);
let proc_attrs: Vec<_> = if let Some(attr) = attributes_attr {
if !attr.has_name(sym::attributes) {
- diag.span_err(attr.span(), "second argument must be `attributes`")
+ diag.span_err(attr.span(), "second argument must be `attributes`");
}
attr.meta_item_list()
.unwrap_or_else(|| {
use rustc_ast::tokenstream::TokenStream;
use rustc_ast::visit::{self, AssocCtxt, Visitor};
use rustc_ast::{AssocItemKind, AstLike, AstLikeWrapper, AttrStyle, ExprKind, ForeignItemKind};
-use rustc_ast::{Inline, ItemKind, MacArgs, MacStmtStyle, MetaItemKind, ModKind, NestedMetaItem};
-use rustc_ast::{NodeId, PatKind, StmtKind, TyKind};
+use rustc_ast::{Inline, ItemKind, MacArgs, MacStmtStyle, MetaItemKind, ModKind};
+use rustc_ast::{NestedMetaItem, NodeId, PatKind, StmtKind, TyKind};
use rustc_ast_pretty::pprust;
use rustc_data_structures::map_in_place::MapInPlace;
use rustc_data_structures::sync::Lrc;
}
pub fn expand_crate(&mut self, krate: ast::Crate) -> ast::Crate {
- let file_path = match self.cx.source_map().span_to_filename(krate.span) {
+ let file_path = match self.cx.source_map().span_to_filename(krate.spans.inner_span) {
FileName::Real(name) => name
.into_local_path()
.expect("attempting to resolve a file path in an external file"),
ModKind::Unloaded => {
// We have an outline `mod foo;` so we need to parse the file.
let old_attrs_len = attrs.len();
- let ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership } =
+ let ParsedExternalMod { items, spans, file_path, dir_path, dir_ownership } =
parse_external_mod(
&ecx.sess,
ident,
);
}
- *mod_kind = ModKind::Loaded(items, Inline::No, inner_span);
+ *mod_kind = ModKind::Loaded(items, Inline::No, spans);
node.attrs = attrs;
if node.attrs.len() > old_attrs_len {
// If we loaded an out-of-line module and added some inner attributes,
#![feature(associated_type_bounds)]
#![feature(associated_type_defaults)]
+#![feature(box_syntax)]
#![feature(crate_visibility_modifier)]
#![feature(decl_macro)]
#![feature(if_let_guard)]
use rustc_span::symbol::Ident;
use rustc_span::Span;
-/// Contains the sub-token-trees of a "delimited" token tree, such as the contents of `(`. Note
-/// that the delimiter itself might be `NoDelim`.
+/// Contains the sub-token-trees of a "delimited" token tree such as `(a b c)`. The delimiter itself
+/// might be `NoDelim`.
#[derive(Clone, PartialEq, Encodable, Decodable, Debug)]
struct Delimited {
delim: token::DelimToken,
- tts: Vec<TokenTree>,
+ /// Note: This contains the opening and closing delimiter tokens (e.g. `(` and `)`). Note that
+ /// these could be `NoDelim`. These token kinds must match `delim`, and the methods below
+ /// debug_assert this.
+ all_tts: Vec<TokenTree>,
}
impl Delimited {
- /// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter.
- fn open_tt(&self, span: DelimSpan) -> TokenTree {
- TokenTree::token(token::OpenDelim(self.delim), span.open)
+ /// Returns a `self::TokenTree` with a `Span` corresponding to the opening delimiter. Panics if
+ /// the delimiter is `NoDelim`.
+ fn open_tt(&self) -> &TokenTree {
+ let tt = self.all_tts.first().unwrap();
+ debug_assert!(matches!(
+ tt,
+ &TokenTree::Token(token::Token { kind: token::OpenDelim(d), .. }) if d == self.delim
+ ));
+ tt
+ }
+
+ /// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter. Panics if
+ /// the delimiter is `NoDelim`.
+ fn close_tt(&self) -> &TokenTree {
+ let tt = self.all_tts.last().unwrap();
+ debug_assert!(matches!(
+ tt,
+ &TokenTree::Token(token::Token { kind: token::CloseDelim(d), .. }) if d == self.delim
+ ));
+ tt
}
- /// Returns a `self::TokenTree` with a `Span` corresponding to the closing delimiter.
- fn close_tt(&self, span: DelimSpan) -> TokenTree {
- TokenTree::token(token::CloseDelim(self.delim), span.close)
+ /// Returns the tts excluding the outer delimiters.
+ ///
+ /// FIXME: #67062 has details about why this is sub-optimal.
+ fn inner_tts(&self) -> &[TokenTree] {
+ // These functions are called for the assertions within them.
+ let _open_tt = self.open_tt();
+ let _close_tt = self.close_tt();
+ &self.all_tts[1..self.all_tts.len() - 1]
}
}
ZeroOrOne,
}
-/// Similar to `tokenstream::TokenTree`, except that `$i`, `$i:ident`, `$(...)`,
-/// and `${...}` are "first-class" token trees. Useful for parsing macros.
+/// Similar to `tokenstream::TokenTree`, except that `Sequence`, `MetaVar`, `MetaVarDecl`, and
+/// `MetaVarExpr` are "first-class" token trees. Useful for parsing macros.
#[derive(Debug, Clone, PartialEq, Encodable, Decodable)]
enum TokenTree {
Token(Token),
+ /// A delimited sequence, e.g. `($e:expr)` (RHS) or `{ $e }` (LHS).
Delimited(DelimSpan, Lrc<Delimited>),
- /// A kleene-style repetition sequence
+ /// A kleene-style repetition sequence, e.g. `$($e:expr)*` (RHS) or `$($e),*` (LHS).
Sequence(DelimSpan, Lrc<SequenceRepetition>),
- /// e.g., `$var`
+ /// e.g., `$var`.
MetaVar(Span, Ident),
- /// e.g., `$var:expr`. This is only used in the left hand side of MBE macros.
+ /// e.g., `$var:expr`. Only appears on the LHS.
MetaVarDecl(Span, Ident /* name to bind */, Option<NonterminalKind>),
- /// A meta-variable expression inside `${...}`
+ /// A meta-variable expression inside `${...}`.
MetaVarExpr(DelimSpan, MetaVarExpr),
}
impl TokenTree {
- /// Return the number of tokens in the tree.
- fn len(&self) -> usize {
- match *self {
- TokenTree::Delimited(_, ref delimed) => match delimed.delim {
- token::NoDelim => delimed.tts.len(),
- _ => delimed.tts.len() + 2,
- },
- TokenTree::Sequence(_, ref seq) => seq.tts.len(),
- _ => 0,
- }
- }
-
/// Returns `true` if the given token tree is delimited.
fn is_delimited(&self) -> bool {
matches!(*self, TokenTree::Delimited(..))
}
}
- /// Gets the `index`-th sub-token-tree. This only makes sense for delimited trees and sequences.
- fn get_tt(&self, index: usize) -> TokenTree {
- match (self, index) {
- (&TokenTree::Delimited(_, ref delimed), _) if delimed.delim == token::NoDelim => {
- delimed.tts[index].clone()
- }
- (&TokenTree::Delimited(span, ref delimed), _) => {
- if index == 0 {
- return delimed.open_tt(span);
- }
- if index == delimed.tts.len() + 1 {
- return delimed.close_tt(span);
- }
- delimed.tts[index - 1].clone()
- }
- (&TokenTree::Sequence(_, ref seq), _) => seq.tts[index].clone(),
- _ => panic!("Cannot expand a token tree"),
- }
- }
-
/// Retrieves the `TokenTree`'s span.
fn span(&self) -> Span {
match *self {
// `MetaVarExpr` can not appear in the LHS of a macro arm
TokenTree::MetaVarExpr(..) => {}
TokenTree::Delimited(_, ref del) => {
- for tt in &del.tts {
+ for tt in del.inner_tts() {
check_binders(sess, node_id, tt, macros, binders, ops, valid);
}
}
let name = MacroRulesNormalizedIdent::new(name);
check_ops_is_prefix(sess, node_id, macros, binders, ops, span, name);
}
- // FIXME(c410-f3r) Check token (https://github.com/rust-lang/rust/issues/93902)
- TokenTree::MetaVarExpr(..) => {}
+ TokenTree::MetaVarExpr(dl, ref mve) => {
+ let Some(name) = mve.ident().map(MacroRulesNormalizedIdent::new) else {
+ return;
+ };
+ check_ops_is_prefix(sess, node_id, macros, binders, ops, dl.entire(), name);
+ }
TokenTree::Delimited(_, ref del) => {
- check_nested_occurrences(sess, node_id, &del.tts, macros, binders, ops, valid);
+ check_nested_occurrences(sess, node_id, del.inner_tts(), macros, binders, ops, valid);
}
TokenTree::Sequence(_, ref seq) => {
let ops = ops.push(seq.kleene);
{
let macro_rules = state == NestedMacroState::MacroRulesNotName;
state = NestedMacroState::Empty;
- let rest =
- check_nested_macro(sess, node_id, macro_rules, &del.tts, &nested_macros, valid);
+ let rest = check_nested_macro(
+ sess,
+ node_id,
+ macro_rules,
+ del.inner_tts(),
+ &nested_macros,
+ valid,
+ );
// If we did not check the whole macro definition, then check the rest as if outside
// the macro definition.
check_nested_occurrences(
sess,
node_id,
- &del.tts[rest..],
+ &del.inner_tts()[rest..],
macros,
binders,
ops,
crate use NamedMatch::*;
crate use ParseResult::*;
-use TokenTreeOrTokenTreeSlice::*;
-use crate::mbe::{self, TokenTree};
+use crate::mbe::{self, SequenceRepetition, TokenTree};
use rustc_ast::token::{self, DocComment, Nonterminal, Token};
-use rustc_parse::parser::Parser;
+use rustc_parse::parser::{NtOrTt, Parser};
use rustc_session::parse::ParseSess;
use rustc_span::symbol::MacroRulesNormalizedIdent;
use std::borrow::Cow;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::mem;
-use std::ops::{Deref, DerefMut};
-
-// To avoid costly uniqueness checks, we require that `MatchSeq` always has a nonempty body.
-
-/// Either a sequence of token trees or a single one. This is used as the representation of the
-/// sequence of tokens that make up a matcher.
-#[derive(Clone)]
-enum TokenTreeOrTokenTreeSlice<'tt> {
- Tt(TokenTree),
- TtSeq(&'tt [TokenTree]),
-}
-
-impl<'tt> TokenTreeOrTokenTreeSlice<'tt> {
- /// Returns the number of constituent top-level token trees of `self` (top-level in that it
- /// will not recursively descend into subtrees).
- fn len(&self) -> usize {
- match *self {
- TtSeq(ref v) => v.len(),
- Tt(ref tt) => tt.len(),
- }
- }
-
- /// The `index`-th token tree of `self`.
- fn get_tt(&self, index: usize) -> TokenTree {
- match *self {
- TtSeq(ref v) => v[index].clone(),
- Tt(ref tt) => tt.get_tt(index),
- }
- }
-}
/// An unzipping of `TokenTree`s... see the `stack` field of `MatcherPos`.
///
-/// This is used by `inner_parse_loop` to keep track of delimited submatchers that we have
+/// This is used by `parse_tt_inner` to keep track of delimited submatchers that we have
/// descended into.
#[derive(Clone)]
struct MatcherTtFrame<'tt> {
/// The "parent" matcher that we are descending into.
- elts: TokenTreeOrTokenTreeSlice<'tt>,
+ elts: &'tt [TokenTree],
/// The position of the "dot" in `elts` at the time we descended.
idx: usize,
}
-type NamedMatchVec = SmallVec<[NamedMatch; 4]>;
+// One element is enough to cover 95-99% of vectors for most benchmarks. Also,
+// vectors longer than one frequently have many elements, not just two or
+// three.
+type NamedMatchVec = SmallVec<[NamedMatch; 1]>;
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(NamedMatchVec, 48);
/// Represents a single "position" (aka "matcher position", aka "item"), as
/// described in the module documentation.
-///
-/// Here:
-///
-/// - `'root` represents the lifetime of the stack slot that holds the root
-/// `MatcherPos`. As described in `MatcherPosHandle`, the root `MatcherPos`
-/// structure is stored on the stack, but subsequent instances are put into
-/// the heap.
-/// - `'tt` represents the lifetime of the token trees that this matcher
-/// position refers to.
-///
-/// It is important to distinguish these two lifetimes because we have a
-/// `SmallVec<TokenTreeOrTokenTreeSlice<'tt>>` below, and the destructor of
-/// that is considered to possibly access the data from its elements (it lacks
-/// a `#[may_dangle]` attribute). As a result, the compiler needs to know that
-/// all the elements in that `SmallVec` strictly outlive the root stack slot
-/// lifetime. By separating `'tt` from `'root`, we can show that.
#[derive(Clone)]
-struct MatcherPos<'root, 'tt> {
- /// The token or sequence of tokens that make up the matcher. `elts` is short for "elements".
- top_elts: TokenTreeOrTokenTreeSlice<'tt>,
+struct MatcherPos<'tt> {
+ /// The token or slice of tokens that make up the matcher. `elts` is short for "elements".
+ top_elts: &'tt [TokenTree],
/// The position of the "dot" in this matcher
idx: usize,
match_hi: usize,
/// This field is only used if we are matching a repetition.
- repetition: Option<MatcherPosRepetition<'root, 'tt>>,
+ repetition: Option<MatcherPosRepetition<'tt>>,
/// Specifically used to "unzip" token trees. By "unzip", we mean to unwrap the delimiters from
/// a delimited token tree (e.g., something wrapped in `(` `)`) or to get the contents of a doc
// This type is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(MatcherPos<'_, '_>, 240);
+rustc_data_structures::static_assert_size!(MatcherPos<'_>, 112);
+
+impl<'tt> MatcherPos<'tt> {
+ /// `len` `Vec`s (initially shared and empty) that will store matches of metavars.
+ fn create_matches(len: usize) -> Box<[Lrc<NamedMatchVec>]> {
+ if len == 0 {
+ vec![]
+ } else {
+ let empty_matches = Lrc::new(SmallVec::new());
+ vec![empty_matches; len]
+ }
+ .into_boxed_slice()
+ }
-impl<'root, 'tt> MatcherPos<'root, 'tt> {
/// Generates the top-level matcher position in which the "dot" is before the first token of
/// the matcher `ms`.
fn new(ms: &'tt [TokenTree]) -> Self {
let match_idx_hi = count_names(ms);
MatcherPos {
// Start with the top level matcher given to us.
- top_elts: TtSeq(ms),
+ top_elts: ms,
// The "dot" is before the first token of the matcher.
idx: 0,
// Initialize `matches` to a bunch of empty `Vec`s -- one for each metavar in
// `top_elts`. `match_lo` for `top_elts` is 0 and `match_hi` is `match_idx_hi`.
// `match_cur` is 0 since we haven't actually matched anything yet.
- matches: create_matches(match_idx_hi),
+ matches: Self::create_matches(match_idx_hi),
match_lo: 0,
match_cur: 0,
match_hi: match_idx_hi,
}
}
+ fn repetition(up: Box<MatcherPos<'tt>>, seq: &'tt SequenceRepetition) -> Self {
+ MatcherPos {
+ top_elts: &seq.tts,
+ idx: 0,
+ matches: Self::create_matches(up.matches.len()),
+ match_lo: up.match_cur,
+ match_cur: up.match_cur,
+ match_hi: up.match_cur + seq.num_captures,
+ repetition: Some(MatcherPosRepetition { up, seq }),
+ stack: smallvec![],
+ }
+ }
+
/// Adds `m` as a named match for the `idx`-th metavar.
fn push_match(&mut self, idx: usize, m: NamedMatch) {
let matches = Lrc::make_mut(&mut self.matches[idx]);
}
#[derive(Clone)]
-struct MatcherPosRepetition<'root, 'tt> {
- /// The KleeneOp of this sequence.
- seq_op: mbe::KleeneOp,
-
- /// The separator.
- sep: Option<Token>,
-
+struct MatcherPosRepetition<'tt> {
/// The "parent" matcher position. That is, the matcher position just before we enter the
/// sequence.
- up: MatcherPosHandle<'root, 'tt>,
-}
-
-// Lots of MatcherPos instances are created at runtime. Allocating them on the
-// heap is slow. Furthermore, using SmallVec<MatcherPos> to allocate them all
-// on the stack is also slow, because MatcherPos is quite a large type and
-// instances get moved around a lot between vectors, which requires lots of
-// slow memcpy calls.
-//
-// Therefore, the initial MatcherPos is always allocated on the stack,
-// subsequent ones (of which there aren't that many) are allocated on the heap,
-// and this type is used to encapsulate both cases.
-enum MatcherPosHandle<'root, 'tt> {
- Ref(&'root mut MatcherPos<'root, 'tt>),
- Box(Box<MatcherPos<'root, 'tt>>),
-}
-
-impl<'root, 'tt> Clone for MatcherPosHandle<'root, 'tt> {
- // This always produces a new Box.
- fn clone(&self) -> Self {
- MatcherPosHandle::Box(match *self {
- MatcherPosHandle::Ref(ref r) => Box::new((**r).clone()),
- MatcherPosHandle::Box(ref b) => b.clone(),
- })
- }
-}
-
-impl<'root, 'tt> Deref for MatcherPosHandle<'root, 'tt> {
- type Target = MatcherPos<'root, 'tt>;
- fn deref(&self) -> &Self::Target {
- match *self {
- MatcherPosHandle::Ref(ref r) => r,
- MatcherPosHandle::Box(ref b) => b,
- }
- }
-}
+ up: Box<MatcherPos<'tt>>,
-impl<'root, 'tt> DerefMut for MatcherPosHandle<'root, 'tt> {
- fn deref_mut(&mut self) -> &mut MatcherPos<'root, 'tt> {
- match *self {
- MatcherPosHandle::Ref(ref mut r) => r,
- MatcherPosHandle::Box(ref mut b) => b,
- }
- }
+ /// The sequence itself.
+ seq: &'tt SequenceRepetition,
}
-enum EofItems<'root, 'tt> {
+enum EofItems<'tt> {
None,
- One(MatcherPosHandle<'root, 'tt>),
+ One(Box<MatcherPos<'tt>>),
Multiple,
}
pub(super) fn count_names(ms: &[TokenTree]) -> usize {
ms.iter().fold(0, |count, elt| {
count
- + match *elt {
- TokenTree::Delimited(_, ref delim) => count_names(&delim.tts),
+ + match elt {
+ TokenTree::Delimited(_, delim) => count_names(delim.inner_tts()),
TokenTree::MetaVar(..) => 0,
TokenTree::MetaVarDecl(..) => 1,
- // FIXME(c410-f3r) MetaVarExpr should be handled instead of being ignored
- // https://github.com/rust-lang/rust/issues/9390
+ // Panicking here would abort execution because `parse_tree` makes use of this
+ // function. In other words, RHS meta-variable expressions eventually end-up here.
+ //
+ // `0` is still returned to inform that no meta-variable was found. `Meta-variables
+ // != Meta-variable expressions`
TokenTree::MetaVarExpr(..) => 0,
- TokenTree::Sequence(_, ref seq) => seq.num_captures,
+ TokenTree::Sequence(_, seq) => seq.num_captures,
TokenTree::Token(..) => 0,
}
})
}
-/// `len` `Vec`s (initially shared and empty) that will store matches of metavars.
-fn create_matches(len: usize) -> Box<[Lrc<NamedMatchVec>]> {
- if len == 0 {
- vec![]
- } else {
- let empty_matches = Lrc::new(SmallVec::new());
- vec![empty_matches; len]
- }
- .into_boxed_slice()
-}
-
-/// `NamedMatch` is a pattern-match result for a single `token::MATCH_NONTERMINAL`:
-/// so it is associated with a single ident in a parse, and all
+/// `NamedMatch` is a pattern-match result for a single metavar. All
/// `MatchedNonterminal`s in the `NamedMatch` have the same non-terminal type
-/// (expr, item, etc). Each leaf in a single `NamedMatch` corresponds to a
-/// single `token::MATCH_NONTERMINAL` in the `TokenTree` that produced it.
+/// (expr, item, etc).
///
/// The in-memory structure of a particular `NamedMatch` represents the match
/// that occurred when a particular subset of a matcher was applied to a
/// particular token tree.
///
/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
-/// the `MatchedNonterminal`s, will depend on the token tree it was applied
-/// to: each `MatchedSeq` corresponds to a single `TTSeq` in the originating
+/// the `MatchedNtNonTts`s, will depend on the token tree it was applied
+/// to: each `MatchedSeq` corresponds to a single repetition in the originating
/// token tree. The depth of the `NamedMatch` structure will therefore depend
-/// only on the nesting depth of `ast::TTSeq`s in the originating
-/// token tree it was derived from.
+/// only on the nesting depth of repetitions in the originating token tree it
+/// was derived from.
///
/// In layman's terms: `NamedMatch` will form a tree representing nested matches of a particular
/// meta variable. For example, if we are matching the following macro against the following
#[derive(Debug, Clone)]
crate enum NamedMatch {
MatchedSeq(Lrc<NamedMatchVec>),
+
+ // A metavar match of type `tt`.
+ MatchedTokenTree(rustc_ast::tokenstream::TokenTree),
+
+ // A metavar match of any type other than `tt`.
MatchedNonterminal(Lrc<Nonterminal>),
}
-/// Takes a sequence of token trees `ms` representing a matcher which successfully matched input
+/// Takes a slice of token trees `ms` representing a matcher which successfully matched input
/// and an iterator of items that matched input and produces a `NamedParseResult`.
fn nameize<I: Iterator<Item = NamedMatch>>(
sess: &ParseSess,
}
}
TokenTree::Delimited(_, ref delim) => {
- for next_m in &delim.tts {
+ for next_m in delim.inner_tts() {
n_rec(sess, next_m, res.by_ref(), ret_val)?;
}
}
}
Occupied(..) => return Err((sp, format!("duplicated bind name: {}", bind_name))),
},
- // FIXME(c410-f3r) MetaVar and MetaVarExpr should be handled instead of being ignored
- // https://github.com/rust-lang/rust/issues/9390
- TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) | TokenTree::Token(..) => {}
+ TokenTree::Token(..) => (),
+ TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
}
Ok(())
}
}
-/// Process the matcher positions of `cur_items` until it is empty. In the process, this will
-/// produce more items in `next_items`, `eof_items`, and `bb_items`.
-///
-/// For more info about the how this happens, see the module-level doc comments and the inline
-/// comments of this function.
-///
-/// # Parameters
-///
-/// - `cur_items`: the set of current items to be processed. This should be empty by the end of a
-/// successful execution of this function.
-/// - `next_items`: the set of newly generated items. These are used to replenish `cur_items` in
-/// the function `parse`.
-/// - `eof_items`: the set of items that would be valid if this was the EOF.
-/// - `bb_items`: the set of items that are waiting for the black-box parser.
-/// - `token`: the current token of the parser.
-///
-/// # Returns
-///
-/// A `ParseResult`. Note that matches are kept track of through the items generated.
-fn inner_parse_loop<'root, 'tt>(
- sess: &ParseSess,
- cur_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
- next_items: &mut Vec<MatcherPosHandle<'root, 'tt>>,
- bb_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
- eof_items: &mut EofItems<'root, 'tt>,
- token: &Token,
-) -> Result<(), (rustc_span::Span, String)> {
- // Pop items from `cur_items` until it is empty.
- while let Some(mut item) = cur_items.pop() {
- // When unzipped trees end, remove them. This corresponds to backtracking out of a
- // delimited submatcher into which we already descended. In backtracking out again, we need
- // to advance the "dot" past the delimiters in the outer matcher.
- while item.idx >= item.top_elts.len() {
- match item.stack.pop() {
- Some(MatcherTtFrame { elts, idx }) => {
- item.top_elts = elts;
- item.idx = idx + 1;
+// Note: the item vectors could be created and dropped within `parse_tt`, but to avoid excess
+// allocations we have a single vector for each kind that is cleared and reused repeatedly.
+pub struct TtParser<'tt> {
+ macro_name: Ident,
+
+ /// The set of current items to be processed. This should be empty by the end of a successful
+ /// execution of `parse_tt_inner`.
+ cur_items: Vec<Box<MatcherPos<'tt>>>,
+
+ /// The set of newly generated items. These are used to replenish `cur_items` in the function
+ /// `parse_tt`.
+ next_items: Vec<Box<MatcherPos<'tt>>>,
+
+ /// The set of items that are waiting for the black-box parser.
+ bb_items: Vec<Box<MatcherPos<'tt>>>,
+}
+
+impl<'tt> TtParser<'tt> {
+ pub(super) fn new(macro_name: Ident) -> TtParser<'tt> {
+ TtParser { macro_name, cur_items: vec![], next_items: vec![], bb_items: vec![] }
+ }
+
+ /// Process the matcher positions of `cur_items` until it is empty. In the process, this will
+ /// produce more items in `next_items` and `bb_items`.
+ ///
+ /// For more info about how this happens, see the module-level doc comments and the inline
+ /// comments of this function.
+ ///
+ /// # Returns
+ ///
+ /// `Some(result)` if everything is finished, `None` otherwise. Note that matches are kept
+ /// track of through the items generated.
+ fn parse_tt_inner(
+ &mut self,
+ sess: &ParseSess,
+ ms: &[TokenTree],
+ token: &Token,
+ ) -> Option<NamedParseResult> {
+ // Matcher positions that would be valid if the macro invocation was over now. Only
+ // modified if `token == Eof`.
+ let mut eof_items = EofItems::None;
+
+ while let Some(mut item) = self.cur_items.pop() {
+ // When unzipped trees end, remove them. This corresponds to backtracking out of a
+ // delimited submatcher into which we already descended. When backtracking out again, we
+ // need to advance the "dot" past the delimiters in the outer matcher.
+ while item.idx >= item.top_elts.len() {
+ match item.stack.pop() {
+ Some(MatcherTtFrame { elts, idx }) => {
+ item.top_elts = elts;
+ item.idx = idx + 1;
+ }
+ None => break,
}
- None => break,
}
- }
- // Get the current position of the "dot" (`idx`) in `item` and the number of token trees in
- // the matcher (`len`).
- let idx = item.idx;
- let len = item.top_elts.len();
-
- // If `idx >= len`, then we are at or past the end of the matcher of `item`.
- if idx >= len {
- // We are repeating iff there is a parent. If the matcher is inside of a repetition,
- // then we could be at the end of a sequence or at the beginning of the next
- // repetition.
- if let Some(repetition) = &item.repetition {
- // At this point, regardless of whether there is a separator, we should add all
- // matches from the complete repetition of the sequence to the shared, top-level
- // `matches` list (actually, `up.matches`, which could itself not be the top-level,
- // but anyway...). Moreover, we add another item to `cur_items` in which the "dot"
- // is at the end of the `up` matcher. This ensures that the "dot" in the `up`
- // matcher is also advanced sufficiently.
- //
- // NOTE: removing the condition `idx == len` allows trailing separators.
+ // Get the current position of the "dot" (`idx`) in `item` and the number of token
+ // trees in the matcher (`len`).
+ let idx = item.idx;
+ let len = item.top_elts.len();
+
+ if idx < len {
+ // We are in the middle of a matcher. Compare the matcher's current tt against
+ // `token`.
+ match &item.top_elts[idx] {
+ TokenTree::Sequence(_sp, seq) => {
+ let op = seq.kleene.op;
+ if op == mbe::KleeneOp::ZeroOrMore || op == mbe::KleeneOp::ZeroOrOne {
+ // Allow for the possibility of zero matches of this sequence.
+ let mut new_item = item.clone();
+ new_item.match_cur += seq.num_captures;
+ new_item.idx += 1;
+ for idx in item.match_cur..item.match_cur + seq.num_captures {
+ new_item.push_match(idx, MatchedSeq(Lrc::new(smallvec![])));
+ }
+ self.cur_items.push(new_item);
+ }
+
+ // Allow for the possibility of one or more matches of this sequence.
+ self.cur_items.push(box MatcherPos::repetition(item, &seq));
+ }
+
+ &TokenTree::MetaVarDecl(span, _, None) => {
+ // E.g. `$e` instead of `$e:expr`.
+ if sess.missing_fragment_specifiers.borrow_mut().remove(&span).is_some() {
+ return Some(Error(span, "missing fragment specifier".to_string()));
+ }
+ }
+
+ &TokenTree::MetaVarDecl(_, _, Some(kind)) => {
+ // Built-in nonterminals never start with these tokens, so we can eliminate
+ // them from consideration.
+ //
+ // We use the span of the metavariable declaration to determine any
+ // edition-specific matching behavior for non-terminals.
+ if Parser::nonterminal_may_begin_with(kind, token) {
+ self.bb_items.push(item);
+ }
+ }
+
+ TokenTree::Delimited(_, delimited) => {
+ // To descend into a delimited submatcher, we push the current matcher onto
+ // a stack and push a new item containing the submatcher onto `cur_items`.
+ //
+ // At the beginning of the loop, if we reach the end of the delimited
+ // submatcher, we pop the stack to backtrack out of the descent. Note that
+ // we use `all_tts` to include the open and close delimiter tokens.
+ let lower_elts = mem::replace(&mut item.top_elts, &delimited.all_tts);
+ let idx = item.idx;
+ item.stack.push(MatcherTtFrame { elts: lower_elts, idx });
+ item.idx = 0;
+ self.cur_items.push(item);
+ }
+
+ TokenTree::Token(t) => {
+ // If it's a doc comment, we just ignore it and move on to the next tt in
+ // the matcher. If the token matches, we can just advance the parser.
+ // Otherwise, this match has failed, there is nothing to do, and hopefully
+ // another item in `cur_items` will match.
+ if matches!(t, Token { kind: DocComment(..), .. }) {
+ item.idx += 1;
+ self.cur_items.push(item);
+ } else if token_name_eq(&t, token) {
+ item.idx += 1;
+ self.next_items.push(item);
+ }
+ }
+
+ // These cannot appear in a matcher.
+ TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
+ }
+ } else if let Some(repetition) = &item.repetition {
+ // We are past the end of a repetition.
+ debug_assert!(idx <= len + 1);
+
if idx == len {
- // Get the `up` matcher
+ // Add all matches from the sequence to `up`, and move the "dot" past the
+ // repetition in `up`. This allows for the case where the sequence matching is
+ // finished.
let mut new_pos = repetition.up.clone();
-
- // Add matches from this repetition to the `matches` of `up`
for idx in item.match_lo..item.match_hi {
let sub = item.matches[idx].clone();
new_pos.push_match(idx, MatchedSeq(sub));
}
-
- // Move the "dot" past the repetition in `up`
new_pos.match_cur = item.match_hi;
new_pos.idx += 1;
- cur_items.push(new_pos);
+ self.cur_items.push(new_pos);
}
- // Check if we need a separator.
- if idx == len && repetition.sep.is_some() {
- // We have a separator, and it is the current token. We can advance past the
- // separator token.
- if repetition.sep.as_ref().map_or(false, |sep| token_name_eq(token, sep)) {
+ if idx == len && repetition.seq.separator.is_some() {
+ if repetition
+ .seq
+ .separator
+ .as_ref()
+ .map_or(false, |sep| token_name_eq(token, sep))
+ {
+ // The matcher has a separator, and it matches the current token. We can
+ // advance past the separator token.
item.idx += 1;
- next_items.push(item);
+ self.next_items.push(item);
}
- } else if repetition.seq_op != mbe::KleeneOp::ZeroOrOne {
+ } else if repetition.seq.kleene.op != mbe::KleeneOp::ZeroOrOne {
// We don't need a separator. Move the "dot" back to the beginning of the
// matcher and try to match again UNLESS we are only allowed to have _one_
// repetition.
item.match_cur = item.match_lo;
item.idx = 0;
- cur_items.push(item);
+ self.cur_items.push(item);
}
} else {
- // If we are not in a repetition, then being at the end of a matcher means that we
- // have reached the potential end of the input.
- *eof_items = match eof_items {
- EofItems::None => EofItems::One(item),
- EofItems::One(_) | EofItems::Multiple => EofItems::Multiple,
- }
- }
- } else {
- // We are in the middle of a matcher. Look at what token in the matcher we are trying
- // to match the current token (`token`) against. Depending on that, we may generate new
- // items.
- match item.top_elts.get_tt(idx) {
- // Need to descend into a sequence
- TokenTree::Sequence(sp, seq) => {
- // Examine the case where there are 0 matches of this sequence. We are
- // implicitly disallowing OneOrMore from having 0 matches here. Thus, that will
- // result in a "no rules expected token" error by virtue of this matcher not
- // working.
- if seq.kleene.op == mbe::KleeneOp::ZeroOrMore
- || seq.kleene.op == mbe::KleeneOp::ZeroOrOne
- {
- let mut new_item = item.clone();
- new_item.match_cur += seq.num_captures;
- new_item.idx += 1;
- for idx in item.match_cur..item.match_cur + seq.num_captures {
- new_item.push_match(idx, MatchedSeq(Lrc::new(smallvec![])));
- }
- cur_items.push(new_item);
- }
-
- let matches = create_matches(item.matches.len());
- cur_items.push(MatcherPosHandle::Box(Box::new(MatcherPos {
- stack: smallvec![],
- idx: 0,
- matches,
- match_lo: item.match_cur,
- match_cur: item.match_cur,
- match_hi: item.match_cur + seq.num_captures,
- repetition: Some(MatcherPosRepetition {
- up: item,
- sep: seq.separator.clone(),
- seq_op: seq.kleene.op,
- }),
- top_elts: Tt(TokenTree::Sequence(sp, seq)),
- })));
- }
-
- // We need to match a metavar (but the identifier is invalid)... this is an error
- TokenTree::MetaVarDecl(span, _, None) => {
- if sess.missing_fragment_specifiers.borrow_mut().remove(&span).is_some() {
- return Err((span, "missing fragment specifier".to_string()));
+ // We are past the end of the matcher, and not in a repetition. Look for end of
+ // input.
+ debug_assert_eq!(idx, len);
+ if *token == token::Eof {
+ eof_items = match eof_items {
+ EofItems::None => EofItems::One(item),
+ EofItems::One(_) | EofItems::Multiple => EofItems::Multiple,
}
}
-
- // We need to match a metavar with a valid ident... call out to the black-box
- // parser by adding an item to `bb_items`.
- TokenTree::MetaVarDecl(_, _, Some(kind)) => {
- // Built-in nonterminals never start with these tokens, so we can eliminate
- // them from consideration.
- //
- // We use the span of the metavariable declaration to determine any
- // edition-specific matching behavior for non-terminals.
- if Parser::nonterminal_may_begin_with(kind, token) {
- bb_items.push(item);
- }
- }
-
- // We need to descend into a delimited submatcher or a doc comment. To do this, we
- // push the current matcher onto a stack and push a new item containing the
- // submatcher onto `cur_items`.
- //
- // At the beginning of the loop, if we reach the end of the delimited submatcher,
- // we pop the stack to backtrack out of the descent.
- seq @ (TokenTree::Delimited(..)
- | TokenTree::Token(Token { kind: DocComment(..), .. })) => {
- let lower_elts = mem::replace(&mut item.top_elts, Tt(seq));
- let idx = item.idx;
- item.stack.push(MatcherTtFrame { elts: lower_elts, idx });
- item.idx = 0;
- cur_items.push(item);
- }
-
- // We just matched a normal token. We can just advance the parser.
- TokenTree::Token(t) if token_name_eq(&t, token) => {
- item.idx += 1;
- next_items.push(item);
- }
-
- // There was another token that was not `token`... This means we can't add any
- // rules. NOTE that this is not necessarily an error unless _all_ items in
- // `cur_items` end up doing this. There may still be some other matchers that do
- // end up working out.
- TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => {}
}
}
- }
-
- // Yay a successful parse (so far)!
- Ok(())
-}
-
-/// Use the given sequence of token trees (`ms`) as a matcher. Match the token
-/// stream from the given `parser` against it and return the match.
-pub(super) fn parse_tt(
- parser: &mut Cow<'_, Parser<'_>>,
- ms: &[TokenTree],
- macro_name: Ident,
-) -> NamedParseResult {
- // A queue of possible matcher positions. We initialize it with the matcher position in which
- // the "dot" is before the first token of the first token tree in `ms`. `inner_parse_loop` then
- // processes all of these possible matcher positions and produces possible next positions into
- // `next_items`. After some post-processing, the contents of `next_items` replenish `cur_items`
- // and we start over again.
- //
- // This MatcherPos instance is allocated on the stack. All others -- and
- // there are frequently *no* others! -- are allocated on the heap.
- let mut initial = MatcherPos::new(ms);
- let mut cur_items = smallvec![MatcherPosHandle::Ref(&mut initial)];
- let mut next_items = Vec::new();
-
- loop {
- assert!(next_items.is_empty());
-
- // Matcher positions black-box parsed by parser.rs (`parser`)
- let mut bb_items = SmallVec::new();
-
- // Matcher positions that would be valid if the macro invocation was over now
- let mut eof_items = EofItems::None;
-
- // Process `cur_items` until either we have finished the input or we need to get some
- // parsing from the black-box parser done. The result is that `next_items` will contain a
- // bunch of possible next matcher positions in `next_items`.
- match inner_parse_loop(
- parser.sess,
- &mut cur_items,
- &mut next_items,
- &mut bb_items,
- &mut eof_items,
- &parser.token,
- ) {
- Ok(()) => {}
- Err((sp, msg)) => return Error(sp, msg),
- }
-
- // inner parse loop handled all cur_items, so it's empty
- assert!(cur_items.is_empty());
- // We need to do some post processing after the `inner_parse_loop`.
- //
- // Error messages here could be improved with links to original rules.
-
- // If we reached the EOF, check that there is EXACTLY ONE possible matcher. Otherwise,
- // either the parse is ambiguous (which should never happen) or there is a syntax error.
- if parser.token == token::Eof {
- return match eof_items {
+ // If we reached the end of input, check that there is EXACTLY ONE possible matcher.
+ // Otherwise, either the parse is ambiguous (which is an error) or there is a syntax error.
+ if *token == token::Eof {
+ Some(match eof_items {
EofItems::One(mut eof_item) => {
let matches =
eof_item.matches.iter_mut().map(|dv| Lrc::make_mut(dv).pop().unwrap());
- nameize(parser.sess, ms, matches)
+ nameize(sess, ms, matches)
}
EofItems::Multiple => {
- Error(parser.token.span, "ambiguity: multiple successful parses".to_string())
+ Error(token.span, "ambiguity: multiple successful parses".to_string())
}
EofItems::None => Failure(
Token::new(
token::Eof,
- if parser.token.span.is_dummy() {
- parser.token.span
- } else {
- parser.token.span.shrink_to_hi()
- },
+ if token.span.is_dummy() { token.span } else { token.span.shrink_to_hi() },
),
"missing tokens in macro arguments",
),
- };
- }
- // Performance hack: `eof_items` may share matchers via `Rc` with other things that we want
- // to modify. Dropping `eof_items` now may drop these refcounts to 1, preventing an
- // unnecessary implicit clone later in `Rc::make_mut`.
- drop(eof_items);
-
- // If there are no possible next positions AND we aren't waiting for the black-box parser,
- // then there is a syntax error.
- if bb_items.is_empty() && next_items.is_empty() {
- return Failure(parser.token.clone(), "no rules expected this token in macro call");
+ })
+ } else {
+ None
}
+ }
- if (!bb_items.is_empty() && !next_items.is_empty()) || bb_items.len() > 1 {
- // We need to call out to parse some rust nonterminal (black-box) parser. But something
- // is wrong, because there is not EXACTLY ONE of these.
- let nts = bb_items
- .iter()
- .map(|item| match item.top_elts.get_tt(item.idx) {
- TokenTree::MetaVarDecl(_, bind, Some(kind)) => format!("{} ('{}')", kind, bind),
- _ => panic!(),
- })
- .collect::<Vec<String>>()
- .join(" or ");
-
- return Error(
- parser.token.span,
- format!(
- "local ambiguity when calling macro `{macro_name}`: multiple parsing options: {}",
- match next_items.len() {
- 0 => format!("built-in NTs {}.", nts),
- 1 => format!("built-in NTs {} or 1 other option.", nts),
- n => format!("built-in NTs {} or {} other options.", nts, n),
- }
- ),
- );
- }
+ /// Use the given slice of token trees (`ms`) as a matcher. Match the token stream from the
+ /// given `parser` against it and return the match.
+ pub(super) fn parse_tt(
+ &mut self,
+ parser: &mut Cow<'_, Parser<'_>>,
+ ms: &'tt [TokenTree],
+ ) -> NamedParseResult {
+ // A queue of possible matcher positions. We initialize it with the matcher position in
+ // which the "dot" is before the first token of the first token tree in `ms`.
+ // `parse_tt_inner` then processes all of these possible matcher positions and produces
+ // possible next positions into `next_items`. After some post-processing, the contents of
+ // `next_items` replenish `cur_items` and we start over again.
+ self.cur_items.clear();
+ self.cur_items.push(box MatcherPos::new(ms));
+
+ loop {
+ self.next_items.clear();
+ self.bb_items.clear();
+
+ // Process `cur_items` until either we have finished the input or we need to get some
+ // parsing from the black-box parser done.
+ if let Some(result) = self.parse_tt_inner(parser.sess, ms, &parser.token) {
+ return result;
+ }
- if !next_items.is_empty() {
- // Dump all possible `next_items` into `cur_items` for the next iteration. Then process
- // the next token.
- cur_items.extend(next_items.drain(..));
- parser.to_mut().bump();
- } else {
- // Finally, we have the case where we need to call the black-box parser to get some
- // nonterminal.
- assert_eq!(bb_items.len(), 1);
-
- let mut item = bb_items.pop().unwrap();
- if let TokenTree::MetaVarDecl(span, _, Some(kind)) = item.top_elts.get_tt(item.idx) {
- let match_cur = item.match_cur;
- // We use the span of the metavariable declaration to determine any
- // edition-specific matching behavior for non-terminals.
- let nt = match parser.to_mut().parse_nonterminal(kind) {
- Err(mut err) => {
- err.span_label(
- span,
- format!("while parsing argument for this `{}` macro fragment", kind),
- )
- .emit();
- return ErrorReported;
+ // `parse_tt_inner` handled all cur_items, so it's empty.
+ assert!(self.cur_items.is_empty());
+
+ // Error messages here could be improved with links to original rules.
+ match (self.next_items.len(), self.bb_items.len()) {
+ (0, 0) => {
+ // There are no possible next positions AND we aren't waiting for the black-box
+ // parser: syntax error.
+ return Failure(
+ parser.token.clone(),
+ "no rules expected this token in macro call",
+ );
+ }
+
+ (_, 0) => {
+ // Dump all possible `next_items` into `cur_items` for the next iteration. Then
+ // process the next token.
+ self.cur_items.extend(self.next_items.drain(..));
+ parser.to_mut().bump();
+ }
+
+ (0, 1) => {
+ // We need to call the black-box parser to get some nonterminal.
+ let mut item = self.bb_items.pop().unwrap();
+ if let TokenTree::MetaVarDecl(span, _, Some(kind)) = item.top_elts[item.idx] {
+ let match_cur = item.match_cur;
+ // We use the span of the metavariable declaration to determine any
+ // edition-specific matching behavior for non-terminals.
+ let nt = match parser.to_mut().parse_nonterminal(kind) {
+ Err(mut err) => {
+ err.span_label(
+ span,
+ format!(
+ "while parsing argument for this `{kind}` macro fragment"
+ ),
+ )
+ .emit();
+ return ErrorReported;
+ }
+ Ok(nt) => nt,
+ };
+ let m = match nt {
+ NtOrTt::Nt(nt) => MatchedNonterminal(Lrc::new(nt)),
+ NtOrTt::Tt(tt) => MatchedTokenTree(tt),
+ };
+ item.push_match(match_cur, m);
+ item.idx += 1;
+ item.match_cur += 1;
+ } else {
+ unreachable!()
}
- Ok(nt) => nt,
- };
- item.push_match(match_cur, MatchedNonterminal(Lrc::new(nt)));
- item.idx += 1;
- item.match_cur += 1;
- } else {
- unreachable!()
+ self.cur_items.push(item);
+ }
+
+ (_, _) => {
+ // Too many possibilities!
+ return self.ambiguity_error(parser.token.span);
+ }
}
- cur_items.push(item);
+
+ assert!(!self.cur_items.is_empty());
}
+ }
- assert!(!cur_items.is_empty());
+ fn ambiguity_error(&self, token_span: rustc_span::Span) -> NamedParseResult {
+ let nts = self
+ .bb_items
+ .iter()
+ .map(|item| match item.top_elts[item.idx] {
+ TokenTree::MetaVarDecl(_, bind, Some(kind)) => {
+ format!("{} ('{}')", kind, bind)
+ }
+ _ => panic!(),
+ })
+ .collect::<Vec<String>>()
+ .join(" or ");
+
+ Error(
+ token_span,
+ format!(
+ "local ambiguity when calling macro `{}`: multiple parsing options: {}",
+ self.macro_name,
+ match self.next_items.len() {
+ 0 => format!("built-in NTs {}.", nts),
+ 1 => format!("built-in NTs {} or 1 other option.", nts),
+ n => format!("built-in NTs {} or {} other options.", nts, n),
+ }
+ ),
+ )
}
}
use crate::expand::{ensure_complete_parse, parse_ast_fragment, AstFragment, AstFragmentKind};
use crate::mbe;
use crate::mbe::macro_check;
-use crate::mbe::macro_parser::parse_tt;
-use crate::mbe::macro_parser::{Error, ErrorReported, Failure, Success};
-use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq};
+use crate::mbe::macro_parser::{Error, ErrorReported, Failure, Success, TtParser};
+use crate::mbe::macro_parser::{MatchedSeq, MatchedTokenTree};
use crate::mbe::transcribe::transcribe;
use rustc_ast as ast;
-use rustc_ast::token::{self, NonterminalKind, NtTT, Token, TokenKind::*};
+use rustc_ast::token::{self, NonterminalKind, Token, TokenKind::*};
use rustc_ast::tokenstream::{DelimSpan, TokenStream};
use rustc_ast::{NodeId, DUMMY_NODE_ID};
use rustc_ast_pretty::pprust;
}
/// Given `lhses` and `rhses`, this is the new macro we create
-fn generic_extension<'cx>(
+fn generic_extension<'cx, 'tt>(
cx: &'cx mut ExtCtxt<'_>,
sp: Span,
def_span: Span,
name: Ident,
transparency: Transparency,
arg: TokenStream,
- lhses: &[mbe::TokenTree],
- rhses: &[mbe::TokenTree],
+ lhses: &'tt [mbe::TokenTree],
+ rhses: &'tt [mbe::TokenTree],
is_local: bool,
) -> Box<dyn MacResult + 'cx> {
let sess = &cx.sess.parse_sess;
// this situation.)
let parser = parser_from_cx(sess, arg.clone());
- for (i, lhs) in lhses.iter().enumerate() {
- // try each arm's matchers
- let lhs_tt = match *lhs {
- mbe::TokenTree::Delimited(_, ref delim) => &delim.tts,
+ // A matcher is always delimited, but the delimiters are ignored.
+ let delimited_inner_tts = |tt: &'tt mbe::TokenTree| -> &'tt [mbe::TokenTree] {
+ match tt {
+ mbe::TokenTree::Delimited(_, delimited) => delimited.inner_tts(),
_ => cx.span_bug(sp, "malformed macro lhs"),
- };
+ }
+ };
+ // Try each arm's matchers.
+ let mut tt_parser = TtParser::new(name);
+ for (i, lhs) in lhses.iter().enumerate() {
// Take a snapshot of the state of pre-expansion gating at this point.
// This is used so that if a matcher is not `Success(..)`ful,
// then the spans which became gated when parsing the unsuccessful matcher
// are not recorded. On the first `Success(..)`ful matcher, the spans are merged.
let mut gated_spans_snapshot = mem::take(&mut *sess.gated_spans.spans.borrow_mut());
- match parse_tt(&mut Cow::Borrowed(&parser), lhs_tt, name) {
+ match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), delimited_inner_tts(lhs)) {
Success(named_matches) => {
// The matcher was `Success(..)`ful.
// Merge the gated spans from parsing the matcher with the pre-existing ones.
sess.gated_spans.merge(gated_spans_snapshot);
- let rhs = match rhses[i] {
- // ignore delimiters
- mbe::TokenTree::Delimited(_, ref delimed) => delimed.tts.clone(),
- _ => cx.span_bug(sp, "malformed macro rhs"),
- };
+ let rhs = delimited_inner_tts(&rhses[i]).to_vec().clone();
let arm_span = rhses[i].span();
let rhs_spans = rhs.iter().map(|t| t.span()).collect::<Vec<_>>();
// Check whether there's a missing comma in this macro call, like `println!("{}" a);`
if let Some((arg, comma_span)) = arg.add_comma() {
for lhs in lhses {
- // try each arm's matchers
- let lhs_tt = match *lhs {
- mbe::TokenTree::Delimited(_, ref delim) => &delim.tts,
- _ => continue,
- };
- if let Success(_) =
- parse_tt(&mut Cow::Borrowed(&parser_from_cx(sess, arg.clone())), lhs_tt, name)
- {
+ if let Success(_) = tt_parser.parse_tt(
+ &mut Cow::Borrowed(&parser_from_cx(sess, arg.clone())),
+ delimited_inner_tts(lhs),
+ ) {
if comma_span.is_dummy() {
err.note("you might be missing a comma");
} else {
];
let parser = Parser::new(&sess.parse_sess, body, true, rustc_parse::MACRO_ARGUMENTS);
- let argument_map = match parse_tt(&mut Cow::Borrowed(&parser), &argument_gram, def.ident) {
+ let mut tt_parser = TtParser::new(def.ident);
+ let argument_map = match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), &argument_gram) {
Success(m) => m,
Failure(token, msg) => {
let s = parse_failure_msg(&token);
MatchedSeq(ref s) => s
.iter()
.map(|m| {
- if let MatchedNonterminal(ref nt) = *m {
- if let NtTT(ref tt) = **nt {
- let tt = mbe::quoted::parse(
- tt.clone().into(),
- true,
- &sess.parse_sess,
- def.id,
- features,
- edition,
- )
- .pop()
- .unwrap();
- valid &= check_lhs_nt_follows(&sess.parse_sess, features, &def, &tt);
- return tt;
- }
+ if let MatchedTokenTree(ref tt) = *m {
+ let mut tts = vec![];
+ mbe::quoted::parse(
+ tt.clone().into(),
+ true,
+ &sess.parse_sess,
+ def.id,
+ features,
+ edition,
+ &mut tts,
+ );
+ let tt = tts.pop().unwrap();
+ valid &= check_lhs_nt_follows(&sess.parse_sess, features, &def, &tt);
+ return tt;
}
sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
})
MatchedSeq(ref s) => s
.iter()
.map(|m| {
- if let MatchedNonterminal(ref nt) = *m {
- if let NtTT(ref tt) = **nt {
- return mbe::quoted::parse(
- tt.clone().into(),
- false,
- &sess.parse_sess,
- def.id,
- features,
- edition,
- )
- .pop()
- .unwrap();
- }
+ if let MatchedTokenTree(ref tt) = *m {
+ let mut tts = vec![];
+ mbe::quoted::parse(
+ tt.clone().into(),
+ false,
+ &sess.parse_sess,
+ def.id,
+ features,
+ edition,
+ &mut tts,
+ );
+ return tts.pop().unwrap();
}
sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
})
let (transparency, transparency_error) = attr::find_transparency(&def.attrs, macro_rules);
match transparency_error {
Some(TransparencyError::UnknownTransparency(value, span)) => {
- diag.span_err(span, &format!("unknown macro transparency: `{}`", value))
+ diag.span_err(span, &format!("unknown macro transparency: `{}`", value));
}
Some(TransparencyError::MultipleTransparencyAttrs(old_span, new_span)) => {
- diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes")
+ diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes");
}
None => {}
}
) -> bool {
// lhs is going to be like TokenTree::Delimited(...), where the
// entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
- if let mbe::TokenTree::Delimited(_, ref tts) = *lhs {
- check_matcher(sess, features, def, &tts.tts)
+ if let mbe::TokenTree::Delimited(_, delimited) = lhs {
+ check_matcher(sess, features, def, delimited.inner_tts())
} else {
let msg = "invalid macro matcher; matchers must be contained in balanced delimiters";
sess.span_diagnostic.span_err(lhs.span(), msg);
| TokenTree::MetaVarDecl(..)
| TokenTree::MetaVarExpr(..) => (),
TokenTree::Delimited(_, ref del) => {
- if !check_lhs_no_empty_seq(sess, &del.tts) {
+ if !check_lhs_no_empty_seq(sess, del.inner_tts()) {
return false;
}
}
fn check_rhs(sess: &ParseSess, rhs: &mbe::TokenTree) -> bool {
match *rhs {
mbe::TokenTree::Delimited(..) => return true,
- _ => sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited"),
+ _ => {
+ sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited");
+ }
}
false
}
| TokenTree::MetaVarExpr(..) => {
first.replace_with(tt.clone());
}
- TokenTree::Delimited(span, ref delimited) => {
- build_recur(sets, &delimited.tts);
- first.replace_with(delimited.open_tt(span));
+ TokenTree::Delimited(_span, ref delimited) => {
+ build_recur(sets, delimited.inner_tts());
+ first.replace_with(delimited.open_tt().clone());
}
TokenTree::Sequence(sp, ref seq_rep) => {
let subfirst = build_recur(sets, &seq_rep.tts);
first.add_one(tt.clone());
return first;
}
- TokenTree::Delimited(span, ref delimited) => {
- first.add_one(delimited.open_tt(span));
+ TokenTree::Delimited(_span, ref delimited) => {
+ first.add_one(delimited.open_tt().clone());
return first;
}
TokenTree::Sequence(sp, ref seq_rep) => {
suffix_first = build_suffix_first();
}
}
- TokenTree::Delimited(span, ref d) => {
- let my_suffix = TokenSet::singleton(d.close_tt(span));
- check_matcher_core(sess, features, def, first_sets, &d.tts, &my_suffix);
+ TokenTree::Delimited(_span, ref d) => {
+ let my_suffix = TokenSet::singleton(d.close_tt().clone());
+ check_matcher_core(sess, features, def, first_sets, d.inner_tts(), &my_suffix);
// don't track non NT tokens
last.replace_with_irrelevant();
Ok(rslt)
}
- crate fn ident(&self) -> Option<&Ident> {
- match self {
- MetaVarExpr::Count(ident, _) | MetaVarExpr::Ignore(ident) => Some(&ident),
+ crate fn ident(&self) -> Option<Ident> {
+ match *self {
+ MetaVarExpr::Count(ident, _) | MetaVarExpr::Ignore(ident) => Some(ident),
MetaVarExpr::Index(..) | MetaVarExpr::Length(..) => None,
}
}
node_id: NodeId,
features: &Features,
edition: Edition,
-) -> Vec<TokenTree> {
- // Will contain the final collection of `self::TokenTree`
- let mut result = Vec::new();
-
+ result: &mut Vec<TokenTree>,
+) {
// For each token tree in `input`, parse the token into a `self::TokenTree`, consuming
// additional trees if need be.
let mut trees = input.trees();
_ => result.push(tree),
}
}
- result
}
/// Asks for the `macro_metavar_expr` feature if it is not already declared
// If we didn't find a metavar expression above, then we must have a
// repetition sequence in the macro (e.g. `$(pat)*`). Parse the
// contents of the sequence itself
- let sequence = parse(tts, parsing_patterns, sess, node_id, features, edition);
+ let mut sequence = vec![];
+ parse(tts, parsing_patterns, sess, node_id, features, edition, &mut sequence);
// Get the Kleene operator and optional separator
let (separator, kleene) =
parse_sep_and_kleene_op(&mut trees, delim_span.entire(), sess);
)
}
- // `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate` special
- // metavariable that names the crate of the invocation.
+ // `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate`
+ // special metavariable that names the crate of the invocation.
Some(tokenstream::TokenTree::Token(token)) if token.is_ident() => {
let (ident, is_raw) = token.ident().unwrap();
let span = ident.span.with_lo(span.lo());
// `tree` is the beginning of a delimited set of tokens (e.g., `(` or `{`). We need to
// descend into the delimited set and further parse it.
- tokenstream::TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited(
- span,
- Lrc::new(Delimited {
- delim,
- tts: parse(tts, parsing_patterns, sess, node_id, features, edition),
- }),
- ),
+ tokenstream::TokenTree::Delimited(span, delim, tts) => {
+ let mut all_tts = vec![];
+ // Add the explicit open and close delimiters, which
+ // `tokenstream::TokenTree::Delimited` lacks.
+ all_tts.push(TokenTree::token(token::OpenDelim(delim), span.open));
+ parse(tts, parsing_patterns, sess, node_id, features, edition, &mut all_tts);
+ all_tts.push(TokenTree::token(token::CloseDelim(delim), span.close));
+ TokenTree::Delimited(span, Lrc::new(Delimited { delim, all_tts }))
+ }
}
}
use crate::base::ExtCtxt;
-use crate::mbe;
-use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq, NamedMatch};
-
+use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq, MatchedTokenTree, NamedMatch};
+use crate::mbe::{self, MetaVarExpr};
use rustc_ast::mut_visit::{self, MutVisitor};
-use rustc_ast::token::{self, NtTT, Token};
+use rustc_ast::token::{self, Token, TokenKind};
use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree, TreeAndSpacing};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
use rustc_errors::{pluralize, PResult};
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
use rustc_span::hygiene::{LocalExpnId, Transparency};
-use rustc_span::symbol::MacroRulesNormalizedIdent;
-use rustc_span::Span;
+use rustc_span::symbol::{sym, Ident, MacroRulesNormalizedIdent};
+use rustc_span::{Span, DUMMY_SP};
use smallvec::{smallvec, SmallVec};
use std::mem;
impl Frame {
/// Construct a new frame around the delimited set of tokens.
- fn new(tts: Vec<mbe::TokenTree>) -> Frame {
- let forest = Lrc::new(mbe::Delimited { delim: token::NoDelim, tts });
+ fn new(mut tts: Vec<mbe::TokenTree>) -> Frame {
+ // Need to add empty delimiters.
+ let open_tt = mbe::TokenTree::token(token::OpenDelim(token::NoDelim), DUMMY_SP);
+ let close_tt = mbe::TokenTree::token(token::CloseDelim(token::NoDelim), DUMMY_SP);
+ tts.insert(0, open_tt);
+ tts.push(close_tt);
+
+ let forest = Lrc::new(mbe::Delimited { delim: token::NoDelim, all_tts: tts });
Frame::Delimited { forest, idx: 0, span: DelimSpan::dummy() }
}
}
fn next(&mut self) -> Option<mbe::TokenTree> {
match *self {
Frame::Delimited { ref forest, ref mut idx, .. } => {
+ let res = forest.inner_tts().get(*idx).cloned();
*idx += 1;
- forest.tts.get(*idx - 1).cloned()
+ res
}
Frame::Sequence { ref forest, ref mut idx, .. } => {
+ let res = forest.tts.get(*idx).cloned();
*idx += 1;
- forest.tts.get(*idx - 1).cloned()
+ res
}
}
}
// the meta-var.
let ident = MacroRulesNormalizedIdent::new(orignal_ident);
if let Some(cur_matched) = lookup_cur_matched(ident, interp, &repeats) {
- if let MatchedNonterminal(nt) = cur_matched {
- let token = if let NtTT(tt) = &**nt {
+ match cur_matched {
+ MatchedTokenTree(ref tt) => {
// `tt`s are emitted into the output stream directly as "raw tokens",
// without wrapping them into groups.
- tt.clone()
- } else {
+ let token = tt.clone();
+ result.push(token.into());
+ }
+ MatchedNonterminal(ref nt) => {
// Other variables are emitted into the output stream as groups with
// `Delimiter::None` to maintain parsing priorities.
// `Interpolated` is currently used for such groups in rustc parser.
marker.visit_span(&mut sp);
- TokenTree::token(token::Interpolated(nt.clone()), sp)
- };
- result.push(token.into());
- } else {
- // We were unable to descend far enough. This is an error.
- return Err(cx.struct_span_err(
- sp, /* blame the macro writer */
- &format!("variable '{}' is still repeating at this depth", ident),
- ));
+ let token = TokenTree::token(token::Interpolated(nt.clone()), sp);
+ result.push(token.into());
+ }
+ MatchedSeq(..) => {
+ // We were unable to descend far enough. This is an error.
+ return Err(cx.struct_span_err(
+ sp, /* blame the macro writer */
+ &format!("variable '{}' is still repeating at this depth", ident),
+ ));
+ }
}
} else {
// If we aren't able to match the meta-var, we push it back into the result but
// Replace meta-variable expressions with the result of their expansion.
mbe::TokenTree::MetaVarExpr(sp, expr) => {
- transcribe_metavar_expr(cx, expr, interp, &repeats, &mut result, &sp)?;
+ transcribe_metavar_expr(cx, expr, interp, &mut marker, &repeats, &mut result, &sp)?;
}
// If we are entering a new delimiter, we push its contents to the `stack` to be
let mut matched = matched;
for &(idx, _) in repeats {
match matched {
- MatchedNonterminal(_) => break,
+ MatchedTokenTree(_) | MatchedNonterminal(_) => break,
MatchedSeq(ref ads) => matched = ads.get(idx).unwrap(),
}
}
) -> LockstepIterSize {
use mbe::TokenTree;
match *tree {
- TokenTree::Delimited(_, ref delimed) => {
- delimed.tts.iter().fold(LockstepIterSize::Unconstrained, |size, tt| {
+ TokenTree::Delimited(_, ref delimited) => {
+ delimited.inner_tts().iter().fold(LockstepIterSize::Unconstrained, |size, tt| {
size.with(lockstep_iter_size(tt, interpolations, repeats))
})
}
let name = MacroRulesNormalizedIdent::new(name);
match lookup_cur_matched(name, interpolations, repeats) {
Some(matched) => match matched {
- MatchedNonterminal(_) => LockstepIterSize::Unconstrained,
+ MatchedTokenTree(_) | MatchedNonterminal(_) => LockstepIterSize::Unconstrained,
MatchedSeq(ref ads) => LockstepIterSize::Constraint(ads.len(), name),
},
_ => LockstepIterSize::Unconstrained,
TokenTree::MetaVarExpr(_, ref expr) => {
let default_rslt = LockstepIterSize::Unconstrained;
let Some(ident) = expr.ident() else { return default_rslt; };
- let name = MacroRulesNormalizedIdent::new(ident.clone());
+ let name = MacroRulesNormalizedIdent::new(ident);
match lookup_cur_matched(name, interpolations, repeats) {
Some(MatchedSeq(ref ads)) => {
default_rslt.with(LockstepIterSize::Constraint(ads.len(), name))
}
}
+/// Used solely by the `count` meta-variable expression, counts the outer-most repetitions at a
+/// given optional nested depth.
+///
+/// For example, a macro parameter of `$( { $( $foo:ident ),* } )*` called with `{ a, b } { c }`:
+///
+/// * `[ $( ${count(foo)} ),* ]` will return [2, 1] with a, b = 2 and c = 1
+/// * `[ $( ${count(foo, 0)} ),* ]` will be the same as `[ $( ${count(foo)} ),* ]`
+/// * `[ $( ${count(foo, 1)} ),* ]` will return an error because `${count(foo, 1)}` is
+/// declared inside a single repetition and the index `1` implies two nested repetitions.
+fn count_repetitions<'a>(
+ cx: &ExtCtxt<'a>,
+ depth_opt: Option<usize>,
+ mut matched: &NamedMatch,
+ repeats: &[(usize, usize)],
+ sp: &DelimSpan,
+) -> PResult<'a, usize> {
+ // Recursively count the number of matches in `matched` at given depth
+ // (or at the top-level of `matched` if no depth is given).
+ fn count<'a>(
+ cx: &ExtCtxt<'a>,
+ declared_lhs_depth: usize,
+ depth_opt: Option<usize>,
+ matched: &NamedMatch,
+ sp: &DelimSpan,
+ ) -> PResult<'a, usize> {
+ match matched {
+ MatchedTokenTree(_) | MatchedNonterminal(_) => {
+ if declared_lhs_depth == 0 {
+ return Err(cx.struct_span_err(
+ sp.entire(),
+ "`count` can not be placed inside the inner-most repetition",
+ ));
+ }
+ match depth_opt {
+ None => Ok(1),
+ Some(_) => Err(out_of_bounds_err(cx, declared_lhs_depth, sp.entire(), "count")),
+ }
+ }
+ MatchedSeq(ref named_matches) => {
+ let new_declared_lhs_depth = declared_lhs_depth + 1;
+ match depth_opt {
+ None => named_matches
+ .iter()
+ .map(|elem| count(cx, new_declared_lhs_depth, None, elem, sp))
+ .sum(),
+ Some(0) => Ok(named_matches.len()),
+ Some(depth) => named_matches
+ .iter()
+ .map(|elem| count(cx, new_declared_lhs_depth, Some(depth - 1), elem, sp))
+ .sum(),
+ }
+ }
+ }
+ }
+ // `repeats` records all of the nested levels at which we are currently
+ // matching meta-variables. The meta-var-expr `count($x)` only counts
+ // matches that occur in this "subtree" of the `NamedMatch` where we
+ // are currently transcribing, so we need to descend to that subtree
+ // before we start counting. `matched` contains the various levels of the
+ // tree as we descend, and its final value is the subtree we are currently at.
+ for &(idx, _) in repeats {
+ if let MatchedSeq(ref ads) = matched {
+ matched = &ads[idx];
+ }
+ }
+ count(cx, 0, depth_opt, matched, sp)
+}
+
+/// Returns a `NamedMatch` item declared on the LHS given an arbitrary [Ident]
+fn matched_from_ident<'ctx, 'interp, 'rslt>(
+ cx: &ExtCtxt<'ctx>,
+ ident: Ident,
+ interp: &'interp FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+) -> PResult<'ctx, &'rslt NamedMatch>
+where
+ 'interp: 'rslt,
+{
+ let span = ident.span;
+ let key = MacroRulesNormalizedIdent::new(ident);
+ interp.get(&key).ok_or_else(|| {
+ cx.struct_span_err(
+ span,
+ &format!("variable `{}` is not recognized in meta-variable expression", key),
+ )
+ })
+}
+
+/// Used by meta-variable expressions when a user input is out of the actual declared bounds. For
+/// example, index(999999) in a repetition of only three elements.
+fn out_of_bounds_err<'a>(
+ cx: &ExtCtxt<'a>,
+ max: usize,
+ span: Span,
+ ty: &str,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ cx.struct_span_err(span, &format!("{ty} depth must be less than {max}"))
+}
+
fn transcribe_metavar_expr<'a>(
- _cx: &ExtCtxt<'a>,
- _expr: mbe::MetaVarExpr,
- _interp: &FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
- _repeats: &[(usize, usize)],
- _result: &mut Vec<TreeAndSpacing>,
- _sp: &DelimSpan,
+ cx: &ExtCtxt<'a>,
+ expr: MetaVarExpr,
+ interp: &FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+ marker: &mut Marker,
+ repeats: &[(usize, usize)],
+ result: &mut Vec<TreeAndSpacing>,
+ sp: &DelimSpan,
) -> PResult<'a, ()> {
+ let mut visited_span = || {
+ let mut span = sp.entire();
+ marker.visit_span(&mut span);
+ span
+ };
+ match expr {
+ MetaVarExpr::Count(original_ident, depth_opt) => {
+ let matched = matched_from_ident(cx, original_ident, interp)?;
+ let count = count_repetitions(cx, depth_opt, matched, &repeats, sp)?;
+ let tt = TokenTree::token(
+ TokenKind::lit(token::Integer, sym::integer(count), None),
+ visited_span(),
+ );
+ result.push(tt.into());
+ }
+ MetaVarExpr::Ignore(original_ident) => {
+ // Used to ensure that `original_ident` is present in the LHS
+ let _ = matched_from_ident(cx, original_ident, interp)?;
+ }
+ MetaVarExpr::Index(depth) => match repeats.iter().nth_back(depth) {
+ Some((index, _)) => {
+ result.push(
+ TokenTree::token(
+ TokenKind::lit(token::Integer, sym::integer(*index), None),
+ visited_span(),
+ )
+ .into(),
+ );
+ }
+ None => return Err(out_of_bounds_err(cx, repeats.len(), sp.entire(), "index")),
+ },
+ MetaVarExpr::Length(depth) => match repeats.iter().nth_back(depth) {
+ Some((_, length)) => {
+ result.push(
+ TokenTree::token(
+ TokenKind::lit(token::Integer, sym::integer(*length), None),
+ visited_span(),
+ )
+ .into(),
+ );
+ }
+ None => return Err(out_of_bounds_err(cx, repeats.len(), sp.entire(), "length")),
+ },
+ }
Ok(())
}
use crate::base::ModuleData;
use rustc_ast::ptr::P;
-use rustc_ast::{token, Attribute, Inline, Item};
+use rustc_ast::{token, Attribute, Inline, Item, ModSpans};
use rustc_errors::{struct_span_err, DiagnosticBuilder, ErrorGuaranteed};
use rustc_parse::new_parser_from_file;
use rustc_parse::validate_attr;
crate struct ParsedExternalMod {
pub items: Vec<P<Item>>,
- pub inner_span: Span,
+ pub spans: ModSpans,
pub file_path: PathBuf,
pub dir_path: PathBuf,
pub dir_ownership: DirOwnership,
(items, inner_span, mp.file_path)
};
// (1) ...instead, we return a dummy module.
- let (items, inner_span, file_path) =
+ let (items, spans, file_path) =
result.map_err(|err| err.report(sess, span)).unwrap_or_default();
// Extract the directory path for submodules of the module.
let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
- ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership }
+ ParsedExternalMod { items, spans, file_path, dir_path, dir_ownership }
}
crate fn mod_dir_path(
AstFragmentKind::Crate => AstFragment::Crate(ast::Crate {
attrs: Default::default(),
items: Default::default(),
- span,
+ spans: ast::ModSpans { inner_span: span, ..Default::default() },
id,
is_placeholder: true,
}),
if let Some(s) = e.as_str() {
err.help(&format!("message: {}", s));
}
- err.emit();
- ErrorGuaranteed
+ err.emit()
})
}
}
if let Some(s) = e.as_str() {
err.help(&format!("message: {}", s));
}
- err.emit();
- ErrorGuaranteed
+ err.emit()
})
}
}
use crate::base::ExtCtxt;
use rustc_ast as ast;
-use rustc_ast::token::{self, Nonterminal, NtIdent};
+use rustc_ast::token;
use rustc_ast::tokenstream::{self, CanSynthesizeMissingTokens};
use rustc_ast::tokenstream::{DelimSpan, Spacing::*, TokenStream, TreeAndSpacing};
use rustc_ast_pretty::pprust;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lrc;
use rustc_errors::{Diagnostic, PResult};
-use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
-use rustc_lint_defs::BuiltinLintDiagnostics;
use rustc_parse::lexer::nfc_normalize;
use rustc_parse::{nt_to_tokenstream, parse_stream_from_source_str};
use rustc_session::parse::ParseSess;
use rustc_span::def_id::CrateNum;
-use rustc_span::hygiene::ExpnKind;
use rustc_span::symbol::{self, kw, sym, Symbol};
-use rustc_span::{BytePos, FileName, MultiSpan, Pos, RealFileName, SourceFile, Span};
+use rustc_span::{BytePos, FileName, MultiSpan, Pos, SourceFile, Span};
use pm::bridge::{server, TokenTree};
use pm::{Delimiter, Level, LineColumn, Spacing};
tt!(Punct::new('#', false))
}
- Interpolated(nt)
- if let Some((name, is_raw)) = ident_name_compatibility_hack(&nt, span, rustc) =>
- {
- TokenTree::Ident(Ident::new(rustc.sess(), name.name, is_raw, name.span))
+ Interpolated(nt) if let NtIdent(ident, is_raw) = *nt => {
+ TokenTree::Ident(Ident::new(rustc.sess(), ident.name, is_raw, ident.span))
}
Interpolated(nt) => {
let stream = nt_to_tokenstream(&nt, rustc.sess(), CanSynthesizeMissingTokens::No);
) {
diag.sub(level.to_internal(), msg, MultiSpan::from_spans(spans), None);
}
- fn emit(&mut self, diag: Self::Diagnostic) {
- self.sess().span_diagnostic.emit_diagnostic(&diag);
+ fn emit(&mut self, mut diag: Self::Diagnostic) {
+ self.sess().span_diagnostic.emit_diagnostic(&mut diag);
}
}
})
}
}
-
-// See issue #74616 for details
-fn ident_name_compatibility_hack(
- nt: &Nonterminal,
- orig_span: Span,
- rustc: &mut Rustc<'_, '_>,
-) -> Option<(rustc_span::symbol::Ident, bool)> {
- if let NtIdent(ident, is_raw) = nt {
- if let ExpnKind::Macro(_, macro_name) = orig_span.ctxt().outer_expn_data().kind {
- let source_map = rustc.sess().source_map();
- let filename = source_map.span_to_filename(orig_span);
- if let FileName::Real(RealFileName::LocalPath(path)) = filename {
- let matches_prefix = |prefix, filename| {
- // Check for a path that ends with 'prefix*/src/<filename>'
- let mut iter = path.components().rev();
- iter.next().and_then(|p| p.as_os_str().to_str()) == Some(filename)
- && iter.next().and_then(|p| p.as_os_str().to_str()) == Some("src")
- && iter
- .next()
- .and_then(|p| p.as_os_str().to_str())
- .map_or(false, |p| p.starts_with(prefix))
- };
-
- let time_macros_impl =
- macro_name == sym::impl_macros && matches_prefix("time-macros-impl", "lib.rs");
- let js_sys = macro_name == sym::arrays && matches_prefix("js-sys", "lib.rs");
- if time_macros_impl || js_sys {
- let snippet = source_map.span_to_snippet(orig_span);
- if snippet.as_deref() == Ok("$name") {
- if time_macros_impl {
- rustc.sess().buffer_lint_with_diagnostic(
- &PROC_MACRO_BACK_COMPAT,
- orig_span,
- ast::CRATE_NODE_ID,
- "using an old version of `time-macros-impl`",
- BuiltinLintDiagnostics::ProcMacroBackCompat(
- "the `time-macros-impl` crate will stop compiling in futures version of Rust. \
- Please update to the latest version of the `time` crate to avoid breakage".to_string())
- );
- return Some((*ident, *is_raw));
- }
- if js_sys {
- if let Some(c) = path
- .components()
- .flat_map(|c| c.as_os_str().to_str())
- .find(|c| c.starts_with("js-sys"))
- {
- let mut version = c.trim_start_matches("js-sys-").split('.');
- if version.next() == Some("0")
- && version.next() == Some("3")
- && version
- .next()
- .and_then(|c| c.parse::<u32>().ok())
- .map_or(false, |v| v < 40)
- {
- rustc.sess().buffer_lint_with_diagnostic(
- &PROC_MACRO_BACK_COMPAT,
- orig_span,
- ast::CRATE_NODE_ID,
- "using an old version of `js-sys`",
- BuiltinLintDiagnostics::ProcMacroBackCompat(
- "older versions of the `js-sys` crate will stop compiling in future versions of Rust; \
- please update to `js-sys` v0.3.40 or above".to_string())
- );
- return Some((*ident, *is_raw));
- }
- }
- }
- }
- }
-
- if macro_name == sym::tuple_from_req && matches_prefix("actix-web", "extract.rs") {
- let snippet = source_map.span_to_snippet(orig_span);
- if snippet.as_deref() == Ok("$T") {
- if let FileName::Real(RealFileName::LocalPath(macro_path)) =
- source_map.span_to_filename(rustc.def_site)
- {
- if macro_path.to_string_lossy().contains("pin-project-internal-0.") {
- rustc.sess().buffer_lint_with_diagnostic(
- &PROC_MACRO_BACK_COMPAT,
- orig_span,
- ast::CRATE_NODE_ID,
- "using an old version of `actix-web`",
- BuiltinLintDiagnostics::ProcMacroBackCompat(
- "the version of `actix-web` you are using might stop compiling in future versions of Rust; \
- please update to the latest version of the `actix-web` crate to avoid breakage".to_string())
- );
- return Some((*ident, *is_raw));
- }
- }
- }
- }
- }
- }
- }
- None
-}
// feature-group-start: accepted features
// -------------------------------------------------------------------------
+ /// Allows `#[target_feature(...)]` on aarch64 platforms
+ (accepted, aarch64_target_feature, "1.61.0", Some(44839), None),
/// Allows the sysV64 ABI to be specified on all platforms
/// instead of just the platforms on which it is the C ABI.
(accepted, abi_sysv64, "1.24.0", Some(36167), None),
+ /// Allows using ADX intrinsics from `core::arch::{x86, x86_64}`.
+ (accepted, adx_target_feature, "1.61.0", Some(44839), None),
/// Allows the definition of associated constants in `trait` or `impl` blocks.
(accepted, associated_consts, "1.20.0", Some(29646), None),
/// Allows using associated `type`s in `trait`s.
// FIXME: Document these and merge with the list below.
// Unstable `#[target_feature]` directives.
- (active, aarch64_target_feature, "1.27.0", Some(44839), None),
+ (active, aarch64_ver_target_feature, "1.27.0", Some(44839), None),
(active, adx_target_feature, "1.32.0", Some(44839), None),
(active, arm_target_feature, "1.27.0", Some(44839), None),
(active, avx512_target_feature, "1.27.0", Some(44839), None),
/// Allows `extern "x86-interrupt" fn()`.
(active, abi_x86_interrupt, "1.17.0", Some(40180), None),
/// Allows additional const parameter types, such as `&'static str` or user defined types
- (incomplete, adt_const_params, "1.56.0", Some(44580), None),
+ (incomplete, adt_const_params, "1.56.0", Some(95174), None),
/// Allows defining an `#[alloc_error_handler]`.
(active, alloc_error_handler, "1.29.0", Some(51540), None),
/// Allows explicit discriminants on non-unit enum variants.
(active, default_alloc_error_handler, "1.48.0", Some(66741), None),
/// Allows default type parameters to influence type inference.
(active, default_type_parameter_fallback, "1.3.0", Some(27336), None),
+ /// Allows using `#[deprecated_safe]` to deprecate the safeness of a function or trait
+ (active, deprecated_safe, "1.61.0", Some(94978), None),
/// Allows having using `suggestion` in the `#[deprecated]` attribute.
(active, deprecated_suggestion, "1.61.0", Some(94785), None),
/// Allows `#[derive(Default)]` and `#[default]` on enums.
"`default_method_body_is_const` is a temporary placeholder for declaring default bodies \
as `const`, which may be removed or renamed in the future."
),
+ // lang-team MCP 147
+ gated!(
+ deprecated_safe, Normal, template!(List: r#"since = "version", note = "...""#), ErrorFollowing,
+ experimental!(deprecated_safe),
+ ),
// ==========================================================================
// Internal attributes: Stability, deprecation, and unsafe:
///
/// where `'f` is something like `Fresh(0)`. The indices are
/// unique per impl, but not necessarily continuous.
- Fresh(usize),
+ Fresh(LocalDefId),
/// Indicates an illegal name was given and an error has been
/// reported (so we should squelch other derived errors). Occurs
rustc_data_structures::static_assert_size!(super::Expr<'static>, 56);
rustc_data_structures::static_assert_size!(super::Pat<'static>, 88);
rustc_data_structures::static_assert_size!(super::QPath<'static>, 24);
- rustc_data_structures::static_assert_size!(super::Ty<'static>, 80);
+ rustc_data_structures::static_assert_size!(super::Ty<'static>, 72);
rustc_data_structures::static_assert_size!(super::Item<'static>, 184);
rustc_data_structures::static_assert_size!(super::TraitItem<'static>, 128);
Freeze, sym::freeze, freeze_trait, Target::Trait, GenericRequirement::Exact(0);
Drop, sym::drop, drop_trait, Target::Trait, GenericRequirement::None;
+ Destruct, sym::destruct, destruct_trait, Target::Trait, GenericRequirement::None;
CoerceUnsized, sym::coerce_unsized, coerce_unsized_trait, Target::Trait, GenericRequirement::Minimum(1);
DispatchFromDyn, sym::dispatch_from_dyn, dispatch_from_dyn_trait, Target::Trait, GenericRequirement::Minimum(1);
Target::Statement => "statement",
Target::Arm => "match arm",
Target::AssocConst => "associated const",
- Target::Method(_) => "method",
+ Target::Method(kind) => match kind {
+ MethodKind::Inherent => "inherent method",
+ MethodKind::Trait { body: false } => "required trait method",
+ MethodKind::Trait { body: true } => "provided trait method",
+ },
Target::AssocTy => "associated type",
Target::ForeignFn => "foreign function",
Target::ForeignStatic => "foreign static item",
use rustc_session::{Session, StableCrateId};
use std::fs as std_fs;
-use std::io;
+use std::io::{self, ErrorKind};
use std::mem;
use std::path::{Path, PathBuf};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
let crate_dir = match crate_dir.canonicalize() {
Ok(v) => v,
Err(err) => {
- sess.err(&format!(
+ let reported = sess.err(&format!(
"incremental compilation: error canonicalizing path `{}`: {}",
crate_dir.display(),
err
));
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
};
let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name);
debug!("finalize_session_directory() - new path: {}", new_path.display());
- match std_fs::rename(&*incr_comp_session_dir, &new_path) {
+ match rename_path_with_retry(&*incr_comp_session_dir, &new_path, 3) {
Ok(_) => {
debug!("finalize_session_directory() - directory renamed successfully");
Ok(())
}
Err(err) => {
- sess.err(&format!(
+ let reported = sess.err(&format!(
"Could not create incremental compilation {} \
directory `{}`: {}",
dir_tag,
path.display(),
err
));
- Err(ErrorGuaranteed)
+ Err(reported)
}
}
}
);
}
}
- err.emit();
- Err(ErrorGuaranteed)
+ Err(err.emit())
}
}
}
result => result,
}
}
+
+// On Windows the compiler would sometimes fail to rename the session directory because
+// the OS thought something was still being accessed in it. So we retry a few times to give
+// the OS time to catch up.
+// See https://github.com/rust-lang/rust/issues/86929.
+fn rename_path_with_retry(from: &Path, to: &Path, mut retries_left: usize) -> std::io::Result<()> {
+ loop {
+ match std_fs::rename(from, to) {
+ Ok(()) => return Ok(()),
+ Err(e) => {
+ if retries_left > 0 && e.kind() == ErrorKind::PermissionDenied {
+ // Try again after a short waiting period.
+ std::thread::sleep(Duration::from_millis(50));
+ retries_left -= 1;
+ } else {
+ return Err(e);
+ }
+ }
+ }
+ }
+}
fn union(&mut self, other: &HybridBitSet<T>) -> bool {
// FIXME: This is slow if `other` is dense, but it hasn't been a problem
// in practice so far.
- // If a a faster implementation of this operation is required, consider
+ // If a faster implementation of this operation is required, consider
// reopening https://github.com/rust-lang/rust/pull/94625
assert_eq!(self.domain_size, other.domain_size());
sequential_update(|elem| self.insert(elem), other.iter())
fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
// FIXME: This is slow if `other` is dense, but it hasn't been a problem
// in practice so far.
- // If a a faster implementation of this operation is required, consider
+ // If a faster implementation of this operation is required, consider
// reopening https://github.com/rust-lang/rust/pull/94625
assert_eq!(self.domain_size, other.domain_size());
sequential_update(|elem| self.remove(elem), other.iter())
use super::*;
use rustc_middle::ty::relate::{Relate, TypeRelation};
-use rustc_middle::ty::Const;
+use rustc_middle::ty::{Const, ImplSubject};
pub struct At<'a, 'tcx> {
pub infcx: &'a InferCtxt<'a, 'tcx>,
}
}
+impl<'tcx> ToTrace<'tcx> for ImplSubject<'tcx> {
+ fn to_trace(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ match (a, b) {
+ (ImplSubject::Trait(trait_ref_a), ImplSubject::Trait(trait_ref_b)) => {
+ ToTrace::to_trace(tcx, cause, a_is_expected, trait_ref_a, trait_ref_b)
+ }
+ (ImplSubject::Inherent(ty_a), ImplSubject::Inherent(ty_b)) => {
+ ToTrace::to_trace(tcx, cause, a_is_expected, ty_a, ty_b)
+ }
+ (ImplSubject::Trait(_), ImplSubject::Inherent(_))
+ | (ImplSubject::Inherent(_), ImplSubject::Trait(_)) => {
+ bug!("can not trace TraitRef and Ty");
+ }
+ }
+ }
+}
+
impl<'tcx> ToTrace<'tcx> for Ty<'tcx> {
fn to_trace(
_: TyCtxt<'tcx>,
)
| (&ty::Infer(ty::InferTy::TyVar(_)), _)
| (_, &ty::Infer(ty::InferTy::TyVar(_))) => true,
+ (&ty::Ref(reg_a, ty_a, mut_a), &ty::Ref(reg_b, ty_b, mut_b)) => {
+ reg_a == reg_b && mut_a == mut_b && same_type_modulo_infer(*ty_a, *ty_b)
+ }
_ => a == b,
}
}
if let (&ty::Adt(exp_adt, _), &ty::Adt(found_adt, _)) =
(exp_found.expected.kind(), exp_found.found.kind())
{
- report_path_match(err, exp_adt.did, found_adt.did);
+ report_path_match(err, exp_adt.did(), found_adt.did());
}
}
TypeError::Traits(ref exp_found) => {
match *cause.code() {
ObligationCauseCode::Pattern { origin_expr: true, span: Some(span), root_ty } => {
let ty = self.resolve_vars_if_possible(root_ty);
- if ty.is_suggestable() {
+ if !matches!(ty.kind(), ty::Infer(ty::InferTy::TyVar(_) | ty::InferTy::FreshTy(_)))
+ {
// don't show type `_`
err.span_label(span, format!("this expression has type `{}`", ty));
}
return Some(());
}
if let ty::Adt(def, _) = ta.kind() {
- let path_ = self.tcx.def_path_str(def.did);
+ let path_ = self.tcx.def_path_str(def.did());
if path_ == other_path {
self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, other_ty);
return Some(());
// process starts here
match (t1.kind(), t2.kind()) {
(&ty::Adt(def1, sub1), &ty::Adt(def2, sub2)) => {
- let sub_no_defaults_1 = self.strip_generic_default_params(def1.did, sub1);
- let sub_no_defaults_2 = self.strip_generic_default_params(def2.did, sub2);
+ let did1 = def1.did();
+ let did2 = def2.did();
+ let sub_no_defaults_1 = self.strip_generic_default_params(did1, sub1);
+ let sub_no_defaults_2 = self.strip_generic_default_params(did2, sub2);
let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
- let path1 = self.tcx.def_path_str(def1.did);
- let path2 = self.tcx.def_path_str(def2.did);
- if def1.did == def2.did {
+ let path1 = self.tcx.def_path_str(did1);
+ let path2 = self.tcx.def_path_str(did2);
+ if did1 == did2 {
// Easy case. Replace same types with `_` to shorten the output and highlight
// the differing ones.
// let x: Foo<Bar, Qux> = y::<Foo<Quz, Qux>>();
let t = match t.kind() {
// We'll hide this type only if all its type params are hidden as well.
ty::Adt(def, substs) => {
- let generics = self.tcx().generics_of(def.did);
+ let generics = self.tcx().generics_of(def.did());
// Account for params with default values, like `Vec`, where we
// want to show `Vec<T>`, not `Vec<T, _>`. If we replaced that
// subst, then we'd get the incorrect output, so we passthrough.
};
if self.level == 1 || substs.iter().any(should_keep) {
let substs = self.tcx().intern_substs(&substs[..]);
- self.tcx().mk_ty(ty::Adt(def, substs))
+ self.tcx().mk_ty(ty::Adt(*def, substs))
} else {
self.tcx().ty_error()
}
self.suggest_adding_lifetime_params(sub, ty_sup, ty_sub, &mut err);
- err.emit();
- Some(ErrorGuaranteed)
+ let reported = err.emit();
+ Some(reported)
}
fn suggest_adding_lifetime_params(
// about the original obligation only.
let code = match cause.code() {
ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } => &*parent_code,
- _ => cause.code(),
+ code => code,
};
let ObligationCauseCode::MatchImpl(parent, impl_def_id) = code else {
return None;
let impl_span = self.tcx().def_span(*impl_def_id);
err.span_note(impl_span, "...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
}
- err.emit();
- Some(ErrorGuaranteed)
+ let reported = err.emit();
+ Some(reported)
}
}
pub fn try_report(&self) -> Option<ErrorGuaranteed> {
self.try_report_from_nll()
- .map(|mut diag| {
- diag.emit();
- ErrorGuaranteed
- })
+ .map(|mut diag| diag.emit())
.or_else(|| self.try_report_impl_not_conforming_to_trait())
.or_else(|| self.try_report_anon_anon_conflict())
.or_else(|| self.try_report_static_impl_trait())
),
);
if self.find_impl_on_dyn_trait(&mut err, param.param_ty, &ctxt) {
- err.emit();
- return Some(ErrorGuaranteed);
+ let reported = err.emit();
+ return Some(reported);
} else {
err.cancel();
}
Some((param.param_ty_span, param.param_ty.to_string())),
);
- err.emit();
- Some(ErrorGuaranteed)
+ let reported = err.emit();
+ Some(reported)
}
}
) = (sub_trace.values.ty(), sup_trace.values.ty(), sub_trace.cause.code())
&& sup_expected_found == sub_expected_found
{
- self.emit_err(
+ let guar = self.emit_err(
var_origin.span(),
sub_expected,
sub_found,
*trait_item_def_id,
);
- return Some(ErrorGuaranteed);
+ return Some(guar);
}
if let RegionResolutionError::ConcreteFailure(origin, _, _)
| RegionResolutionError::GenericBoundFailure(origin, _, _) = error.clone()
trait_item_def_id,
} = origin
{
- self.emit_associated_type_err(
+ let guar = self.emit_associated_type_err(
span,
self.infcx.tcx.item_name(impl_item_def_id),
impl_item_def_id,
trait_item_def_id,
);
- return Some(ErrorGuaranteed);
+ return Some(guar);
}
None
}
- fn emit_err(&self, sp: Span, expected: Ty<'tcx>, found: Ty<'tcx>, trait_def_id: DefId) {
+ fn emit_err(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ trait_def_id: DefId,
+ ) -> ErrorGuaranteed {
let trait_sp = self.tcx().def_span(trait_def_id);
let mut err = self
.tcx()
argument, the other inputs and its output",
);
}
- err.emit();
+ err.emit()
}
fn emit_associated_type_err(
item_name: Symbol,
impl_item_def_id: DefId,
trait_item_def_id: DefId,
- ) {
+ ) -> ErrorGuaranteed {
let impl_sp = self.tcx().def_span(impl_item_def_id);
let trait_sp = self.tcx().def_span(trait_item_def_id);
let mut err = self
err.span_label(impl_sp, "found");
err.span_label(trait_sp, "expected");
- err.emit();
+ err.emit()
}
}
}
impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
- /// Replaces all regions (resp. types) bound by `binder` with placeholder
- /// regions (resp. types) and return a map indicating which bound-region
- /// placeholder region. This is the first step of checking subtyping
- /// when higher-ranked things are involved.
+ /// Replaces all bound variables (lifetimes, types, and constants) bound by
+ /// `binder` with placeholder variables.
///
- /// **Important:** You have to be careful to not leak these placeholders,
- /// for more information about how placeholders and HRTBs work, see
- /// the [rustc dev guide].
+ /// This is the first step of checking subtyping when higher-ranked things are involved.
+ /// For more details visit the relevant sections of the [rustc dev guide].
///
/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
pub fn replace_bound_vars_with_placeholders<T>(&self, binder: ty::Binder<'tcx, T>) -> T
use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues};
use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType};
-use rustc_middle::mir::interpret::ErrorHandled;
-use rustc_middle::mir::interpret::EvalToConstValueResult;
+use rustc_middle::mir::interpret::{ErrorHandled, EvalToConstValueResult};
use rustc_middle::traits::select;
use rustc_middle::ty::error::{ExpectedFound, TypeError};
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
pub mod type_variable;
mod undo_log;
-use crate::infer::canonical::OriginalQueryValues;
pub use rustc_middle::infer::unify_key;
#[must_use]
impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
/// calls `tcx.try_unify_abstract_consts` after
/// canonicalizing the consts.
+ #[instrument(skip(self), level = "debug")]
pub fn try_unify_abstract_consts(
&self,
a: ty::Unevaluated<'tcx, ()>,
b: ty::Unevaluated<'tcx, ()>,
+ param_env: ty::ParamEnv<'tcx>,
) -> bool {
- let canonical = self.canonicalize_query((a, b), &mut OriginalQueryValues::default());
- debug!("canonical consts: {:?}", &canonical.value);
+ // Reject any attempt to unify two unevaluated constants that contain inference
+ // variables, since inference variables in queries lead to ICEs.
+ if a.substs.has_infer_types_or_consts()
+ || b.substs.has_infer_types_or_consts()
+ || param_env.has_infer_types_or_consts()
+ {
+ debug!("a or b or param_env contain infer vars in its substs -> cannot unify");
+ return false;
+ }
- self.tcx.try_unify_abstract_consts(canonical.value)
+ let param_env_and = param_env.and((a, b));
+ let erased = self.tcx.erase_regions(param_env_and);
+ debug!("after erase_regions: {:?}", erased);
+
+ self.tcx.try_unify_abstract_consts(erased)
}
pub fn is_in_snapshot(&self) -> bool {
value.fold_with(&mut r)
}
+ pub fn resolve_numeric_literals_with_default<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ if !value.needs_infer() {
+ return value; // Avoid duplicated subst-folding.
+ }
+ let mut r = InferenceLiteralEraser { tcx: self.tcx };
+ value.fold_with(&mut r)
+ }
+
/// Returns the first unresolved variable contained in `T`. In the
/// process of visiting `T`, this will resolve (where possible)
/// type variables in `T`, but it never constructs the final,
///
/// This handles inferences variables within both `param_env` and `substs` by
/// performing the operation on their respective canonical forms.
+ #[instrument(skip(self), level = "debug")]
pub fn const_eval_resolve(
&self,
param_env: ty::ParamEnv<'tcx>,
span: Option<Span>,
) -> EvalToConstValueResult<'tcx> {
let substs = self.resolve_vars_if_possible(unevaluated.substs);
+ debug!(?substs);
// Postpone the evaluation of constants whose substs depend on inference
// variables
if substs.has_infer_types_or_consts() {
+ debug!("substs have infer types or consts: {:?}", substs);
return Err(ErrorHandled::TooGeneric);
}
let param_env_erased = self.tcx.erase_regions(param_env);
let substs_erased = self.tcx.erase_regions(substs);
+ debug!(?param_env_erased);
+ debug!(?substs_erased);
let unevaluated = ty::Unevaluated {
def: unevaluated.def,
}
}
+/// Replace `{integer}` with `i32` and `{float}` with `f64`.
+/// Used only for diagnostics.
+struct InferenceLiteralEraser<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for InferenceLiteralEraser<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FreshIntTy(_)) => self.tcx.types.i32,
+ ty::Infer(ty::FloatVar(_) | ty::FreshFloatTy(_)) => self.tcx.types.f64,
+ _ => ty.super_fold_with(self),
+ }
+ }
+}
+
struct ShallowResolver<'a, 'tcx> {
infcx: &'a InferCtxt<'a, 'tcx>,
}
self.tcx.sess.delay_span_bug(
origin.span(),
&format!("no region-bound-pairs for {:?}", body_id),
- )
+ );
}
}
}
libc = "0.2"
libloading = "0.7.1"
tracing = "0.1"
-rustc-rayon-core = "0.3.2"
-rayon = { version = "0.3.2", package = "rustc-rayon" }
+rustc-rayon-core = { version = "0.3.2", optional = true }
+rayon = { version = "0.3.2", package = "rustc-rayon", optional = true }
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_ast = { path = "../rustc_ast" }
rustc_attr = { path = "../rustc_attr" }
[features]
llvm = ['rustc_codegen_llvm']
+rustc_use_parallel_compiler = ['rayon', 'rustc-rayon-core', 'rustc_query_impl/rustc_use_parallel_compiler']
if recursion_limit_hit {
// If we hit a recursion limit, exit early to avoid later passes getting overwhelmed
// with a large AST
- Err(ErrorGuaranteed)
+ Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
} else {
Ok(krate)
}
);
msg.warn("The generated documentation may be incorrect");
- msg.emit()
+ msg.emit();
} else {
krate = sess.time("maybe_create_a_macro_crate", || {
let is_test_crate = sess.opts.test;
if let Some(ref input_path) = compiler.input_path {
if sess.opts.will_create_output_file() {
if output_contains_path(&output_paths, input_path) {
- sess.err(&format!(
+ let reported = sess.err(&format!(
"the input file \"{}\" would be overwritten by the generated \
executable",
input_path.display()
));
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
if let Some(dir_path) = output_conflicts_with_dir(&output_paths) {
- sess.err(&format!(
+ let reported = sess.err(&format!(
"the generated executable for the input file \"{}\" conflicts with the \
existing directory \"{}\"",
input_path.display(),
dir_path.display()
));
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
}
}
if let Some(ref dir) = compiler.temps_dir {
if fs::create_dir_all(dir).is_err() {
- sess.err("failed to find or create the directory specified by `--temps-dir`");
- return Err(ErrorGuaranteed);
+ let reported =
+ sess.err("failed to find or create the directory specified by `--temps-dir`");
+ return Err(reported);
}
}
if !only_dep_info {
if let Some(ref dir) = compiler.output_dir {
if fs::create_dir_all(dir).is_err() {
- sess.err("failed to find or create the directory specified by `--out-dir`");
- return Err(ErrorGuaranteed);
+ let reported =
+ sess.err("failed to find or create the directory specified by `--out-dir`");
+ return Err(reported);
}
}
}
// lot of annoying errors in the ui tests (basically,
// lint warnings and so on -- kindck used to do this abort, but
// kindck is gone now). -nmatsakis
- if sess.has_errors() {
- return Err(ErrorGuaranteed);
+ if let Some(reported) = sess.has_errors() {
+ return Err(reported);
}
sess.time("misc_checking_3", || {
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
-use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::LOCAL_CRATE;
use rustc_incremental::DepGraphFuture;
use rustc_lint::LintStore;
pub fn parse(&self) -> Result<&Query<ast::Crate>> {
self.parse.compute(|| {
- passes::parse(self.session(), &self.compiler.input).map_err(|mut parse_error| {
- parse_error.emit();
- ErrorGuaranteed
- })
+ passes::parse(self.session(), &self.compiler.input)
+ .map_err(|mut parse_error| parse_error.emit())
})
}
rustc_optgroups, ErrorOutputType, ExternLocation, LocationDetail, Options, Passes,
};
use rustc_session::config::{
- BranchProtection, Externs, OutputType, OutputTypes, PAuthKey, PacRet, SymbolManglingVersion,
- WasiExecModel,
+ BranchProtection, Externs, OomStrategy, OutputType, OutputTypes, PAuthKey, PacRet,
+ SymbolManglingVersion, WasiExecModel,
};
use rustc_session::config::{CFGuard, ExternEntry, LinkerPluginLto, LtoCli, SwitchWithOptPath};
use rustc_session::lint::Level;
tracked!(no_link, true);
tracked!(no_unique_section_names, true);
tracked!(no_profiler_runtime, true);
+ tracked!(oom, OomStrategy::Panic);
tracked!(osx_rpath_install_name, true);
tracked!(panic_abort_tests, true);
tracked!(panic_in_drop, PanicStrategy::Abort);
);
}
+#[test]
+fn test_too_many_hashes() {
+ let max_count = u16::MAX;
+ let mut hashes: String = "#".repeat(max_count.into());
+
+ // Valid number of hashes (65535 = 2^16 - 1), but invalid string.
+ check_raw_str(&hashes, max_count, Some(RawStrError::InvalidStarter { bad_char: '\u{0}' }));
+
+ // One more hash sign (65536 = 2^16) becomes too many.
+ hashes.push('#');
+ check_raw_str(
+ &hashes,
+ 0,
+ Some(RawStrError::TooManyDelimiters { found: usize::from(max_count) + 1 }),
+ );
+}
+
#[test]
fn test_valid_shebang() {
// https://github.com/rust-lang/rust/issues/70528
if let GenericArgKind::Type(leaf_ty) = leaf.unpack() {
if leaf_ty.is_box() {
cx.struct_span_lint(BOX_POINTERS, span, |lint| {
- lint.build(&format!("type uses owned (Box type) pointers: {}", ty)).emit()
+ lint.build(&format!("type uses owned (Box type) pointers: {}", ty)).emit();
});
}
}
&self,
cx: &EarlyContext<'_>,
span: Span,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
// This comes from a macro that has `#[allow_internal_unsafe]`.
if span.allows_unsafe() {
cx.struct_span_lint(UNSAFE_CODE, span, decorate);
}
- fn report_overriden_symbol_name(&self, cx: &EarlyContext<'_>, span: Span, msg: &str) {
+ fn report_overridden_symbol_name(&self, cx: &EarlyContext<'_>, span: Span, msg: &str) {
self.report_unsafe(cx, span, |lint| {
lint.build(msg)
.note(
macros using unsafe without triggering \
the `unsafe_code` lint at their call site",
)
- .emit()
+ .emit();
});
}
}
// Don't warn about generated blocks; that'll just pollute the output.
if blk.rules == ast::BlockCheckMode::Unsafe(ast::UserProvided) {
self.report_unsafe(cx, blk.span, |lint| {
- lint.build("usage of an `unsafe` block").emit()
+ lint.build("usage of an `unsafe` block").emit();
});
}
}
match it.kind {
ast::ItemKind::Trait(box ast::Trait { unsafety: ast::Unsafe::Yes(_), .. }) => self
.report_unsafe(cx, it.span, |lint| {
- lint.build("declaration of an `unsafe` trait").emit()
+ lint.build("declaration of an `unsafe` trait").emit();
}),
ast::ItemKind::Impl(box ast::Impl { unsafety: ast::Unsafe::Yes(_), .. }) => self
.report_unsafe(cx, it.span, |lint| {
- lint.build("implementation of an `unsafe` trait").emit()
+ lint.build("implementation of an `unsafe` trait").emit();
}),
ast::ItemKind::Fn(..) => {
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a `no_mangle` function",
);
}
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a function with `export_name`",
ast::ItemKind::Static(..) => {
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a `no_mangle` static",
);
}
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a static with `export_name`",
fn check_impl_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) {
if let ast::AssocItemKind::Fn(..) = it.kind {
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a `no_mangle` method",
);
}
if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
- self.report_overriden_symbol_name(
+ self.report_overridden_symbol_name(
cx,
attr.span,
"declaration of a method with `export_name`",
FnCtxt::Assoc(_) if body.is_none() => "declaration of an `unsafe` method",
FnCtxt::Assoc(_) => "implementation of an `unsafe` method",
};
- self.report_unsafe(cx, span, |lint| lint.build(msg).emit());
+ self.report_unsafe(cx, span, |lint| {
+ lint.build(msg).emit();
+ });
}
}
}
MISSING_DOCS,
cx.tcx.sess.source_map().guess_head_span(sp),
|lint| {
- lint.build(&format!("missing documentation for {} {}", article, desc)).emit()
+ lint.build(&format!("missing documentation for {} {}", article, desc)).emit();
},
);
}
let parent = cx.tcx.hir().get_parent_item(impl_item.hir_id());
let impl_ty = cx.tcx.type_of(parent);
let outerdef = match impl_ty.kind() {
- ty::Adt(def, _) => Some(def.did),
+ ty::Adt(def, _) => Some(def.did()),
ty::Foreign(def_id) => Some(*def_id),
_ => None,
};
"type could implement `Copy`; consider adding `impl \
Copy`",
)
- .emit()
+ .emit();
})
}
}
let mut impls = LocalDefIdSet::default();
cx.tcx.for_each_impl(debug, |d| {
if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() {
- if let Some(def_id) = ty_def.did.as_local() {
+ if let Some(def_id) = ty_def.did().as_local() {
impls.insert(def_id);
}
}
or a manual implementation",
cx.tcx.def_path_str(debug)
))
- .emit()
+ .emit();
});
}
}
if to_mt == hir::Mutability::Mut && from_mt == hir::Mutability::Not {
let msg = "transmuting &T to &mut T is undefined behavior, \
even if the reference is unused, consider instead using an UnsafeCell";
- cx.struct_span_lint(MUTABLE_TRANSMUTES, expr.span, |lint| lint.build(msg).emit());
+ cx.struct_span_lint(MUTABLE_TRANSMUTES, expr.span, |lint| {
+ lint.build(msg).emit();
+ });
}
}
if let Some(items) = attr.meta_item_list() {
for item in items {
cx.struct_span_lint(UNSTABLE_FEATURES, item.span(), |lint| {
- lint.build("unstable feature").emit()
+ lint.build("unstable feature").emit();
});
}
}
or lifetime parameters",
predicate_kind_name, predicate
))
- .emit()
+ .emit();
});
}
}
let attrs = cx.tcx.hir().attrs(it.hir_id());
if let Some(attr) = cx.sess().find_by_name(attrs, sym::rustc_test_marker) {
cx.struct_span_lint(UNNAMEABLE_TEST_ITEMS, attr.span, |lint| {
- lint.build("cannot test inner items").emit()
+ lint.build("cannot test inner items").emit();
});
}
}
format!("r#{}", ident),
Applicability::MachineApplicable,
)
- .emit()
+ .emit();
});
}
}
/// Test if this enum has several actually "existing" variants.
/// Zero-sized uninhabited variants do not always have a tag assigned and thus do not "exist".
- fn is_multi_variant(adt: &ty::AdtDef) -> bool {
+ fn is_multi_variant<'tcx>(adt: ty::AdtDef<'tcx>) -> bool {
// As an approximation, we only count dataless variants. Those are definitely inhabited.
- let existing_variants = adt.variants.iter().filter(|v| v.fields.is_empty()).count();
+ let existing_variants = adt.variants().iter().filter(|v| v.fields.is_empty()).count();
existing_variants > 1
}
Adt(adt_def, substs) if !adt_def.is_union() => {
// First check if this ADT has a layout attribute (like `NonNull` and friends).
use std::ops::Bound;
- match tcx.layout_scalar_valid_range(adt_def.did) {
+ match tcx.layout_scalar_valid_range(adt_def.did()) {
// We exploit here that `layout_scalar_valid_range` will never
// return `Bound::Excluded`. (And we have tests checking that we
// handle the attribute correctly.)
_ => {}
}
// Now, recurse.
- match adt_def.variants.len() {
+ match adt_def.variants().len() {
0 => Some(("enums with no variants have no valid value".to_string(), None)),
1 => {
// Struct, or enum with exactly one variant.
// Proceed recursively, check all fields.
- let variant = &adt_def.variants[VariantIdx::from_u32(0)];
+ let variant = &adt_def.variant(VariantIdx::from_u32(0));
variant.fields.iter().find_map(|field| {
ty_find_init_error(tcx, field.ty(tcx, substs), init).map(
|(mut msg, span)| {
}
// Multi-variant enum.
_ => {
- if init == InitKind::Uninit && is_multi_variant(adt_def) {
- let span = tcx.def_span(adt_def.did);
+ if init == InitKind::Uninit && is_multi_variant(*adt_def) {
+ let span = tcx.def_span(adt_def.did());
Some((
"enums have to be initialized to a variant".to_string(),
Some(span),
let mut ty = ty;
loop {
if let ty::Adt(def, substs) = *ty.kind() {
- let is_transparent = def.subst(tcx, substs).repr.transparent();
- let is_non_null = crate::types::nonnull_optimization_guaranteed(tcx, &def);
+ let is_transparent = def.subst(tcx, substs).repr().transparent();
+ let is_non_null = crate::types::nonnull_optimization_guaranteed(tcx, def);
debug!(
"non_transparent_ty({:?}) -- type is transparent? {}, type is non-null? {}",
ty, is_transparent, is_non_null
);
if is_transparent && !is_non_null {
- debug_assert!(def.variants.len() == 1);
- let v = &def.variants[VariantIdx::new(0)];
+ debug_assert!(def.variants().len() == 1);
+ let v = &def.variant(VariantIdx::new(0));
ty = transparent_newtype_field(tcx, v)
.expect(
"single-variant transparent structure with zero-sized field",
}
// Grab a flattened representation of all fields.
- let a_fields = a_def.variants.iter().flat_map(|v| v.fields.iter());
- let b_fields = b_def.variants.iter().flat_map(|v| v.fields.iter());
+ let a_fields = a_def.variants().iter().flat_map(|v| v.fields.iter());
+ let b_fields = b_def.variants().iter().flat_map(|v| v.fields.iter());
// Perform a structural comparison for each field.
a_fields.eq_by(
"this signature doesn't match the previous declaration",
)
.note_expected_found(&"", expected_str, &"", found_str)
- .emit()
+ .emit();
},
);
}
&self,
lint: &'static Lint,
span: Option<impl Into<MultiSpan>>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
diagnostic: BuiltinLintDiagnostics,
) {
self.lookup(lint, span, |lint| {
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
);
fn struct_span_lint<S: Into<MultiSpan>>(
&self,
lint: &'static Lint,
span: S,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
self.lookup(lint, Some(span), decorate);
}
/// Emit a lint at the appropriate level, with no associated span.
- fn lint(&self, lint: &'static Lint, decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>)) {
+ fn lint(
+ &self,
+ lint: &'static Lint,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
self.lookup(lint, None as Option<Span>, decorate);
}
}
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
let hir_id = self.last_node_with_lint_attrs;
&self,
lint: &'static Lint,
span: Option<S>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
self.builder.struct_lint(lint, span.map(|s| s.into()), decorate)
}
) -> Result<Self::Path, Self::Error> {
if trait_ref.is_none() {
if let ty::Adt(def, substs) = self_ty.kind() {
- return self.print_def_path(def.did, substs);
+ return self.print_def_path(def.did(), substs);
}
}
self.context.lookup_with_diagnostics(
lint_id.lint,
Some(span),
- |lint| lint.build(&msg).emit(),
+ |lint| {
+ lint.build(&msg).emit();
+ },
diagnostic,
);
}
hir_id: HirId,
expectation: &LintExpectation,
) {
- // FIXME: The current implementation doesn't cover cases where the
- // `unfulfilled_lint_expectations` is actually expected by another lint
- // expectation. This can be added here by checking the lint level and
- // retrieving the `LintExpectationId` if it was expected.
tcx.struct_span_lint_hir(
builtin::UNFULFILLED_LINT_EXPECTATIONS,
hir_id,
if let Some(rationale) = expectation.reason {
diag.note(&rationale.as_str());
}
+
+ if expectation.is_unfulfilled_lint_expectations {
+ diag.note("the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message");
+ }
+
diag.emit();
},
);
Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
if let Some(name @ (sym::Ty | sym::TyCtxt)) =
- cx.tcx.get_diagnostic_name(adt.did)
+ cx.tcx.get_diagnostic_name(adt.did())
{
// NOTE: This path is currently unreachable as `Ty<'tcx>` is
// defined as a type alias meaning that `impl<'tcx> Ty<'tcx>`
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{RegisteredTools, TyCtxt};
use rustc_session::lint::{
- builtin::{self, FORBIDDEN_LINT_GROUPS},
+ builtin::{self, FORBIDDEN_LINT_GROUPS, UNFULFILLED_LINT_EXPECTATIONS},
Level, Lint, LintExpectationId, LintId,
};
use rustc_session::parse::{add_feature_diagnostics, feature_err};
continue
};
for id in ids {
- // ForceWarn and Forbid cannot be overriden
+ // ForceWarn and Forbid cannot be overridden
if let Some((Level::ForceWarn | Level::Forbid, _)) = self.current_specs().get(&id) {
continue;
}
self.sets.get_lint_level(id.lint, self.cur, Some(self.current_specs()), &self.sess);
// Setting to a non-forbid level is an error if the lint previously had
// a forbid level. Note that this is not necessarily true even with a
- // `#[forbid(..)]` attribute present, as that is overriden by `--cap-lints`.
+ // `#[forbid(..)]` attribute present, as that is overridden by `--cap-lints`.
//
// This means that this only errors if we're truly lowering the lint
// level from forbid.
}
}
}
+
+ // The lint `unfulfilled_lint_expectations` can't be expected, as it would suppress itself.
+ // Handling expectations of this lint would add additional complexity with little to no
+ // benefit. The expect level for this lint will therefore be ignored.
+ if let Level::Expect(_) = level && id == LintId::of(UNFULFILLED_LINT_EXPECTATIONS) {
+ return;
+ }
+
if let Level::ForceWarn = old_level {
self.current_specs_mut().insert(id, (old_level, old_src));
} else {
self.store.check_lint_name(&name, tool_name, self.registered_tools);
match &lint_result {
CheckLintNameResult::Ok(ids) => {
+ // This checks for instances where the user writes `#[expect(unfulfilled_lint_expectations)]`
+ // in that case we want to avoid overriding the lint level but instead add an expectation that
+ // can't be fulfilled. The lint message will include an explanation, that the
+ // `unfulfilled_lint_expectations` lint can't be expected.
+ if let Level::Expect(expect_id) = level {
+ // The `unfulfilled_lint_expectations` lint is not part of any lint groups. Therefore, we
+ // only need to check the slice if it contains a single lint.
+ let is_unfulfilled_lint_expectations = match ids {
+ [lint] => *lint == LintId::of(UNFULFILLED_LINT_EXPECTATIONS),
+ _ => false,
+ };
+ self.lint_expectations.push((
+ expect_id,
+ LintExpectation::new(reason, sp, is_unfulfilled_lint_expectations),
+ ));
+ }
let src = LintLevelSource::Node(
meta_item.path.segments.last().expect("empty lint name").ident.name,
sp,
self.insert_spec(id, (level, src));
}
}
- if let Level::Expect(expect_id) = level {
- self.lint_expectations
- .push((expect_id, LintExpectation::new(reason, sp)));
- }
}
CheckLintNameResult::Tool(result) => {
}
if let Level::Expect(expect_id) = level {
self.lint_expectations
- .push((expect_id, LintExpectation::new(reason, sp)));
+ .push((expect_id, LintExpectation::new(reason, sp, false)));
}
}
Err((Some(ids), ref new_lint_name)) => {
}
if let Level::Expect(expect_id) = level {
self.lint_expectations
- .push((expect_id, LintExpectation::new(reason, sp)));
+ .push((expect_id, LintExpectation::new(reason, sp, false)));
}
}
Err((None, _)) => {
}
if let Level::Expect(expect_id) = level {
self.lint_expectations
- .push((expect_id, LintExpectation::new(reason, sp)));
+ .push((expect_id, LintExpectation::new(reason, sp, false)));
}
} else {
panic!("renamed lint does not exist: {}", new_name);
&self,
lint: &'static Lint,
span: Option<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
let (level, src) = self.lint_level(lint);
struct_lint_level(self.sess, lint, level, src, span, decorate)
) {
let source_type = cx.typeck_results().expr_ty(source);
if let ty::Adt(def, substs) = source_type.kind() {
- if cx.tcx.is_diagnostic_item(sym::Result, def.did) {
+ if cx.tcx.is_diagnostic_item(sym::Result, def.did()) {
if let ty::Adt(adt, _) = substs.type_at(0).kind() {
- if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did) {
+ if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did()) {
cx.struct_span_lint(TEMPORARY_CSTRING_AS_PTR, as_ptr_span, |diag| {
let mut diag = diag
.build("getting the inner pointer of a temporary `CString`");
}
has_non_ascii_idents = true;
cx.struct_span_lint(NON_ASCII_IDENTS, sp, |lint| {
- lint.build("identifier contains non-ASCII characters").emit()
+ lint.build("identifier contains non-ASCII characters").emit();
});
if check_uncommon_codepoints
&& !symbol_str.chars().all(GeneralSecurityProfile::identifier_allowed)
{
cx.struct_span_lint(UNCOMMON_CODEPOINTS, sp, |lint| {
- lint.build("identifier contains uncommon Unicode codepoints").emit()
+ lint.build("identifier contains uncommon Unicode codepoints").emit();
})
}
}
let char_info = format!("'{}' (U+{:04X})", ch, ch as u32);
note += &char_info;
}
- lint.build(&message).note(¬e).note("please recheck to make sure their usages are indeed what you want").emit()
+ lint.build(&message).note(¬e).note("please recheck to make sure their usages are indeed what you want").emit();
});
}
}
ty::Ref(_, r, _) if *r.kind() == ty::Str,
) || matches!(
ty.ty_adt_def(),
- Some(ty_def) if cx.tcx.is_diagnostic_item(sym::String, ty_def.did),
+ Some(ty_def) if cx.tcx.is_diagnostic_item(sym::String, ty_def.did()),
);
let (suggest_display, suggest_debug) = cx.tcx.infer_ctxt().enter(|infcx| {
let method = &call.ident.name;
let message =
format!("call to `.{}()` on a reference in this situation does nothing", &method,);
- lint.build(&message).span_label(span, "unnecessary method call").note(¬e).emit()
+ lint.build(&message).span_label(span, "unnecessary method call").note(¬e).emit();
});
}
}
}
Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
- if cx.tcx.has_attr(adt.did, sym::rustc_pass_by_value) {
- return Some(cx.tcx.def_path_str_with_substs(adt.did, substs));
+ if cx.tcx.has_attr(adt.did(), sym::rustc_pass_by_value) {
+ return Some(cx.tcx.def_path_str_with_substs(adt.did(), substs));
}
}
}
let Trait(trait_predicate) = predicate.kind().skip_binder() else {
continue
};
- if trait_predicate.is_const_if_const() {
- // `~const Drop` definitely have meanings so avoid linting here.
- continue;
- }
let def_id = trait_predicate.trait_ref.def_id;
if cx.tcx.lang_items().drop_trait() == Some(def_id) {
// Explicitly allow `impl Drop`, a drop-guards-as-Voldemort-type pattern.
predicate,
cx.tcx.def_path_str(needs_drop)
);
- lint.build(&msg).emit()
+ lint.build(&msg).emit();
});
}
}
instead using `{}` to detect whether a type is trivially dropped",
cx.tcx.def_path_str(needs_drop)
);
- lint.build(&msg).emit()
+ lint.build(&msg).emit();
});
}
}
min,
max,
))
- .emit()
+ .emit();
});
}
}
hir::ExprKind::Binary(binop, ref l, ref r) => {
if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| {
- lint.build("comparison is useless due to type limits").emit()
+ lint.build("comparison is useless due to type limits").emit();
});
}
}
FfiUnsafe { ty: Ty<'tcx>, reason: String, help: Option<String> },
}
-crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtDef) -> bool {
- tcx.get_attrs(def.did).iter().any(|a| a.has_name(sym::rustc_nonnull_optimization_guaranteed))
+crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
+ tcx.get_attrs(def.did()).iter().any(|a| a.has_name(sym::rustc_nonnull_optimization_guaranteed))
}
/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
ty::FnPtr(_) => true,
ty::Ref(..) => true,
ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
- ty::Adt(def, substs) if def.repr.transparent() && !def.is_union() => {
- let marked_non_null = nonnull_optimization_guaranteed(tcx, &def);
+ ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
+ let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
if marked_non_null {
return true;
// Types with a `#[repr(no_niche)]` attribute have their niche hidden.
// The attribute is used by the UnsafeCell for example (the only use so far).
- if def.repr.hide_niche() {
+ if def.repr().hide_niche() {
return false;
}
- def.variants
+ def.variants()
.iter()
.filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
.any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
Some(match *ty.kind() {
ty::Adt(field_def, field_substs) => {
let inner_field_ty = {
- let first_non_zst_ty =
- field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v));
+ let first_non_zst_ty = field_def
+ .variants()
+ .iter()
+ .filter_map(|v| transparent_newtype_field(cx.tcx, v));
debug_assert_eq!(
first_non_zst_ty.clone().count(),
1,
) -> Option<Ty<'tcx>> {
debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
if let ty::Adt(ty_def, substs) = ty.kind() {
- let field_ty = match &ty_def.variants.raw[..] {
+ let field_ty = match &ty_def.variants().raw[..] {
[var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
_ => return None,
&self,
cache: &mut FxHashSet<Ty<'tcx>>,
ty: Ty<'tcx>,
- def: &ty::AdtDef,
+ def: ty::AdtDef<'tcx>,
variant: &ty::VariantDef,
substs: SubstsRef<'tcx>,
) -> FfiResult<'tcx> {
use FfiResult::*;
- if def.repr.transparent() {
+ if def.repr().transparent() {
// Can assume that at most one field is not a ZST, so only check
// that field's type for FFI-safety.
if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
AdtKind::Struct | AdtKind::Union => {
let kind = if def.is_struct() { "struct" } else { "union" };
- if !def.repr.c() && !def.repr.transparent() {
+ if !def.repr().c() && !def.repr().transparent() {
return FfiUnsafe {
ty,
reason: format!("this {} has unspecified layout", kind),
let is_non_exhaustive =
def.non_enum_variant().is_field_list_non_exhaustive();
- if is_non_exhaustive && !def.did.is_local() {
+ if is_non_exhaustive && !def.did().is_local() {
return FfiUnsafe {
ty,
reason: format!("this {} is non-exhaustive", kind),
self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
}
AdtKind::Enum => {
- if def.variants.is_empty() {
+ if def.variants().is_empty() {
// Empty enums are okay... although sort of useless.
return FfiSafe;
}
// Check for a repr() attribute to specify the size of the
// discriminant.
- if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() {
+ if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
+ {
// Special-case types like `Option<extern fn()>`.
if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
return FfiUnsafe {
}
}
- if def.is_variant_list_non_exhaustive() && !def.did.is_local() {
+ if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
return FfiUnsafe {
ty,
reason: "this enum is non-exhaustive".into(),
}
// Check the contained variants.
- for variant in &def.variants {
+ for variant in def.variants() {
let is_non_exhaustive = variant.is_field_list_non_exhaustive();
if is_non_exhaustive && !variant.def_id.is_local() {
return FfiUnsafe {
}
diag.note(note);
if let ty::Adt(def, _) = ty.kind() {
- if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did) {
+ if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
diag.span_note(sp, "the type is defined here");
}
}
larger ({} bytes) than the next largest",
largest
))
- .emit()
+ .emit();
},
);
}
&& let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
// skip extension traits, only lint functions from the standard library
&& cx.tcx.trait_id_of_impl(impl_did).is_none()
- && let Some(parent) = cx.tcx.parent(adt.did)
+ && let Some(parent) = cx.tcx.parent(adt.did())
&& cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
- && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did))
+ && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
{
return Some((method_path.ident.name, args));
}
if !(type_permits_lack_of_use || fn_warned || op_warned) {
cx.struct_span_lint(UNUSED_RESULTS, s.span, |lint| {
- lint.build(&format!("unused result of type `{}`", ty)).emit()
+ lint.build(&format!("unused result of type `{}`", ty)).emit();
});
}
let descr_pre = &format!("{}boxed ", descr_pre);
check_must_use_ty(cx, boxed_ty, expr, span, descr_pre, descr_post, plural_len)
}
- ty::Adt(def, _) => check_must_use_def(cx, def.did, span, descr_pre, descr_post),
+ ty::Adt(def, _) => check_must_use_def(cx, def.did(), span, descr_pre, descr_post),
ty::Opaque(def, _) => {
let mut has_emitted = false;
for &(predicate, _) in cx.tcx.explicit_item_bounds(def) {
} else {
lint.span_help(s.span, "use `drop` to clarify the intent");
}
- lint.emit()
+ lint.emit();
} else {
- lint.build("path statement with no effect").emit()
+ lint.build("path statement with no effect").emit();
}
});
}
};
cx.struct_span_lint(UNUSED_IMPORT_BRACES, item.span, |lint| {
- lint.build(&format!("braces around {} is unnecessary", node_name)).emit()
+ lint.build(&format!("braces around {} is unnecessary", node_name)).emit();
});
}
}
"unnecessary allocation, use `&mut` instead"
}
};
- lint.build(msg).emit()
+ lint.build(msg).emit();
});
}
}
/// ### Explanation
///
/// A duplicated attribute may erroneously originate from a copy-paste and the effect of it
- /// being duplicated may not be obvious or desireable.
+ /// being duplicated may not be obvious or desirable.
///
/// For instance, doubling the `#[test]` attributes registers the test to be run twice with no
/// change to its environment.
($x:expr) => {
if $x != 1 { "s" } else { "" }
};
+ ("is", $x:expr) => {
+ if $x == 1 { "is" } else { "are" }
+ };
+ ("this", $x:expr) => {
+ if $x == 1 { "this" } else { "these" }
+ };
}
/// Indicates the confidence in the correctness of a suggestion.
/// Expected `Diagnostic`s get the lint level `Expect` which stores the `LintExpectationId`
/// to match it with the actual expectation later on.
///
-/// The `LintExpectationId` has to be has stable between compilations, as diagnostic
+/// The `LintExpectationId` has to be stable between compilations, as diagnostic
/// instances might be loaded from cache. Lint messages can be emitted during an
/// `EarlyLintPass` operating on the AST and during a `LateLintPass` traversing the
/// HIR tree. The AST doesn't have enough information to create a stable id. The
#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash, Encodable, Decodable)]
pub enum LintExpectationId {
/// Used for lints emitted during the `EarlyLintPass`. This id is not
- /// has stable and should not be cached.
+ /// hash stable and should not be cached.
Unstable { attr_id: AttrId, lint_index: Option<u16> },
/// The [`HirId`] that the lint expectation is attached to. This id is
/// stable and can be cached. The additional index ensures that nodes with
lint_index.hash_stable(hcx, hasher);
}
_ => {
- unreachable!("HashStable should only be called for a filled `LintExpectationId`")
+ unreachable!(
+ "HashStable should only be called for filled and stable `LintExpectationId`"
+ )
}
}
}
for (_, data) in self.cstore.iter_crate_data() {
if data.has_global_allocator() {
match global_allocator {
- Some(other_crate) => self.sess.err(&format!(
+ Some(other_crate) => {
+ self.sess.err(&format!(
"the `#[global_allocator]` in {} conflicts with global allocator in: {}",
other_crate,
data.name()
- )),
+ ));
+ }
None => global_allocator = Some(data.name()),
}
}
// don't perform this validation if the session has errors, as one of
// those errors may indicate a circular dependency which could cause
// this to stack overflow.
- if self.sess.has_errors() {
+ if self.sess.has_errors().is_some() {
return;
}
fn report_unused_deps(&mut self, krate: &ast::Crate) {
// Make a point span rather than covering the whole file
- let span = krate.span.shrink_to_lo();
+ let span = krate.spans.inner_span.shrink_to_lo();
// Complain about anything left over
for (name, entry) in self.sess.opts.externs.iter() {
if let ExternLocation::FoundInLibrarySearchDirectories = entry.location {
("bundle", NativeLibKind::Static { bundle, .. }) => {
*bundle = Some(value);
}
- ("bundle", _) => sess.span_err(
- span,
- "bundle linking modifier is only compatible with \
+ ("bundle", _) => {
+ sess.span_err(
+ span,
+ "bundle linking modifier is only compatible with \
`static` linking kind",
- ),
+ );
+ }
("verbatim", _) => lib.verbatim = Some(value),
("whole-archive", NativeLibKind::Static { whole_archive, .. }) => {
*whole_archive = Some(value);
}
- ("whole-archive", _) => sess.span_err(
- span,
- "whole-archive linking modifier is only compatible with \
+ ("whole-archive", _) => {
+ sess.span_err(
+ span,
+ "whole-archive linking modifier is only compatible with \
`static` linking kind",
- ),
+ );
+ }
("as-needed", NativeLibKind::Dylib { as_needed })
| ("as-needed", NativeLibKind::Framework { as_needed }) => {
*as_needed = Some(value);
}
- ("as-needed", _) => sess.span_err(
- span,
- "as-needed linking modifier is only compatible with \
+ ("as-needed", _) => {
+ sess.span_err(
+ span,
+ "as-needed linking modifier is only compatible with \
`dylib` and `framework` linking kinds",
- ),
+ );
+ }
- _ => sess.span_err(
- span,
- &format!(
- "unrecognized linking modifier `{}`, expected one \
+ _ => {
+ sess.span_err(
+ span,
+ &format!(
+ "unrecognized linking modifier `{}`, expected one \
of: bundle, verbatim, whole-archive, as-needed",
- modifier
- ),
- ),
+ modifier
+ ),
+ );
+ }
}
}
} else {
Some(span) => {
struct_span_err!(self.tcx.sess, span, E0455, "{}", msg).emit();
}
- None => self.tcx.sess.err(msg),
+ None => {
+ self.tcx.sess.err(msg);
+ }
}
}
if lib.cfg.is_some() && !self.tcx.features().link_cfg {
)
}
- fn get_adt_def(self, item_id: DefIndex, tcx: TyCtxt<'tcx>) -> &'tcx ty::AdtDef {
+ fn get_adt_def(self, item_id: DefIndex, tcx: TyCtxt<'tcx>) -> ty::AdtDef<'tcx> {
let kind = self.kind(item_id);
let did = self.local_def_id(item_id);
record!(self.tables.type_of[def_id] <- self.tcx.type_of(def_id));
}
- fn encode_enum_variant_info(&mut self, def: &ty::AdtDef, index: VariantIdx) {
+ fn encode_enum_variant_info(&mut self, def: ty::AdtDef<'tcx>, index: VariantIdx) {
let tcx = self.tcx;
- let variant = &def.variants[index];
+ let variant = &def.variant(index);
let def_id = variant.def_id;
debug!("EncodeContext::encode_enum_variant_info({:?})", def_id);
}
}
- fn encode_enum_variant_ctor(&mut self, def: &ty::AdtDef, index: VariantIdx) {
+ fn encode_enum_variant_ctor(&mut self, def: ty::AdtDef<'tcx>, index: VariantIdx) {
let tcx = self.tcx;
- let variant = &def.variants[index];
+ let variant = &def.variant(index);
let def_id = variant.ctor_def_id.unwrap();
debug!("EncodeContext::encode_enum_variant_ctor({:?})", def_id);
fn encode_field(
&mut self,
- adt_def: &ty::AdtDef,
+ adt_def: ty::AdtDef<'tcx>,
variant_index: VariantIdx,
field_index: usize,
) {
- let variant = &adt_def.variants[variant_index];
+ let variant = &adt_def.variant(variant_index);
let field = &variant.fields[field_index];
let def_id = field.did;
self.encode_item_type(def_id);
}
- fn encode_struct_ctor(&mut self, adt_def: &ty::AdtDef, def_id: DefId) {
+ fn encode_struct_ctor(&mut self, adt_def: ty::AdtDef<'tcx>, def_id: DefId) {
debug!("EncodeContext::encode_struct_ctor({:?})", def_id);
let tcx = self.tcx;
let variant = adt_def.non_enum_variant();
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
};
- record!(self.tables.kind[def_id] <- EntryKind::Struct(self.lazy(data), adt_def.repr));
+ record!(self.tables.kind[def_id] <- EntryKind::Struct(self.lazy(data), adt_def.repr()));
self.encode_item_type(def_id);
if variant.ctor_kind == CtorKind::Fn {
record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
self.encode_explicit_item_bounds(def_id);
EntryKind::OpaqueTy
}
- hir::ItemKind::Enum(..) => EntryKind::Enum(self.tcx.adt_def(def_id).repr),
+ hir::ItemKind::Enum(..) => EntryKind::Enum(self.tcx.adt_def(def_id).repr()),
hir::ItemKind::Struct(ref struct_def, _) => {
let adt_def = self.tcx.adt_def(def_id);
let variant = adt_def.non_enum_variant();
ctor,
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
}),
- adt_def.repr,
+ adt_def.repr(),
)
}
hir::ItemKind::Union(..) => {
ctor: None,
is_non_exhaustive: variant.is_field_list_non_exhaustive(),
}),
- adt_def.repr,
+ adt_def.repr(),
)
}
hir::ItemKind::Impl(hir::Impl { defaultness, constness, .. }) => {
// FIXME(eddyb) there should be a nicer way to do this.
match item.kind {
hir::ItemKind::Enum(..) => record!(self.tables.children[def_id] <-
- self.tcx.adt_def(def_id).variants.iter().map(|v| {
+ self.tcx.adt_def(def_id).variants().iter().map(|v| {
assert!(v.def_id.is_local());
v.def_id.index
})
}
impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
- fn encode_fields(&mut self, adt_def: &ty::AdtDef) {
- for (variant_index, variant) in adt_def.variants.iter_enumerated() {
+ fn encode_fields(&mut self, adt_def: ty::AdtDef<'tcx>) {
+ for (variant_index, variant) in adt_def.variants().iter_enumerated() {
for (field_index, _field) in variant.fields.iter().enumerate() {
self.encode_field(adt_def, variant_index, field_index);
}
let def = self.tcx.adt_def(item.def_id.to_def_id());
self.encode_fields(def);
- for (i, variant) in def.variants.iter_enumerated() {
+ for (i, variant) in def.variants().iter_enumerated() {
self.encode_enum_variant_info(def, i);
if let Some(_ctor_def_id) = variant.ctor_def_id {
either = "1.5.0"
gsgdt = "0.1.2"
tracing = "0.1"
-rustc-rayon = "0.3.2"
-rustc-rayon-core = "0.3.2"
+rustc-rayon = { version = "0.3.2", optional = true }
+rustc-rayon-core = { version = "0.3.2", optional = true }
polonius-engine = "0.13.0"
rustc_apfloat = { path = "../rustc_apfloat" }
rustc_attr = { path = "../rustc_attr" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.76.0"
+chalk-ir = "0.80.0"
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_session = { path = "../rustc_session" }
rustc_type_ir = { path = "../rustc_type_ir" }
rand = "0.8.4"
rand_xoshiro = "0.6.0"
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
[] layout: rustc_target::abi::LayoutS<'tcx>,
[] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
// AdtDef are interned and compared by address
- [decode] adt_def: rustc_middle::ty::AdtDef,
+ [decode] adt_def: rustc_middle::ty::AdtDefData,
[] steal_thir: rustc_data_structures::steal::Steal<rustc_middle::thir::Thir<'tcx>>,
[] steal_mir: rustc_data_structures::steal::Steal<rustc_middle::mir::Body<'tcx>>,
[decode] mir: rustc_middle::mir::Body<'tcx>,
Vec<rustc_middle::traits::query::OutlivesBound<'tcx>>
>
>,
- [] dtorck_constraint: rustc_middle::traits::query::DtorckConstraint<'tcx>,
+ [] dtorck_constraint: rustc_middle::traits::query::DropckConstraint<'tcx>,
[] candidate_step: rustc_middle::traits::query::CandidateStep<'tcx>,
[] autoderef_bad_ty: rustc_middle::traits::query::MethodAutoderefBadTy<'tcx>,
[] type_op_subtype:
pub mod place;
use crate::ty::query::Providers;
-use crate::ty::TyCtxt;
+use crate::ty::{ImplSubject, TyCtxt};
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::*;
use rustc_query_system::ich::StableHashingContext;
use rustc_span::DUMMY_SP;
pub fn parent_module(self, id: HirId) -> LocalDefId {
self.parent_module_from_def_id(id.owner)
}
+
+ pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> {
+ self.impl_trait_ref(def_id)
+ .map(ImplSubject::Trait)
+ .unwrap_or_else(|| ImplSubject::Inherent(self.type_of(def_id)))
+ }
}
pub fn provide(providers: &mut Providers) {
#![feature(new_uninit)]
#![feature(nll)]
#![feature(once_cell)]
+#![feature(let_chains)]
#![feature(let_else)]
#![feature(min_specialization)]
#![feature(trusted_len)]
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticId};
+use rustc_errors::{
+ Diagnostic, DiagnosticBuilder, DiagnosticId, EmissionGuarantee, ErrorGuaranteed,
+};
use rustc_hir::HirId;
use rustc_index::vec::IndexVec;
use rustc_query_system::ich::StableHashingContext;
builtin::{self, FORBIDDEN_LINT_GROUPS},
FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId,
};
-use rustc_session::{DiagnosticMessageId, Session};
+use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::{DesugaringKind, ExpnKind, MultiSpan};
use rustc_span::{symbol, Span, Symbol, DUMMY_SP};
pub reason: Option<Symbol>,
/// The [`Span`] of the attribute that this expectation originated from.
pub emission_span: Span,
+ /// Lint messages for the `unfulfilled_lint_expectations` lint will be
+ /// adjusted to include an additional note. Therefore, we have to track if
+ /// the expectation is for the lint.
+ pub is_unfulfilled_lint_expectations: bool,
}
impl LintExpectation {
- pub fn new(reason: Option<Symbol>, attr_span: Span) -> Self {
- Self { reason, emission_span: attr_span }
+ pub fn new(
+ reason: Option<Symbol>,
+ emission_span: Span,
+ is_unfulfilled_lint_expectations: bool,
+ ) -> Self {
+ Self { reason, emission_span, is_unfulfilled_lint_expectations }
}
}
-pub struct LintDiagnosticBuilder<'a>(DiagnosticBuilder<'a, ()>);
+pub struct LintDiagnosticBuilder<'a, G: EmissionGuarantee>(DiagnosticBuilder<'a, G>);
-impl<'a> LintDiagnosticBuilder<'a> {
- /// Return the inner DiagnosticBuilder, first setting the primary message to `msg`.
- pub fn build(mut self, msg: &str) -> DiagnosticBuilder<'a, ()> {
+impl<'a, G: EmissionGuarantee> LintDiagnosticBuilder<'a, G> {
+ /// Return the inner `DiagnosticBuilder`, first setting the primary message to `msg`.
+ pub fn build(mut self, msg: &str) -> DiagnosticBuilder<'a, G> {
self.0.set_primary_message(msg);
self.0.set_is_lint();
self.0
}
- /// Create a LintDiagnosticBuilder from some existing DiagnosticBuilder.
- pub fn new(err: DiagnosticBuilder<'a, ()>) -> LintDiagnosticBuilder<'a> {
+ /// Create a `LintDiagnosticBuilder` from some existing `DiagnosticBuilder`.
+ pub fn new(err: DiagnosticBuilder<'a, G>) -> LintDiagnosticBuilder<'a, G> {
LintDiagnosticBuilder(err)
}
}
+impl<'a> LintDiagnosticBuilder<'a, ErrorGuaranteed> {
+ pub fn forget_guarantee(self) -> LintDiagnosticBuilder<'a, ()> {
+ LintDiagnosticBuilder(self.0.forget_guarantee())
+ }
+}
+
pub fn explain_lint_level_source(
- sess: &Session,
lint: &'static Lint,
level: Level,
src: LintLevelSource,
let name = lint.name_lower();
match src {
LintLevelSource::Default => {
- sess.diag_note_once(
- err,
- DiagnosticMessageId::from(lint),
- &format!("`#[{}({})]` on by default", level.as_str(), name),
- );
+ err.note_once(&format!("`#[{}({})]` on by default", level.as_str(), name));
}
LintLevelSource::CommandLine(lint_flag_val, orig_level) => {
let flag = match orig_level {
};
let hyphen_case_lint_name = name.replace('_', "-");
if lint_flag_val.as_str() == name {
- sess.diag_note_once(
- err,
- DiagnosticMessageId::from(lint),
- &format!(
- "requested on the command line with `{} {}`",
- flag, hyphen_case_lint_name
- ),
- );
+ err.note_once(&format!(
+ "requested on the command line with `{} {}`",
+ flag, hyphen_case_lint_name
+ ));
} else {
let hyphen_case_flag_val = lint_flag_val.as_str().replace('_', "-");
- sess.diag_note_once(
- err,
- DiagnosticMessageId::from(lint),
- &format!(
- "`{} {}` implied by `{} {}`",
- flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
- ),
- );
+ err.note_once(&format!(
+ "`{} {}` implied by `{} {}`",
+ flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
+ ));
}
}
LintLevelSource::Node(lint_attr_name, src, reason) => {
if let Some(rationale) = reason {
err.note(rationale.as_str());
}
- sess.diag_span_note_once(
- err,
- DiagnosticMessageId::from(lint),
- src,
- "the lint level is defined here",
- );
+ err.span_note_once(src, "the lint level is defined here");
if lint_attr_name.as_str() != name {
let level_str = level.as_str();
- sess.diag_note_once(
- err,
- DiagnosticMessageId::from(lint),
- &format!(
- "`#[{}({})]` implied by `#[{}({})]`",
- level_str, name, level_str, lint_attr_name
- ),
- );
+ err.note_once(&format!(
+ "`#[{}({})]` implied by `#[{}({})]`",
+ level_str, name, level_str, lint_attr_name
+ ));
}
}
}
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>) + 'd,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd,
) {
// Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
// the "real" work.
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b>) + 'd>,
+ decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>,
) {
// Check for future incompatibility lints and issue a stronger warning.
let future_incompatible = lint.future_incompatible;
return;
}
- explain_lint_level_source(sess, lint, level, src, &mut err);
+ explain_lint_level_source(lint, level, src, &mut err);
let name = lint.name_lower();
let is_force_warn = matches!(level, Level::ForceWarn);
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
use crate::ty::TyCtxt;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir as hir;
use rustc_hir::Node;
/// conditional expression or repeating block. (Note that the
/// enclosing scope ID for the block associated with a closure is
/// the closure itself.)
- pub parent_map: FxHashMap<Scope, (Scope, ScopeDepth)>,
+ pub parent_map: FxIndexMap<Scope, (Scope, ScopeDepth)>,
/// Maps from a variable or binding ID to the block in which that
/// variable is declared.
- var_map: FxHashMap<hir::ItemLocalId, Scope>,
+ var_map: FxIndexMap<hir::ItemLocalId, Scope>,
/// Maps from a `NodeId` to the associated destruction scope (if any).
- destruction_scopes: FxHashMap<hir::ItemLocalId, Scope>,
+ destruction_scopes: FxIndexMap<hir::ItemLocalId, Scope>,
/// `rvalue_scopes` includes entries for those expressions whose
/// cleanup scope is larger than the default. The map goes from the
use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE};
use rustc_session::lint::{BuiltinLintDiagnostics, Level, Lint, LintBuffer};
use rustc_session::parse::feature_err_issue;
-use rustc_session::{DiagnosticMessageId, Session};
+use rustc_session::Session;
use rustc_span::symbol::{sym, Symbol};
-use rustc_span::{MultiSpan, Span};
+use rustc_span::Span;
use std::num::NonZeroU32;
#[derive(PartialEq, Clone, Copy, Debug)]
None => format!("use of unstable library feature '{}'", &feature),
};
- let msp: MultiSpan = span.into();
- let sm = &sess.parse_sess.source_map();
- let span_key = msp.primary_span().and_then(|sp: Span| {
- if !sp.is_dummy() {
- let file = sm.lookup_char_pos(sp.lo()).file;
- if file.is_imported() { None } else { Some(span) }
- } else {
- None
- }
- });
-
- let error_id = (DiagnosticMessageId::StabilityId(issue), span_key, msg.clone());
- let fresh = sess.one_time_diagnostics.borrow_mut().insert(error_id);
- if fresh {
- if is_soft {
- soft_handler(SOFT_UNSTABLE, span, &msg)
- } else {
- let mut err =
- feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), &msg);
- if let Some((inner_types, ref msg, sugg, applicability)) = suggestion {
- err.span_suggestion(inner_types, msg, sugg, applicability);
- }
- err.emit();
+ if is_soft {
+ soft_handler(SOFT_UNSTABLE, span, &msg)
+ } else {
+ let mut err =
+ feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), &msg);
+ if let Some((inner_types, ref msg, sugg, applicability)) = suggestion {
+ err.span_suggestion(inner_types, msg, sugg, applicability);
}
+ err.emit();
}
}
let kind = tcx.def_kind(def_id).descr(def_id);
deprecation_suggestion(&mut diag, kind, suggestion, method_span);
}
- diag.emit()
+ diag.emit();
});
}
) {
let soft_handler = |lint, span, msg: &_| {
self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
- lint.build(msg).emit()
+ lint.build(msg).emit();
})
};
match self.eval_stability(def_id, id, span, method_span) {
let style = NodeStyle { title_bg: Some(bgcolor.to_owned()), ..Default::default() };
let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{:?}", x)).collect();
- // add the terminator to the stmts, gsgdt can print it out seperately
+ // add the terminator to the stmts, gsgdt can print it out separately
let mut terminator_head = String::new();
data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
stmts.push(terminator_head);
impl From<ErrorHandled> for InterpErrorInfo<'_> {
fn from(err: ErrorHandled) -> Self {
match err {
- ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted => {
+ ErrorHandled::Reported(ErrorGuaranteed { .. }) | ErrorHandled::Linted => {
err_inval!(ReferencedConstant)
}
ErrorHandled::TooGeneric => err_inval!(TooGeneric),
match self {
TooGeneric => write!(f, "encountered overly generic constant"),
ReferencedConstant => write!(f, "referenced constant has errors"),
- AlreadyReported(ErrorGuaranteed) => {
+ AlreadyReported(ErrorGuaranteed { .. }) => {
write!(f, "encountered constants with type errors, stopping evaluation")
}
Layout(ref err) => write!(f, "{}", err),
MemoryAccessTest,
/// We are doing pointer arithmetic.
PointerArithmeticTest,
+ /// We are doing pointer offset_from.
+ OffsetFromTest,
/// None of the above -- generic/unspecific inbounds test.
InboundsTest,
}
CheckInAllocMsg::DerefTest => "dereferencing pointer failed: ",
CheckInAllocMsg::MemoryAccessTest => "memory access failed: ",
CheckInAllocMsg::PointerArithmeticTest => "pointer arithmetic failed: ",
+ CheckInAllocMsg::OffsetFromTest => "out-of-bounds offset_from: ",
CheckInAllocMsg::InboundsTest => "",
}
)
DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => {
write!(f, "null pointer is not a valid pointer for this operation")
}
+ DanglingIntPointer(0, msg) => {
+ write!(f, "{}null pointer is not a valid pointer", msg)
+ }
DanglingIntPointer(i, msg) => {
write!(f, "{}0x{:x} is not a valid pointer", msg, i)
}
Pointer::new(*alloc, access.access_offset),
access.uninit_size.bytes(),
pluralize!(access.uninit_size.bytes()),
- if access.uninit_size.bytes() != 1 { "are" } else { "is" },
+ pluralize!("is", access.uninit_size.bytes()),
Pointer::new(*alloc, access.uninit_offset),
),
InvalidUninitBytes(None) => write!(
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
-/// pointer), but `derive` adds some unecessary bounds.
+/// pointer), but `derive` adds some unnecessary bounds.
pub trait Provenance: Copy + fmt::Debug {
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
/// If `true`, ptr-to-int casts work by simply discarding the provenance.
use super::{ErrorHandled, EvalToConstValueResult, GlobalId};
use crate::mir;
+use crate::ty::fold::TypeFoldable;
use crate::ty::subst::InternalSubsts;
use crate::ty::{self, TyCtxt};
use rustc_hir::def_id::DefId;
ct: ty::Unevaluated<'tcx>,
span: Option<Span>,
) -> EvalToConstValueResult<'tcx> {
+ // Cannot resolve `Unevaluated` constants that contain inference
+ // variables. We reject those here since `resolve_opt_const_arg`
+ // would fail otherwise.
+ //
+ // When trying to evaluate constants containing inference variables,
+ // use `InferCtxt::const_eval_resolve` instead.
+ if ct.substs.has_infer_types_or_consts() {
+ bug!("did not expect inference variables here");
+ }
+
match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: ct.promoted };
use self::graph_cyclic_cache::GraphIsCyclicCache;
use self::predecessors::{PredecessorCache, Predecessors};
pub use self::query::*;
+use self::switch_sources::{SwitchSourceCache, SwitchSources};
pub mod coverage;
mod generic_graph;
pub mod pretty;
mod query;
pub mod spanview;
+mod switch_sources;
pub mod tcx;
pub mod terminator;
pub use terminator::*;
/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
/// dialects forbid certain variants or values in certain phases.
///
-/// Note: Each phase's validation checks all invariants of the *previous* phases' dialects. A phase
-/// that changes the dialect documents what invariants must be upheld *after* that phase finishes.
-///
/// Warning: ordering of variants is significant.
#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(HashStable)]
pub enum MirPhase {
- Build = 0,
+ Built = 0,
// FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
// We used to have this for pre-miri MIR based const eval.
Const = 1,
/// by creating a new MIR body per promoted element. After this phase (and thus the termination
/// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
/// query.
- ConstPromotion = 2,
- /// After this phase
- /// * the only `AggregateKind`s allowed are `Array` and `Generator`,
- /// * `DropAndReplace` is gone for good
- /// * `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop` terminator
- /// means that the auto-generated drop glue will be invoked.
- DropLowering = 3,
- /// After this phase, generators are explicit state machines (no more `Yield`).
- /// `AggregateKind::Generator` is gone for good.
- GeneratorLowering = 4,
- Optimization = 5,
+ ConstsPromoted = 2,
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::DropAndReplace`](terminator::TerminatorKind::DropAndReplace)
+ /// * [`TerminatorKind::FalseUnwind`](terminator::TerminatorKind::FalseUnwind)
+ /// * [`TerminatorKind::FalseEdge`](terminator::TerminatorKind::FalseEdge)
+ /// * [`StatementKind::FakeRead`]
+ /// * [`StatementKind::AscribeUserType`]
+ /// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::Retag`]
+ ///
+ /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
+ /// terminator means that the auto-generated drop glue will be invoked.
+ DropsLowered = 3,
+ /// Beginning with this phase, the following variant is disallowed:
+ /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::SetDiscriminant`]
+ Deaggregated = 4,
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::Yield`](terminator::TerminatorKind::Yield)
+ /// * [`TerminatorKind::GeneratorDrop`](terminator::TerminatorKind::GeneratorDrop)
+ GeneratorsLowered = 5,
+ Optimized = 6,
}
impl MirPhase {
pub is_polymorphic: bool,
predecessor_cache: PredecessorCache,
+ switch_source_cache: SwitchSourceCache,
is_cyclic: GraphIsCyclicCache,
pub tainted_by_errors: Option<ErrorGuaranteed>,
);
let mut body = Body {
- phase: MirPhase::Build,
+ phase: MirPhase::Built,
source,
basic_blocks,
source_scopes,
required_consts: Vec::new(),
is_polymorphic: false,
predecessor_cache: PredecessorCache::new(),
+ switch_source_cache: SwitchSourceCache::new(),
is_cyclic: GraphIsCyclicCache::new(),
tainted_by_errors,
};
/// crate.
pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
let mut body = Body {
- phase: MirPhase::Build,
+ phase: MirPhase::Built,
source: MirSource::item(DefId::local(CRATE_DEF_INDEX)),
basic_blocks,
source_scopes: IndexVec::new(),
var_debug_info: Vec::new(),
is_polymorphic: false,
predecessor_cache: PredecessorCache::new(),
+ switch_source_cache: SwitchSourceCache::new(),
is_cyclic: GraphIsCyclicCache::new(),
tainted_by_errors: None,
};
// FIXME: Use a finer-grained API for this, so only transformations that alter terminators
// invalidate the caches.
self.predecessor_cache.invalidate();
+ self.switch_source_cache.invalidate();
self.is_cyclic.invalidate();
&mut self.basic_blocks
}
&mut self,
) -> (&mut IndexVec<BasicBlock, BasicBlockData<'tcx>>, &mut LocalDecls<'tcx>) {
self.predecessor_cache.invalidate();
+ self.switch_source_cache.invalidate();
self.is_cyclic.invalidate();
(&mut self.basic_blocks, &mut self.local_decls)
}
&mut Vec<VarDebugInfo<'tcx>>,
) {
self.predecessor_cache.invalidate();
+ self.switch_source_cache.invalidate();
self.is_cyclic.invalidate();
(&mut self.basic_blocks, &mut self.local_decls, &mut self.var_debug_info)
}
self.predecessor_cache.compute(&self.basic_blocks)
}
+ #[inline]
+ pub fn switch_sources(&self) -> &SwitchSources {
+ self.switch_source_cache.compute(&self.basic_blocks)
+ }
+
#[inline]
pub fn dominators(&self) -> Dominators<BasicBlock> {
dominators(self)
}
}
+/// The various kinds of statements that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
+/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
+/// causing an ICE if they are violated.
#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable)]
pub enum StatementKind<'tcx> {
/// Write the RHS Rvalue to the LHS Place.
+ ///
+ /// The LHS place may not overlap with any memory accessed on the RHS.
Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
/// This represents all the reading that a pattern match may do
pub enum ProjectionElem<V, T> {
Deref,
Field(Field, T),
+ /// Index into a slice/array.
+ ///
+ /// Note that this does not also dereference, and so it does not exactly correspond to slice
+ /// indexing in Rust. In other words, in the below Rust code:
+ ///
+ /// ```rust
+ /// let x = &[1, 2, 3, 4];
+ /// let i = 2;
+ /// x[i];
+ /// ```
+ ///
+ /// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
+ /// thing is true of the `ConstantIndex` and `Subslice` projections below.
Index(V),
/// These indices are generated by slice patterns. Easiest to explain
/// Rvalues
#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+/// The various kinds of rvalues that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
+/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
+/// causing an ICE if they are violated.
pub enum Rvalue<'tcx> {
/// x (either a move or copy, depending on type of x)
Use(Operand<'tcx>),
AggregateKind::Adt(adt_did, variant, substs, _user_ty, _) => {
ty::tls::with(|tcx| {
- let variant_def = &tcx.adt_def(adt_did).variants[variant];
+ let variant_def = &tcx.adt_def(adt_did).variant(variant);
let substs = tcx.lift(substs).expect("could not lift for printing");
let name = FmtPrinter::new(tcx, Namespace::ValueNS)
.print_def_path(variant_def.def_id, substs)?
self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field))
}
- pub fn variant(self, adt_def: &'tcx AdtDef, variant_index: VariantIdx, field: Field) -> Self {
+ pub fn variant(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx, field: Field) -> Self {
self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field))
}
}
pub(crate) fn variant(
mut self,
- adt_def: &AdtDef,
+ adt_def: AdtDef<'_>,
variant_index: VariantIdx,
field: Field,
) -> Self {
self.projs.push(ProjectionElem::Downcast(
- Some(adt_def.variants[variant_index].name),
+ Some(adt_def.variant(variant_index).name),
variant_index,
));
self.projs.push(ProjectionElem::Field(field, ()));
/// The result of the `mir_const_qualif` query.
///
-/// Each field (except `error_occured`) corresponds to an implementer of the `Qualif` trait in
+/// Each field (except `error_occurred`) corresponds to an implementer of the `Qualif` trait in
/// `rustc_const_eval/src/transform/check_consts/qualifs.rs`. See that file for more information on each
/// `Qualif`.
#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
--- /dev/null
+//! Lazily compute the inverse of each `SwitchInt`'s switch targets. Modeled after
+//! `Predecessors`/`PredecessorCache`.
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize as serialize;
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+
+pub type SwitchSources = IndexVec<BasicBlock, IndexVec<BasicBlock, SmallVec<[Option<u128>; 1]>>>;
+
+#[derive(Clone, Debug)]
+pub(super) struct SwitchSourceCache {
+ cache: OnceCell<SwitchSources>,
+}
+
+impl SwitchSourceCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ SwitchSourceCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the switch source cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns the switch sources for this MIR.
+ #[inline]
+ pub(super) fn compute(
+ &self,
+ basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+ ) -> &SwitchSources {
+ self.cache.get_or_init(|| {
+ let mut switch_sources = IndexVec::from_elem(
+ IndexVec::from_elem(SmallVec::new(), basic_blocks),
+ basic_blocks,
+ );
+ for (bb, data) in basic_blocks.iter_enumerated() {
+ if let Some(Terminator {
+ kind: TerminatorKind::SwitchInt { targets, .. }, ..
+ }) = &data.terminator
+ {
+ for (value, target) in targets.iter() {
+ switch_sources[target][bb].push(Some(value));
+ }
+ switch_sources[targets.otherwise()][bb].push(None);
+ }
+ }
+
+ switch_sources
+ })
+ }
+}
+
+impl<S: serialize::Encoder> serialize::Encodable<S> for SwitchSourceCache {
+ #[inline]
+ fn encode(&self, s: &mut S) -> Result<(), S::Error> {
+ s.emit_unit()
+ }
+}
+
+impl<D: serialize::Decoder> serialize::Decodable<D> for SwitchSourceCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for SwitchSourceCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeFoldableAndLiftImpls! {
+ SwitchSourceCache,
+}
None => adt_def.non_enum_variant(),
Some(variant_index) => {
assert!(adt_def.is_enum());
- &adt_def.variants[variant_index]
+ &adt_def.variant(variant_index)
}
};
let field_def = &variant_def.fields[f.index()];
}
/// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`.
- query thir_body(key: ty::WithOptConstParam<LocalDefId>) -> (&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId) {
+ query thir_body(key: ty::WithOptConstParam<LocalDefId>)
+ -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed>
+ {
// Perf tests revealed that hashing THIR is inefficient (see #85729).
no_hash
desc { |tcx| "building THIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
}
}
- query try_unify_abstract_consts(key: (
- ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
- )) -> bool {
+ query try_unify_abstract_consts(key:
+ ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
+ )>) -> bool {
desc {
|tcx| "trying to unify the generic constants {} and {}",
- tcx.def_path_str(key.0.def.did), tcx.def_path_str(key.1.def.did)
+ tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
}
}
storage(ArenaCacheSelector<'tcx>)
separate_provide_extern
}
- query adt_def(key: DefId) -> &'tcx ty::AdtDef {
+ query adt_def(key: DefId) -> ty::AdtDef<'tcx> {
desc { |tcx| "computing ADT definition for `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
query adt_dtorck_constraint(
key: DefId
- ) -> Result<&'tcx DtorckConstraint<'tcx>, NoSolution> {
+ ) -> Result<&'tcx DropckConstraint<'tcx>, NoSolution> {
desc { |tcx| "computing drop-check constraints for `{}`", tcx.def_path_str(key) }
}
desc { "get a &core::panic::Location referring to a span" }
}
+ // FIXME get rid of this with valtrees
query lit_to_const(
key: LitToConstInput<'tcx>
) -> Result<ty::Const<'tcx>, LitToConstError> {
separate_provide_extern
}
+ /// Determines whether an item is annotated with `doc(hidden)`.
+ query is_doc_hidden(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
+ }
+
query item_attrs(def_id: DefId) -> &'tcx [ast::Attribute] {
desc { |tcx| "collecting attributes of `{}`", tcx.def_path_str(def_id) }
separate_provide_extern
#[derive(Debug, HashStable)]
pub struct Adt<'tcx> {
/// The ADT we're constructing.
- pub adt_def: &'tcx AdtDef,
+ pub adt_def: AdtDef<'tcx>,
/// The variant of the ADT.
pub variant_index: VariantIdx,
pub substs: SubstsRef<'tcx>,
},
/// An inline `const` block, e.g. `const {}`.
ConstBlock {
- value: Const<'tcx>,
+ did: DefId,
+ substs: SubstsRef<'tcx>,
},
/// An array literal constructed from one repeated element, e.g. `[1; 5]`.
Repeat {
},
/// A literal.
Literal {
- literal: Const<'tcx>,
+ lit: &'tcx hir::Lit,
+ neg: bool,
+ },
+ /// For literals that don't correspond to anything in the HIR
+ NonHirLiteral {
+ lit: ty::ScalarInt,
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// Associated constants and named constants
+ NamedConst {
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
- /// The `DefId` of the `const` item this literal
- /// was produced from, if this is not a user-written
- /// literal value.
- const_id: Option<DefId>,
},
+ ConstParam {
+ param: ty::ParamConst,
+ def_id: DefId,
+ },
+ // FIXME improve docs for `StaticRef` by distinguishing it from `NamedConst`
/// A literal containing the address of a `static`.
///
/// This is only distinguished from `Literal` so that we can register some
},
}
+impl<'tcx> ExprKind<'tcx> {
+ pub fn zero_sized_literal(user_ty: Option<Canonical<'tcx, UserType<'tcx>>>) -> Self {
+ ExprKind::NonHirLiteral { lit: ty::ScalarInt::ZST, user_ty }
+ }
+}
+
/// Represents the association of a field identifier and an expression.
///
/// This is used in struct constructors.
/// `Foo(...)` or `Foo{...}` or `Foo`, where `Foo` is a variant name from an ADT with
/// multiple variants.
Variant {
- adt_def: &'tcx AdtDef,
+ adt_def: AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
variant_index: VariantIdx,
subpatterns: Vec<FieldPat<'tcx>>,
PatKind::Variant { ref subpatterns, .. } | PatKind::Leaf { ref subpatterns } => {
let variant = match *self.kind {
PatKind::Variant { adt_def, variant_index, .. } => {
- Some(&adt_def.variants[variant_index])
+ Some(adt_def.variant(variant_index))
}
_ => self.ty.ty_adt_def().and_then(|adt| {
if !adt.is_enum() { Some(adt.non_enum_variant()) } else { None }
use super::{
Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir,
};
-use rustc_middle::ty::Const;
pub trait Visitor<'a, 'tcx: 'a>: Sized {
fn thir(&self) -> &'a Thir<'tcx>;
fn visit_pat(&mut self, pat: &Pat<'tcx>) {
walk_pat(self, pat);
}
-
- fn visit_const(&mut self, _cnst: Const<'tcx>) {}
}
pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Expr<'tcx>) {
visitor.visit_expr(&visitor.thir()[value])
}
}
- ConstBlock { value } => visitor.visit_const(value),
- Repeat { value, count } => {
+ ConstBlock { did: _, substs: _ } => {}
+ Repeat { value, count: _ } => {
visitor.visit_expr(&visitor.thir()[value]);
- visitor.visit_const(count);
}
Array { ref fields } | Tuple { ref fields } => {
for &field in &**fields {
visitor.visit_expr(&visitor.thir()[source])
}
Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
- Literal { literal, user_ty: _, const_id: _ } => visitor.visit_const(literal),
+ Literal { lit: _, neg: _ } => {}
+ NonHirLiteral { lit: _, user_ty: _ } => {}
+ NamedConst { def_id: _, substs: _, user_ty: _ } => {}
+ ConstParam { param: _, def_id: _ } => {}
StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
for op in &**operands {
visitor.visit_pat(&subpattern.pattern);
}
}
- Constant { value } => visitor.visit_const(*value),
- Range(range) => {
- visitor.visit_const(range.lo);
- visitor.visit_const(range.hi);
- }
+ Constant { value: _ } => {}
+ Range(_) => {}
Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
for subpattern in prefix {
visitor.visit_pat(&subpattern);
type InternedVariances = Vec<chalk_ir::Variance>;
type InternedConstraints = Vec<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>>;
type DefId = DefId;
- type InternedAdtId = &'tcx AdtDef;
+ type InternedAdtId = AdtDef<'tcx>;
type Identifier = ();
type FnAbi = Abi;
BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
- ImplDerivedObligation(DerivedObligationCause<'tcx>),
+ ImplDerivedObligation(Box<ImplDerivedObligationCause<'tcx>>),
DerivedObligation(DerivedObligationCause<'tcx>),
},
}
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct ImplDerivedObligationCause<'tcx> {
+ pub derived: DerivedObligationCause<'tcx>,
+ pub impl_def_id: DefId,
+ pub span: Span,
+}
+
impl ObligationCauseCode<'_> {
// Return the base obligation, ignoring derived obligations.
pub fn peel_derives(&self) -> &Self {
let mut base_cause = self;
- while let BuiltinDerivedObligation(DerivedObligationCause { parent_code, .. })
- | ImplDerivedObligation(DerivedObligationCause { parent_code, .. })
- | DerivedObligation(DerivedObligationCause { parent_code, .. })
- | FunctionArgumentObligation { parent_code, .. } = base_cause
- {
- base_cause = &parent_code;
+ loop {
+ match base_cause {
+ BuiltinDerivedObligation(DerivedObligationCause { parent_code, .. })
+ | DerivedObligation(DerivedObligationCause { parent_code, .. })
+ | FunctionArgumentObligation { parent_code, .. } => {
+ base_cause = &parent_code;
+ }
+ ImplDerivedObligation(obligation_cause) => {
+ base_cause = &*obligation_cause.derived.parent_code;
+ }
+ _ => break,
+ }
}
base_cause
}
/// A given constant couldn't be evaluated.
NotConstEvaluatable(NotConstEvaluatable),
/// Exceeded the recursion depth during type projection.
- Overflow,
+ Overflow(OverflowError),
/// Signaling that an error has already been emitted, to avoid
/// multiple errors being shown.
ErrorReporting,
TraitAlias(ImplSourceTraitAliasData<'tcx, N>),
/// ImplSource for a `const Drop` implementation.
- ConstDrop(ImplSourceConstDropData<N>),
+ ConstDestruct(ImplSourceConstDestructData<N>),
}
impl<'tcx, N> ImplSource<'tcx, N> {
| ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(),
ImplSource::TraitAlias(d) => d.nested,
ImplSource::TraitUpcasting(d) => d.nested,
- ImplSource::ConstDrop(i) => i.nested,
+ ImplSource::ConstDestruct(i) => i.nested,
}
}
| ImplSource::Pointee(ImplSourcePointeeData) => &[],
ImplSource::TraitAlias(d) => &d.nested,
ImplSource::TraitUpcasting(d) => &d.nested,
- ImplSource::ConstDrop(i) => &i.nested,
+ ImplSource::ConstDestruct(i) => &i.nested,
}
}
nested: d.nested.into_iter().map(f).collect(),
})
}
- ImplSource::ConstDrop(i) => ImplSource::ConstDrop(ImplSourceConstDropData {
- nested: i.nested.into_iter().map(f).collect(),
- }),
+ ImplSource::ConstDestruct(i) => {
+ ImplSource::ConstDestruct(ImplSourceConstDestructData {
+ nested: i.nested.into_iter().map(f).collect(),
+ })
+ }
}
}
}
pub struct ImplSourcePointeeData;
#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)]
-pub struct ImplSourceConstDropData<N> {
+pub struct ImplSourceConstDestructData<N> {
pub nested: Vec<N>,
}
/// A set of constraints that need to be satisfied in order for
/// a type to be valid for destruction.
#[derive(Clone, Debug, HashStable)]
-pub struct DtorckConstraint<'tcx> {
+pub struct DropckConstraint<'tcx> {
/// Types that are required to be alive in order for this
/// type to be valid for destruction.
pub outlives: Vec<ty::subst::GenericArg<'tcx>>,
pub overflows: Vec<Ty<'tcx>>,
}
-impl<'tcx> DtorckConstraint<'tcx> {
- pub fn empty() -> DtorckConstraint<'tcx> {
- DtorckConstraint { outlives: vec![], dtorck_types: vec![], overflows: vec![] }
+impl<'tcx> DropckConstraint<'tcx> {
+ pub fn empty() -> DropckConstraint<'tcx> {
+ DropckConstraint { outlives: vec![], dtorck_types: vec![], overflows: vec![] }
}
}
-impl<'tcx> FromIterator<DtorckConstraint<'tcx>> for DtorckConstraint<'tcx> {
- fn from_iter<I: IntoIterator<Item = DtorckConstraint<'tcx>>>(iter: I) -> Self {
+impl<'tcx> FromIterator<DropckConstraint<'tcx>> for DropckConstraint<'tcx> {
+ fn from_iter<I: IntoIterator<Item = DropckConstraint<'tcx>>>(iter: I) -> Self {
let mut result = Self::empty();
- for DtorckConstraint { outlives, dtorck_types, overflows } in iter {
+ for DropckConstraint { outlives, dtorck_types, overflows } in iter {
result.outlives.extend(outlives);
result.dtorck_types.extend(dtorck_types);
result.overflows.extend(overflows);
use self::EvaluationResult::*;
use super::{SelectionError, SelectionResult};
+use rustc_errors::ErrorGuaranteed;
use crate::ty;
BuiltinUnsizeCandidate,
- /// Implementation of `const Drop`, optionally from a custom `impl const Drop`.
- ConstDropCandidate(Option<DefId>),
+ /// Implementation of `const Destruct`, optionally from a custom `impl const Drop`.
+ ConstDestructCandidate(Option<DefId>),
}
/// The result of trait evaluation. The order is important
/// Indicates that trait evaluation caused overflow and in which pass.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
pub enum OverflowError {
+ Error(ErrorGuaranteed),
Canonical,
ErrorReporting,
}
+impl From<ErrorGuaranteed> for OverflowError {
+ fn from(e: ErrorGuaranteed) -> OverflowError {
+ OverflowError::Error(e)
+ }
+}
+
+TrivialTypeFoldableAndLiftImpls! {
+ OverflowError,
+}
+
impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
match overflow_error {
- OverflowError::Canonical => SelectionError::Overflow,
+ OverflowError::Error(e) => SelectionError::Overflow(OverflowError::Error(e)),
+ OverflowError::Canonical => SelectionError::Overflow(OverflowError::Canonical),
OverflowError::ErrorReporting => SelectionError::ErrorReporting,
}
}
pub children: DefIdMap<Children>,
/// Whether an error was emitted while constructing the graph.
- pub has_errored: bool,
+ pub has_errored: Option<ErrorGuaranteed>,
}
impl Graph {
pub fn new() -> Graph {
- Graph { parent: Default::default(), children: Default::default(), has_errored: false }
+ Graph { parent: Default::default(), children: Default::default(), has_errored: None }
}
/// The parent of a given impl, which is the `DefId` of the trait when the
) -> Result<Ancestors<'tcx>, ErrorGuaranteed> {
let specialization_graph = tcx.specialization_graph_of(trait_def_id);
- if specialization_graph.has_errored || tcx.type_of(start_from_impl).references_error() {
- Err(ErrorGuaranteed)
+ if let Some(reported) = specialization_graph.has_errored {
+ Err(reported)
+ } else if let Some(reported) = tcx.type_of(start_from_impl).error_reported() {
+ Err(reported)
} else {
Ok(Ancestors {
trait_def_id,
super::ImplSource::TraitUpcasting(ref d) => write!(f, "{:?}", d),
- super::ImplSource::ConstDrop(ref d) => write!(f, "{:?}", d),
+ super::ImplSource::ConstDestruct(ref d) => write!(f, "{:?}", d),
}
}
}
}
}
-impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceConstDropData<N> {
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceConstDestructData<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "ImplSourceConstDropData(nested={:?})", self.nested)
+ write!(f, "ImplSourceConstDestructData(nested={:?})", self.nested)
}
}
use rustc_data_structures::captures::Captures;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::intern::Interned;
use rustc_data_structures::stable_hasher::HashingControls;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind, Res};
use rustc_hir::def_id::DefId;
/// where `x` here represents the `DefId` of `S.x`. Then, the `DefId`
/// can be used with [`TyCtxt::type_of()`] to get the type of the field.
#[derive(TyEncodable, TyDecodable)]
-pub struct AdtDef {
+pub struct AdtDefData {
/// The `DefId` of the struct, enum or union item.
pub did: DefId,
/// Variants of the ADT. If this is a struct or union, then there will be a single variant.
- pub variants: IndexVec<VariantIdx, VariantDef>,
+ variants: IndexVec<VariantIdx, VariantDef>,
/// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
flags: AdtFlags,
/// Repr options provided by the user.
- pub repr: ReprOptions,
+ repr: ReprOptions,
}
-impl PartialOrd for AdtDef {
- fn partial_cmp(&self, other: &AdtDef) -> Option<Ordering> {
+impl PartialOrd for AdtDefData {
+ fn partial_cmp(&self, other: &AdtDefData) -> Option<Ordering> {
Some(self.cmp(&other))
}
}
/// There should be only one AdtDef for each `did`, therefore
/// it is fine to implement `Ord` only based on `did`.
-impl Ord for AdtDef {
- fn cmp(&self, other: &AdtDef) -> Ordering {
+impl Ord for AdtDefData {
+ fn cmp(&self, other: &AdtDefData) -> Ordering {
self.did.cmp(&other.did)
}
}
/// There should be only one AdtDef for each `did`, therefore
/// it is fine to implement `PartialEq` only based on `did`.
-impl PartialEq for AdtDef {
+impl PartialEq for AdtDefData {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.did == other.did
}
}
-impl Eq for AdtDef {}
+impl Eq for AdtDefData {}
/// There should be only one AdtDef for each `did`, therefore
/// it is fine to implement `Hash` only based on `did`.
-impl Hash for AdtDef {
+impl Hash for AdtDefData {
#[inline]
fn hash<H: Hasher>(&self, s: &mut H) {
self.did.hash(s)
}
}
-impl<'a> HashStable<StableHashingContext<'a>> for AdtDef {
+impl<'a> HashStable<StableHashingContext<'a>> for AdtDefData {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
thread_local! {
static CACHE: RefCell<FxHashMap<(usize, HashingControls), Fingerprint>> = Default::default();
}
let hash: Fingerprint = CACHE.with(|cache| {
- let addr = self as *const AdtDef as usize;
+ let addr = self as *const AdtDefData as usize;
let hashing_controls = hcx.hashing_controls();
*cache.borrow_mut().entry((addr, hashing_controls)).or_insert_with(|| {
- let ty::AdtDef { did, ref variants, ref flags, ref repr } = *self;
+ let ty::AdtDefData { did, ref variants, ref flags, ref repr } = *self;
let mut hasher = StableHasher::new();
did.hash_stable(hcx, &mut hasher);
}
}
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+#[cfg_attr(not(bootstrap), rustc_pass_by_value)]
+pub struct AdtDef<'tcx>(pub Interned<'tcx, AdtDefData>);
+
+impl<'tcx> AdtDef<'tcx> {
+ pub fn did(self) -> DefId {
+ self.0.0.did
+ }
+
+ pub fn variants(self) -> &'tcx IndexVec<VariantIdx, VariantDef> {
+ &self.0.0.variants
+ }
+
+ pub fn variant(self, idx: VariantIdx) -> &'tcx VariantDef {
+ &self.0.0.variants[idx]
+ }
+
+ pub fn flags(self) -> AdtFlags {
+ self.0.0.flags
+ }
+
+ pub fn repr(self) -> ReprOptions {
+ self.0.0.repr
+ }
+}
+
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
pub enum AdtKind {
Struct,
}
}
-impl<'tcx> AdtDef {
- /// Creates a new `AdtDef`.
+impl AdtDefData {
+ /// Creates a new `AdtDefData`.
pub(super) fn new(
tcx: TyCtxt<'_>,
did: DefId,
flags |= AdtFlags::IS_MANUALLY_DROP;
}
- AdtDef { did, variants, flags, repr }
+ AdtDefData { did, variants, flags, repr }
}
+}
+impl<'tcx> AdtDef<'tcx> {
/// Returns `true` if this is a struct.
#[inline]
- pub fn is_struct(&self) -> bool {
- self.flags.contains(AdtFlags::IS_STRUCT)
+ pub fn is_struct(self) -> bool {
+ self.flags().contains(AdtFlags::IS_STRUCT)
}
/// Returns `true` if this is a union.
#[inline]
- pub fn is_union(&self) -> bool {
- self.flags.contains(AdtFlags::IS_UNION)
+ pub fn is_union(self) -> bool {
+ self.flags().contains(AdtFlags::IS_UNION)
}
/// Returns `true` if this is an enum.
#[inline]
- pub fn is_enum(&self) -> bool {
- self.flags.contains(AdtFlags::IS_ENUM)
+ pub fn is_enum(self) -> bool {
+ self.flags().contains(AdtFlags::IS_ENUM)
}
/// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
#[inline]
- pub fn is_variant_list_non_exhaustive(&self) -> bool {
- self.flags.contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
+ pub fn is_variant_list_non_exhaustive(self) -> bool {
+ self.flags().contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
}
/// Returns the kind of the ADT.
#[inline]
- pub fn adt_kind(&self) -> AdtKind {
+ pub fn adt_kind(self) -> AdtKind {
if self.is_enum() {
AdtKind::Enum
} else if self.is_union() {
}
/// Returns a description of this abstract data type.
- pub fn descr(&self) -> &'static str {
+ pub fn descr(self) -> &'static str {
match self.adt_kind() {
AdtKind::Struct => "struct",
AdtKind::Union => "union",
/// Returns a description of a variant of this abstract data type.
#[inline]
- pub fn variant_descr(&self) -> &'static str {
+ pub fn variant_descr(self) -> &'static str {
match self.adt_kind() {
AdtKind::Struct => "struct",
AdtKind::Union => "union",
/// If this function returns `true`, it implies that `is_struct` must return `true`.
#[inline]
- pub fn has_ctor(&self) -> bool {
- self.flags.contains(AdtFlags::HAS_CTOR)
+ pub fn has_ctor(self) -> bool {
+ self.flags().contains(AdtFlags::HAS_CTOR)
}
/// Returns `true` if this type is `#[fundamental]` for the purposes
/// of coherence checking.
#[inline]
- pub fn is_fundamental(&self) -> bool {
- self.flags.contains(AdtFlags::IS_FUNDAMENTAL)
+ pub fn is_fundamental(self) -> bool {
+ self.flags().contains(AdtFlags::IS_FUNDAMENTAL)
}
/// Returns `true` if this is `PhantomData<T>`.
#[inline]
- pub fn is_phantom_data(&self) -> bool {
- self.flags.contains(AdtFlags::IS_PHANTOM_DATA)
+ pub fn is_phantom_data(self) -> bool {
+ self.flags().contains(AdtFlags::IS_PHANTOM_DATA)
}
/// Returns `true` if this is Box<T>.
#[inline]
- pub fn is_box(&self) -> bool {
- self.flags.contains(AdtFlags::IS_BOX)
+ pub fn is_box(self) -> bool {
+ self.flags().contains(AdtFlags::IS_BOX)
}
/// Returns `true` if this is `ManuallyDrop<T>`.
#[inline]
- pub fn is_manually_drop(&self) -> bool {
- self.flags.contains(AdtFlags::IS_MANUALLY_DROP)
+ pub fn is_manually_drop(self) -> bool {
+ self.flags().contains(AdtFlags::IS_MANUALLY_DROP)
}
/// Returns `true` if this type has a destructor.
- pub fn has_dtor(&self, tcx: TyCtxt<'tcx>) -> bool {
+ pub fn has_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
self.destructor(tcx).is_some()
}
- pub fn has_non_const_dtor(&self, tcx: TyCtxt<'tcx>) -> bool {
+ pub fn has_non_const_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
matches!(self.destructor(tcx), Some(Destructor { constness: hir::Constness::NotConst, .. }))
}
/// Asserts this is a struct or union and returns its unique variant.
- pub fn non_enum_variant(&self) -> &VariantDef {
+ pub fn non_enum_variant(self) -> &'tcx VariantDef {
assert!(self.is_struct() || self.is_union());
- &self.variants[VariantIdx::new(0)]
+ &self.variant(VariantIdx::new(0))
}
#[inline]
- pub fn predicates(&self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
- tcx.predicates_of(self.did)
+ pub fn predicates(self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+ tcx.predicates_of(self.did())
}
/// Returns an iterator over all fields contained
/// by this ADT.
#[inline]
- pub fn all_fields(&self) -> impl Iterator<Item = &FieldDef> + Clone {
- self.variants.iter().flat_map(|v| v.fields.iter())
+ pub fn all_fields(self) -> impl Iterator<Item = &'tcx FieldDef> + Clone {
+ self.variants().iter().flat_map(|v| v.fields.iter())
}
/// Whether the ADT lacks fields. Note that this includes uninhabited enums,
/// e.g., `enum Void {}` is considered payload free as well.
- pub fn is_payloadfree(&self) -> bool {
+ pub fn is_payloadfree(self) -> bool {
// Treat the ADT as not payload-free if arbitrary_enum_discriminant is used (#88621).
// This would disallow the following kind of enum from being casted into integer.
// ```
// }
// ```
if self
- .variants
+ .variants()
.iter()
.any(|v| matches!(v.discr, VariantDiscr::Explicit(_)) && v.ctor_kind != CtorKind::Const)
{
return false;
}
- self.variants.iter().all(|v| v.fields.is_empty())
+ self.variants().iter().all(|v| v.fields.is_empty())
}
/// Return a `VariantDef` given a variant id.
- pub fn variant_with_id(&self, vid: DefId) -> &VariantDef {
- self.variants.iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
+ pub fn variant_with_id(self, vid: DefId) -> &'tcx VariantDef {
+ self.variants().iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
}
/// Return a `VariantDef` given a constructor id.
- pub fn variant_with_ctor_id(&self, cid: DefId) -> &VariantDef {
- self.variants
+ pub fn variant_with_ctor_id(self, cid: DefId) -> &'tcx VariantDef {
+ self.variants()
.iter()
.find(|v| v.ctor_def_id == Some(cid))
.expect("variant_with_ctor_id: unknown variant")
}
/// Return the index of `VariantDef` given a variant id.
- pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx {
- self.variants
+ pub fn variant_index_with_id(self, vid: DefId) -> VariantIdx {
+ self.variants()
.iter_enumerated()
.find(|(_, v)| v.def_id == vid)
.expect("variant_index_with_id: unknown variant")
}
/// Return the index of `VariantDef` given a constructor id.
- pub fn variant_index_with_ctor_id(&self, cid: DefId) -> VariantIdx {
- self.variants
+ pub fn variant_index_with_ctor_id(self, cid: DefId) -> VariantIdx {
+ self.variants()
.iter_enumerated()
.find(|(_, v)| v.ctor_def_id == Some(cid))
.expect("variant_index_with_ctor_id: unknown variant")
.0
}
- pub fn variant_of_res(&self, res: Res) -> &VariantDef {
+ pub fn variant_of_res(self, res: Res) -> &'tcx VariantDef {
match res {
Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
}
#[inline]
- pub fn eval_explicit_discr(&self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
+ pub fn eval_explicit_discr(self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
assert!(self.is_enum());
let param_env = tcx.param_env(expr_did);
- let repr_type = self.repr.discr_type();
+ let repr_type = self.repr().discr_type();
match tcx.const_eval_poly(expr_did) {
Ok(val) => {
let ty = repr_type.to_ty(tcx);
}
Err(err) => {
let msg = match err {
- ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted => {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
"enum discriminant evaluation failed"
}
ErrorHandled::TooGeneric => "enum discriminant depends on generics",
#[inline]
pub fn discriminants(
- &'tcx self,
+ self,
tcx: TyCtxt<'tcx>,
) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
assert!(self.is_enum());
- let repr_type = self.repr.discr_type();
+ let repr_type = self.repr().discr_type();
let initial = repr_type.initial_discriminant(tcx);
let mut prev_discr = None::<Discr<'tcx>>;
- self.variants.iter_enumerated().map(move |(i, v)| {
+ self.variants().iter_enumerated().map(move |(i, v)| {
let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
if let VariantDiscr::Explicit(expr_did) = v.discr {
if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
}
#[inline]
- pub fn variant_range(&self) -> Range<VariantIdx> {
- VariantIdx::new(0)..VariantIdx::new(self.variants.len())
+ pub fn variant_range(self) -> Range<VariantIdx> {
+ VariantIdx::new(0)..VariantIdx::new(self.variants().len())
}
/// Computes the discriminant value used by a specific variant.
/// assuming there are no constant-evaluation errors there.
#[inline]
pub fn discriminant_for_variant(
- &self,
+ self,
tcx: TyCtxt<'tcx>,
variant_index: VariantIdx,
) -> Discr<'tcx> {
let (val, offset) = self.discriminant_def_for_variant(variant_index);
let explicit_value = val
.and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
- .unwrap_or_else(|| self.repr.discr_type().initial_discriminant(tcx));
+ .unwrap_or_else(|| self.repr().discr_type().initial_discriminant(tcx));
explicit_value.checked_add(tcx, offset as u128).0
}
/// Yields a `DefId` for the discriminant and an offset to add to it
/// Alternatively, if there is no explicit discriminant, returns the
/// inferred discriminant directly.
- pub fn discriminant_def_for_variant(&self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
- assert!(!self.variants.is_empty());
+ pub fn discriminant_def_for_variant(self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
+ assert!(!self.variants().is_empty());
let mut explicit_index = variant_index.as_u32();
let expr_did;
loop {
- match self.variants[VariantIdx::from_u32(explicit_index)].discr {
+ match self.variant(VariantIdx::from_u32(explicit_index)).discr {
ty::VariantDiscr::Relative(0) => {
expr_did = None;
break;
(expr_did, variant_index.as_u32() - explicit_index)
}
- pub fn destructor(&self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
- tcx.adt_destructor(self.did)
+ pub fn destructor(self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
+ tcx.adt_destructor(self.did())
}
/// Returns a list of types such that `Self: Sized` if and only
///
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer (e.g., issue #31299).
- pub fn sized_constraint(&self, tcx: TyCtxt<'tcx>) -> &'tcx [Ty<'tcx>] {
- tcx.adt_sized_constraint(self.did).0
+ pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> &'tcx [Ty<'tcx>] {
+ tcx.adt_sized_constraint(self.did()).0
}
}
write!(
&mut symbol,
"__{}",
- def.variants[variant].fields[idx as usize].name.as_str(),
+ def.variant(variant).fields[idx as usize].name.as_str(),
)
.unwrap();
}
curr_string = format!(
"{}.{}",
curr_string,
- def.variants[variant].fields[idx as usize].name.as_str()
+ def.variant(variant).fields[idx as usize].name.as_str()
);
}
ty::Tuple(_) => {
use crate::thir;
use crate::traits;
use crate::ty::subst::SubstsRef;
-use crate::ty::{self, Ty, TyCtxt};
+use crate::ty::{self, AdtDef, Ty, TyCtxt};
use rustc_data_structures::fx::FxHashMap;
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
use rustc_span::Span;
}
}
+impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for AdtDef<'tcx> {
+ fn encode(&self, e: &mut E) -> Result<(), E::Error> {
+ self.0.0.encode(e)
+ }
+}
+
impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for AllocId {
fn encode(&self, e: &mut E) -> Result<(), E::Error> {
e.encode_alloc_id(self)
&'tcx mir::Body<'tcx>,
&'tcx mir::UnsafetyCheckResult,
&'tcx mir::BorrowCheckResult<'tcx>,
- &'tcx mir::coverage::CodeRegion,
- &'tcx ty::AdtDef
+ &'tcx mir::coverage::CodeRegion
}
pub trait TyDecoder<'tcx>: Decoder {
}
}
+impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for AdtDef<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.tcx().intern_adt_def(Decodable::decode(decoder))
+ }
+}
+
impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [(ty::Predicate<'tcx>, Span)] {
fn decode(decoder: &mut D) -> &'tcx Self {
decoder.tcx().arena.alloc_from_iter(
&'tcx mir::UnsafetyCheckResult,
&'tcx mir::BorrowCheckResult<'tcx>,
&'tcx mir::coverage::CodeRegion,
- &'tcx ty::List<ty::BoundVariableKind>,
- &'tcx ty::AdtDef
+ &'tcx ty::List<ty::BoundVariableKind>
}
#[macro_export]
if let Some(val) = self.val().try_eval(tcx, param_env) {
match val {
Ok(val) => Const::from_value(tcx, val, self.ty()),
- Err(ErrorGuaranteed) => tcx.const_error(self.ty()),
+ Err(ErrorGuaranteed { .. }) => tcx.const_error(self.ty()),
}
} else {
self
use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
use crate::ty::TyKind::*;
use crate::ty::{
- self, AdtDef, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
+ self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy,
FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List,
ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region,
/// except through the error-reporting functions on a [`tcx`][TyCtxt].
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[derive(TyEncodable, TyDecodable, HashStable)]
-pub struct DelaySpanBugEmitted(());
+pub struct DelaySpanBugEmitted {
+ pub reported: ErrorGuaranteed,
+ _priv: (),
+}
type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>;
const_allocation: InternedSet<'tcx, Allocation>,
bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
layout: InternedSet<'tcx, LayoutS<'tcx>>,
- adt_def: InternedSet<'tcx, AdtDef>,
+ adt_def: InternedSet<'tcx, AdtDefData>,
}
impl<'tcx> CtxtInterners<'tcx> {
kind: AdtKind,
variants: IndexVec<VariantIdx, ty::VariantDef>,
repr: ReprOptions,
- ) -> &'tcx ty::AdtDef {
- self.intern_adt_def(ty::AdtDef::new(self, did, kind, variants, repr))
+ ) -> ty::AdtDef<'tcx> {
+ self.intern_adt_def(ty::AdtDefData::new(self, did, kind, variants, repr))
}
/// Allocates a read-only byte or string literal for `mir::interpret`.
/// ensure it gets used.
#[track_caller]
pub fn ty_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Ty<'tcx> {
- self.sess.delay_span_bug(span, msg);
- self.mk_ty(Error(DelaySpanBugEmitted(())))
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_ty(Error(DelaySpanBugEmitted { reported, _priv: () }))
}
/// Like [TyCtxt::ty_error] but for constants.
span: S,
msg: &str,
) -> Const<'tcx> {
- self.sess.delay_span_bug(span, msg);
- self.mk_const(ty::ConstS { val: ty::ConstKind::Error(DelaySpanBugEmitted(())), ty })
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_const(ty::ConstS {
+ val: ty::ConstKind::Error(DelaySpanBugEmitted { reported, _priv: () }),
+ ty,
+ })
}
pub fn consider_optimizing<T: Fn() -> String>(self, msg: T) -> bool {
const_: mk_const(ConstS<'tcx>): Const -> Const<'tcx>,
const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
-}
-
-macro_rules! direct_interners_old {
- ($($name:ident: $method:ident($ty:ty),)+) => {
- $(impl<'tcx> Borrow<$ty> for InternedInSet<'tcx, $ty> {
- fn borrow<'a>(&'a self) -> &'a $ty {
- &self.0
- }
- }
-
- impl<'tcx> PartialEq for InternedInSet<'tcx, $ty> {
- fn eq(&self, other: &Self) -> bool {
- // The `Borrow` trait requires that `x.borrow() == y.borrow()`
- // equals `x == y`.
- self.0 == other.0
- }
- }
-
- impl<'tcx> Eq for InternedInSet<'tcx, $ty> {}
-
- impl<'tcx> Hash for InternedInSet<'tcx, $ty> {
- fn hash<H: Hasher>(&self, s: &mut H) {
- // The `Borrow` trait requires that `x.borrow().hash(s) ==
- // x.hash(s)`.
- self.0.hash(s)
- }
- }
-
- impl<'tcx> TyCtxt<'tcx> {
- pub fn $method(self, v: $ty) -> &'tcx $ty {
- self.interners.$name.intern(v, |v| {
- InternedInSet(self.interners.arena.alloc(v))
- }).0
- }
- })+
- }
-}
-
-// FIXME: eventually these should all be converted to `direct_interners`.
-direct_interners_old! {
- adt_def: intern_adt_def(AdtDef),
+ adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
}
macro_rules! slice_interners {
}
#[inline]
- pub fn mk_adt(self, def: &'tcx AdtDef, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ pub fn mk_adt(self, def: AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
// Take a copy of substs so that we own the vectors inside.
self.mk_ty(Adt(def, substs))
}
pub fn mk_place_downcast(
self,
place: Place<'tcx>,
- adt_def: &'tcx AdtDef,
+ adt_def: AdtDef<'tcx>,
variant_index: VariantIdx,
) -> Place<'tcx> {
self.mk_place_elem(
place,
- PlaceElem::Downcast(Some(adt_def.variants[variant_index].name), variant_index),
+ PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index),
)
}
lint: &'static Lint,
hir_id: HirId,
span: impl Into<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
let (level, src) = self.lint_level_at_node(lint, hir_id);
struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
self,
lint: &'static Lint,
id: HirId,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a>),
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
) {
let (level, src) = self.lint_level_at_node(lint, id);
struct_lint_level(self.sess, lint, level, src, None, decorate);
};
err.span_suggestion_verbose(span, msg, suggestion, applicability);
- } else {
+ } else if suggestions.len() > 1 {
err.multipart_suggestion_verbose(
"consider restricting type parameters",
suggestions.into_iter().map(|(span, suggestion, _)| (span, suggestion)).collect(),
}
ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(),
- ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(),
+ ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did())).into(),
ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
ty::Array(t, n) => {
if t.is_simple_ty() {
"{some} method{s} {are} available that return{r} `{ty}`",
some = if methods.len() == 1 { "a" } else { "some" },
s = pluralize!(methods.len()),
- are = if methods.len() == 1 { "is" } else { "are" },
+ are = pluralize!("is", methods.len()),
r = if methods.len() == 1 { "s" } else { "" },
ty = expected
);
ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
- ty::Adt(def, _) => Some(AdtSimplifiedType(def.did)),
+ ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
ty::Str => Some(StrSimplifiedType),
ty::Array(..) => Some(ArraySimplifiedType),
ty::Slice(..) => Some(SliceSimplifiedType),
//! ```
use crate::mir;
use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
+use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use rustc_data_structures::fx::FxHashSet;
fn references_error(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_ERROR)
}
+ fn error_reported(&self) -> Option<ErrorGuaranteed> {
+ if self.references_error() {
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ } else {
+ None
+ }
+ }
fn has_param_types_or_consts(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
}
}
}
-impl<'tcx> AdtDef {
+impl<'tcx> AdtDef<'tcx> {
/// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
fn uninhabited_from(
- &self,
+ self,
tcx: TyCtxt<'tcx>,
substs: SubstsRef<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> DefIdForest<'tcx> {
// Non-exhaustive ADTs from other crates are always considered inhabited.
- if self.is_variant_list_non_exhaustive() && !self.did.is_local() {
+ if self.is_variant_list_non_exhaustive() && !self.did().is_local() {
DefIdForest::empty()
} else {
DefIdForest::intersection(
tcx,
- self.variants
+ self.variants()
.iter()
.map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
)
}
// SIMD vector types.
- ty::Adt(def, substs) if def.repr.simd() => {
+ ty::Adt(def, substs) if def.repr().simd() => {
if !def.is_struct() {
// Should have yielded E0517 by now.
tcx.sess.delay_span_bug(
ty::Adt(def, substs) => {
// Cache the field layouts.
let variants = def
- .variants
+ .variants()
.iter()
.map(|v| {
v.fields
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
if def.is_union() {
- if def.repr.pack.is_some() && def.repr.align.is_some() {
+ if def.repr().pack.is_some() && def.repr().align.is_some() {
self.tcx.sess.delay_span_bug(
- tcx.def_span(def.did),
+ tcx.def_span(def.did()),
"union cannot be packed and aligned",
);
return Err(LayoutError::Unknown(ty));
}
let mut align =
- if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+ if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
- if let Some(repr_align) = def.repr.align {
+ if let Some(repr_align) = def.repr().align {
align = align.max(AbiAndPrefAlign::new(repr_align));
}
- let optimize = !def.repr.inhibit_union_abi_opt();
+ let optimize = !def.repr().inhibit_union_abi_opt();
let mut size = Size::ZERO;
let mut abi = Abi::Aggregate { sized: true };
let index = VariantIdx::new(0);
size = cmp::max(size, field.size);
}
- if let Some(pack) = def.repr.pack {
+ if let Some(pack) = def.repr().pack {
align = align.min(AbiAndPrefAlign::new(pack));
}
// Only one variant is present.
(present_second.is_none() &&
// Representation optimizations are allowed.
- !def.repr.inhibit_enum_layout_opt());
+ !def.repr().inhibit_enum_layout_opt());
if is_struct {
// Struct, or univariant enum equivalent to a struct.
// (Typechecking will reject discriminant-sizing attrs.)
let kind = if def.is_enum() || variants[v].is_empty() {
StructKind::AlwaysSized
} else {
- let param_env = tcx.param_env(def.did);
- let last_field = def.variants[v].fields.last().unwrap();
+ let param_env = tcx.param_env(def.did());
+ let last_field = def.variant(v).fields.last().unwrap();
let always_sized =
tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
if !always_sized {
}
};
- let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
+ let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
st.variants = Variants::Single { index: v };
- let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
+ let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
match st.abi {
Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
// the asserts ensure that we are not using the
}
// Update `largest_niche` if we have introduced a larger niche.
- let niche = if def.repr.hide_niche() {
+ let niche = if def.repr().hide_niche() {
None
} else {
Niche::from_scalar(dl, Size::ZERO, *scalar)
// instead of actual discriminants, so dataful enums with
// explicit discriminants (RFC #2363) would misbehave.
let no_explicit_discriminants = def
- .variants
+ .variants()
.iter_enumerated()
.all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
let mut niche_filling_layout = None;
// Niche-filling enum optimization.
- if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
+ if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
let mut dataful_variant = None;
let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
let mut st = self.univariant_uninterned(
ty,
v,
- &def.repr,
+ &def.repr(),
StructKind::AlwaysSized,
)?;
st.variants = Variants::Single { index: j };
}
let (mut min, mut max) = (i128::MAX, i128::MIN);
- let discr_type = def.repr.discr_type();
+ let discr_type = def.repr().discr_type();
let bits = Integer::from_attr(self, discr_type).size().bits();
for (i, discr) in def.discriminants(tcx) {
if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
max = 0;
}
assert!(min <= max, "discriminant range is {}...{}", min, max);
- let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
+ let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
let mut align = dl.aggregate_align;
let mut size = Size::ZERO;
// determining the alignment of the overall enum, and the
// determining the alignment of the payload after the tag.)
let mut prefix_align = min_ity.align(dl).abi;
- if def.repr.c() {
+ if def.repr().c() {
for fields in &variants {
for field in fields {
prefix_align = prefix_align.max(field.align.abi);
let mut st = self.univariant_uninterned(
ty,
&field_layouts,
- &def.repr,
+ &def.repr(),
StructKind::Prefixed(min_ity.size(), prefix_align),
)?;
st.variants = Variants::Single { index: i };
return Err(LayoutError::SizeOverflow(ty));
}
- let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
+ let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
if typeck_ity < min_ity {
// It is a bug if Layout decided on a greater discriminant size than typeck for
// some reason at this point (based on values discriminant can take on). Mostly
// won't be so conservative.
// Use the initial field alignment
- let mut ity = if def.repr.c() || def.repr.int.is_some() {
+ let mut ity = if def.repr().c() || def.repr().int.is_some() {
min_ity
} else {
Integer::for_align(dl, start_align).unwrap_or(min_ity)
};
let adt_kind = adt_def.adt_kind();
- let adt_packed = adt_def.repr.pack.is_some();
+ let adt_packed = adt_def.repr().pack.is_some();
let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
let mut min_size = Size::ZERO;
match layout.variants {
Variants::Single { index } => {
- if !adt_def.variants.is_empty() && layout.fields != FieldsShape::Primitive {
+ if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
debug!(
"print-type-size `{:#?}` variant {}",
- layout, adt_def.variants[index].name
+ layout,
+ adt_def.variant(index).name
);
- let variant_def = &adt_def.variants[index];
+ let variant_def = &adt_def.variant(index);
let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
record(
adt_kind.into(),
debug!(
"print-type-size `{:#?}` adt general variants def {}",
layout.ty,
- adt_def.variants.len()
+ adt_def.variants().len()
);
let variant_infos: Vec<_> = adt_def
- .variants
+ .variants()
.iter_enumerated()
.map(|(i, variant_def)| {
let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
ty::Adt(def, substs) => {
// Only newtypes and enums w/ nullable pointer optimization.
- if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
+ if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
return Err(err);
}
// Get a zero-sized variant or a pointer newtype.
let zero_or_ptr_variant = |i| {
let i = VariantIdx::new(i);
- let fields = def.variants[i]
- .fields
- .iter()
- .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
+ let fields =
+ def.variant(i).fields.iter().map(|field| {
+ SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
+ });
let mut ptr = None;
for field in fields {
let field = field?;
let v0 = zero_or_ptr_variant(0)?;
// Newtype.
- if def.variants.len() == 1 {
+ if def.variants().len() == 1 {
if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
return Ok(SizeSkeleton::Pointer {
non_zero: non_zero
- || match tcx.layout_scalar_valid_range(def.did) {
+ || match tcx.layout_scalar_valid_range(def.did()) {
(Bound::Included(start), Bound::Unbounded) => start > 0,
(Bound::Included(start), Bound::Included(end)) => {
0 < start && start < end
}
let fields = match this.ty.kind() {
- ty::Adt(def, _) if def.variants.is_empty() =>
+ ty::Adt(def, _) if def.variants().is_empty() =>
bug!("for_variant called on zero-variant enum"),
- ty::Adt(def, _) => def.variants[variant_index].fields.len(),
+ ty::Adt(def, _) => def.variant(variant_index).fields.len(),
_ => bug!(),
};
tcx.intern_layout(LayoutS {
ty::Adt(def, substs) => {
match this.variants {
Variants::Single { index } => {
- TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
+ TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
}
// Discriminant field for enums (where applicable).
use rustc_query_system::ich::StableHashingContext;
use rustc_session::cstore::CrateStoreDyn;
use rustc_span::symbol::{kw, Ident, Symbol};
-use rustc_span::{sym, Span};
+use rustc_span::Span;
use rustc_target::abi::Align;
+use std::fmt::Debug;
use std::hash::Hash;
use std::ops::ControlFlow;
use std::{fmt, str};
pub predicates: Vec<Predicate<'tcx>>,
}
+#[derive(Copy, Clone, Debug, TypeFoldable)]
+pub enum ImplSubject<'tcx> {
+ Trait(TraitRef<'tcx>),
+ Inherent(Ty<'tcx>),
+}
+
#[derive(
Copy,
Clone,
pub constness: BoundConstness,
+ /// If polarity is Positive: we are proving that the trait is implemented.
+ ///
+ /// If polarity is Negative: we are proving that a negative impl of this trait
+ /// exists. (Note that coherence also checks whether negative impls of supertraits
+ /// exist via a series of predicates.)
+ ///
+ /// If polarity is Reserved: that's a bug.
pub polarity: ImplPolarity,
}
if unlikely!(Some(self.trait_ref.def_id) == tcx.lang_items().drop_trait()) {
// remap without changing constness of this predicate.
// this is because `T: ~const Drop` has a different meaning to `T: Drop`
+ // FIXME(fee1-dead): remove this logic after beta bump
param_env.remap_constness_with(self.constness)
} else {
*param_env = param_env.with_constness(self.constness.and(param_env.constness()))
self.sess.contains_name(&self.get_attrs(did), attr)
}
- /// Determines whether an item is annotated with `doc(hidden)`.
- pub fn is_doc_hidden(self, did: DefId) -> bool {
- self.get_attrs(did)
- .iter()
- .filter_map(|attr| if attr.has_name(sym::doc) { attr.meta_item_list() } else { None })
- .any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
- }
-
/// Returns `true` if this is an `auto trait`.
pub fn trait_is_auto(self, trait_def_id: DefId) -> bool {
self.trait_def(trait_def_id).has_auto_impl
visited: &mut SsoHashSet<Ty<'a>>,
) -> Option<DefId> {
match *ty.kind() {
- ty::Adt(adt_def, _) => Some(adt_def.did),
+ ty::Adt(adt_def, _) => Some(adt_def.did()),
ty::Dynamic(data, ..) => data.principal_def_id(),
ty::BoundTyKind::Param(p) => p!(write("{}", p)),
},
ty::Adt(def, substs) => {
- p!(print_def_path(def.did, substs));
+ p!(print_def_path(def.did(), substs));
}
ty::Dynamic(data, r) => {
let print_r = self.should_print_region(r);
);
if !generics.is_empty() || !assoc_items.is_empty() {
- p!("<");
let mut first = true;
for ty in generics {
- if !first {
+ if first {
+ p!("<");
+ first = false;
+ } else {
p!(", ");
}
p!(print(trait_ref.rebind(*ty)));
- first = false;
}
for (assoc_item_def_id, term) in assoc_items {
- if !first {
+ // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks
+ if let Some(ty) = term.skip_binder().ty() &&
+ let ty::Projection(ty::ProjectionTy { item_def_id, .. }) = ty.kind() &&
+ Some(*item_def_id) == self.tcx().lang_items().generator_return() {
+ continue;
+ }
+
+ if first {
+ p!("<");
+ first = false;
+ } else {
p!(", ");
}
+
p!(write("{} = ", self.tcx().associated_item(assoc_item_def_id).name));
match term.skip_binder() {
Term::Ty(ty) => {
- // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks
- if matches!(
- ty.kind(), ty::Projection(ty::ProjectionTy { item_def_id, .. })
- if Some(*item_def_id) == self.tcx().lang_items().generator_return()
- ) {
- p!("[async output]")
- } else {
- p!(print(ty))
- }
+ p!(print(ty))
}
Term::Const(c) => {
p!(print(c));
}
};
-
- first = false;
}
- p!(">");
+ if !first {
+ p!(">");
+ }
}
first = false;
}
p!(")");
}
- ty::Adt(def, _) if def.variants.is_empty() => {
+ ty::Adt(def, _) if def.variants().is_empty() => {
self = self.typed_value(
|mut this| {
write!(this, "unreachable()")?;
ty::Adt(def, substs) => {
let variant_idx =
contents.variant.expect("destructed const of adt without variant idx");
- let variant_def = &def.variants[variant_idx];
+ let variant_def = &def.variant(variant_idx);
p!(print_value_path(variant_def.def_id, substs));
match variant_def.ctor_kind {
CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution,
};
use crate::traits::query::{
- DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, NormalizationResult,
+ DropckConstraint, DropckOutlivesResult, MethodAutoderefStepsResult, NormalizationResult,
OutlivesBound,
};
use crate::traits::specialization_graph;
mod sealed {
use super::{DefId, LocalDefId};
- /// An analogue of the `Into` trait that's intended only for query paramaters.
+ /// An analogue of the `Into` trait that's intended only for query parameters.
///
/// This exists to allow queries to accept either `DefId` or `LocalDefId` while requiring that the
/// user call `to_def_id` to convert between them everywhere else.
use crate::mir::interpret::{get_slice_bytes, ConstValue, GlobalAlloc, Scalar};
use crate::ty::error::{ExpectedFound, TypeError};
use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
-use crate::ty::{self, Term, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{self, ImplSubject, Term, Ty, TyCtxt, TypeFoldable};
use rustc_hir as ast;
use rustc_hir::def_id::DefId;
use rustc_span::DUMMY_SP;
}
}
+impl<'tcx> Relate<'tcx> for ImplSubject<'tcx> {
+ #[inline]
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ImplSubject<'tcx>,
+ b: ImplSubject<'tcx>,
+ ) -> RelateResult<'tcx, ImplSubject<'tcx>> {
+ match (a, b) {
+ (ImplSubject::Trait(trait_ref_a), ImplSubject::Trait(trait_ref_b)) => {
+ let trait_ref = ty::TraitRef::relate(relation, trait_ref_a, trait_ref_b)?;
+ Ok(ImplSubject::Trait(trait_ref))
+ }
+ (ImplSubject::Inherent(ty_a), ImplSubject::Inherent(ty_b)) => {
+ let ty = Ty::relate(relation, ty_a, ty_b)?;
+ Ok(ImplSubject::Inherent(ty))
+ }
+ (ImplSubject::Trait(_), ImplSubject::Inherent(_))
+ | (ImplSubject::Inherent(_), ImplSubject::Trait(_)) => {
+ bug!("cannot relate TraitRef and Ty");
+ }
+ }
+ }
+}
+
impl<'tcx> Relate<'tcx> for Ty<'tcx> {
#[inline]
fn relate<R: TypeRelation<'tcx>>(
(ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => Ok(a),
(&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => {
- let substs = relation.relate_item_substs(a_def.did, a_substs, b_substs)?;
+ let substs = relation.relate_item_substs(a_def.did(), a_substs, b_substs)?;
Ok(tcx.mk_adt(a_def, substs))
}
(ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
if tcx.features().generic_const_exprs =>
{
- tcx.try_unify_abstract_consts((au.shrink(), bu.shrink()))
+ tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink())))
}
// While this is slightly incorrect, it shouldn't matter for `min_const_generics`
}
}
-impl fmt::Debug for ty::AdtDef {
+impl<'tcx> fmt::Debug for ty::AdtDef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
with_no_trimmed_paths!({
f.write_str(
&FmtPrinter::new(tcx, Namespace::TypeNS)
- .print_def_path(self.did, &[])?
+ .print_def_path(self.did(), &[])?
.into_buffer(),
)
})
// TypeFoldable implementations.
/// AdtDefs are basically the same as a DefId.
-impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::AdtDef {
+impl<'tcx> TypeFoldable<'tcx> for ty::AdtDef<'tcx> {
fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
self,
_folder: &mut F,
///
/// Note that generic parameters in fields only get lazily substituted
/// by using something like `adt_def.all_fields().map(|field| field.ty(tcx, substs))`.
- Adt(&'tcx AdtDef, SubstsRef<'tcx>),
+ Adt(AdtDef<'tcx>, SubstsRef<'tcx>),
/// An unsized FFI type that is opaque to Rust. Written as `extern type T`.
Foreign(DefId),
}
/// This returns the types of the MIR locals which had to be stored across suspension points.
- /// It is calculated in rustc_const_eval::transform::generator::StateTransform.
+ /// It is calculated in rustc_mir_transform::generator::StateTransform.
/// All the types here must be in the tuple in GeneratorInterior.
///
/// The locals are grouped by their variant number. Note that some locals may
polarity: ty::ImplPolarity::Positive,
})
}
+
+ /// Same as [`PolyTraitRef::to_poly_trait_predicate`] but sets a negative polarity instead.
+ pub fn to_poly_trait_predicate_negative_polarity(&self) -> ty::PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Negative,
+ })
+ }
}
/// An existential reference to a trait, where `Self` is erased.
#[inline]
pub fn is_simd(self) -> bool {
match self.kind() {
- Adt(def, _) => def.repr.simd(),
+ Adt(def, _) => def.repr().simd(),
_ => false,
}
}
pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
match self.kind() {
Adt(def, substs) => {
- assert!(def.repr.simd(), "`simd_size_and_type` called on non-SIMD type");
+ assert!(def.repr().simd(), "`simd_size_and_type` called on non-SIMD type");
let variant = def.non_enum_variant();
let f0_ty = variant.fields[0].ty(tcx, substs);
}
#[inline]
- pub fn ty_adt_def(self) -> Option<&'tcx AdtDef> {
+ pub fn ty_adt_def(self) -> Option<AdtDef<'tcx>> {
match self.kind() {
- Adt(adt, _) => Some(adt),
+ Adt(adt, _) => Some(*adt),
_ => None,
}
}
/// Iterates over tuple fields.
/// Panics when called on anything but a tuple.
+ #[inline]
pub fn tuple_fields(self) -> &'tcx List<Ty<'tcx>> {
match self.kind() {
Tuple(substs) => substs,
variant_index: VariantIdx,
) -> Option<Discr<'tcx>> {
match self.kind() {
- TyKind::Adt(adt, _) if adt.variants.is_empty() => {
+ TyKind::Adt(adt, _) if adt.variants().is_empty() => {
// This can actually happen during CTFE, see
// https://github.com/rust-lang/rust/issues/89765.
None
/// Returns the type of the discriminant of this type.
pub fn discriminant_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
match self.kind() {
- ty::Adt(adt, _) if adt.is_enum() => adt.repr.discr_type().to_ty(tcx),
+ ty::Adt(adt, _) if adt.is_enum() => adt.repr().discr_type().to_ty(tcx),
ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => {
}
}
- /// Returns the type of metadata for (potentially fat) pointers to this type.
+ /// Returns the type of metadata for (potentially fat) pointers to this type,
+ /// and a boolean signifying if this is conditional on this type being `Sized`.
pub fn ptr_metadata_ty(
self,
tcx: TyCtxt<'tcx>,
normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
- ) -> Ty<'tcx> {
+ ) -> (Ty<'tcx>, bool) {
let tail = tcx.struct_tail_with_normalize(self, normalize);
match tail.kind() {
// Sized types
| ty::Closure(..)
| ty::Never
| ty::Error(_)
+ // Extern types have metadata = ().
| ty::Foreign(..)
// If returned by `struct_tail_without_normalization` this is a unit struct
// without any fields, or not a struct, and therefore is Sized.
| ty::Adt(..)
// If returned by `struct_tail_without_normalization` this is the empty tuple,
// a.k.a. unit type, which is Sized
- | ty::Tuple(..) => tcx.types.unit,
+ | ty::Tuple(..) => (tcx.types.unit, false),
- ty::Str | ty::Slice(_) => tcx.types.usize,
+ ty::Str | ty::Slice(_) => (tcx.types.usize, false),
ty::Dynamic(..) => {
let dyn_metadata = tcx.lang_items().dyn_metadata().unwrap();
- tcx.type_of(dyn_metadata).subst(tcx, &[tail.into()])
+ (tcx.type_of(dyn_metadata).subst(tcx, &[tail.into()]), false)
},
- ty::Projection(_)
- | ty::Param(_)
- | ty::Opaque(..)
- | ty::Infer(ty::TyVar(_))
+ // type parameters only have unit metadata if they're sized, so return true
+ // to make sure we double check this during confirmation
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) => (tcx.types.unit, true),
+
+ ty::Infer(ty::TyVar(_))
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
- bug!("`ptr_metadata_ty` applied to unexpected type: {:?}", tail)
+ bug!("`ptr_metadata_ty` applied to unexpected type: {:?} (tail = {:?})", self, tail)
}
}
}
}
impl<'tcx> List<Ty<'tcx>> {
- /// Allows to freely switch betwen `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
+ /// Allows to freely switch between `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
///
/// As lists are interned, `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>` have
/// be interned together, see `intern_type_list` for more details.
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_query_system::ich::NodeIdHashingMode;
-use rustc_span::DUMMY_SP;
+use rustc_span::{sym, DUMMY_SP};
use rustc_target::abi::{Integer, Size, TargetDataLayout};
use smallvec::SmallVec;
use std::{fmt, iter};
/// Note that this returns only the constraints for the
/// destructor of `def` itself. For the destructors of the
/// contents, you need `adt_dtorck_constraint`.
- pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
+ pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::subst::GenericArg<'tcx>> {
let dtor = match def.destructor(self) {
None => {
- debug!("destructor_constraints({:?}) - no dtor", def.did);
+ debug!("destructor_constraints({:?}) - no dtor", def.did());
return vec![];
}
Some(dtor) => dtor.did,
_ => bug!(),
};
- let item_substs = match *self.type_of(def.did).kind() {
+ let item_substs = match *self.type_of(def.did()).kind() {
ty::Adt(def_, substs) if def_ == def => substs,
_ => bug!(),
};
})
.map(|(item_param, _)| item_param)
.collect();
- debug!("destructor_constraint({:?}) = {:?}", def.did, result);
+ debug!("destructor_constraint({:?}) = {:?}", def.did(), result);
result
}
}
/// Given the `DefId`, returns the `DefId` of the innermost item that
- /// has its own type-checking context or "inference enviornment".
+ /// has its own type-checking context or "inference environment".
///
/// For example, a closure has its own `DefId`, but it is type-checked
/// with the containing item. Similarly, an inline const block has its
val.fold_with(&mut visitor)
}
+/// Determines whether an item is annotated with `doc(hidden)`.
+pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.get_attrs(def_id)
+ .iter()
+ .filter_map(|attr| if attr.has_name(sym::doc) { attr.meta_item_list() } else { None })
+ .any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
+}
+
pub fn provide(providers: &mut ty::query::Providers) {
- *providers = ty::query::Providers { normalize_opaque_types, ..*providers }
+ *providers = ty::query::Providers { normalize_opaque_types, is_doc_hidden, ..*providers }
}
//! See docs in build/expr/mod.rs
use crate::build::Builder;
-use rustc_middle::mir::interpret::{ConstValue, Scalar};
+use crate::thir::constant::parse_float;
+use rustc_ast::ast;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::interpret::{
+ Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
+};
use rustc_middle::mir::*;
use rustc_middle::thir::*;
-use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt};
+use rustc_target::abi::Size;
impl<'a, 'tcx> Builder<'a, 'tcx> {
/// Compile `expr`, yielding a compile-time constant. Assumes that
/// `expr` is a valid compile-time constant!
crate fn as_constant(&mut self, expr: &Expr<'tcx>) -> Constant<'tcx> {
+ let create_uneval_from_def_id =
+ |tcx: TyCtxt<'tcx>, def_id: DefId, ty: Ty<'tcx>, substs: SubstsRef<'tcx>| {
+ let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+ tcx.mk_const(ty::ConstS { val: ty::ConstKind::Unevaluated(uneval), ty })
+ };
+
let this = self;
+ let tcx = this.tcx;
let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
match *kind {
ExprKind::Scope { region_scope: _, lint_level: _, value } => {
this.as_constant(&this.thir[value])
}
- ExprKind::Literal { literal, user_ty, const_id: _ } => {
+ ExprKind::Literal { lit, neg } => {
+ let literal =
+ match lit_to_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => ConstantKind::Ty(tcx.const_error(ty)),
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in `lit_to_constant`")
+ }
+ };
+
+ Constant { span, user_ty: None, literal: literal.into() }
+ }
+ ExprKind::NonHirLiteral { lit, user_ty } => {
let user_ty = user_ty.map(|user_ty| {
this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
span,
inferred_ty: ty,
})
});
- assert_eq!(literal.ty(), ty);
- Constant { span, user_ty, literal: literal.into() }
+
+ let literal = ConstantKind::Val(ConstValue::Scalar(Scalar::Int(lit)), ty);
+
+ Constant { span, user_ty: user_ty, literal }
+ }
+ ExprKind::NamedConst { def_id, substs, user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty, span, literal }
+ }
+ ExprKind::ConstParam { param, def_id: _ } => {
+ let const_param =
+ tcx.mk_const(ty::ConstS { val: ty::ConstKind::Param(param), ty: expr.ty });
+ let literal = ConstantKind::Ty(const_param);
+
+ Constant { user_ty: None, span, literal }
+ }
+ ExprKind::ConstBlock { did: def_id, substs } => {
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty: None, span, literal }
}
ExprKind::StaticRef { alloc_id, ty, .. } => {
- let const_val =
- ConstValue::Scalar(Scalar::from_pointer(alloc_id.into(), &this.tcx));
+ let const_val = ConstValue::Scalar(Scalar::from_pointer(alloc_id.into(), &tcx));
let literal = ConstantKind::Val(const_val, ty);
Constant { span, user_ty: None, literal }
}
- ExprKind::ConstBlock { value } => {
- Constant { span: span, user_ty: None, literal: value.into() }
- }
_ => span_bug!(span, "expression is not a valid constant {:?}", kind),
}
}
}
+
+crate fn lit_to_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ lit_input: LitToConstInput<'tcx>,
+) -> Result<ConstantKind<'tcx>, LitToConstError> {
+ let LitToConstInput { lit, ty, neg } = lit_input;
+ let trunc = |n| {
+ let param_ty = ty::ParamEnv::reveal_all().and(ty);
+ let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
+ trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
+ let result = width.truncate(n);
+ trace!("trunc result: {}", result);
+ Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
+ };
+
+ let value = match (lit, &ty.kind()) {
+ (ast::LitKind::Str(s, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_str() => {
+ let s = s.as_str();
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes());
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: s.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _))
+ if matches!(inner_ty.kind(), ty::Slice(_)) =>
+ {
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: data.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
+ let id = tcx.allocate_bytes(data);
+ ConstValue::Scalar(Scalar::from_pointer(id.into(), &tcx))
+ }
+ (ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
+ ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))
+ }
+ (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => {
+ trunc(if neg { (*n as i128).overflowing_neg().0 as u128 } else { *n })?
+ }
+ (ast::LitKind::Float(n, _), ty::Float(fty)) => {
+ parse_float(*n, *fty, neg).ok_or(LitToConstError::Reported)?
+ }
+ (ast::LitKind::Bool(b), ty::Bool) => ConstValue::Scalar(Scalar::from_bool(*b)),
+ (ast::LitKind::Char(c), ty::Char) => ConstValue::Scalar(Scalar::from_char(*c)),
+ (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ _ => return Err(LitToConstError::TypeError),
+ };
+
+ Ok(ConstantKind::Val(value, ty))
+}
self.project(PlaceElem::Deref)
}
- crate fn downcast(self, adt_def: &'tcx AdtDef, variant_index: VariantIdx) -> Self {
- self.project(PlaceElem::Downcast(Some(adt_def.variants[variant_index].name), variant_index))
+ crate fn downcast(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx) -> Self {
+ self.project(PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index))
}
fn index(self, index: Local) -> Self {
| ExprKind::Continue { .. }
| ExprKind::Return { .. }
| ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ConstParam { .. }
| ExprKind::ConstBlock { .. }
| ExprKind::StaticRef { .. }
| ExprKind::InlineAsm { .. }
}
ExprKind::Yield { .. }
| ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ConstParam { .. }
| ExprKind::ConstBlock { .. }
| ExprKind::StaticRef { .. }
| ExprKind::Block { .. }
local_decl.local_info =
Some(Box::new(LocalInfo::StaticRef { def_id, is_thread_local: true }));
}
- ExprKind::Literal { const_id: Some(def_id), .. } => {
+ ExprKind::NamedConst { def_id, .. } | ExprKind::ConstParam { def_id, .. } => {
local_decl.local_info = Some(Box::new(LocalInfo::ConstRef { def_id }));
}
_ => {}
| ExprKind::AssignOp { .. }
| ExprKind::ThreadLocalRef(_) => Some(Category::Rvalue(RvalueFunc::AsRvalue)),
- ExprKind::ConstBlock { .. } | ExprKind::Literal { .. } | ExprKind::StaticRef { .. } => {
- Some(Category::Constant)
- }
+ ExprKind::ConstBlock { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::NamedConst { .. } => Some(Category::Constant),
ExprKind::Loop { .. }
| ExprKind::Block { .. }
.collect();
let field_names: Vec<_> =
- (0..adt_def.variants[variant_index].fields.len()).map(Field::new).collect();
+ (0..adt_def.variant(variant_index).fields.len()).map(Field::new).collect();
let fields: Vec<_> = if let Some(FruInfo { base, field_types }) = base {
let place_builder =
})
});
let adt = Box::new(AggregateKind::Adt(
- adt_def.did,
+ adt_def.did(),
variant_index,
substs,
user_ty,
| ExprKind::Closure { .. }
| ExprKind::ConstBlock { .. }
| ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ConstParam { .. }
| ExprKind::ThreadLocalRef(_)
| ExprKind::StaticRef { .. } => {
debug_assert!(match Category::of(&expr.kind).unwrap() {
//! basically the point where the "by value" operations are bridged
//! over to the "by reference" mode (`as_place`).
-mod as_constant;
+crate mod as_constant;
mod as_operand;
pub mod as_place;
mod as_rvalue;
/// Test what enum variant a value is.
Switch {
/// The enum type being tested.
- adt_def: &'tcx ty::AdtDef,
+ adt_def: ty::AdtDef<'tcx>,
/// The set of variants that we should create a branch for. We also
/// create an additional "otherwise" case.
variants: BitSet<VariantIdx>,
}
PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
- let irrefutable = adt_def.variants.iter_enumerated().all(|(i, v)| {
+ let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
i == variant_index || {
self.tcx.features().exhaustive_patterns
&& !v
)
.is_empty()
}
- }) && (adt_def.did.is_local()
+ }) && (adt_def.did().is_local()
|| !adt_def.is_variant_list_non_exhaustive());
if irrefutable {
let place_builder = match_pair.place.downcast(adt_def, variant_index);
/// It is a bug to call this with a not-fully-simplified pattern.
pub(super) fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
match *match_pair.pattern.kind {
- PatKind::Variant { ref adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
+ PatKind::Variant { adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
span: match_pair.pattern.span,
kind: TestKind::Switch {
adt_def,
- variants: BitSet::new_empty(adt_def.variants.len()),
+ variants: BitSet::new_empty(adt_def.variants().len()),
},
},
TestKind::Switch { adt_def, ref variants } => {
let target_blocks = make_target_blocks(self);
// Variants is a BitVec of indexes into adt_def.variants.
- let num_enum_variants = adt_def.variants.len();
+ let num_enum_variants = adt_def.variants().len();
debug_assert_eq!(target_blocks.len(), num_enum_variants + 1);
let otherwise_block = *target_blocks.last().unwrap();
let tcx = self.tcx;
otherwise_block,
);
debug!("num_enum_variants: {}, variants: {:?}", num_enum_variants, variants);
- let discr_ty = adt_def.repr.discr_type().to_ty(tcx);
+ let discr_ty = adt_def.repr().discr_type().to_ty(tcx);
let discr = self.temp(discr_ty, test.span);
self.cfg.push_assign(
block,
fn candidate_after_variant_switch<'pat>(
&mut self,
match_pair_index: usize,
- adt_def: &'tcx ty::AdtDef,
+ adt_def: ty::AdtDef<'tcx>,
variant_index: VariantIdx,
subpatterns: &'pat [FieldPat<'tcx>],
candidate: &mut Candidate<'pat, 'tcx>,
// we want to create a set of derived match-patterns like
// `(x as Variant).0 @ P1` and `(x as Variant).1 @ P1`.
let elem =
- ProjectionElem::Downcast(Some(adt_def.variants[variant_index].name), variant_index);
+ ProjectionElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index);
let downcast_place = match_pair.place.project(elem); // `(x as Variant)`
let consequent_match_pairs = subpatterns.iter().map(|subpattern| {
// e.g., `(x as Variant).0`
// variants, we have a target for each variant and the
// otherwise case, and we make sure that all of the cases not
// specified have the same block.
- adt_def.variants.len() + 1
+ adt_def.variants().len() + 1
}
TestKind::SwitchInt { switch_ty, ref options, .. } => {
if switch_ty.is_bool() {
};
let body = tcx.hir().body(body_id);
- let (thir, expr) = tcx.thir_body(def);
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
// We ran all queries that depended on THIR at the beginning
// of `mir_build`, so now we can steal it
let thir = thir.steal();
let return_ty = typeck_results.node_type(id);
- let (thir, expr) = tcx.thir_body(def);
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
// We ran all queries that depended on THIR at the beginning
// of `mir_build`, so now we can steal it
let thir = thir.steal();
ExprKind::Field { lhs, .. } => {
if let ty::Adt(adt_def, _) = self.thir[lhs].ty.kind() {
if (Bound::Unbounded, Bound::Unbounded)
- != self.tcx.layout_scalar_valid_range(adt_def.did)
+ != self.tcx.layout_scalar_valid_range(adt_def.did())
{
self.found = true;
}
visit::walk_pat(self, pat);
self.in_union_destructure = old_in_union_destructure;
} else if (Bound::Unbounded, Bound::Unbounded)
- != self.tcx.layout_scalar_valid_range(adt_def.did)
+ != self.tcx.layout_scalar_valid_range(adt_def.did())
{
let old_inside_adt = std::mem::replace(&mut self.inside_adt, true);
visit::walk_pat(self, pat);
| ExprKind::Block { .. }
| ExprKind::Borrow { .. }
| ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ConstParam { .. }
| ExprKind::ConstBlock { .. }
| ExprKind::Deref { .. }
| ExprKind::Index { .. }
user_ty: _,
fields: _,
base: _,
- }) => match self.tcx.layout_scalar_valid_range(adt_def.did) {
+ }) => match self.tcx.layout_scalar_valid_range(adt_def.did()) {
(Bound::Unbounded, Bound::Unbounded) => {}
_ => self.requires_unsafe(expr.span, InitializingTypeWith),
},
} else {
ty::WithOptConstParam::unknown(closure_id)
};
- let (closure_thir, expr) = self.tcx.thir_body(closure_def);
+ let (closure_thir, expr) = self.tcx.thir_body(closure_def).unwrap_or_else(|_| {
+ (self.tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0))
+ });
let closure_thir = &closure_thir.borrow();
let hir_context = self.tcx.hir().local_def_id_to_hir_id(closure_id);
let mut closure_visitor =
return;
}
- let (thir, expr) = tcx.thir_body(def);
+ let (thir, expr) = match tcx.thir_body(def) {
+ Ok(body) => body,
+ Err(_) => return,
+ };
let thir = &thir.borrow();
- // If `thir` is empty, a type error occured, skip this body.
+ // If `thir` is empty, a type error occurred, skip this body.
if thir.exprs.is_empty() {
return;
}
use rustc_span::symbol::Symbol;
use rustc_target::abi::Size;
+// FIXME Once valtrees are available, get rid of this function and the query
crate fn lit_to_const<'tcx>(
tcx: TyCtxt<'tcx>,
lit_input: LitToConstInput<'tcx>,
Ok(ty::Const::from_value(tcx, lit, ty))
}
-fn parse_float<'tcx>(num: Symbol, fty: ty::FloatTy, neg: bool) -> Option<ConstValue<'tcx>> {
+// FIXME move this to rustc_mir_build::build
+pub(crate) fn parse_float<'tcx>(
+ num: Symbol,
+ fty: ty::FloatTy,
+ neg: bool,
+) -> Option<ConstValue<'tcx>> {
let num = num.as_str();
use rustc_apfloat::ieee::{Double, Single};
let scalar = match fty {
Adjust, Adjustment, AutoBorrow, AutoBorrowMutability, PointerCast,
};
use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
-use rustc_middle::ty::{self, AdtKind, Ty, UpvarSubsts, UserType};
+use rustc_middle::ty::{
+ self, AdtKind, InlineConstSubsts, InlineConstSubstsParts, ScalarInt, Ty, UpvarSubsts, UserType,
+};
use rustc_span::def_id::DefId;
use rustc_span::Span;
use rustc_target::abi::VariantIdx;
let user_ty =
user_provided_types.get(fun.hir_id).copied().map(|mut u_ty| {
if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value {
- *did = adt_def.did;
+ *did = adt_def.did();
}
u_ty
});
}
}
- hir::ExprKind::Lit(ref lit) => ExprKind::Literal {
- literal: self.const_eval_literal(&lit.node, expr_ty, lit.span, false),
- user_ty: None,
- const_id: None,
- },
+ hir::ExprKind::Lit(ref lit) => ExprKind::Literal { lit, neg: false },
hir::ExprKind::Binary(op, ref lhs, ref rhs) => {
if self.typeck_results().is_method_call(expr) {
let arg = self.mirror_expr(arg);
self.overloaded_operator(expr, Box::new([arg]))
} else if let hir::ExprKind::Lit(ref lit) = arg.kind {
- ExprKind::Literal {
- literal: self.const_eval_literal(&lit.node, expr_ty, lit.span, true),
- user_ty: None,
- const_id: None,
- }
+ ExprKind::Literal { lit, neg: true }
} else {
ExprKind::Unary { op: UnOp::Neg, arg: self.mirror_expr(arg) }
}
let user_ty = user_provided_types.get(expr.hir_id).copied();
debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty);
ExprKind::Adt(Box::new(Adt {
- adt_def: adt,
+ adt_def: *adt,
variant_index: VariantIdx::new(0),
substs,
user_ty,
let user_ty = user_provided_types.get(expr.hir_id).copied();
debug!("make_mirror_unadjusted: (variant) user_ty={:?}", user_ty);
ExprKind::Adt(Box::new(Adt {
- adt_def: adt,
+ adt_def: *adt,
variant_index: index,
substs,
user_ty,
ty,
temp_lifetime,
span: expr.span,
- kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(self.tcx, ty),
- user_ty,
- const_id: None,
- },
+ kind: ExprKind::zero_sized_literal(user_ty),
}),
}
}
ty,
temp_lifetime,
span: expr.span,
- kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(self.tcx, ty),
- user_ty: None,
- const_id: None,
- },
+ kind: ExprKind::zero_sized_literal(None),
}),
}
}
},
hir::ExprKind::ConstBlock(ref anon_const) => {
- let anon_const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
-
- // FIXME Do we want to use `from_inline_const` once valtrees
- // are introduced? This would create `ValTree`s that will never be used...
- let value = ty::Const::from_inline_const(self.tcx, anon_const_def_id);
-
- ExprKind::ConstBlock { value }
+ let tcx = self.tcx;
+ let local_def_id = tcx.hir().local_def_id(anon_const.hir_id);
+ let anon_const_def_id = local_def_id.to_def_id();
+
+ // Need to include the parent substs
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let ty = tcx.typeck(local_def_id).node_type(hir_id);
+ let typeck_root_def_id = tcx.typeck_root_def_id(anon_const_def_id);
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+
+ ExprKind::ConstBlock { did: anon_const_def_id, substs }
}
// Now comes the rote stuff:
hir::ExprKind::Repeat(ref v, _) => {
let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id);
let (d, o) = adt_def.discriminant_def_for_variant(idx);
use rustc_middle::ty::util::IntTypeExt;
- let ty = adt_def.repr.discr_type();
+ let ty = adt_def.repr().discr_type();
let ty = ty.to_ty(self.tcx());
Some((d, o, ty))
}
};
let source = if let Some((did, offset, var_ty)) = var {
- let mk_const = |literal| Expr {
+ let param_env_ty = self.param_env.and(var_ty);
+ let size = self
+ .tcx
+ .layout_of(param_env_ty)
+ .unwrap_or_else(|e| {
+ panic!("could not compute layout for {:?}: {:?}", param_env_ty, e)
+ })
+ .size;
+ let lit = ScalarInt::try_from_uint(offset as u128, size).unwrap();
+ let kind = ExprKind::NonHirLiteral { lit, user_ty: None };
+ let offset = self.thir.exprs.push(Expr {
temp_lifetime,
ty: var_ty,
span: expr.span,
- kind: ExprKind::Literal { literal, user_ty: None, const_id: None },
- };
- let offset = self.thir.exprs.push(mk_const(ty::Const::from_bits(
- self.tcx,
- offset as u128,
- self.param_env.and(var_ty),
- )));
+ kind,
+ });
match did {
Some(did) => {
// in case we are offsetting from a computed discriminant
// and not the beginning of discriminants (which is always `0`)
let substs = InternalSubsts::identity_for_item(self.tcx(), did);
- let lhs = ty::ConstS {
- val: ty::ConstKind::Unevaluated(ty::Unevaluated::new(
- ty::WithOptConstParam::unknown(did),
- substs,
- )),
+ let kind =
+ ExprKind::NamedConst { def_id: did, substs, user_ty: None };
+ let lhs = self.thir.exprs.push(Expr {
+ temp_lifetime,
ty: var_ty,
- };
- let lhs = self.thir.exprs.push(mk_const(self.tcx().mk_const(lhs)));
- let bin =
- ExprKind::Binary { op: BinOp::Add, lhs: lhs, rhs: offset };
+ span: expr.span,
+ kind,
+ });
+ let bin = ExprKind::Binary { op: BinOp::Add, lhs, rhs: offset };
self.thir.exprs.push(Expr {
temp_lifetime,
ty: var_ty,
}
};
let ty = self.tcx().mk_fn_def(def_id, substs);
- Expr {
- temp_lifetime,
- ty,
- span,
- kind: ExprKind::Literal {
- literal: ty::Const::zero_sized(self.tcx(), ty),
- user_ty,
- const_id: None,
- },
- }
+ Expr { temp_lifetime, ty, span, kind: ExprKind::zero_sized_literal(user_ty) }
}
fn convert_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) -> ArmId {
Res::Def(DefKind::Fn, _)
| Res::Def(DefKind::AssocFn, _)
| Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
- | Res::SelfCtor(..) => {
+ | Res::SelfCtor(_) => {
let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
- debug!("convert_path_expr: user_ty={:?}", user_ty);
- ExprKind::Literal {
- literal: ty::Const::zero_sized(
- self.tcx,
- self.typeck_results().node_type(expr.hir_id),
- ),
- user_ty,
- const_id: None,
- }
+ ExprKind::zero_sized_literal(user_ty)
}
Res::Def(DefKind::ConstParam, def_id) => {
let generics = self.tcx.generics_of(item_def_id);
let index = generics.param_def_id_to_index[&def_id];
let name = self.tcx.hir().name(hir_id);
- let val = ty::ConstKind::Param(ty::ParamConst::new(index, name));
- ExprKind::Literal {
- literal: self.tcx.mk_const(ty::ConstS {
- val,
- ty: self.typeck_results().node_type(expr.hir_id),
- }),
- user_ty: None,
- const_id: Some(def_id),
- }
+ let param = ty::ParamConst::new(index, name);
+
+ ExprKind::ConstParam { param, def_id }
}
Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
- debug!("convert_path_expr: (const) user_ty={:?}", user_ty);
- ExprKind::Literal {
- literal: self.tcx.mk_const(ty::ConstS {
- val: ty::ConstKind::Unevaluated(ty::Unevaluated::new(
- ty::WithOptConstParam::unknown(def_id),
- substs,
- )),
- ty: self.typeck_results().node_type(expr.hir_id),
- }),
- user_ty,
- const_id: Some(def_id),
- }
+ ExprKind::NamedConst { def_id, substs, user_ty: user_ty }
}
Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
// A unit struct/variant which is used as a value.
// We return a completely different ExprKind here to account for this special case.
ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(Adt {
- adt_def,
+ adt_def: *adt_def,
variant_index: adt_def.variant_index_with_ctor_id(def_id),
substs,
user_ty: user_provided_type,
use crate::thir::pattern::pat_from_hir;
use crate::thir::util::UserAnnotatedTyHelpers;
-use rustc_ast as ast;
use rustc_data_structures::steal::Steal;
+use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::HirId;
use rustc_hir::Node;
use rustc_middle::middle::region;
-use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput};
use rustc_middle::thir::*;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, TyCtxt};
use rustc_span::Span;
crate fn thir_body<'tcx>(
tcx: TyCtxt<'tcx>,
owner_def: ty::WithOptConstParam<LocalDefId>,
-) -> (&'tcx Steal<Thir<'tcx>>, ExprId) {
+) -> Result<(&'tcx Steal<Thir<'tcx>>, ExprId), ErrorGuaranteed> {
let hir = tcx.hir();
let body = hir.body(hir.body_owned_by(hir.local_def_id_to_hir_id(owner_def.did)));
let mut cx = Cx::new(tcx, owner_def);
- if cx.typeck_results.tainted_by_errors.is_some() {
- return (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0));
+ if let Some(reported) = cx.typeck_results.tainted_by_errors {
+ return Err(reported);
}
let expr = cx.mirror_expr(&body.value);
- (tcx.alloc_steal_thir(cx.thir), expr)
+ Ok((tcx.alloc_steal_thir(cx.thir), expr))
}
crate fn thir_tree<'tcx>(
tcx: TyCtxt<'tcx>,
owner_def: ty::WithOptConstParam<LocalDefId>,
) -> String {
- format!("{:#?}", thir_body(tcx, owner_def).0.steal())
+ match thir_body(tcx, owner_def) {
+ Ok((thir, _)) => format!("{:#?}", thir.steal()),
+ Err(_) => "error".into(),
+ }
}
struct Cx<'tcx> {
}
}
- crate fn const_eval_literal(
- &mut self,
- lit: &'tcx ast::LitKind,
- ty: Ty<'tcx>,
- sp: Span,
- neg: bool,
- ) -> ty::Const<'tcx> {
- trace!("const_eval_literal: {:#?}, {:?}, {:?}, {:?}", lit, ty, sp, neg);
-
- match self.tcx.at(sp).lit_to_const(LitToConstInput { lit, ty, neg }) {
- Ok(c) => c,
- Err(LitToConstError::Reported) => {
- // create a dummy value and continue compiling
- self.tcx.const_error(ty)
- }
- Err(LitToConstError::TypeError) => bug!("const_eval_literal: had type error"),
- }
- }
-
crate fn pattern_from_hir(&mut self, p: &hir::Pat<'_>) -> Pat<'tcx> {
let p = match self.tcx.hir().get(p.hir_id) {
Node::Pat(p) | Node::Binding(p) => p,
self.check_patterns(pat, Refutable);
let mut cx = self.new_cx(scrutinee.hir_id);
let tpat = self.lower_pattern(&mut cx, pat, &mut false);
- check_let_reachability(&mut cx, pat.hir_id, tpat, span);
+ self.check_let_reachability(&mut cx, pat.hir_id, tpat, span);
}
fn check_match(
if let Some(hir::Guard::IfLet(ref pat, _)) = arm.guard {
self.check_patterns(pat, Refutable);
let tpat = self.lower_pattern(&mut cx, pat, &mut false);
- check_let_reachability(&mut cx, pat.hir_id, tpat, tpat.span());
+ self.check_let_reachability(&mut cx, pat.hir_id, tpat, tpat.span());
}
}
}
}
+ // Lints a single `let` pattern (`if let` / `while let` / let-guard) that can
+ // never fail to match. If the pattern is part of a let chain, the chain is
+ // handled as a whole by `check_let_chain` and this per-pattern check is skipped.
+ fn check_let_reachability(
+ &mut self,
+ cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ pat_id: HirId,
+ pat: &'p DeconstructedPat<'p, 'tcx>,
+ span: Span,
+ ) {
+ // `check_let_chain` returns `true` when `pat_id` belongs to a chain,
+ // in which case any lints were already emitted there.
+ if self.check_let_chain(cx, pat_id) {
+ return;
+ }
+
+ // Standalone `let`: warn if the pattern is irrefutable (always matches).
+ if is_let_irrefutable(cx, pat_id, pat) {
+ irrefutable_let_pattern(cx.tcx, pat_id, span);
+ }
+ }
+
+ // Checks an entire let chain (`.. && let p = e && let q = f && ..`) for
+ // irrefutable `let` patterns. A fully-irrefutable chain is reported via
+ // `irrefutable_let_patterns`; irrefutable leading/trailing sub-chains are
+ // reported through the IRREFUTABLE_LET_PATTERNS lint. Returns `true` iff
+ // `pat_id` is part of a let chain — including the early-return cases where
+ // another `let` in the same chain is (or was) responsible for the check —
+ // so the caller knows to skip its own per-pattern check.
+ fn check_let_chain(&mut self, cx: &mut MatchCheckCtxt<'p, 'tcx>, pat_id: HirId) -> bool {
+ let hir = self.tcx.hir();
+ let parent = hir.get_parent_node(pat_id);
+
+ // First, figure out if the given pattern is part of a let chain,
+ // and if so, obtain the top node of the chain.
+ let mut top = parent;
+ let mut part_of_chain = false;
+ loop {
+ let new_top = hir.get_parent_node(top);
+ if let hir::Node::Expr(
+ hir::Expr {
+ kind: hir::ExprKind::Binary(Spanned { node: hir::BinOpKind::And, .. }, lhs, rhs),
+ ..
+ },
+ ..,
+ ) = hir.get(new_top)
+ {
+ // If this isn't the first iteration, we need to check
+ // if there is a let expr before us in the chain, so
+ // that we avoid doubly checking the let chain.
+
+ // The way a chain of &&s is encoded is ((let ... && let ...) && let ...) && let ...
+ // as && is left-to-right associative. Thus, we need to check rhs.
+ if part_of_chain && matches!(rhs.kind, hir::ExprKind::Let(..)) {
+ return true;
+ }
+ // If there is a `let` at the lhs and we are its rhs, we don't do any
+ // checking either — the chain is handled when that lhs `let` is visited.
+ if !part_of_chain && matches!(lhs.kind, hir::ExprKind::Let(..)) && rhs.hir_id == top
+ {
+ return true;
+ }
+ } else {
+ // We've reached the top.
+ break;
+ }
+
+ // Since this function is called within a let context, it is reasonable to assume that any parent
+ // `&&` implies a let chain
+ part_of_chain = true;
+ top = new_top;
+ }
+ if !part_of_chain {
+ return false;
+ }
+
+ // Second, obtain the refutabilities of all exprs in the chain,
+ // and record chain members that aren't let exprs.
+ // Each entry is `Some((span, refutable))` for a `let` expr and `None` for
+ // any other chain member.
+ let mut chain_refutabilities = Vec::new();
+ let hir::Node::Expr(top_expr) = hir.get(top) else {
+ // We ensure right above that it's an Expr
+ unreachable!()
+ };
+ let mut cur_expr = top_expr;
+ loop {
+ let mut add = |expr: &hir::Expr<'tcx>| {
+ let refutability = match expr.kind {
+ hir::ExprKind::Let(hir::Let { pat, init, span, .. }) => {
+ let mut ncx = self.new_cx(init.hir_id);
+ let tpat = self.lower_pattern(&mut ncx, pat, &mut false);
+
+ let refutable = !is_let_irrefutable(&mut ncx, pat.hir_id, tpat);
+ Some((*span, refutable))
+ }
+ _ => None,
+ };
+ chain_refutabilities.push(refutability);
+ };
+ // Walk down the left-associative `&&` spine, collecting each rhs; the
+ // final lhs is the first chain member.
+ if let hir::Expr {
+ kind: hir::ExprKind::Binary(Spanned { node: hir::BinOpKind::And, .. }, lhs, rhs),
+ ..
+ } = cur_expr
+ {
+ add(rhs);
+ cur_expr = lhs;
+ } else {
+ add(cur_expr);
+ break;
+ }
+ }
+ // Members were collected right-to-left; restore source order.
+ chain_refutabilities.reverse();
+
+ // Third, emit the actual warnings.
+
+ if chain_refutabilities.iter().all(|r| matches!(*r, Some((_, false)))) {
+ // The entire chain is made up of irrefutable `let` statements
+ let let_source = let_source_parent(self.tcx, top, None);
+ irrefutable_let_patterns(
+ cx.tcx,
+ top,
+ let_source,
+ chain_refutabilities.len(),
+ top_expr.span,
+ );
+ return true;
+ }
+ // Lints one irrefutable affix (prefix or suffix) of the chain, with a span
+ // covering the first through the last member of that affix.
+ let lint_affix = |affix: &[Option<(Span, bool)>], kind, suggestion| {
+ let span_start = affix[0].unwrap().0;
+ let span_end = affix.last().unwrap().unwrap().0;
+ let span = span_start.to(span_end);
+ let cnt = affix.len();
+ cx.tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, top, span, |lint| {
+ let s = pluralize!(cnt);
+ let mut diag = lint.build(&format!("{kind} irrefutable pattern{s} in let chain"));
+ diag.note(&format!(
+ "{these} pattern{s} will always match",
+ these = pluralize!("this", cnt),
+ ));
+ diag.help(&format!(
+ "consider moving {} {suggestion}",
+ if cnt > 1 { "them" } else { "it" }
+ ));
+ diag.emit()
+ });
+ };
+ if let Some(until) = chain_refutabilities.iter().position(|r| !matches!(*r, Some((_, false)))) && until > 0 {
+ // The chain has a non-zero prefix of irrefutable `let` statements.
+
+ // Check if the let source is while, for there is no alternative place to put a prefix,
+ // and we shouldn't lint.
+ let let_source = let_source_parent(self.tcx, top, None);
+ if !matches!(let_source, LetSource::WhileLet) {
+ // Emit the lint
+ let prefix = &chain_refutabilities[..until];
+ lint_affix(prefix, "leading", "outside of the construct");
+ }
+ }
+ if let Some(from) = chain_refutabilities.iter().rposition(|r| !matches!(*r, Some((_, false)))) && from != (chain_refutabilities.len() - 1) {
+ // The chain has a non-empty suffix of irrefutable `let` statements
+ let suffix = &chain_refutabilities[from + 1..];
+ lint_affix(suffix, "trailing", "into the body");
+ }
+ true
+ }
+
fn check_irrefutable(&self, pat: &'tcx Pat<'tcx>, origin: &str, sp: Option<Span>) {
let mut cx = self.new_cx(pat.hir_id);
&& let pat_ty = cx.typeck_results.pat_ty(p).peel_refs()
&& let ty::Adt(edef, _) = pat_ty.kind()
&& edef.is_enum()
- && edef.variants.iter().any(|variant| {
+ && edef.variants().iter().any(|variant| {
variant.ident(cx.tcx) == ident && variant.ctor_kind == CtorKind::Const
})
{
- let variant_count = edef.variants.len();
+ let variant_count = edef.variants().len();
cx.tcx.struct_span_lint_hir(
BINDINGS_WITH_VARIANT_NAME,
p.hir_id,
p.span,
|lint| {
- let ty_path = cx.tcx.def_path_str(edef.did);
+ let ty_path = cx.tcx.def_path_str(edef.did());
let mut err = lint.build(&format!(
"pattern binding `{}` is named the same as one \
of the variants of the type `{}`",
}
+// Reports a single irrefutable `let` pattern: determines the `LetSource` for
+// `id` and forwards to `irrefutable_let_patterns` with a count of 1.
fn irrefutable_let_pattern(tcx: TyCtxt<'_>, id: HirId, span: Span) {
+ let source = let_source(tcx, id);
+ irrefutable_let_patterns(tcx, id, source, 1, span);
+}
+
+fn irrefutable_let_patterns(
+ tcx: TyCtxt<'_>,
+ id: HirId,
+ source: LetSource,
+ count: usize,
+ span: Span,
+) {
macro_rules! emit_diag {
(
$lint:expr,
$note_sufix:expr,
$help_sufix:expr
) => {{
- let mut diag = $lint.build(concat!("irrefutable ", $source_name, " pattern"));
- diag.note(concat!("this pattern will always match, so the ", $note_sufix));
+ let s = pluralize!(count);
+ let these = pluralize!("this", count);
+ let mut diag = $lint.build(&format!("irrefutable {} pattern{s}", $source_name));
+ diag.note(&format!("{these} pattern{s} will always match, so the {}", $note_sufix));
diag.help(concat!("consider ", $help_sufix));
diag.emit()
}};
}
- let source = let_source(tcx, id);
let span = match source {
LetSource::LetElse(span) => span,
_ => span,
});
}
-fn check_let_reachability<'p, 'tcx>(
+fn is_let_irrefutable<'p, 'tcx>(
cx: &mut MatchCheckCtxt<'p, 'tcx>,
pat_id: HirId,
pat: &'p DeconstructedPat<'p, 'tcx>,
- span: Span,
-) {
- if is_let_chain(cx.tcx, pat_id) {
- return;
- }
-
+) -> bool {
let arms = [MatchArm { pat, hir_id: pat_id, has_guard: false }];
let report = compute_match_usefulness(&cx, &arms, pat_id, pat.ty());
// `is_uninhabited` check.
report_arm_reachability(&cx, &report);
- if report.non_exhaustiveness_witnesses.is_empty() {
- // The match is exhaustive, i.e. the `if let` pattern is irrefutable.
- irrefutable_let_pattern(cx.tcx, pat_id, span);
- }
+ // If the list of witnesses is empty, the match is exhaustive,
+ // i.e. the `if let` pattern is irrefutable.
+ report.non_exhaustiveness_witnesses.is_empty()
}
/// Report unreachable arms, if any.
) {
let is_empty_match = arms.is_empty();
let non_empty_enum = match scrut_ty.kind() {
- ty::Adt(def, _) => def.is_enum() && !def.variants.is_empty(),
+ ty::Adt(def, _) => def.is_enum() && !def.variants().is_empty(),
_ => false,
};
// In the case of an empty match, replace the '`_` not covered' diagnostic with something more
};
let is_variant_list_non_exhaustive = match scrut_ty.kind() {
- ty::Adt(def, _) if def.is_variant_list_non_exhaustive() && !def.did.is_local() => true,
+ ty::Adt(def, _) if def.is_variant_list_non_exhaustive() && !def.did().is_local() => true,
_ => false,
};
if let ty::Adt(def, _) = ty.kind() {
let mut spans = vec![];
if witnesses.len() < 5 {
- for sp in maybe_point_at_variant(cx, def, witnesses.iter()) {
+ for sp in maybe_point_at_variant(cx, *def, witnesses.iter()) {
spans.push(sp);
}
}
let def_span = cx
.tcx
.hir()
- .get_if_local(def.did)
+ .get_if_local(def.did())
.and_then(|node| node.ident())
.map(|ident| ident.span)
- .unwrap_or_else(|| cx.tcx.def_span(def.did));
+ .unwrap_or_else(|| cx.tcx.def_span(def.did()));
let mut span: MultiSpan =
if spans.is_empty() { def_span.into() } else { spans.clone().into() };
fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
cx: &MatchCheckCtxt<'p, 'tcx>,
- def: &AdtDef,
+ def: AdtDef<'tcx>,
patterns: impl Iterator<Item = &'a DeconstructedPat<'p, 'tcx>>,
) -> Vec<Span> {
use Constructor::*;
let mut covered = vec![];
for pattern in patterns {
if let Variant(variant_index) = pattern.ctor() {
- if let ty::Adt(this_def, _) = pattern.ty().kind() && this_def.did != def.did {
+ if let ty::Adt(this_def, _) = pattern.ty().kind() && this_def.did() != def.did() {
continue;
}
- let sp = def.variants[*variant_index].ident(cx.tcx).span;
+ let sp = def.variant(*variant_index).ident(cx.tcx).span;
if covered.contains(&sp) {
// Don't point at variants that have already been covered due to other patterns to avoid
// visual clutter.
let hir = tcx.hir();
let parent = hir.get_parent_node(pat_id);
+ let_source_parent(tcx, parent, Some(pat_id))
+}
+
+fn let_source_parent(tcx: TyCtxt<'_>, parent: HirId, pat_id: Option<HirId>) -> LetSource {
+ let hir = tcx.hir();
+
let parent_node = hir.get(parent);
match parent_node {
hir::Node::Arm(hir::Arm {
guard: Some(hir::Guard::IfLet(&hir::Pat { hir_id, .. }, _)),
..
- }) if hir_id == pat_id => {
+ }) if Some(hir_id) == pat_id => {
return LetSource::IfLetGuard;
}
hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Let(..), span, .. }) => {
LetSource::GenericLet
}
-
-// Since this function is called within a let context, it is reasonable to assume that any parent
-// `&&` infers a let chain
-fn is_let_chain(tcx: TyCtxt<'_>, pat_id: HirId) -> bool {
- let hir = tcx.hir();
- let parent = hir.get_parent_node(pat_id);
- let parent_parent = hir.get_parent_node(parent);
- matches!(
- hir.get(parent_parent),
- hir::Node::Expr(
- hir::Expr {
- kind: hir::ExprKind::Binary(Spanned { node: hir::BinOpKind::And, .. }, ..),
- ..
- },
- ..
- )
- )
-}
self.infcx.tcx
}
- fn adt_derive_msg(&self, adt_def: &AdtDef) -> String {
- let path = self.tcx().def_path_str(adt_def.did);
+ fn adt_derive_msg(&self, adt_def: AdtDef<'tcx>) -> String {
+ let path = self.tcx().def_path_str(adt_def.did());
format!(
"to use a constant of type `{}` in a pattern, \
`{}` must be annotated with `#[derive(PartialEq, Eq)]`",
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
self.id,
self.span,
- |lint| lint.build(&msg).emit(),
+ |lint| {
+ lint.build(&msg).emit();
+ },
);
} else {
debug!(
lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
id,
span,
- |lint| lint.build("floating-point types cannot be used in patterns").emit(),
+ |lint| {
+ lint.build("floating-point types cannot be used in patterns").emit();
+ },
);
}
PatKind::Constant { value: cv }
if self.include_lint_checks {
tcx.sess.span_err(span, msg);
} else {
- tcx.sess.delay_span_bug(span, msg)
+ tcx.sess.delay_span_bug(span, msg);
}
PatKind::Wild
}
if self.include_lint_checks {
tcx.sess.span_err(self.span, &msg);
} else {
- tcx.sess.delay_span_bug(self.span, &msg)
+ tcx.sess.delay_span_bug(self.span, &msg);
}
PatKind::Wild
}
cv.ty(),
cv.ty(),
);
- lint.build(&msg).emit()
+ lint.build(&msg).emit();
},
);
}
adt_def,
cv.ty()
);
- let path = tcx.def_path_str(adt_def.did);
+ let path = tcx.def_path_str(adt_def.did());
let msg = format!(
"to use a constant of type `{}` in a pattern, \
`{}` must be annotated with `#[derive(PartialEq, Eq)]`",
if self.include_lint_checks {
tcx.sess.span_err(span, &msg);
} else {
- tcx.sess.delay_span_bug(span, &msg)
+ tcx.sess.delay_span_bug(span, &msg);
}
PatKind::Wild
}
ty::Adt(adt_def, substs) if adt_def.is_enum() => {
let destructured = tcx.destructure_const(param_env.and(cv));
PatKind::Variant {
- adt_def,
+ adt_def: *adt_def,
substs,
variant_index: destructured
.variant
if self.include_lint_checks {
tcx.sess.span_err(span, &msg);
} else {
- tcx.sess.delay_span_bug(span, &msg)
+ tcx.sess.delay_span_bug(span, &msg);
}
PatKind::Wild
}
lint::builtin::INDIRECT_STRUCTURAL_MATCH,
self.id,
self.span,
- |lint| lint.build(&msg).emit(),
+ |lint| {lint.build(&msg).emit();},
);
}
PatKind::Constant { value: cv }
if self.include_lint_checks {
tcx.sess.span_err(span, &msg);
} else {
- tcx.sess.delay_span_bug(span, &msg)
+ tcx.sess.delay_span_bug(span, &msg);
}
}
PatKind::Wild
ty::RawPtr(pointee) if pointee.ty.is_sized(tcx.at(span), param_env) => {
PatKind::Constant { value: cv }
}
- // FIXME: these can have very suprising behaviour where optimization levels or other
+ // FIXME: these can have very surprising behaviour where optimization levels or other
// compilation choices change the runtime behaviour of the match.
// See https://github.com/rust-lang/rust/issues/70861 for examples.
ty::FnPtr(..) | ty::RawPtr(..) => {
lint::builtin::POINTER_STRUCTURAL_MATCH,
id,
span,
- |lint| lint.build(&msg).emit(),
+ |lint| {
+ lint.build(&msg).emit();
+ },
);
}
PatKind::Constant { value: cv }
if self.include_lint_checks {
tcx.sess.span_err(span, &msg);
} else {
- tcx.sess.delay_span_bug(span, &msg)
+ tcx.sess.delay_span_bug(span, &msg);
}
PatKind::Wild
}
lint::builtin::NONTRIVIAL_STRUCTURAL_MATCH,
id,
span,
- |lint| lint.build(&msg).emit(),
+ |lint| {
+ lint.build(&msg).emit();
+ },
);
}
/// This means that the variant has a stdlib unstable feature marking it.
pub(super) fn is_unstable_variant(&self, pcx: PatCtxt<'_, '_, 'tcx>) -> bool {
if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
- let variant_def_id = adt.variants[*idx].def_id;
+ let variant_def_id = adt.variant(*idx).def_id;
// Filter variants that depend on a disabled unstable feature.
return matches!(
pcx.cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None),
}
/// Checks if the `Constructor` is a `Constructor::Variant` with a `#[doc(hidden)]`
- /// attribute.
+ /// attribute from a type not local to the current crate.
pub(super) fn is_doc_hidden_variant(&self, pcx: PatCtxt<'_, '_, 'tcx>) -> bool {
if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
- let variant_def_id = adt.variants[*idx].def_id;
- return pcx.cx.tcx.is_doc_hidden(variant_def_id);
+ let variant_def_id = adt.variants()[*idx].def_id;
+ return pcx.cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local();
}
false
}
- fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx {
+ fn variant_index_for_adt(&self, adt: ty::AdtDef<'tcx>) -> VariantIdx {
match *self {
Variant(idx) => idx,
Single => {
// patterns. If we're here we can assume this is a box pattern.
1
} else {
- let variant = &adt.variants[self.variant_index_for_adt(adt)];
+ let variant = &adt.variant(self.variant_index_for_adt(*adt));
Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant).count()
}
}
// exception is if the pattern is at the top level, because we want empty matches to be
// considered exhaustive.
let is_secretly_empty =
- def.variants.is_empty() && !is_exhaustive_pat_feature && !pcx.is_top_level;
+ def.variants().is_empty() && !is_exhaustive_pat_feature && !pcx.is_top_level;
let mut ctors: SmallVec<[_; 1]> = def
- .variants
+ .variants()
.iter_enumerated()
.filter(|(_, v)| {
// If `exhaustive_patterns` is enabled, we exclude variants known to be
) -> impl Iterator<Item = (Field, Ty<'tcx>)> + Captures<'a> + Captures<'p> {
let ty::Adt(adt, substs) = ty.kind() else { bug!() };
// Whether we must not match the fields of this variant exhaustively.
- let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did.is_local();
+ let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
variant.fields.iter().enumerate().filter_map(move |(i, field)| {
let ty = field.ty(cx.tcx, substs);
// patterns. If we're here we can assume this is a box pattern.
Fields::wildcards_from_tys(cx, once(substs.type_at(0)))
} else {
- let variant = &adt.variants[constructor.variant_index_for_adt(adt)];
+ let variant = &adt.variant(constructor.variant_index_for_adt(*adt));
let tys = Fields::list_variant_nonhidden_fields(cx, ty, variant)
.map(|(_, ty)| ty);
Fields::wildcards_from_tys(cx, tys)
PatKind::Variant { variant_index, .. } => Variant(*variant_index),
_ => bug!(),
};
- let variant = &adt.variants[ctor.variant_index_for_adt(adt)];
+ let variant = &adt.variant(ctor.variant_index_for_adt(*adt));
// For each field in the variant, we store the relevant index into `self.fields` if any.
let mut field_id_to_id: Vec<Option<usize>> =
(0..variant.fields.len()).map(|_| None).collect();
PatKind::Deref { subpattern: subpatterns.next().unwrap() }
}
ty::Adt(adt_def, substs) => {
- let variant_index = self.ctor.variant_index_for_adt(adt_def);
- let variant = &adt_def.variants[variant_index];
+ let variant_index = self.ctor.variant_index_for_adt(*adt_def);
+ let variant = &adt_def.variant(variant_index);
let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant)
.zip(subpatterns)
.map(|((field, _ty), pattern)| FieldPat { field, pattern })
.collect();
if adt_def.is_enum() {
- PatKind::Variant { adt_def, substs, variant_index, subpatterns }
+ PatKind::Variant { adt_def: *adt_def, substs, variant_index, subpatterns }
} else {
PatKind::Leaf { subpatterns }
}
}
ty::Adt(..) | ty::Tuple(..) => {
let variant = match self.ty.kind() {
- ty::Adt(adt, _) => {
- Some(&adt.variants[self.ctor.variant_index_for_adt(adt)])
- }
+ ty::Adt(adt, _) => Some(adt.variant(self.ctor.variant_index_for_adt(*adt))),
ty::Tuple(_) => None,
_ => unreachable!(),
};
CloneImpls! { <'tcx>
Span, Field, Mutability, Symbol, hir::HirId, usize, ty::Const<'tcx>,
- Region<'tcx>, Ty<'tcx>, BindingMode, &'tcx AdtDef,
+ Region<'tcx>, Ty<'tcx>, BindingMode, AdtDef<'tcx>,
SubstsRef<'tcx>, &'tcx GenericArg<'tcx>, UserType<'tcx>,
UserTypeProjection, PatTyProj<'tcx>
}
pub(super) fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
match ty.kind() {
ty::Adt(def, ..) => {
- def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did.is_local()
+ def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did().is_local()
}
_ => false,
}
match ty.kind() {
ty::Adt(adt_def, ..) => {
if let UserType::TypeOf(ref mut did, _) = &mut user_ty.value {
- *did = adt_def.did;
+ *did = adt_def.did();
}
Some(user_ty)
}
self.drop_ladder(fields, succ, unwind).0
}
- fn open_drop_for_box(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+ fn open_drop_for_box(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
let interior = self.tcx().mk_place_deref(self.place);
self.drop_subpath(interior, interior_path, succ, unwind_succ)
}
- fn open_drop_for_adt(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock {
+ fn open_drop_for_adt(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
- if adt.variants.is_empty() {
+ if adt.variants().is_empty() {
return self.elaborator.patch().new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
}
let skip_contents =
- adt.is_union() || Some(adt.did) == self.tcx().lang_items().manually_drop();
+ adt.is_union() || Some(adt.did()) == self.tcx().lang_items().manually_drop();
let contents_drop = if skip_contents {
(self.succ, self.unwind)
} else {
fn open_drop_for_adt_contents(
&mut self,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
) -> (BasicBlock, Unwind) {
let (succ, unwind) = self.drop_ladder_bottom();
let fields = self.move_paths_for_fields(
self.place,
self.path,
- &adt.variants[VariantIdx::new(0)],
+ &adt.variant(VariantIdx::new(0)),
substs,
);
self.drop_ladder(fields, succ, unwind)
fn open_drop_for_multivariant(
&mut self,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
succ: BasicBlock,
unwind: Unwind,
) -> (BasicBlock, Unwind) {
- let mut values = Vec::with_capacity(adt.variants.len());
- let mut normal_blocks = Vec::with_capacity(adt.variants.len());
+ let mut values = Vec::with_capacity(adt.variants().len());
+ let mut normal_blocks = Vec::with_capacity(adt.variants().len());
let mut unwind_blocks =
- if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants.len())) };
+ if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants().len())) };
let mut have_otherwise_with_drop_glue = false;
let mut have_otherwise = false;
let tcx = self.tcx();
for (variant_index, discr) in adt.discriminants(tcx) {
- let variant = &adt.variants[variant_index];
+ let variant = &adt.variant(variant_index);
let subpath = self.elaborator.downcast_subpath(self.path, variant_index);
if let Some(variant_path) = subpath {
fn adt_switch_block(
&mut self,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
blocks: Vec<BasicBlock>,
values: &[u128],
succ: BasicBlock,
// Additionally, we do not want to switch on the
// discriminant after it is free-ed, because that
// way lies only trouble.
- let discr_ty = adt.repr.discr_type().to_ty(self.tcx());
+ let discr_ty = adt.repr().discr_type().to_ty(self.tcx());
let discr = Place::from(self.new_temp(discr_ty));
let discr_rv = Rvalue::Discriminant(self.place);
let switch_block = BasicBlockData {
ty::Tuple(fields) => self.open_drop_for_tuple(fields),
ty::Adt(def, substs) => {
if def.is_box() {
- self.open_drop_for_box(def, substs)
+ self.open_drop_for_box(*def, substs)
} else {
- self.open_drop_for_adt(def, substs)
+ self.open_drop_for_adt(*def, substs)
}
}
ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind),
/// The contained value will not be dropped.
fn box_free_block(
&mut self,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
target: BasicBlock,
unwind: Unwind,
/// value).
fn unelaborated_free_block(
&mut self,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
substs: SubstsRef<'tcx>,
target: BasicBlock,
unwind: Unwind,
let tcx = self.tcx();
let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
let free_func = tcx.require_lang_item(LangItem::BoxFree, Some(self.source_info.span));
- let args = adt.variants[VariantIdx::new(0)]
+ let args = adt
+ .variant(VariantIdx::new(0))
.fields
.iter()
.enumerate()
);
propagate(pred, &tmp);
}
+
mir::TerminatorKind::InlineAsm {
destination: Some(dest), ref operands, ..
} if dest == bb => {
propagate(pred, &tmp);
}
+ mir::TerminatorKind::SwitchInt { targets: _, ref discr, switch_ty: _ } => {
+ let mut applier = BackwardSwitchIntEdgeEffectsApplier {
+ pred,
+ exit_state,
+ values: &body.switch_sources()[bb][pred],
+ bb,
+ propagate: &mut propagate,
+ effects_applied: false,
+ };
+
+ analysis.apply_switch_int_edge_effects(pred, discr, &mut applier);
+
+ if !applier.effects_applied {
+ propagate(pred, exit_state)
+ }
+ }
+
// Ignore dead unwinds.
mir::TerminatorKind::Call { cleanup: Some(unwind), .. }
| mir::TerminatorKind::Assert { cleanup: Some(unwind), .. }
}
}
+struct BackwardSwitchIntEdgeEffectsApplier<'a, D, F> {
+ pred: BasicBlock,
+ exit_state: &'a mut D,
+ values: &'a [Option<u128>],
+ bb: BasicBlock,
+ propagate: &'a mut F,
+
+ effects_applied: bool,
+}
+
+impl<D, F> super::SwitchIntEdgeEffects<D> for BackwardSwitchIntEdgeEffectsApplier<'_, D, F>
+where
+ D: Clone,
+ F: FnMut(BasicBlock, &D),
+{
+ fn apply(&mut self, mut apply_edge_effect: impl FnMut(&mut D, SwitchIntTarget)) {
+ assert!(!self.effects_applied);
+
+ let targets = self.values.iter().map(|&value| SwitchIntTarget { value, target: self.bb });
+
+ let mut tmp = None;
+ for target in targets {
+ let tmp = opt_clone_from_or_clone(&mut tmp, self.exit_state);
+ apply_edge_effect(tmp, target);
+ (self.propagate)(self.pred, tmp);
+ }
+
+ self.effects_applied = true;
+ }
+}
+
/// Dataflow that runs from the entry of a block (the first statement), to its exit (terminator).
pub struct Forward;
}
SwitchInt { ref targets, ref discr, switch_ty: _ } => {
- let mut applier = SwitchIntEdgeEffectApplier {
+ let mut applier = ForwardSwitchIntEdgeEffectsApplier {
exit_state,
targets,
propagate,
analysis.apply_switch_int_edge_effects(bb, discr, &mut applier);
- let SwitchIntEdgeEffectApplier {
- exit_state, mut propagate, effects_applied, ..
+ let ForwardSwitchIntEdgeEffectsApplier {
+ exit_state,
+ mut propagate,
+ effects_applied,
+ ..
} = applier;
if !effects_applied {
}
}
-struct SwitchIntEdgeEffectApplier<'a, D, F> {
+struct ForwardSwitchIntEdgeEffectsApplier<'a, D, F> {
exit_state: &'a mut D,
targets: &'a SwitchTargets,
propagate: F,
effects_applied: bool,
}
-impl<D, F> super::SwitchIntEdgeEffects<D> for SwitchIntEdgeEffectApplier<'_, D, F>
+impl<D, F> super::SwitchIntEdgeEffects<D> for ForwardSwitchIntEdgeEffectsApplier<'_, D, F>
where
D: Clone,
F: FnMut(BasicBlock, &D),
/// about a given `SwitchInt` terminator for each one of its edges—and more efficient—the
/// engine doesn't need to clone the exit state for a block unless
/// `SwitchIntEdgeEffects::apply` is actually called.
- ///
- /// FIXME: This class of effects is not supported for backward dataflow analyses.
fn apply_switch_int_edge_effects(
&self,
_block: BasicBlock,
body: &'mir mir::Body<'tcx>,
block: &'mir mir::BasicBlockData<'tcx>,
switch_on: mir::Place<'tcx>,
-) -> Option<(mir::Place<'tcx>, &'tcx ty::AdtDef)> {
+) -> Option<(mir::Place<'tcx>, ty::AdtDef<'tcx>)> {
for statement in block.statements.iter().rev() {
match &statement.kind {
mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated)))
if *lhs == switch_on =>
{
- match &discriminated.ty(body, tcx).ty.kind() {
- ty::Adt(def, _) => return Some((*discriminated, def)),
+ match discriminated.ty(body, tcx).ty.kind() {
+ ty::Adt(def, _) => return Some((*discriminated, *def)),
// `Rvalue::Discriminant` is also used to get the active yield point for a
// generator, but we do not need edge-specific effects in that case. This may
pub use self::framework::{
fmt, graphviz, lattice, visit_results, Analysis, AnalysisDomain, Backward, CallReturnPlaces,
Direction, Engine, Forward, GenKill, GenKillAnalysis, JoinSemiLattice, Results, ResultsCursor,
- ResultsRefCursor, ResultsVisitable, ResultsVisitor,
+ ResultsRefCursor, ResultsVisitable, ResultsVisitor, SwitchIntEdgeEffects,
};
use self::move_paths::MoveData;
place: &Place<'tcx>,
const_item: DefId,
location: Location,
- decorate: impl for<'b> FnOnce(LintDiagnosticBuilder<'b>) -> DiagnosticBuilder<'b, ()>,
+ decorate: impl for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) -> DiagnosticBuilder<'b, ()>,
) {
// Don't lint on borrowing/assigning when a dereference is involved.
// If we 'leave' the temporary via a dereference, we must
|lint| {
decorate(lint)
.span_note(self.tcx.def_span(const_item), "`const` item defined here")
- .emit()
+ .emit();
},
);
}
does not derive Copy (error E0133)"
.to_string()
};
- lint.build(&message).emit()
+ lint.build(&message).emit();
});
}
reference with a raw pointer and use `read_unaligned`/`write_unaligned` \
(loads and stores via `*p` must be properly aligned even when using raw pointers)"
)
- .emit()
+ .emit();
},
);
}
// temporary holding the static pointer to avoid duplicate errors
// <https://github.com/rust-lang/rust/pull/78068#issuecomment-731753506>.
if decl.internal && place.projection.first() == Some(&ProjectionElem::Deref) {
- // If the projection root is an artifical local that we introduced when
+ // If the projection root is an artificial local that we introduced when
// desugaring `static`, give a more specific error message
// (avoid the general "raw pointer" clause below, that would only be confusing).
if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
ProjectionElem::Field(..) => {
let ty = place_base.ty(&self.body.local_decls, self.tcx).ty;
if let ty::Adt(def, _) = ty.kind() {
- if self.tcx.layout_scalar_valid_range(def.did)
+ if self.tcx.layout_scalar_valid_range(def.did())
!= (Bound::Unbounded, Bound::Unbounded)
{
let details = if is_mut_use {
tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, usage_lint_root);
assert_eq!(level, Level::Allow);
lint::explain_lint_level_source(
- tcx.sess,
UNSAFE_OP_IN_UNSAFE_FN,
Level::Allow,
source,
}
}
- let mut eligable_locals = Vec::new();
+ let mut eligible_locals = Vec::new();
for (local, mutating_uses) in visitor.local_mutating_uses.drain_enumerated(..) {
if mutating_uses != 1 || !locals_to_debuginfo.contains(local) {
continue;
&bb.statements[location.statement_index].kind
{
if let Some(local) = p.as_local() {
- eligable_locals.push((local, *c));
+ eligible_locals.push((local, *c));
}
}
}
}
- eligable_locals
+ eligible_locals
}
impl Visitor<'_> for LocalUseVisitor {
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def::DefKind;
-use rustc_hir::HirId;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
use rustc_middle::mir::visit::{
MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
};
use rustc_middle::mir::{
- AssertKind, BasicBlock, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, LocalKind,
- Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement,
- StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
+ BasicBlock, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, LocalKind, Location,
+ Operand, Place, Rvalue, SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UnOp,
+ RETURN_PLACE,
};
use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::subst::{InternalSubsts, Subst};
-use rustc_middle::ty::{
- self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeFoldable,
-};
-use rustc_session::lint;
+use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeFoldable};
use rustc_span::{def_id::DefId, Span};
use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits;
use crate::MirPass;
-use rustc_const_eval::const_eval::ConstEvalErr;
use rustc_const_eval::interpret::{
self, compile_time_machine, AllocId, ConstAllocation, ConstValue, CtfeValidationMode, Frame,
ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, MemoryKind, OpTy,
) -> InterpResult<'tcx, InterpOperand<Self::PointerTag>> {
let l = &frame.locals[local];
- if l.value == LocalValue::Uninitialized {
- throw_machine_stop_str!("tried to access an uninitialized local")
+ if l.value == LocalValue::Unallocated {
+ throw_machine_stop_str!("tried to access an unallocated local")
}
l.access()
ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>,
tcx: TyCtxt<'tcx>,
param_env: ParamEnv<'tcx>,
- // FIXME(eddyb) avoid cloning these two fields more than once,
- // by accessing them through `ecx` instead.
- source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ // FIXME(eddyb) avoid cloning this field more than once,
+ // by accessing it through `ecx` instead.
local_decls: IndexVec<Local, LocalDecl<'tcx>>,
// Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
// the last known `SourceInfo` here and just keep revisiting it.
ecx,
tcx,
param_env,
- // FIXME(eddyb) avoid cloning these two fields more than once,
- // by accessing them through `ecx` instead.
- source_scopes: body.source_scopes.clone(),
+ // FIXME(eddyb) avoid cloning this field more than once,
+ // by accessing it through `ecx` instead.
//FIXME(wesleywiser) we can't steal this because `Visitor::super_visit_body()` needs it
local_decls: body.local_decls.clone(),
source_info: None,
/// but not reading from them anymore.
fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
ecx.frame_mut().locals[local] =
- LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
- }
-
- fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> {
- source_info.scope.lint_root(&self.source_scopes)
+ LocalState { value: LocalValue::Unallocated, layout: Cell::new(None) };
}
fn use_ecx<F, T>(&mut self, f: F) -> Option<T>
}
/// Returns the value, if any, of evaluating `c`.
- fn eval_constant(&mut self, c: &Constant<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ fn eval_constant(&mut self, c: &Constant<'tcx>) -> Option<OpTy<'tcx>> {
// FIXME we need to revisit this for #67176
if c.needs_subst() {
return None;
}
- match self.ecx.mir_const_to_op(&c.literal, None) {
- Ok(op) => Some(op),
- Err(error) => {
- let tcx = self.ecx.tcx.at(c.span);
- let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
- if let Some(lint_root) = self.lint_root(source_info) {
- let lint_only = match c.literal {
- ConstantKind::Ty(ct) => match ct.val() {
- // Promoteds must lint and not error as the user didn't ask for them
- ConstKind::Unevaluated(ty::Unevaluated {
- def: _,
- substs: _,
- promoted: Some(_),
- }) => true,
- // Out of backwards compatibility we cannot report hard errors in unused
- // generic functions using associated constants of the generic parameters.
- _ => c.literal.needs_subst(),
- },
- ConstantKind::Val(_, ty) => ty.needs_subst(),
- };
- if lint_only {
- // Out of backwards compatibility we cannot report hard errors in unused
- // generic functions using associated constants of the generic parameters.
- err.report_as_lint(tcx, "erroneous constant used", lint_root, Some(c.span));
- } else {
- err.report_as_error(tcx, "erroneous constant used");
- }
- } else {
- err.report_as_error(tcx, "erroneous constant used");
- }
- None
- }
- }
+ self.ecx.mir_const_to_op(&c.literal, None).ok()
}
/// Returns the value, if any, of evaluating `place`.
/// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
/// or `eval_place`, depending on the variant of `Operand` used.
- fn eval_operand(&mut self, op: &Operand<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<OpTy<'tcx>> {
match *op {
- Operand::Constant(ref c) => self.eval_constant(c, source_info),
+ Operand::Constant(ref c) => self.eval_constant(c),
Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
}
}
- fn report_assert_as_lint(
- &self,
- lint: &'static lint::Lint,
- source_info: SourceInfo,
- message: &'static str,
- panic: AssertKind<impl std::fmt::Debug>,
- ) {
- if let Some(lint_root) = self.lint_root(source_info) {
- self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, |lint| {
- let mut err = lint.build(message);
- err.span_label(source_info.span, format!("{:?}", panic));
- err.emit()
- });
- }
- }
-
- fn check_unary_op(
- &mut self,
- op: UnOp,
- arg: &Operand<'tcx>,
- source_info: SourceInfo,
- ) -> Option<()> {
- if let (val, true) = self.use_ecx(|this| {
+ fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>) -> Option<()> {
+ if self.use_ecx(|this| {
let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
- Ok((val, overflow))
+ Ok(overflow)
})? {
// `AssertKind` only has an `OverflowNeg` variant, so make sure that is
// appropriate to use.
assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
- self.report_assert_as_lint(
- lint::builtin::ARITHMETIC_OVERFLOW,
- source_info,
- "this arithmetic operation will overflow",
- AssertKind::OverflowNeg(val.to_const_int()),
- );
return None;
}
op: BinOp,
left: &Operand<'tcx>,
right: &Operand<'tcx>,
- source_info: SourceInfo,
) -> Option<()> {
let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
let r_bits = r.to_scalar().ok();
let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
- debug!("check_binary_op: reporting assert for {:?}", source_info);
- self.report_assert_as_lint(
- lint::builtin::ARITHMETIC_OVERFLOW,
- source_info,
- "this arithmetic operation will overflow",
- AssertKind::Overflow(
- op,
- match l {
- Some(l) => l.to_const_int(),
- // Invent a dummy value, the diagnostic ignores it anyway
- None => ConstInt::new(
- ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
- left_ty.is_signed(),
- left_ty.is_ptr_sized_integral(),
- ),
- },
- r.to_const_int(),
- ),
- );
return None;
}
}
let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
Ok(overflow)
})? {
- self.report_assert_as_lint(
- lint::builtin::ARITHMETIC_OVERFLOW,
- source_info,
- "this arithmetic operation will overflow",
- AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
- );
return None;
}
}
}
}
- fn const_prop(
- &mut self,
- rvalue: &Rvalue<'tcx>,
- source_info: SourceInfo,
- place: Place<'tcx>,
- ) -> Option<()> {
+ fn const_prop(&mut self, rvalue: &Rvalue<'tcx>, place: Place<'tcx>) -> Option<()> {
// Perform any special handling for specific Rvalue types.
// Generally, checks here fall into one of two categories:
// 1. Additional checking to provide useful lints to the user
// lint.
Rvalue::UnaryOp(op, arg) => {
trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
- self.check_unary_op(*op, arg, source_info)?;
+ self.check_unary_op(*op, arg)?;
}
Rvalue::BinaryOp(op, box (left, right)) => {
trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
- self.check_binary_op(*op, left, right, source_info)?;
+ self.check_binary_op(*op, left, right)?;
}
Rvalue::CheckedBinaryOp(op, box (left, right)) => {
trace!(
left,
right
);
- self.check_binary_op(*op, left, right, source_info)?;
+ self.check_binary_op(*op, left, right)?;
}
// Do not try creating references (#67862)
fn visit_constant(&mut self, constant: &mut Constant<'tcx>, location: Location) {
trace!("visit_constant: {:?}", constant);
self.super_constant(constant, location);
- self.eval_constant(constant, self.source_info.unwrap());
+ self.eval_constant(constant);
}
fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
self.source_info = Some(source_info);
if let StatementKind::Assign(box (place, ref mut rval)) = statement.kind {
let can_const_prop = self.ecx.machine.can_const_prop[place.local];
- if let Some(()) = self.const_prop(rval, source_info, place) {
+ if let Some(()) = self.const_prop(rval, place) {
// This will return None if the above `const_prop` invocation only "wrote" a
// type whose creation requires no write. E.g. a generator whose initial state
// consists solely of uninitialized memory (so it doesn't capture any locals).
let frame = self.ecx.frame_mut();
frame.locals[local].value =
if let StatementKind::StorageLive(_) = statement.kind {
- LocalValue::Uninitialized
+ LocalValue::Unallocated
} else {
LocalValue::Dead
};
self.source_info = Some(source_info);
self.super_terminator(terminator, location);
match &mut terminator.kind {
- TerminatorKind::Assert { expected, ref msg, ref mut cond, .. } => {
- if let Some(ref value) = self.eval_operand(&cond, source_info) {
+ TerminatorKind::Assert { expected, ref mut cond, .. } => {
+ if let Some(ref value) = self.eval_operand(&cond) {
trace!("assertion on {:?} should be {:?}", value, expected);
let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
let value_const = self.ecx.read_scalar(&value).unwrap();
if expected != value_const {
- enum DbgVal<T> {
- Val(T),
- Underscore,
- }
- impl<T: std::fmt::Debug> std::fmt::Debug for DbgVal<T> {
- fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Self::Val(val) => val.fmt(fmt),
- Self::Underscore => fmt.write_str("_"),
- }
- }
- }
- let mut eval_to_int = |op| {
- // This can be `None` if the lhs wasn't const propagated and we just
- // triggered the assert on the value of the rhs.
- self.eval_operand(op, source_info).map_or(DbgVal::Underscore, |op| {
- DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
- })
- };
- let msg = match msg {
- AssertKind::DivisionByZero(op) => {
- Some(AssertKind::DivisionByZero(eval_to_int(op)))
- }
- AssertKind::RemainderByZero(op) => {
- Some(AssertKind::RemainderByZero(eval_to_int(op)))
- }
- AssertKind::Overflow(bin_op @ (BinOp::Div | BinOp::Rem), op1, op2) => {
- // Division overflow is *UB* in the MIR, and different than the
- // other overflow checks.
- Some(AssertKind::Overflow(
- *bin_op,
- eval_to_int(op1),
- eval_to_int(op2),
- ))
- }
- AssertKind::BoundsCheck { ref len, ref index } => {
- let len = eval_to_int(len);
- let index = eval_to_int(index);
- Some(AssertKind::BoundsCheck { len, index })
- }
- // Remaining overflow errors are already covered by checks on the binary operators.
- AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => None,
- // Need proper const propagator for these.
- _ => None,
- };
// Poison all places this operand references so that further code
// doesn't use the invalid value
match cond {
}
Operand::Constant(_) => {}
}
- if let Some(msg) = msg {
- self.report_assert_as_lint(
- lint::builtin::UNCONDITIONAL_PANIC,
- source_info,
- "this operation will panic at runtime",
- msg,
- );
- }
} else {
if self.should_const_prop(value) {
if let ScalarMaybeUninit::Scalar(scalar) = value_const {
--- /dev/null
+//! Propagates constants for early reporting of statically known
+//! assertion failures
+
+use std::cell::Cell;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def::DefKind;
+use rustc_hir::HirId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{
+ AssertKind, BasicBlock, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, LocalKind,
+ Location, Operand, Place, Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement,
+ StatementKind, Terminator, TerminatorKind, UnOp, RETURN_PLACE,
+};
+use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{
+ self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_session::lint;
+use rustc_span::{def_id::DefId, Span};
+use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits;
+
+use crate::MirLint;
+use rustc_const_eval::const_eval::ConstEvalErr;
+use rustc_const_eval::interpret::{
+ self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ LocalState, LocalValue, MemPlace, MemoryKind, OpTy, Operand as InterpOperand, PlaceTy, Scalar,
+ ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
+};
+
+/// The maximum number of bytes that we'll allocate space for a local or the return value.
+/// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
+/// severely regress performance.
+const MAX_ALLOC_LIMIT: u64 = 1024;
+
+/// Macro for machine-specific `InterpError` without allocation.
+/// (These will never be shown to the user, but they help diagnose ICEs.)
+macro_rules! throw_machine_stop_str {
+ ($($tt:tt)*) => {{
+ // We make a new local type for it. The type itself does not carry any information,
+ // but its vtable (for the `MachineStopType` trait) does.
+ struct Zst;
+ // Printing this type shows the desired string.
+ impl std::fmt::Display for Zst {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, $($tt)*)
+ }
+ }
+ impl rustc_middle::mir::interpret::MachineStopType for Zst {}
+ throw_machine_stop!(Zst)
+ }};
+}
+
+pub struct ConstProp;
+
+impl<'tcx> MirLint<'tcx> for ConstProp {
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ // will be evaluated by miri and produce its errors there
+ if body.source.promoted.is_some() {
+ return;
+ }
+
+ let def_id = body.source.def_id().expect_local();
+ let is_fn_like = tcx.hir().get_by_def_id(def_id).fn_kind().is_some();
+ let is_assoc_const = tcx.def_kind(def_id) == DefKind::AssocConst;
+
+ // Only run const prop on functions, methods, closures and associated constants
+ if !is_fn_like && !is_assoc_const {
+ // skip anon_const/statics/consts because they'll be evaluated by miri anyway
+ trace!("ConstProp skipped for {:?}", def_id);
+ return;
+ }
+
+ let is_generator = tcx.type_of(def_id.to_def_id()).is_generator();
+ // FIXME(wesleywiser) const prop doesn't work on generators because of query cycles
+ // computing their layout.
+ if is_generator {
+ trace!("ConstProp skipped for generator {:?}", def_id);
+ return;
+ }
+
+ // Check if it's even possible to satisfy the 'where' clauses
+ // for this item.
+ // This branch will never be taken for any normal function.
+ // However, it's possible to use `#![feature(trivial_bounds)]` to write
+ // a function with impossible to satisfy clauses, e.g.:
+ // `fn foo() where String: Copy {}`
+ //
+ // We don't usually need to worry about this kind of case,
+ // since we would get a compilation error if the user tried
+ // to call it. However, since we can do const propagation
+ // even without any calls to the function, we need to make
+ // sure that it even makes sense to try to evaluate the body.
+ // If there are unsatisfiable where clauses, then all bets are
+ // off, and we just give up.
+ //
+ // We manually filter the predicates, skipping anything that's not
+ // "global". We are in a potentially generic context
+ // (e.g. we are evaluating a function without substituting generic
+ // parameters), so this filtering serves two purposes:
+ //
+ // 1. We skip evaluating any predicates that we would
+ // never be able to prove are unsatisfiable (e.g. `<T as Foo>`), and
+ // 2. We avoid trying to normalize predicates involving generic
+ // parameters (e.g. `<T as Foo>::MyItem`). This can confuse
+ // the normalization code (leading to cycle errors), since
+ // it's usually never invoked in this way.
+ let predicates = tcx
+ .predicates_of(def_id.to_def_id())
+ .predicates
+ .iter()
+ .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
+ if traits::impossible_predicates(
+ tcx,
+ traits::elaborate_predicates(tcx, predicates).map(|o| o.predicate).collect(),
+ ) {
+ trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", def_id);
+ return;
+ }
+
+ trace!("ConstProp starting for {:?}", def_id);
+
+ let dummy_body = &Body::new(
+ body.source,
+ body.basic_blocks().clone(),
+ body.source_scopes.clone(),
+ body.local_decls.clone(),
+ Default::default(),
+ body.arg_count,
+ Default::default(),
+ body.span,
+ body.generator_kind(),
+ body.tainted_by_errors,
+ );
+
+ // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
+ // constants, instead of just checking for const-folding succeeding.
+ // That would require a uniform one-def no-mutation analysis
+ // and RPO (or recursing when needing the value of a local).
+ let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
+ optimization_finder.visit_body(body);
+
+ trace!("ConstProp done for {:?}", def_id);
+ }
+}
+
+struct ConstPropMachine<'mir, 'tcx> {
+ /// The virtual call stack.
+ stack: Vec<Frame<'mir, 'tcx>>,
+ /// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
+ written_only_inside_own_block_locals: FxHashSet<Local>,
+ /// Locals that need to be cleared after every block terminates.
+ only_propagate_inside_block_locals: BitSet<Local>,
+ can_const_prop: IndexVec<Local, ConstPropMode>,
+}
+
+impl ConstPropMachine<'_, '_> {
+ fn new(
+ only_propagate_inside_block_locals: BitSet<Local>,
+ can_const_prop: IndexVec<Local, ConstPropMode>,
+ ) -> Self {
+ Self {
+ stack: Vec::new(),
+ written_only_inside_own_block_locals: Default::default(),
+ only_propagate_inside_block_locals,
+ can_const_prop,
+ }
+ }
+}
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> {
+ compile_time_machine!(<'mir, 'tcx>);
+ const PANIC_ON_ALLOC_FAIL: bool = true; // all allocations are small (see `MAX_ALLOC_LIMIT`)
+
+ type MemoryKind = !;
+
+ type MemoryExtra = ();
+
+ fn load_mir(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx Body<'tcx>> {
+ throw_machine_stop_str!("calling functions isn't supported in ConstProp")
+ }
+
+ fn find_mir_or_eval_fn(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::Instance<'tcx>,
+ _abi: Abi,
+ _args: &[OpTy<'tcx>],
+ _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, ty::Instance<'tcx>)>> {
+ Ok(None)
+ }
+
+ fn call_intrinsic(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::Instance<'tcx>,
+ _args: &[OpTy<'tcx>],
+ _ret: Option<(&PlaceTy<'tcx>, BasicBlock)>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx> {
+ throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp")
+ }
+
+ fn assert_panic(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _msg: &rustc_middle::mir::AssertMessage<'tcx>,
+ _unwind: Option<rustc_middle::mir::BasicBlock>,
+ ) -> InterpResult<'tcx> {
+ bug!("panics terminators are not evaluated in ConstProp")
+ }
+
+ fn binary_ptr_op(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _bin_op: BinOp,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ // We can't do this because aliasing of memory can differ between const eval and llvm
+ throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
+ }
+
+ fn access_local(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
+ local: Local,
+ ) -> InterpResult<'tcx, InterpOperand<Self::PointerTag>> {
+ let l = &frame.locals[local];
+
+ if l.value == LocalValue::Unallocated {
+ throw_machine_stop_str!("tried to access an uninitialized local")
+ }
+
+ l.access()
+ }
+
+ fn access_local_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ frame: usize,
+ local: Local,
+ ) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
+ {
+ if ecx.machine.can_const_prop[local] == ConstPropMode::NoPropagation {
+ throw_machine_stop_str!("tried to write to a local that is marked as not propagatable")
+ }
+ if frame == 0 && ecx.machine.only_propagate_inside_block_locals.contains(local) {
+ trace!(
+ "mutating local {:?} which is restricted to its block. \
+ Will remove it from const-prop after block is finished.",
+ local
+ );
+ ecx.machine.written_only_inside_own_block_locals.insert(local);
+ }
+ ecx.machine.stack[frame].locals[local].access_mut()
+ }
+
+ fn before_access_global(
+ _memory_extra: &(),
+ _alloc_id: AllocId,
+ alloc: ConstAllocation<'tcx, Self::PointerTag, Self::AllocExtra>,
+ _static_def_id: Option<DefId>,
+ is_write: bool,
+ ) -> InterpResult<'tcx> {
+ if is_write {
+ throw_machine_stop_str!("can't write to global");
+ }
+ // If the static allocation is mutable, then we can't const prop it as its content
+ // might be different at runtime.
+ if alloc.inner().mutability == Mutability::Mut {
+ throw_machine_stop_str!("can't access mutable globals in ConstProp");
+ }
+
+ Ok(())
+ }
+
+ #[inline(always)]
+ fn init_frame_extra(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+ Ok(frame)
+ }
+
+ #[inline(always)]
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
+ &ecx.machine.stack
+ }
+
+ #[inline(always)]
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
+ &mut ecx.machine.stack
+ }
+}
+
+/// Finds optimization opportunities on the MIR.
+struct ConstPropagator<'mir, 'tcx> {
+ ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ // FIXME(eddyb) avoid cloning these two fields more than once,
+ // by accessing them through `ecx` instead.
+ source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ // Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
+ // the last known `SourceInfo` here and just keep revisiting it.
+ source_info: Option<SourceInfo>,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl HasDataLayout for ConstPropagator<'_, '_> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
+ fn new(
+ body: &Body<'tcx>,
+ dummy_body: &'mir Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ) -> ConstPropagator<'mir, 'tcx> {
+ let def_id = body.source.def_id();
+ let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+
+ let span = tcx.def_span(def_id);
+ // FIXME: `CanConstProp::check` computes the layout of all locals, return those layouts
+    // so we can write them to `ecx.frame_mut().locals.layout`, reducing the duplication in
+ // `layout_of` query invocations.
+ let can_const_prop = CanConstProp::check(tcx, param_env, body);
+ let mut only_propagate_inside_block_locals = BitSet::new_empty(can_const_prop.len());
+ for (l, mode) in can_const_prop.iter_enumerated() {
+ if *mode == ConstPropMode::OnlyInsideOwnBlock {
+ only_propagate_inside_block_locals.insert(l);
+ }
+ }
+ let mut ecx = InterpCx::new(
+ tcx,
+ span,
+ param_env,
+ ConstPropMachine::new(only_propagate_inside_block_locals, can_const_prop),
+ (),
+ );
+
+ let ret = ecx
+ .layout_of(body.return_ty().subst(tcx, substs))
+ .ok()
+ // Don't bother allocating memory for ZST types which have no values
+ // or for large values.
+ .filter(|ret_layout| {
+ !ret_layout.is_zst() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+ })
+ .map(|ret_layout| {
+ ecx.allocate(ret_layout, MemoryKind::Stack)
+ .expect("couldn't perform small allocation")
+ .into()
+ });
+
+ ecx.push_stack_frame(
+ Instance::new(def_id, substs),
+ dummy_body,
+ ret.as_ref(),
+ StackPopCleanup::Root { cleanup: false },
+ )
+ .expect("failed to push initial stack frame");
+
+ ConstPropagator {
+ ecx,
+ tcx,
+ param_env,
+ // FIXME(eddyb) avoid cloning these two fields more than once,
+ // by accessing them through `ecx` instead.
+ source_scopes: body.source_scopes.clone(),
+ //FIXME(wesleywiser) we can't steal this because `Visitor::super_visit_body()` needs it
+ local_decls: body.local_decls.clone(),
+ source_info: None,
+ }
+ }
+
+ fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+ let op = match self.ecx.eval_place_to_op(place, None) {
+ Ok(op) => op,
+ Err(e) => {
+ trace!("get_const failed: {}", e);
+ return None;
+ }
+ };
+
+ // Try to read the local as an immediate so that if it is representable as a scalar, we can
+ // handle it as such, but otherwise, just return the value as is.
+ Some(match self.ecx.try_read_immediate(&op) {
+ Ok(Ok(imm)) => imm.into(),
+ _ => op,
+ })
+ }
+
+ /// Remove `local` from the pool of `Locals`. Allows writing to them,
+ /// but not reading from them anymore.
+ fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
+ ecx.frame_mut().locals[local] =
+ LocalState { value: LocalValue::Unallocated, layout: Cell::new(None) };
+ }
+
+ fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> {
+ source_info.scope.lint_root(&self.source_scopes)
+ }
+
+ fn use_ecx<F, T>(&mut self, f: F) -> Option<T>
+ where
+ F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
+ {
+ match f(self) {
+ Ok(val) => Some(val),
+ Err(error) => {
+ trace!("InterpCx operation failed: {:?}", error);
+ // Some errors shouldn't come up because creating them causes
+ // an allocation, which we should avoid. When that happens,
+ // dedicated error variants should be introduced instead.
+ assert!(
+ !error.kind().formatted_string(),
+ "const-prop encountered formatting error: {}",
+ error
+ );
+ None
+ }
+ }
+ }
+
+ /// Returns the value, if any, of evaluating `c`.
+ fn eval_constant(&mut self, c: &Constant<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ // FIXME we need to revisit this for #67176
+ if c.needs_subst() {
+ return None;
+ }
+
+ match self.ecx.mir_const_to_op(&c.literal, None) {
+ Ok(op) => Some(op),
+ Err(error) => {
+ let tcx = self.ecx.tcx.at(c.span);
+ let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
+ if let Some(lint_root) = self.lint_root(source_info) {
+ let lint_only = match c.literal {
+ ConstantKind::Ty(ct) => match ct.val() {
+ // Promoteds must lint and not error as the user didn't ask for them
+ ConstKind::Unevaluated(ty::Unevaluated {
+ def: _,
+ substs: _,
+ promoted: Some(_),
+ }) => true,
+ // Out of backwards compatibility we cannot report hard errors in unused
+ // generic functions using associated constants of the generic parameters.
+ _ => c.literal.needs_subst(),
+ },
+ ConstantKind::Val(_, ty) => ty.needs_subst(),
+ };
+ if lint_only {
+ // Out of backwards compatibility we cannot report hard errors in unused
+ // generic functions using associated constants of the generic parameters.
+ err.report_as_lint(tcx, "erroneous constant used", lint_root, Some(c.span));
+ } else {
+ err.report_as_error(tcx, "erroneous constant used");
+ }
+ } else {
+ err.report_as_error(tcx, "erroneous constant used");
+ }
+ None
+ }
+ }
+ }
+
+ /// Returns the value, if any, of evaluating `place`.
+ fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+ trace!("eval_place(place={:?})", place);
+ self.use_ecx(|this| this.ecx.eval_place_to_op(place, None))
+ }
+
+ /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
+ /// or `eval_place`, depending on the variant of `Operand` used.
+ fn eval_operand(&mut self, op: &Operand<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ match *op {
+ Operand::Constant(ref c) => self.eval_constant(c, source_info),
+ Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
+ }
+ }
+
+ fn report_assert_as_lint(
+ &self,
+ lint: &'static lint::Lint,
+ source_info: SourceInfo,
+ message: &'static str,
+ panic: AssertKind<impl std::fmt::Debug>,
+ ) {
+ if let Some(lint_root) = self.lint_root(source_info) {
+ self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, |lint| {
+ let mut err = lint.build(message);
+ err.span_label(source_info.span, format!("{:?}", panic));
+ err.emit();
+ });
+ }
+ }
+
+ fn check_unary_op(
+ &mut self,
+ op: UnOp,
+ arg: &Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> Option<()> {
+ if let (val, true) = self.use_ecx(|this| {
+ let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
+ let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
+ Ok((val, overflow))
+ })? {
+ // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
+ // appropriate to use.
+ assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::OverflowNeg(val.to_const_int()),
+ );
+ return None;
+ }
+
+ Some(())
+ }
+
+ fn check_binary_op(
+ &mut self,
+ op: BinOp,
+ left: &Operand<'tcx>,
+ right: &Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> Option<()> {
+ let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
+ let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
+ // Check for exceeding shifts *even if* we cannot evaluate the LHS.
+ if op == BinOp::Shr || op == BinOp::Shl {
+ let r = r?;
+ // We need the type of the LHS. We cannot use `place_layout` as that is the type
+ // of the result, which for checked binops is not the same!
+ let left_ty = left.ty(&self.local_decls, self.tcx);
+ let left_size = self.ecx.layout_of(left_ty).ok()?.size;
+ let right_size = r.layout.size;
+ let r_bits = r.to_scalar().ok();
+ let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+ if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
+ debug!("check_binary_op: reporting assert for {:?}", source_info);
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::Overflow(
+ op,
+ match l {
+ Some(l) => l.to_const_int(),
+ // Invent a dummy value, the diagnostic ignores it anyway
+ None => ConstInt::new(
+ ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
+ left_ty.is_signed(),
+ left_ty.is_ptr_sized_integral(),
+ ),
+ },
+ r.to_const_int(),
+ ),
+ );
+ return None;
+ }
+ }
+
+ if let (Some(l), Some(r)) = (&l, &r) {
+ // The remaining operators are handled through `overflowing_binary_op`.
+ if self.use_ecx(|this| {
+ let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
+ Ok(overflow)
+ })? {
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
+ );
+ return None;
+ }
+ }
+ Some(())
+ }
+
+ fn const_prop(
+ &mut self,
+ rvalue: &Rvalue<'tcx>,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ ) -> Option<()> {
+ // Perform any special handling for specific Rvalue types.
+ // Generally, checks here fall into one of two categories:
+ // 1. Additional checking to provide useful lints to the user
+ // - In this case, we will do some validation and then fall through to the
+ // end of the function which evals the assignment.
+ // 2. Working around bugs in other parts of the compiler
+ // - In this case, we'll return `None` from this function to stop evaluation.
+ match rvalue {
+ // Additional checking: give lints to the user if an overflow would occur.
+ // We do this here and not in the `Assert` terminator as that terminator is
+ // only sometimes emitted (overflow checks can be disabled), but we want to always
+ // lint.
+ Rvalue::UnaryOp(op, arg) => {
+ trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
+ self.check_unary_op(*op, arg, source_info)?;
+ }
+ Rvalue::BinaryOp(op, box (left, right)) => {
+ trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
+ self.check_binary_op(*op, left, right, source_info)?;
+ }
+ Rvalue::CheckedBinaryOp(op, box (left, right)) => {
+ trace!(
+ "checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
+ op,
+ left,
+ right
+ );
+ self.check_binary_op(*op, left, right, source_info)?;
+ }
+
+ // Do not try creating references (#67862)
+ Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => {
+ trace!("skipping AddressOf | Ref for {:?}", place);
+
+ // This may be creating mutable references or immutable references to cells.
+ // If that happens, the pointed to value could be mutated via that reference.
+ // Since we aren't tracking references, the const propagator loses track of what
+ // value the local has right now.
+ // Thus, all locals that have their reference taken
+ // must not take part in propagation.
+ Self::remove_const(&mut self.ecx, place.local);
+
+ return None;
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ trace!("skipping ThreadLocalRef({:?})", def_id);
+
+ return None;
+ }
+
+ // There's no other checking to do at this time.
+ Rvalue::Aggregate(..)
+ | Rvalue::Use(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Len(..)
+ | Rvalue::Cast(..)
+ | Rvalue::ShallowInitBox(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::NullaryOp(..) => {}
+ }
+
+ // FIXME we need to revisit this for #67176
+ if rvalue.needs_subst() {
+ return None;
+ }
+
+ self.use_ecx(|this| this.ecx.eval_rvalue_into_place(rvalue, place))
+ }
+}
+
+/// The mode that `ConstProp` is allowed to run in for a given `Local`.
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum ConstPropMode {
+ /// The `Local` can be propagated into and reads of this `Local` can also be propagated.
+ FullConstProp,
+ /// The `Local` can only be propagated into and from its own block.
+ OnlyInsideOwnBlock,
+ /// The `Local` can be propagated into but reads cannot be propagated.
+ OnlyPropagateInto,
+ /// The `Local` cannot be part of propagation at all. Any statement
+ /// referencing it either for reading or writing will not get propagated.
+ NoPropagation,
+}
+
+struct CanConstProp {
+ can_const_prop: IndexVec<Local, ConstPropMode>,
+ // False at the beginning. Once set, no more assignments are allowed to that local.
+ found_assignment: BitSet<Local>,
+ // Cache of locals' information
+ local_kinds: IndexVec<Local, LocalKind>,
+}
+
+impl CanConstProp {
+    /// Computes the `ConstPropMode` each local in `body` may be propagated with.
+ fn check<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ ) -> IndexVec<Local, ConstPropMode> {
+ let mut cpv = CanConstProp {
+ can_const_prop: IndexVec::from_elem(ConstPropMode::FullConstProp, &body.local_decls),
+ found_assignment: BitSet::new_empty(body.local_decls.len()),
+ local_kinds: IndexVec::from_fn_n(
+ |local| body.local_kind(local),
+ body.local_decls.len(),
+ ),
+ };
+ for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
+ let ty = body.local_decls[local].ty;
+ match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {}
+ // Either the layout fails to compute, then we can't use this local anyway
+ // or the local is too large, then we don't want to.
+ _ => {
+ *val = ConstPropMode::NoPropagation;
+ continue;
+ }
+ }
+ // Cannot use args at all
+ // Cannot use locals because if x < y { y - x } else { x - y } would
+ // lint for x != y
+ // FIXME(oli-obk): lint variables until they are used in a condition
+ // FIXME(oli-obk): lint if return value is constant
+ if cpv.local_kinds[local] == LocalKind::Arg {
+ *val = ConstPropMode::OnlyPropagateInto;
+ trace!(
+ "local {:?} can't be const propagated because it's a function argument",
+ local
+ );
+ } else if cpv.local_kinds[local] == LocalKind::Var {
+ *val = ConstPropMode::OnlyInsideOwnBlock;
+ trace!(
+ "local {:?} will only be propagated inside its block, because it's a user variable",
+ local
+ );
+ }
+ }
+ cpv.visit_body(&body);
+ cpv.can_const_prop
+ }
+}
+
+impl Visitor<'_> for CanConstProp {
+ fn visit_local(&mut self, &local: &Local, context: PlaceContext, _: Location) {
+ use rustc_middle::mir::visit::PlaceContext::*;
+ match context {
+ // Projections are fine, because `&mut foo.x` will be caught by
+ // `MutatingUseContext::Borrow` elsewhere.
+ MutatingUse(MutatingUseContext::Projection)
+ // These are just stores, where the storing is not propagatable, but there may be later
+ // mutations of the same local via `Store`
+ | MutatingUse(MutatingUseContext::Call)
+ | MutatingUse(MutatingUseContext::AsmOutput)
+ // Actual store that can possibly even propagate a value
+ | MutatingUse(MutatingUseContext::Store) => {
+ if !self.found_assignment.insert(local) {
+ match &mut self.can_const_prop[local] {
+ // If the local can only get propagated in its own block, then we don't have
+ // to worry about multiple assignments, as we'll nuke the const state at the
+ // end of the block anyway, and inside the block we overwrite previous
+ // states as applicable.
+ ConstPropMode::OnlyInsideOwnBlock => {}
+ ConstPropMode::NoPropagation => {}
+ ConstPropMode::OnlyPropagateInto => {}
+ other @ ConstPropMode::FullConstProp => {
+ trace!(
+ "local {:?} can't be propagated because of multiple assignments. Previous state: {:?}",
+ local, other,
+ );
+ *other = ConstPropMode::OnlyInsideOwnBlock;
+ }
+ }
+ }
+ }
+ // Reading constants is allowed an arbitrary number of times
+ NonMutatingUse(NonMutatingUseContext::Copy)
+ | NonMutatingUse(NonMutatingUseContext::Move)
+ | NonMutatingUse(NonMutatingUseContext::Inspect)
+ | NonMutatingUse(NonMutatingUseContext::Projection)
+ | NonUse(_) => {}
+
+ // These could be propagated with a smarter analysis or just some careful thinking about
+ // whether they'd be fine right now.
+ MutatingUse(MutatingUseContext::Yield)
+ | MutatingUse(MutatingUseContext::Drop)
+ | MutatingUse(MutatingUseContext::Retag)
+ // These can't ever be propagated under any scheme, as we can't reason about indirect
+ // mutation.
+ | NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+ | NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ | NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+ | NonMutatingUse(NonMutatingUseContext::AddressOf)
+ | MutatingUse(MutatingUseContext::Borrow)
+ | MutatingUse(MutatingUseContext::AddressOf) => {
+                trace!("local {:?} can't be propagated because it's used: {:?}", local, context);
+ self.can_const_prop[local] = ConstPropMode::NoPropagation;
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
+ fn visit_body(&mut self, body: &Body<'tcx>) {
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ self.visit_basic_block_data(bb, data);
+ }
+ }
+
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+ }
+
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ trace!("visit_constant: {:?}", constant);
+ self.super_constant(constant, location);
+ self.eval_constant(constant, self.source_info.unwrap());
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ trace!("visit_statement: {:?}", statement);
+ let source_info = statement.source_info;
+ self.source_info = Some(source_info);
+ if let StatementKind::Assign(box (place, ref rval)) = statement.kind {
+ let can_const_prop = self.ecx.machine.can_const_prop[place.local];
+ if let Some(()) = self.const_prop(rval, source_info, place) {
+ match can_const_prop {
+ ConstPropMode::OnlyInsideOwnBlock => {
+ trace!(
+ "found local restricted to its block. \
+ Will remove it from const-prop after block is finished. Local: {:?}",
+ place.local
+ );
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ trace!("can't propagate into {:?}", place);
+ if place.local != RETURN_PLACE {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::FullConstProp => {}
+ }
+ } else {
+ // Const prop failed, so erase the destination, ensuring that whatever happens
+ // from here on, does not know about the previous value.
+ // This is important in case we have
+ // ```rust
+ // let mut x = 42;
+ // x = SOME_MUTABLE_STATIC;
+ // // x must now be uninit
+ // ```
+ // FIXME: we overzealously erase the entire local, because that's easier to
+ // implement.
+ trace!(
+ "propagation into {:?} failed.
+ Nuking the entire site from orbit, it's the only way to be sure",
+ place,
+ );
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ } else {
+ match statement.kind {
+ StatementKind::SetDiscriminant { ref place, .. } => {
+ match self.ecx.machine.can_const_prop[place.local] {
+ ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => {
+ if self.use_ecx(|this| this.ecx.statement(statement)).is_some() {
+ trace!("propped discriminant into {:?}", place);
+ } else {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ }
+ StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+ let frame = self.ecx.frame_mut();
+ frame.locals[local].value =
+ if let StatementKind::StorageLive(_) = statement.kind {
+ LocalValue::Unallocated
+ } else {
+ LocalValue::Dead
+ };
+ }
+ _ => {}
+ }
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ let source_info = terminator.source_info;
+ self.source_info = Some(source_info);
+ self.super_terminator(terminator, location);
+ match &terminator.kind {
+ TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
+ if let Some(ref value) = self.eval_operand(&cond, source_info) {
+ trace!("assertion on {:?} should be {:?}", value, expected);
+ let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+ let value_const = self.ecx.read_scalar(&value).unwrap();
+ if expected != value_const {
+ enum DbgVal<T> {
+ Val(T),
+ Underscore,
+ }
+ impl<T: std::fmt::Debug> std::fmt::Debug for DbgVal<T> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Val(val) => val.fmt(fmt),
+ Self::Underscore => fmt.write_str("_"),
+ }
+ }
+ }
+ let mut eval_to_int = |op| {
+ // This can be `None` if the lhs wasn't const propagated and we just
+ // triggered the assert on the value of the rhs.
+ self.eval_operand(op, source_info).map_or(DbgVal::Underscore, |op| {
+ DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
+ })
+ };
+ let msg = match msg {
+ AssertKind::DivisionByZero(op) => {
+ Some(AssertKind::DivisionByZero(eval_to_int(op)))
+ }
+ AssertKind::RemainderByZero(op) => {
+ Some(AssertKind::RemainderByZero(eval_to_int(op)))
+ }
+ AssertKind::Overflow(bin_op @ (BinOp::Div | BinOp::Rem), op1, op2) => {
+ // Division overflow is *UB* in the MIR, and different than the
+ // other overflow checks.
+ Some(AssertKind::Overflow(
+ *bin_op,
+ eval_to_int(op1),
+ eval_to_int(op2),
+ ))
+ }
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = eval_to_int(len);
+ let index = eval_to_int(index);
+ Some(AssertKind::BoundsCheck { len, index })
+ }
+ // Remaining overflow errors are already covered by checks on the binary operators.
+ AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => None,
+ // Need proper const propagator for these.
+ _ => None,
+ };
+ // Poison all places this operand references so that further code
+ // doesn't use the invalid value
+ match cond {
+ Operand::Move(ref place) | Operand::Copy(ref place) => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ Operand::Constant(_) => {}
+ }
+ if let Some(msg) = msg {
+ self.report_assert_as_lint(
+ lint::builtin::UNCONDITIONAL_PANIC,
+ source_info,
+ "this operation will panic at runtime",
+ msg,
+ );
+ }
+ }
+ }
+ }
+ // None of these have Operands to const-propagate.
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::InlineAsm { .. } => {}
+ }
+
+ // We remove all Locals which are restricted in propagation to their containing blocks and
+ // which were modified in the current block.
+ // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`.
+ let mut locals = std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals);
+ for &local in locals.iter() {
+ Self::remove_const(&mut self.ecx, local);
+ }
+ locals.clear();
+ // Put it back so we reuse the heap of the storage
+ self.ecx.machine.written_only_inside_own_block_locals = locals;
+ if cfg!(debug_assertions) {
+ // Ensure we are correctly erasing locals with the non-debug-assert logic.
+ for local in self.ecx.machine.only_propagate_inside_block_locals.iter() {
+ assert!(
+ self.get_const(local.into()).is_none()
+ || self
+ .layout_of(self.local_decls[local].ty)
+ .map_or(true, |layout| layout.is_zst())
+ )
+ }
+ }
+ }
+}
pub struct Deaggregator;
impl<'tcx> MirPass<'tcx> for Deaggregator {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::Deaggregated)
+ }
+
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
let (basic_blocks, local_decls) = body.basic_blocks_and_local_decls_mut();
let local_decls = &*local_decls;
impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
- sess.mir_opt_level() >= 2
+ sess.mir_opt_level() >= 3 && sess.opts.debugging_opts.unsound_mir_opts
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
/// Returns true if computing the discriminant of `place` may be hoisted out of the branch
fn may_hoist<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, place: Place<'tcx>) -> bool {
+ // FIXME(JakobDegen): This is unsound. Someone could write code like this:
+ // ```rust
+ // let Q = val;
+ // if discriminant(P) == otherwise {
+ // let ptr = &mut Q as *mut _ as *mut u8;
+ // unsafe { *ptr = 10; } // Any invalid value for the type
+ // }
+ //
+ // match P {
+ // A => match Q {
+ // A => {
+ // // code
+ // }
+ // _ => {
+ // // don't use Q
+ // }
+ // }
+ // _ => {
+ // // don't use Q
+ // }
+ // };
+ // ```
+ //
+ // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an
+ // invalid value, which is UB.
+ //
+ // In order to fix this, we would either need to show that the discriminant computation of
+ // `place` is computed in all branches, including the `otherwise` branch, or we would need
+ // another analysis pass to determine that the place is fully initialized. It might even be best
+ // to have the hoisting be performed in a different pass and just do the CFG changing in this
+ // pass.
for (place, proj) in place.iter_projections() {
match proj {
// Dereferencing in the computation of `place` might cause issues from one of two
if branch.statements.len() != 1 {
return false;
}
- // ...assign the descriminant of `place` in that statement
+ // ...assign the discriminant of `place` in that statement
let StatementKind::Assign(boxed) = &branch.statements[0].kind else {
return false
};
impl<'tcx> MirPass<'tcx> for ElaborateDrops {
fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::DropLowering)
+ Some(MirPhase::DropsLowered)
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
struct TransformVisitor<'tcx> {
tcx: TyCtxt<'tcx>,
- state_adt_ref: &'tcx AdtDef,
+ state_adt_ref: AdtDef<'tcx>,
state_substs: SubstsRef<'tcx>,
// The type of the discriminant in the generator struct
val: Operand<'tcx>,
source_info: SourceInfo,
) -> impl Iterator<Item = Statement<'tcx>> {
- let kind = AggregateKind::Adt(self.state_adt_ref.did, idx, self.state_substs, None, None);
- assert_eq!(self.state_adt_ref.variants[idx].fields.len(), 1);
+ let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_substs, None, None);
+ assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
let ty = self
.tcx
- .type_of(self.state_adt_ref.variants[idx].fields[0].did)
+ .type_of(self.state_adt_ref.variant(idx).fields[0].did)
.subst(self.tcx, self.state_substs);
expand_aggregate(
Place::return_place(),
impl<'tcx> MirPass<'tcx> for StateTransform {
fn phase_change(&self) -> Option<MirPhase> {
- Some(MirPhase::GeneratorLowering)
+ Some(MirPhase::GeneratorsLowered)
}
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
mod const_debuginfo;
mod const_goto;
mod const_prop;
+mod const_prop_lint;
mod coverage;
mod deaggregator;
mod deduplicate_blocks;
pm::run_passes(
tcx,
&mut body,
- &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimization)],
+ &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimized)],
);
}
}
}
run_post_borrowck_cleanup_passes(tcx, &mut body);
- assert!(body.phase == MirPhase::DropLowering);
+ assert!(body.phase == MirPhase::Deaggregated);
tcx.alloc_steal_mir(body)
}
// `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
// and it can help optimizations.
&deaggregator::Deaggregator,
+ &Lint(const_prop_lint::ConstProp),
];
pm::run_passes(tcx, body, post_borrowck_cleanup);
],
);
- assert!(body.phase == MirPhase::GeneratorLowering);
+ assert!(body.phase == MirPhase::GeneratorsLowered);
// The main optimizations that we do on MIR.
pm::run_passes(
&deduplicate_blocks::DeduplicateBlocks,
// Some cleanup necessary at least for LLVM and potentially other codegen backends.
&add_call_guards::CriticalCallEdges,
- &marker::PhaseChange(MirPhase::Optimization),
+ &marker::PhaseChange(MirPhase::Optimized),
// Dump the end result for testing and debugging purposes.
&dump_mir::Marker("PreCodegen"),
],
}
}
- if validate || body.phase == MirPhase::Optimization {
+ if validate || body.phase == MirPhase::Optimized {
validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase));
}
}
//
// If its projection *is* present in `MoveData`, then the field may have been moved
// from separate from its parent. Recurse.
- adt.variants.iter_enumerated().any(|(vid, variant)| {
+ adt.variants().iter_enumerated().any(|(vid, variant)| {
// Enums have multiple variants, which are discriminated with a `Downcast` projection.
// Structs have a single variant, and don't use a `Downcast` projection.
let mpi = if adt.is_enum() {
let statements = expand_aggregate(
Place::return_place(),
- adt_def.variants[variant_index].fields.iter().enumerate().map(|(idx, field_def)| {
+ adt_def.variant(variant_index).fields.iter().enumerate().map(|(idx, field_def)| {
(Operand::Move(Place::from(Local::new(idx + 1))), field_def.ty(tcx, substs))
}),
- AggregateKind::Adt(adt_def.did, variant_index, substs, None, None),
+ AggregateKind::Adt(adt_def.did(), variant_index, substs, None, None),
source_info,
tcx,
)
return false;
} else if last_assigned_to != opt_info.local_tmp_s1 {
trace!(
- "NO: end of assignemnt chain does not match written enum temp: {:?} != {:?}",
+ "NO: end of assignment chain does not match written enum temp: {:?} != {:?}",
last_assigned_to,
opt_info.local_tmp_s1
);
);
return StatementEquality::NotEqual;
}
- let variant_is_fieldless = adt.variants[variant_index].fields.is_empty();
+ let variant_is_fieldless = adt.variant(variant_index).fields.is_empty();
if !variant_is_fieldless {
trace!("NO: variant {:?} was not fieldless", variant_index);
return StatementEquality::NotEqual;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::{par_iter, MTLock, MTRef, ParallelIterator};
-use rustc_errors::{ErrorGuaranteed, FatalError};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
if let Some(path) = written_to_path {
err.note(&format!("the full type name has been written to '{}'", path.display()));
}
- err.emit();
- FatalError.raise();
+ err.emit()
}
recursion_depths.insert(def_id, recursion_depth + 1);
"consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate",
type_length
));
- diag.emit();
- tcx.sess.abort_if_errors();
+ diag.emit()
}
}
match self.tcx.const_eval_resolve(param_env, ct, None) {
// The `monomorphize` call should have evaluated that constant already.
Ok(val) => val,
- Err(ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted) => {
- return;
- }
+ Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => return,
Err(ErrorHandled::TooGeneric) => span_bug!(
self.body.source_info(location).span,
"collection encountered polymorphic constant: {:?}",
substituted_constant,
val
),
- Err(ErrorHandled::Reported(ErrorGuaranteed) | ErrorHandled::Linted) => {}
+ Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => {}
Err(ErrorHandled::TooGeneric) => span_bug!(
self.body.source_info(location).span,
"collection encountered polymorphic constant: {}",
|lint| {
let mut err = lint.build(&format!("moving {} bytes", layout.size.bytes()));
err.span_label(source_info.span, "value moved from here");
- err.emit()
+ err.emit();
},
);
}
use rustc_ast::token::{self, CommentKind, Token, TokenKind};
use rustc_ast::tokenstream::{Spacing, TokenStream};
use rustc_ast::util::unicode::contains_text_flow_control_chars;
-use rustc_errors::{
- error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, PResult,
-};
+use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
use rustc_lexer::unescape::{self, Mode};
use rustc_lexer::{Base, DocStyle, RawStrError};
use rustc_session::lint::builtin::{
}
/// Report a fatal lexical error with a given span.
- fn fatal_span(&self, sp: Span, m: &str) -> FatalError {
+ fn fatal_span(&self, sp: Span, m: &str) -> ! {
self.sess.span_diagnostic.span_fatal(sp, m)
}
}
/// Report a fatal error spanning [`from_pos`, `to_pos`).
- fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> FatalError {
+ fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> ! {
self.fatal_span(self.mk_sp(from_pos, to_pos), m)
}
to_pos: BytePos,
m: &str,
c: char,
- ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'a, !> {
self.sess
.span_diagnostic
.struct_span_fatal(self.mk_sp(from_pos, to_pos), &format!("{}: {}", m, escaped_char(c)))
}
+ fn struct_err_span_char(
+ &self,
+ from_pos: BytePos,
+ to_pos: BytePos,
+ m: &str,
+ c: char,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.sess
+ .span_diagnostic
+ .struct_span_err(self.mk_sp(from_pos, to_pos), &format!("{}: {}", m, escaped_char(c)))
+ }
+
/// Detect usages of Unicode codepoints changing the direction of the text on screen and loudly
/// complain about it.
fn lint_unicode_text_flow(&self, start: BytePos) {
rustc_lexer::TokenKind::Unknown | rustc_lexer::TokenKind::InvalidIdent => {
let c = self.str_from(start).chars().next().unwrap();
let mut err =
- self.struct_fatal_span_char(start, self.pos, "unknown start of token", c);
+ self.struct_err_span_char(start, self.pos, "unknown start of token", c);
// FIXME: the lexer could be used to turn the ASCII version of unicode homoglyphs,
// instead of keeping a table in `check_for_substitution`into the token. Ideally,
// this should be inside `rustc_lexer`. However, we should first remove compound
"found invalid character; only `#` is allowed in raw string delimitation",
bad_char,
)
- .emit();
- FatalError.raise()
+ .emit()
}
fn report_unterminated_raw_string(
);
}
- err.emit();
- FatalError.raise()
+ err.emit()
}
// RFC 3101 introduced the idea of (reserved) prefixes. As of Rust 2021,
found
),
)
- .raise();
}
fn validate_literal_escape(
diag.emit();
}
EscapeError::TooShortHexEscape => {
- handler.span_err(span, "numeric character escape is too short")
+ handler.span_err(span, "numeric character escape is too short");
}
EscapeError::InvalidCharInHexEscape | EscapeError::InvalidCharInUnicodeEscape => {
let (c, span) = last_char();
#![feature(if_let_guard)]
#![feature(let_chains)]
#![feature(let_else)]
+#![feature(never_type)]
#![recursion_limit = "256"]
#[macro_use]
match $e {
Ok(e) => e,
Err(errs) => {
- for e in errs {
- $handler.emit_diagnostic(&e);
+ for mut e in errs {
+ $handler.emit_diagnostic(&mut e);
}
FatalError.raise()
}
fn file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>) -> Lrc<SourceFile> {
match try_file_to_source_file(sess, path, spanopt) {
Ok(source_file) => source_file,
- Err(d) => {
- sess.span_diagnostic.emit_diagnostic(&d);
+ Err(mut d) => {
+ sess.span_diagnostic.emit_diagnostic(&mut d);
FatalError.raise();
}
}
Nonterminal::NtMeta(ref attr) => convert_tokens(attr.tokens.as_ref()),
Nonterminal::NtPath(ref path) => convert_tokens(path.tokens.as_ref()),
Nonterminal::NtVis(ref vis) => convert_tokens(vis.tokens.as_ref()),
- Nonterminal::NtTT(ref tt) => Some(tt.clone().into()),
Nonterminal::NtExpr(ref expr) | Nonterminal::NtLiteral(ref expr) => {
prepend_attrs(&expr.attrs, expr.tokens.as_ref())
}
pub fn fake_token_stream_for_crate(sess: &ParseSess, krate: &ast::Crate) -> TokenStream {
let source = pprust::crate_to_string_for_macros(krate);
let filename = FileName::macro_expansion_source_code(&source);
- parse_stream_from_source_str(filename, source, sess, Some(krate.span))
+ parse_stream_from_source_str(filename, source, sess, Some(krate.spans.inner_span))
}
pub fn parse_cfg_attr(
Ok(attr::mk_attr_from_item(item, None, style, attr_sp))
} else {
let token_str = pprust::token_to_string(&this.token);
- let msg = &format!("expected `#`, found `{}`", token_str);
+ let msg = &format!("expected `#`, found `{token_str}`");
Err(this.struct_span_err(this.token.span, msg))
}
})
span: Span,
attr_type: OuterAttributeType,
) -> Option<Span> {
- let mut snapshot = self.clone();
+ let mut snapshot = self.create_snapshot_for_diagnostic();
let lo = span.lo()
+ BytePos(match attr_type {
OuterAttributeType::Attribute => 1,
}
let found = pprust::token_to_string(&self.token);
- let msg = format!("expected unsuffixed literal or identifier, found `{}`", found);
+ let msg = format!("expected unsuffixed literal or identifier, found `{found}`");
Err(self.struct_span_err(self.token.span, &msg))
}
}
impl CreateTokenStream for LazyTokenStreamImpl {
fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
- // The token produced by the final call to `next` or `next_desugared`
- // was not actually consumed by the callback. The combination
- // of chaining the initial token and using `take` produces the desired
- // result - we produce an empty `TokenStream` if no calls were made,
- // and omit the final token otherwise.
+ // The token produced by the final call to `{,inlined_}next` or
+ // `{,inlined_}next_desugared` was not actually consumed by the
+ // callback. The combination of chaining the initial token and using
+ // `take` produces the desired result - we produce an empty
+ // `TokenStream` if no calls were made, and omit the final token
+ // otherwise.
let mut cursor_snapshot = self.cursor_snapshot.clone();
let tokens =
std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
SemiColonMode, SeqSep, TokenExpectType, TokenType,
};
+use crate::lexer::UnmatchedBrace;
use rustc_ast as ast;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Lit, LitKind, TokenKind};
use rustc_span::source_map::Spanned;
use rustc_span::symbol::{kw, Ident};
use rustc_span::{MultiSpan, Span, SpanSnippetError, DUMMY_SP};
+use std::ops::{Deref, DerefMut};
use std::mem::take;
}
}
+// SnapshotParser is used to create a snapshot of the parser
+// without causing duplicate errors to be emitted when the `Parser`
+// is dropped.
+pub(super) struct SnapshotParser<'a> {
+ parser: Parser<'a>,
+ unclosed_delims: Vec<UnmatchedBrace>,
+}
+
+impl<'a> Deref for SnapshotParser<'a> {
+ type Target = Parser<'a>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.parser
+ }
+}
+
+impl<'a> DerefMut for SnapshotParser<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.parser
+ }
+}
+
impl<'a> Parser<'a> {
pub(super) fn span_err<S: Into<MultiSpan>>(
&self,
&self.sess.span_diagnostic
}
+ /// Replace `self` with `snapshot.parser` and extend `unclosed_delims` with `snapshot.unclosed_delims`.
+ /// This is to avoid losing unclosed delims errors `create_snapshot_for_diagnostic` clears.
+ pub(super) fn restore_snapshot(&mut self, snapshot: SnapshotParser<'a>) {
+ *self = snapshot.parser;
+ self.unclosed_delims.extend(snapshot.unclosed_delims.clone());
+ }
+
+ /// Create a snapshot of the `Parser`.
+ pub(super) fn create_snapshot_for_diagnostic(&self) -> SnapshotParser<'a> {
+ let mut snapshot = self.clone();
+ let unclosed_delims = self.unclosed_delims.clone();
+ // Clear `unclosed_delims` in snapshot to avoid
+ // duplicate errors being emitted when the `Parser`
+ // is dropped (which may or may not happen, depending
+ // on whether the parsing the snapshot was created for succeeds)
+ snapshot.unclosed_delims.clear();
+ SnapshotParser { parser: snapshot, unclosed_delims }
+ }
+
pub(super) fn span_to_snippet(&self, span: Span) -> Result<String, SpanSnippetError> {
self.sess.source_map().span_to_snippet(span)
}
expect.clone()
};
(
- format!("expected one of {}, found {}", expect, actual),
- (self.prev_token.span.shrink_to_hi(), format!("expected one of {}", short_expect)),
+ format!("expected one of {expect}, found {actual}"),
+ (self.prev_token.span.shrink_to_hi(), format!("expected one of {short_expect}")),
)
} else if expected.is_empty() {
(
)
} else {
(
- format!("expected {}, found {}", expect, actual),
- (self.prev_token.span.shrink_to_hi(), format!("expected {}", expect)),
+ format!("expected {expect}, found {actual}"),
+ (self.prev_token.span.shrink_to_hi(), format!("expected {expect}")),
)
};
self.last_unexpected_token_span = Some(self.token.span);
String::new(),
Applicability::MachineApplicable,
);
- err.note(&format!("the raw string started with {} `#`s", n_hashes));
+ err.note(&format!("the raw string started with {n_hashes} `#`s"));
true
}
_ => false,
// fn foo() -> Foo {
// field: value,
// }
- let mut snapshot = self.clone();
+ let mut snapshot = self.create_snapshot_for_diagnostic();
let path =
Path { segments: vec![], span: self.prev_token.span.shrink_to_lo(), tokens: None };
let struct_expr = snapshot.parse_struct_expr(None, path, AttrVec::new(), false);
Applicability::MaybeIncorrect,
)
.emit();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
let mut tail = self.mk_block(
vec![self.mk_stmt_err(expr.span)],
s,
/// angle brackets.
pub(super) fn check_turbofish_missing_angle_brackets(&mut self, segment: &mut PathSegment) {
if token::ModSep == self.token.kind && segment.args.is_none() {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
self.bump();
let lo = self.token.span;
match self.parse_angle_args(None) {
.emit();
} else {
// This doesn't look like an invalid turbofish, can't recover parse state.
- *self = snapshot;
+ self.restore_snapshot(snapshot);
}
}
Err(err) => {
// We couldn't parse generic parameters, unlikely to be a turbofish. Rely on
// generic parse error instead.
err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
}
}
}
// `x == y < z`
(BinOpKind::Eq, AssocOp::Less | AssocOp::LessEqual | AssocOp::Greater | AssocOp::GreaterEqual) => {
// Consume `z`/outer-op-rhs.
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
match self.parse_expr() {
Ok(r2) => {
// We are sure that outer-op-rhs could be consumed, the suggestion is
}
Err(expr_err) => {
expr_err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
false
}
}
}
// `x > y == z`
(BinOpKind::Lt | BinOpKind::Le | BinOpKind::Gt | BinOpKind::Ge, AssocOp::Equal) => {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
// At this point it is always valid to enclose the lhs in parentheses, no
// further checks are necessary.
match self.parse_expr() {
}
Err(expr_err) => {
expr_err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
false
}
}
|| outer_op.node == AssocOp::Greater
{
if outer_op.node == AssocOp::Less {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
self.bump();
// So far we have parsed `foo<bar<`, consume the rest of the type args.
let modifiers =
{
// We don't have `foo< bar >(` or `foo< bar >::`, so we rewind the
// parser and bail out.
- *self = snapshot.clone();
+ self.restore_snapshot(snapshot);
}
}
return if token::ModSep == self.token.kind {
// `foo< bar >::`
suggest(&mut err);
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
self.bump(); // `::`
// Consume the rest of the likely `foo<bar>::new()` or return at `foo<bar>`.
expr_err.cancel();
// Not entirely sure now, but we bubble the error up with the
// suggestion.
- *self = snapshot;
+ self.restore_snapshot(snapshot);
Err(err)
}
}
}
fn consume_fn_args(&mut self) -> Result<(), ()> {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
self.bump(); // `(`
// Consume the fn call arguments.
if self.token.kind == token::Eof {
// Not entirely sure that what we consumed were fn arguments, rollback.
- *self = snapshot;
+ self.restore_snapshot(snapshot);
Err(())
} else {
// 99% certain that the suggestion is correct, continue parsing.
_ => None,
};
if let Some(name) = previous_item_kind_name {
- err.help(&format!("{} declarations are not followed by a semicolon", name));
+ err.help(&format!("{name} declarations are not followed by a semicolon"));
}
}
err.emit();
"expected `{}`, found {}",
token_str,
match (&self.token.kind, self.subparser_name) {
- (token::Eof, Some(origin)) => format!("end of {}", origin),
+ (token::Eof, Some(origin)) => format!("end of {origin}"),
_ => this_token_str,
},
);
let mut err = self.struct_span_err(sp, &msg);
- let label_exp = format!("expected `{}`", token_str);
+ let label_exp = format!("expected `{token_str}`");
match self.recover_closing_delimiter(&[t.clone()], err) {
Err(e) => err = e,
Ok(recovered) => {
Applicability::MachineApplicable,
);
}
- err.span_suggestion(lo.shrink_to_lo(), &format!("{}you can still access the deprecated `try!()` macro using the \"raw identifier\" syntax", prefix), "r#".to_string(), Applicability::MachineApplicable);
+ err.span_suggestion(lo.shrink_to_lo(), &format!("{prefix}you can still access the deprecated `try!()` macro using the \"raw identifier\" syntax"), "r#".to_string(), Applicability::MachineApplicable);
err.emit();
Ok(self.mk_expr_err(lo.to(hi)))
} else {
delim.retain(|c| c != '`');
err.span_suggestion_short(
self.prev_token.span.shrink_to_hi(),
- &format!("`{}` may belong here", delim),
+ &format!("`{delim}` may belong here"),
delim,
Applicability::MaybeIncorrect,
);
(
ident,
"self: ".to_string(),
- format!("{}: &{}TypeName", ident, mutab),
+ format!("{ident}: &{mutab}TypeName"),
"_: ".to_string(),
pat.span.shrink_to_lo(),
pat.span,
let (span, msg) = match (&self.token.kind, self.subparser_name) {
(&token::Eof, Some(origin)) => {
let sp = self.sess.source_map().next_point(self.prev_token.span);
- (sp, format!("expected expression, found end of {}", origin))
+ (sp, format!("expected expression, found end of {origin}"))
}
_ => (
self.token.span,
}
fn recover_const_param_decl(&mut self, ty_generics: Option<&Generics>) -> Option<GenericArg> {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
let param = match self.parse_const_param(vec![]) {
Ok(param) => param,
Err(err) => {
err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
return None;
}
};
(ty_generics, self.sess.source_map().span_to_snippet(param.span()))
{
let (span, sugg) = match &generics.params[..] {
- [] => (generics.span, format!("<{}>", snippet)),
- [.., generic] => (generic.span().shrink_to_hi(), format!(", {}", snippet)),
+ [] => (generics.span, format!("<{snippet}>")),
+ [.., generic] => (generic.span().shrink_to_hi(), format!(", {snippet}")),
};
err.multipart_suggestion(
"`const` parameters must be declared for the `impl`",
// We perform these checks and early return to avoid taking a snapshot unnecessarily.
return Err(err);
}
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
if is_op_or_dot {
self.bump();
}
let value = self.mk_expr_err(start.to(expr.span));
err.emit();
return Ok(GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value }));
+ } else if token::Colon == snapshot.token.kind
+ && expr.span.lo() == snapshot.token.span.hi()
+ && matches!(expr.kind, ExprKind::Path(..))
+ {
+ // Find a mistake like "foo::var:A".
+ err.span_suggestion(
+ snapshot.token.span,
+ "write a path separator here",
+ "::".to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ return Ok(GenericArg::Type(self.mk_ty(start.to(expr.span), TyKind::Err)));
} else if token::Comma == self.token.kind || self.token.kind.should_end_const_arg()
{
// Avoid the following output by checking that we consumed a full const arg:
err.cancel();
}
}
- *self = snapshot;
+ self.restore_snapshot(snapshot);
Err(err)
}
let span = self.token.span;
// We only emit "unexpected `:`" error here if we can successfully parse the
// whole pattern correctly in that case.
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
// Create error for "unexpected `:`".
match self.expected_one_of_not_found(&[], &[]) {
// reasonable error.
inner_err.cancel();
err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
}
Ok(mut pat) => {
// We've parsed the rest of the pattern.
}
_ => {
// Carry on as if we had not done anything. This should be unreachable.
- *self = snapshot;
+ self.restore_snapshot(snapshot);
}
};
first_pat
Err(err)
}
+ crate fn maybe_recover_bounds_doubled_colon(&mut self, ty: &Ty) -> PResult<'a, ()> {
+ let TyKind::Path(qself, path) = &ty.kind else { return Ok(()) };
+ let qself_position = qself.as_ref().map(|qself| qself.position);
+ for (i, segments) in path.segments.windows(2).enumerate() {
+ if qself_position.map(|pos| i < pos).unwrap_or(false) {
+ continue;
+ }
+ if let [a, b] = segments {
+ let (a_span, b_span) = (a.span(), b.span());
+ let between_span = a_span.shrink_to_hi().to(b_span.shrink_to_lo());
+ if self.span_to_snippet(between_span).as_ref().map(|a| &a[..]) == Ok(":: ") {
+ let mut err = self.struct_span_err(
+ path.span.shrink_to_hi(),
+ "expected `:` followed by trait or lifetime",
+ );
+ err.span_suggestion(
+ between_span,
+ "use single colon",
+ ": ".to_owned(),
+ Applicability::MachineApplicable,
+ );
+ return Err(err);
+ }
+ }
+ }
+ Ok(())
+ }
+
/// Parse and throw away a parenthesized comma separated
/// sequence of patterns until `)` is reached.
fn skip_pat_list(&mut self) -> PResult<'a, ()> {
+use super::diagnostics::SnapshotParser;
use super::pat::{CommaRecoveryMode, RecoverColon, RecoverComma, PARAM_EXPECTED};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
use super::{
- AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions, TokenType,
+ AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions,
+ SemiColonMode, SeqSep, TokenExpectType, TokenType, TrailingToken,
};
-use super::{SemiColonMode, SeqSep, TokenExpectType, TrailingToken};
use crate::maybe_recover_from_interpolated_ty_qpath;
use ast::token::DelimToken;
AssocOp::NotEqual => "!=",
_ => unreachable!(),
};
- self.struct_span_err(sp, &format!("invalid comparison operator `{}=`", sugg))
+ self.struct_span_err(sp, &format!("invalid comparison operator `{sugg}=`"))
.span_suggestion_short(
sp,
&format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg),
/// Error on `and` and `or` suggesting `&&` and `||` respectively.
fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) {
- self.struct_span_err(self.token.span, &format!("`{}` is not a logical operator", bad))
+ self.struct_span_err(self.token.span, &format!("`{bad}` is not a logical operator"))
.span_suggestion_short(
self.token.span,
- &format!("use `{}` to perform logical {}", good, english),
+ &format!("use `{good}` to perform logical {english}"),
good.to_string(),
Applicability::MachineApplicable,
)
ExprKind::Path(None, ast::Path { segments, .. }),
TokenKind::Ident(kw::For | kw::Loop | kw::While, false),
) if segments.len() == 1 => {
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
let label = Label {
ident: Ident::from_str_and_span(
&format!("'{}", segments[0].ident),
}
Err(err) => {
err.cancel();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
}
}
}
self.look_ahead(1, |t| t.span).to(span_after_type),
"interpreted as generic arguments",
)
- .span_label(self.token.span, format!("not interpreted as {}", op_noun))
+ .span_label(self.token.span, format!("not interpreted as {op_noun}"))
.multipart_suggestion(
- &format!("try {} the cast value", op_verb),
+ &format!("try {op_verb} the cast value"),
vec![
(expr.span.shrink_to_lo(), "(".to_string()),
(expr.span.shrink_to_hi(), ")".to_string()),
&mut self,
cast_expr: P<Expr>,
) -> PResult<'a, P<Expr>> {
+ let span = cast_expr.span;
+ let maybe_ascription_span = if let ExprKind::Type(ascripted_expr, _) = &cast_expr.kind {
+ Some(ascripted_expr.span.shrink_to_hi().with_hi(span.hi()))
+ } else {
+ None
+ };
+
// Save the memory location of expr before parsing any following postfix operators.
// This will be compared with the memory location of the output expression.
// If they different we can assume we parsed another expression because the existing expression is not reallocated.
let addr_before = &*cast_expr as *const _ as usize;
- let span = cast_expr.span;
let with_postfix = self.parse_dot_or_call_expr_with_(cast_expr, span)?;
let changed = addr_before != &*with_postfix as *const _ as usize;
}
);
let mut err = self.struct_span_err(span, &msg);
- // If type ascription is "likely an error", the user will already be getting a useful
- // help message, and doesn't need a second.
- if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
- self.maybe_annotate_with_ascription(&mut err, false);
- } else {
+
+ let suggest_parens = |err: &mut DiagnosticBuilder<'_, _>| {
let suggestions = vec![
(span.shrink_to_lo(), "(".to_string()),
(span.shrink_to_hi(), ")".to_string()),
suggestions,
Applicability::MachineApplicable,
);
+ };
+
+ // If type ascription is "likely an error", the user will already be getting a useful
+ // help message, and doesn't need a second.
+ if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
+ self.maybe_annotate_with_ascription(&mut err, false);
+ } else if let Some(ascription_span) = maybe_ascription_span {
+ let is_nightly = self.sess.unstable_features.is_nightly_build();
+ if is_nightly {
+ suggest_parens(&mut err);
+ }
+ err.span_suggestion(
+ ascription_span,
+ &format!(
+ "{}remove the type ascription",
+ if is_nightly { "alternatively, " } else { "" }
+ ),
+ String::new(),
+ if is_nightly {
+ Applicability::MaybeIncorrect
+ } else {
+ Applicability::MachineApplicable
+ },
+ );
+ } else {
+ suggest_parens(&mut err);
}
err.emit();
};
fn error_unexpected_after_dot(&self) {
// FIXME Could factor this out into non_fatal_unexpected or something.
let actual = pprust::token_to_string(&self.token);
- self.struct_span_err(self.token.span, &format!("unexpected token: `{}`", actual)).emit();
+ self.struct_span_err(self.token.span, &format!("unexpected token: `{actual}`")).emit();
}
// We need an identifier or integer, but the next token is a float.
let snapshot = if self.token.kind == token::OpenDelim(token::Paren)
&& self.look_ahead_type_ascription_as_field()
{
- Some((self.clone(), fun.kind.clone()))
+ Some((self.create_snapshot_for_diagnostic(), fun.kind.clone()))
} else {
None
};
lo: Span,
open_paren: Span,
seq: &mut PResult<'a, P<Expr>>,
- snapshot: Option<(Self, ExprKind)>,
+ snapshot: Option<(SnapshotParser<'a>, ExprKind)>,
) -> Option<P<Expr>> {
match (seq.as_mut(), snapshot) {
(Err(err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
Ok((fields, ..)) if snapshot.eat(&token::CloseDelim(token::Paren)) => {
// We are certain we have `Enum::Foo(a: 3, b: 4)`, suggest
// `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.
- *self = snapshot;
+ self.restore_snapshot(snapshot);
let close_paren = self.prev_token.span;
let span = lo.to(self.prev_token.span);
if !fields.is_empty() {
mem::replace(err, replacement_err).cancel();
err.multipart_suggestion(
- &format!("if `{}` is a struct, use braces as delimiters", name),
+ &format!("if `{name}` is a struct, use braces as delimiters"),
vec![
(open_paren, " { ".to_string()),
(close_paren, " }".to_string()),
Applicability::MaybeIncorrect,
);
err.multipart_suggestion(
- &format!("if `{}` is a function, use the arguments directly", name),
+ &format!("if `{name}` is a function, use the arguments directly"),
fields
.into_iter()
.map(|field| (field.span.until(field.expr.span), String::new()))
)
.emit();
} else {
- let msg = format!("invalid suffix `{}` for number literal", suf);
+ let msg = format!("invalid suffix `{suf}` for number literal");
self.struct_span_err(span, &msg)
- .span_label(span, format!("invalid suffix `{}`", suf))
+ .span_label(span, format!("invalid suffix `{suf}`"))
.help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)")
.emit();
}
let msg = format!("invalid width `{}` for float literal", &suf[1..]);
self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit();
} else {
- let msg = format!("invalid suffix `{}` for float literal", suf);
+ let msg = format!("invalid suffix `{suf}` for float literal");
self.struct_span_err(span, &msg)
- .span_label(span, format!("invalid suffix `{}`", suf))
+ .span_label(span, format!("invalid suffix `{suf}`"))
.help("valid suffixes are `f32` and `f64`")
.emit();
}
2 => "binary",
_ => unreachable!(),
};
- self.struct_span_err(span, &format!("{} float literal is not supported", descr))
+ self.struct_span_err(span, &format!("{descr} float literal is not supported"))
.span_label(span, "not supported")
.emit();
}
let mut err = self
.sess
.span_diagnostic
- .struct_span_warn(sp, &format!("suffixes on {} are invalid", kind));
+ .struct_span_warn(sp, &format!("suffixes on {kind} are invalid"));
err.note(&format!(
"`{}` is *temporarily* accepted on tuple index fields as it was \
incorrectly accepted on stable for a few releases",
);
err
} else {
- self.struct_span_err(sp, &format!("suffixes on {} are invalid", kind))
+ self.struct_span_err(sp, &format!("suffixes on {kind} are invalid"))
.forget_guarantee()
};
- err.span_label(sp, format!("invalid suffix `{}`", suf));
+ err.span_label(sp, format!("invalid suffix `{suf}`"));
err.emit();
}
}
lo: Span,
attrs: AttrVec,
) -> Option<P<Expr>> {
- let mut snapshot = self.clone();
+ let mut snapshot = self.create_snapshot_for_diagnostic();
match snapshot.parse_array_or_repeat_expr(attrs, token::Brace) {
Ok(arr) => {
let hi = snapshot.prev_token.span;
.note("to define an array, one would use square brackets instead of curly braces")
.emit();
- *self = snapshot;
+ self.restore_snapshot(snapshot);
Some(self.mk_expr_err(arr.span))
}
Err(e) => {
let ctx = if is_ctx_else { "else" } else { "if" };
self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches")
.span_label(branch_span, "the attributes are attached to this branch")
- .span_label(ctx_span, format!("the branch belongs to this `{}`", ctx))
+ .span_label(ctx_span, format!("the branch belongs to this `{ctx}`"))
.span_suggestion(
span,
"remove the attributes",
if self.token.kind != token::Semi {
return None;
}
- let start_snapshot = self.clone();
+ let start_snapshot = self.create_snapshot_for_diagnostic();
let semi_sp = self.token.span;
self.bump(); // `;`
let mut stmts =
err.span_label(arrow_span, "while parsing the `match` arm starting here");
if stmts.len() > 1 {
err.multipart_suggestion(
- &format!("surround the statement{} with a body", s),
+ &format!("surround the statement{s} with a body"),
vec![
(span.shrink_to_lo(), "{ ".to_string()),
(span.shrink_to_hi(), " }".to_string()),
return Some(err(self, stmts));
}
if self.token.kind == token::Comma {
- *self = start_snapshot;
+ self.restore_snapshot(start_snapshot);
return None;
}
- let pre_pat_snapshot = self.clone();
+ let pre_pat_snapshot = self.create_snapshot_for_diagnostic();
match self.parse_pat_no_top_alt(None) {
Ok(_pat) => {
if self.token.kind == token::FatArrow {
// Reached arm end.
- *self = pre_pat_snapshot;
+ self.restore_snapshot(pre_pat_snapshot);
return Some(err(self, stmts));
}
}
}
}
- *self = pre_pat_snapshot;
+ self.restore_snapshot(pre_pat_snapshot);
match self.parse_stmt_without_recovery(true, ForceCollect::No) {
// Consume statements for as long as possible.
Ok(Some(stmt)) => {
stmts.push(stmt);
}
Ok(None) => {
- *self = start_snapshot;
+ self.restore_snapshot(start_snapshot);
break;
}
// We couldn't parse either yet another statement missing it's
// enclosing block nor the next arm's pattern or closing brace.
Err(stmt_err) => {
stmt_err.cancel();
- *self = start_snapshot;
+ self.restore_snapshot(start_snapshot);
break;
}
}
Some(this.parse_ty_param(attrs)?)
} else if this.token.can_begin_type() {
// Trying to write an associated type bound? (#26271)
- let snapshot = this.clone();
+ let snapshot = this.create_snapshot_for_diagnostic();
match this.parse_ty_where_predicate() {
Ok(where_predicate) => {
this.struct_span_err(
Err(err) => {
err.cancel();
// FIXME - maybe we should overwrite 'self' outside of `collect_tokens`?
- *this = snapshot;
+ this.restore_snapshot(snapshot);
return Ok((None, TrailingToken::None));
}
}
id: ast::DUMMY_NODE_ID,
}))
} else {
+ self.maybe_recover_bounds_doubled_colon(&ty)?;
self.unexpected()
}
}
impl<'a> Parser<'a> {
/// Parses a source module as a crate. This is the main entry point for the parser.
pub fn parse_crate_mod(&mut self) -> PResult<'a, ast::Crate> {
- let (attrs, items, span) = self.parse_mod(&token::Eof)?;
- Ok(ast::Crate { attrs, items, span, id: DUMMY_NODE_ID, is_placeholder: false })
+ let (attrs, items, spans) = self.parse_mod(&token::Eof)?;
+ Ok(ast::Crate { attrs, items, spans, id: DUMMY_NODE_ID, is_placeholder: false })
}
/// Parses a `mod <foo> { ... }` or `mod <foo>;` item.
pub fn parse_mod(
&mut self,
term: &TokenKind,
- ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, Span)> {
+ ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, ModSpans)> {
let lo = self.token.span;
let attrs = self.parse_inner_attributes()?;
+ let post_attr_lo = self.token.span;
let mut items = vec![];
while let Some(item) = self.parse_item(ForceCollect::No)? {
items.push(item);
if !self.eat(term) {
let token_str = super::token_descr(&self.token);
if !self.maybe_consume_incorrect_semicolon(&items) {
- let msg = &format!("expected item, found {}", token_str);
+ let msg = &format!("expected item, found {token_str}");
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, "expected item");
return Err(err);
}
}
- Ok((attrs, items, lo.to(self.prev_token.span)))
+ let inject_use_span = post_attr_lo.data().with_hi(post_attr_lo.lo());
+ let mod_spans = ModSpans { inner_span: lo.to(self.prev_token.span), inject_use_span };
+ Ok((attrs, items, mod_spans))
}
}
}
let vs = pprust::vis_to_string(&vis);
let vs = vs.trim_end();
- self.struct_span_err(vis.span, &format!("visibility `{}` is not followed by an item", vs))
+ self.struct_span_err(vis.span, &format!("visibility `{vs}` is not followed by an item"))
.span_label(vis.span, "the visibility")
- .help(&format!("you likely meant to define an item, e.g., `{} fn foo() {{}}`", vs))
+ .help(&format!("you likely meant to define an item, e.g., `{vs} fn foo() {{}}`"))
.emit();
}
if let Err(mut e) = self.expect_semi() {
match tree.kind {
UseTreeKind::Glob => {
- e.note("the wildcard token must be last on the path").emit();
+ e.note("the wildcard token must be last on the path");
}
UseTreeKind::Nested(..) => {
- e.note("glob-like brace syntax must be last on the path").emit();
+ e.note("glob-like brace syntax must be last on the path");
}
_ => (),
}
if self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) {
// possible public struct definition where `struct` was forgotten
let ident = self.parse_ident().unwrap();
- let msg = format!("add `struct` here to parse `{}` as a public struct", ident);
+ let msg = format!("add `struct` here to parse `{ident}` as a public struct");
let mut err = self.struct_span_err(sp, "missing `struct` for struct definition");
err.span_suggestion_short(
sp,
("fn` or `struct", "function or struct", true)
};
- let msg = format!("missing `{}` for {} definition", kw, kw_name);
+ let msg = format!("missing `{kw}` for {kw_name} definition");
let mut err = self.struct_span_err(sp, &msg);
if !ambiguous {
self.consume_block(token::Brace, ConsumeClosingDelim::Yes);
let suggestion =
- format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name);
+ format!("add `{kw}` here to parse `{ident}` as a public {kw_name}");
err.span_suggestion_short(
sp,
&suggestion,
- format!(" {} ", kw),
+ format!(" {kw} "),
Applicability::MachineApplicable,
);
} else if let Ok(snippet) = self.span_to_snippet(ident_sp) {
} else {
("fn` or `struct", "function or struct", true)
};
- let msg = format!("missing `{}` for {} definition", kw, kw_name);
+ let msg = format!("missing `{kw}` for {kw_name} definition");
let mut err = self.struct_span_err(sp, &msg);
if !ambiguous {
err.span_suggestion_short(
sp,
- &format!("add `{}` here to parse `{}` as a public {}", kw, ident, kw_name),
+ &format!("add `{kw}` here to parse `{ident}` as a public {kw_name}"),
format!(" {} ", kw),
Applicability::MachineApplicable,
);
attrs: &mut Vec<Attribute>,
unsafety: Unsafe,
) -> PResult<'a, ItemInfo> {
+ let sp_start = self.prev_token.span;
let abi = self.parse_abi(); // ABI?
- let items = self.parse_item_list(attrs, |p| p.parse_foreign_item(ForceCollect::No))?;
- let module = ast::ForeignMod { unsafety, abi, items };
- Ok((Ident::empty(), ItemKind::ForeignMod(module)))
+ match self.parse_item_list(attrs, |p| p.parse_foreign_item(ForceCollect::No)) {
+ Ok(items) => {
+ let module = ast::ForeignMod { unsafety, abi, items };
+ Ok((Ident::empty(), ItemKind::ForeignMod(module)))
+ }
+ Err(mut err) => {
+ let current_qual_sp = self.prev_token.span;
+ let current_qual_sp = current_qual_sp.to(sp_start);
+ if let Ok(current_qual) = self.span_to_snippet(current_qual_sp) {
+ if err.message() == "expected `{`, found keyword `unsafe`" {
+ let invalid_qual_sp = self.token.uninterpolated_span();
+ let invalid_qual = self.span_to_snippet(invalid_qual_sp).unwrap();
+
+ err.span_suggestion(
+ current_qual_sp.to(invalid_qual_sp),
+ &format!("`{}` must come before `{}`", invalid_qual, current_qual),
+ format!("{} {}", invalid_qual, current_qual),
+ Applicability::MachineApplicable,
+ ).note("keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`");
+ }
+ }
+ Err(err)
+ }
+ }
}
/// Parses a foreign item (one in an `extern { ... }` block).
fn error_bad_item_kind<T>(&self, span: Span, kind: &ItemKind, ctx: &str) -> Option<T> {
let span = self.sess.source_map().guess_head_span(span);
let descr = kind.descr();
- self.struct_span_err(span, &format!("{} is not supported in {}", descr, ctx))
- .help(&format!("consider moving the {} out to a nearby module scope", descr))
+ self.struct_span_err(span, &format!("{descr} is not supported in {ctx}"))
+ .help(&format!("consider moving the {descr} out to a nearby module scope"))
.emit();
None
}
Some(Mutability::Not) => "static",
None => "const",
};
- let mut err = self.struct_span_err(id.span, &format!("missing type for `{}` item", kind));
+ let mut err = self.struct_span_err(id.span, &format!("missing type for `{kind}` item"));
err.span_suggestion(
id.span,
"provide a type for the item",
- format!("{}: <type>", id),
+ format!("{id}: <type>"),
Applicability::HasPlaceholders,
);
err.stash(id.span, StashKey::ItemNoType);
} else {
let token_str = super::token_descr(&self.token);
let msg = &format!(
- "expected `where`, `{{`, `(`, or `;` after struct name, found {}",
- token_str
+ "expected `where`, `{{`, `(`, or `;` after struct name, found {token_str}"
);
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name");
VariantData::Struct(fields, recovered)
} else {
let token_str = super::token_descr(&self.token);
- let msg = &format!("expected `where` or `{{` after union name, found {}", token_str);
+ let msg = &format!("expected `where` or `{{` after union name, found {token_str}");
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, "expected `where` or `{` after union name");
return Err(err);
// `check_trailing_angle_brackets` already emitted a nicer error
// NOTE(eddyb) this was `.cancel()`, but `err`
// gets returned, so we can't fully defuse it.
- err.downgrade_to_delayed_bug();
+ err.delay_as_bug();
}
}
}
// Make sure an error was emitted (either by recovering an angle bracket,
// or by finding an identifier as the next token), since we're
// going to continue parsing
- assert!(self.sess.span_diagnostic.has_errors());
+ assert!(self.sess.span_diagnostic.has_errors().is_some());
} else {
return Err(err);
}
let name = self.parse_field_ident(adt_ty, lo)?;
self.expect_field_ty_separator()?;
let ty = self.parse_ty()?;
+ if self.token.kind == token::Colon && self.look_ahead(1, |tok| tok.kind != token::Colon) {
+ self.struct_span_err(self.token.span, "found single colon in a struct field type path")
+ .span_suggestion_verbose(
+ self.token.span,
+ "write a path separator here",
+ "::".to_string(),
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
if self.token.kind == token::Eq {
self.bump();
let const_expr = self.parse_anon_const_expr()?;
}
let mut err = self.struct_span_err(
lo.to(self.prev_token.span),
- &format!("functions are not allowed in {} definitions", adt_ty),
+ &format!("functions are not allowed in {adt_ty} definitions"),
);
err.help("unlike in C++, Java, and C#, functions are declared in `impl` blocks");
err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information");
let vstr = pprust::vis_to_string(vis);
let vstr = vstr.trim_end();
if macro_rules {
- let msg = format!("can't qualify macro_rules invocation with `{}`", vstr);
+ let msg = format!("can't qualify macro_rules invocation with `{vstr}`");
self.struct_span_err(vis.span, &msg)
.span_suggestion(
vis.span,
String::new(),
Applicability::MachineApplicable,
)
- .help(&format!("try adjusting the macro to put `{}` inside the invocation", vstr))
+ .help(&format!("try adjusting the macro to put `{vstr}` inside the invocation"))
.emit();
}
}
self.struct_span_err(
kw_token.span,
- &format!("`{}` definition cannot be nested inside `{}`", kw_str, keyword),
+ &format!("`{kw_str}` definition cannot be nested inside `{keyword}`"),
)
.span_suggestion(
item.unwrap().span,
- &format!("consider creating a new `{}` definition instead of nesting", kw_str),
+ &format!("consider creating a new `{kw_str}` definition instead of nesting"),
String::new(),
Applicability::MaybeIncorrect,
)
// We use an over-approximation here.
// `const const`, `fn const` won't parse, but we're not stepping over other syntax either.
// `pub` is added in case users got confused with the ordering like `async pub fn`,
- // only if it wasn't preceeded by `default` as `default pub` is invalid.
+ // only if it wasn't preceded by `default` as `default pub` is invalid.
let quals: &[Symbol] = if check_pub {
&[kw::Pub, kw::Const, kw::Async, kw::Unsafe, kw::Extern]
} else {
err.span_suggestion(
self.token.uninterpolated_span(),
- &format!("`{}` already used earlier, remove this one", original_kw),
+ &format!("`{original_kw}` already used earlier, remove this one"),
"".to_string(),
Applicability::MachineApplicable,
)
- .span_note(original_sp, &format!("`{}` first seen here", original_kw));
+ .span_note(original_sp, &format!("`{original_kw}` first seen here"));
}
// The keyword has not been seen yet, suggest correct placement in the function front matter
else if let Some(WrongKw::Misplaced(correct_pos_sp)) = wrong_kw {
err.span_suggestion(
correct_pos_sp.to(misplaced_qual_sp),
- &format!("`{}` must come before `{}`", misplaced_qual, current_qual),
- format!("{} {}", misplaced_qual, current_qual),
+ &format!("`{misplaced_qual}` must come before `{current_qual}`"),
+ format!("{misplaced_qual} {current_qual}"),
Applicability::MachineApplicable,
).note("keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`");
}
if matches!(orig_vis.kind, VisibilityKind::Inherited) {
err.span_suggestion(
sp_start.to(self.prev_token.span),
- &format!("visibility `{}` must come before `{}`", vs, snippet),
- format!("{} {}", vs, snippet),
+ &format!("visibility `{vs}` must come before `{snippet}`"),
+ format!("{vs} {snippet}"),
Applicability::MachineApplicable,
);
}
pub use path::PathStyle;
use rustc_ast::ptr::P;
-use rustc_ast::token::{self, DelimToken, Token, TokenKind};
+use rustc_ast::token::{self, DelimToken, Nonterminal, Token, TokenKind};
use rustc_ast::tokenstream::AttributesData;
use rustc_ast::tokenstream::{self, DelimSpan, Spacing};
use rustc_ast::tokenstream::{TokenStream, TokenTree};
frame: TokenCursorFrame,
stack: Vec<TokenCursorFrame>,
desugar_doc_comments: bool,
- // Counts the number of calls to `next` or `next_desugared`,
- // depending on whether `desugar_doc_comments` is set.
+ // Counts the number of calls to `{,inlined_}next` or
+ // `{,inlined_}next_desugared`, depending on whether
+ // `desugar_doc_comments` is set.
num_next_calls: usize,
// During parsing, we may sometimes need to 'unglue' a
// glued token into two component tokens
impl TokenCursor {
fn next(&mut self) -> (Token, Spacing) {
+ self.inlined_next()
+ }
+
+ /// This always-inlined version should only be used on hot code paths.
+ #[inline(always)]
+ fn inlined_next(&mut self) -> (Token, Spacing) {
loop {
let (tree, spacing) = if !self.frame.open_delim {
self.frame.open_delim = true;
}
fn next_desugared(&mut self) -> (Token, Spacing) {
- let (data, attr_style, sp) = match self.next() {
+ self.inlined_next_desugared()
+ }
+
+ /// This always-inlined version should only be used on hot code paths.
+ #[inline(always)]
+ fn inlined_next_desugared(&mut self) -> (Token, Spacing) {
+ let (data, attr_style, sp) = match self.inlined_next() {
(Token { kind: token::DocComment(_, attr_style, data), span }, _) => {
(data, attr_style, span)
}
parser
}
+ #[inline]
fn next_tok(&mut self, fallback_span: Span) -> (Token, Spacing) {
loop {
let (mut next, spacing) = if self.desugar_doc_comments {
- self.token_cursor.next_desugared()
+ self.token_cursor.inlined_next_desugared()
} else {
- self.token_cursor.next()
+ self.token_cursor.inlined_next()
};
self.token_cursor.num_next_calls += 1;
// We've retrieved an token from the underlying
}
/// Advance the parser by one token using provided token as the next one.
- fn bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
+ fn bump_with(&mut self, next: (Token, Spacing)) {
+ self.inlined_bump_with(next)
+ }
+
+ /// This always-inlined version should only be used on hot code paths.
+ #[inline(always)]
+ fn inlined_bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
// Bumping after EOF is a bad sign, usually an infinite loop.
if self.prev_token.kind == TokenKind::Eof {
let msg = "attempted to bump the parser past EOF (may be stuck in a loop)";
/// Advance the parser by one token.
pub fn bump(&mut self) {
let next_token = self.next_tok(self.token.span);
- self.bump_with(next_token);
+ self.inlined_bump_with(next_token);
}
/// Look-ahead `dist` tokens of `self.token` and get access to that token there.
/// handling of replace ranges.
Empty,
}
+
+#[derive(Debug)]
+pub enum NtOrTt {
+ Nt(Nonterminal),
+ Tt(TokenTree),
+}
use rustc_ast::ptr::P;
-use rustc_ast::token::{self, Nonterminal, NonterminalKind, Token};
+use rustc_ast::token::{self, NonterminalKind, Token};
use rustc_ast::AstLike;
use rustc_ast_pretty::pprust;
use rustc_errors::PResult;
use rustc_span::symbol::{kw, Ident};
use crate::parser::pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
-use crate::parser::{FollowedByType, ForceCollect, Parser, PathStyle};
+use crate::parser::{FollowedByType, ForceCollect, NtOrTt, Parser, PathStyle};
impl<'a> Parser<'a> {
/// Checks whether a non-terminal may begin with a particular token.
NonterminalKind::Lifetime => match token.kind {
token::Lifetime(_) => true,
token::Interpolated(ref nt) => {
- matches!(**nt, token::NtLifetime(_) | token::NtTT(_))
+ matches!(**nt, token::NtLifetime(_))
}
_ => false,
},
}
/// Parse a non-terminal (e.g. MBE `:pat` or `:ident`).
- pub fn parse_nonterminal(&mut self, kind: NonterminalKind) -> PResult<'a, Nonterminal> {
+ pub fn parse_nonterminal(&mut self, kind: NonterminalKind) -> PResult<'a, NtOrTt> {
// Any `Nonterminal` which stores its tokens (currently `NtItem` and `NtExpr`)
// needs to have them force-captured here.
// A `macro_rules!` invocation may pass a captured item/expr to a proc-macro,
// in advance whether or not a proc-macro will be (transitively) invoked,
// we always capture tokens for any `Nonterminal` which needs them.
let mut nt = match kind {
+ // Note that TT is treated differently to all the others.
+ NonterminalKind::TT => return Ok(NtOrTt::Tt(self.parse_token_tree())),
NonterminalKind::Item => match self.parse_item(ForceCollect::Yes)? {
Some(item) => token::NtItem(item),
None => {
NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
token::NtPat(self.collect_tokens_no_attrs(|this| match kind {
NonterminalKind::PatParam { .. } => this.parse_pat_no_top_alt(None),
- NonterminalKind::PatWithOr { .. } => {
- this.parse_pat_allow_top_alt(None, RecoverComma::No, RecoverColon::No, CommaRecoveryMode::EitherTupleOrPipe)
- }
+ NonterminalKind::PatWithOr { .. } => this.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ ),
_ => unreachable!(),
})?)
}
)
}
- NonterminalKind::Ty => {
- token::NtTy(self.collect_tokens_no_attrs(|this| this.parse_no_question_mark_recover())?)
- }
+ NonterminalKind::Ty => token::NtTy(
+ self.collect_tokens_no_attrs(|this| this.parse_no_question_mark_recover())?,
+ ),
+
// this could be handled like a token, since it is one
NonterminalKind::Ident
if let Some((ident, is_raw)) = get_macro_ident(&self.token) =>
self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))?,
),
NonterminalKind::Meta => token::NtMeta(P(self.parse_attr_item(true)?)),
- NonterminalKind::TT => token::NtTT(self.parse_token_tree()),
NonterminalKind::Vis => token::NtVis(
self.collect_tokens_no_attrs(|this| this.parse_visibility(FollowedByType::Yes))?,
),
);
}
- Ok(nt)
+ Ok(NtOrTt::Nt(nt))
}
}
lo,
ty_generics,
)?;
- self.expect_gt()?;
+ self.expect_gt().map_err(|mut err| {
+ // Attempt to find places where a missing `>` might belong.
+ if let Some(arg) = args
+ .iter()
+ .rev()
+ .skip_while(|arg| matches!(arg, AngleBracketedArg::Constraint(_)))
+ .next()
+ {
+ err.span_suggestion_verbose(
+ arg.span().shrink_to_hi(),
+ "you might have meant to end the type parameters here",
+ ">".to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err
+ })?;
let span = lo.to(self.prev_token.span);
AngleBracketedArgs { args, span }.into()
} else {
while let Some(arg) = self.parse_angle_arg(ty_generics)? {
args.push(arg);
if !self.eat(&token::Comma) {
+ if self.token.kind == token::Semi
+ && self.look_ahead(1, |t| t.is_ident() || t.is_lifetime())
+ {
+ // Add `>` to the list of expected tokens.
+ self.check(&token::Gt);
+ // Handle `,` to `;` substitution
+ let mut err = self.unexpected::<()>().unwrap_err();
+ self.bump();
+ err.span_suggestion_verbose(
+ self.prev_token.span.until(self.token.span),
+ "use a comma to separate type parameters",
+ ", ".to_string(),
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ continue;
+ }
if !self.token.kind.should_end_const_arg() {
if self.handle_ambiguous_unbraced_const_arg(&mut args)? {
// We've managed to (partially) recover, so continue trying to parse
GenericArg::Const(self.parse_const_arg()?)
} else if self.check_type() {
// Parse type argument.
+ let is_const_fn = self.look_ahead(1, |t| t.kind == token::OpenDelim(token::Paren));
+ let mut snapshot = self.create_snapshot_for_diagnostic();
match self.parse_ty() {
Ok(ty) => GenericArg::Type(ty),
Err(err) => {
+ if is_const_fn {
+ match (*snapshot).parse_expr_res(Restrictions::CONST_EXPR, None) {
+ Ok(expr) => {
+ self.restore_snapshot(snapshot);
+ return Ok(Some(self.dummy_const_arg_needs_braces(err, expr.span)));
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ }
+ }
// Try to recover from possible `const` arg without braces.
return self.recover_const_arg(start, err).map(Some);
}
} else {
// Fall back by trying to parse a const-expr expression. If we successfully do so,
// then we should report an error that it needs to be wrapped in braces.
- let snapshot = self.clone();
+ let snapshot = self.create_snapshot_for_diagnostic();
match self.parse_expr_res(Restrictions::CONST_EXPR, None) {
Ok(expr) => {
return Ok(Some(self.dummy_const_arg_needs_braces(
)));
}
Err(err) => {
- *self = snapshot;
+ self.restore_snapshot(snapshot);
err.cancel();
return Ok(None);
}
}
ast::AttrStyle::Inner => "crate-level attribute should be in the root module",
};
- lint.build(msg).emit()
+ lint.build(msg).emit();
});
}
}
fn inline_attr_str_error_with_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!(
- "`#[{}]` is ignored on struct fields, match arms and macro defs",
- sym,
+ "`#[{sym}]` is ignored on struct fields, match arms and macro defs",
))
.warn(
"this was previously accepted by the compiler but is \
fn inline_attr_str_error_without_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
- lint.build(&format!("`#[{}]` is ignored on struct fields and match arms", sym))
+ lint.build(&format!("`#[{sym}]` is ignored on struct fields and match arms"))
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
| Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
Target::Method(MethodKind::Trait { body: false }) | Target::ForeignFn => {
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
- lint.build("`#[inline]` is ignored on function prototypes").emit()
+ lint.build("`#[inline]` is ignored on function prototypes").emit();
});
true
}
.sess
.struct_span_err(
meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
- &format!("`{}` is not a valid identifier", doc_keyword),
+ &format!("`{doc_keyword}` is not a valid identifier"),
)
.emit();
return false;
.struct_span_err(
meta.span(),
&format!(
- "`#![doc({} = \"...\")]` isn't allowed as a crate-level attribute",
- attr_name,
+ "`#![doc({attr_name} = \"...\")]` isn't allowed as a crate-level attribute",
),
)
.emit();
attr.meta().unwrap().span,
"use `doc = include_str!` instead",
format!(
- "#{}[doc = include_str!(\"{}\")]",
- inner, value
+ "#{inner}[doc = include_str!(\"{value}\")]",
),
applicability,
);
if let Some(value) = attr.value_str() {
diag.span_help(
attr.span,
- &format!(r#"try `#[link(name = "{}")]` instead"#, value),
+ &format!(r#"try `#[link(name = "{value}")]` instead"#),
);
} else {
diag.span_help(attr.span, r#"try `#[link(name = "...")]` instead"#);
span,
format!(
"there {} only {} argument{}",
- if arg_count != 1 { "are" } else { "is" },
+ pluralize!("is", arg_count),
arg_count,
pluralize!(arg_count)
),
};
self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
lint.build(&format!(
- "`#[no_mangle]` has no effect on a foreign {}",
- foreign_item_kind
+ "`#[no_mangle]` has no effect on a foreign {foreign_item_kind}"
))
.warn(
"this was previously accepted by the compiler but is \
being phased out; it will become a hard error in \
a future release!",
)
- .span_label(span, format!("foreign {}", foreign_item_kind))
+ .span_label(span, format!("foreign {foreign_item_kind}"))
.note("symbol names in extern blocks are not mangled")
.span_suggestion(
attr.span,
hint.span(),
E0517,
"{}",
- &format!("attribute should be applied to {} {}", article, allowed_targets)
+ &format!("attribute should be applied to {article} {allowed_targets}")
)
- .span_label(span, &format!("not {} {}", article, allowed_targets))
+ .span_label(span, &format!("not {article} {allowed_targets}"))
.emit();
}
/// of the trait being implemented; as those provided functions can be non-const.
fn visit_item<'hir>(&mut self, item: &'hir hir::Item<'hir>) {
let _: Option<_> = try {
- if let hir::ItemKind::Impl(ref imp) = item.kind {
- if let hir::Constness::Const = imp.constness {
+ if let hir::ItemKind::Impl(ref imp) = item.kind && let hir::Constness::Const = imp.constness {
let trait_def_id = imp.of_trait.as_ref()?.trait_def_id()?;
let ancestors = self
.tcx
.note(&format!("`{}` not implemented", to_implement.join("`, `")))
.emit();
}
- }
}
};
}
if self.tcx.has_attr(trait_of, sym::rustc_trivial_field_reads) {
let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap();
if let ty::Adt(adt_def, _) = trait_ref.self_ty().kind() {
- if let Some(adt_def_id) = adt_def.did.as_local() {
+ if let Some(adt_def_id) = adt_def.did().as_local() {
self.ignored_derived_traits
.entry(adt_def_id)
.or_default()
match item.kind {
hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
let def = self.tcx.adt_def(item.def_id);
- self.repr_has_repr_c = def.repr.c();
+ self.repr_has_repr_c = def.repr().c();
intravisit::walk_item(self, &item);
}
self.repr_has_repr_c = had_repr_c;
}
- fn mark_as_used_if_union(&mut self, adt: &ty::AdtDef, fields: &[hir::ExprField<'_>]) {
- if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did.is_local() {
+ fn mark_as_used_if_union(&mut self, adt: ty::AdtDef<'tcx>, fields: &[hir::ExprField<'_>]) {
+ if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did().is_local() {
for field in fields {
let index = self.tcx.field_index(field.hir_id, self.typeck_results());
self.insert_def_id(adt.non_enum_variant().fields[index].did);
hir::ExprKind::Struct(ref qpath, ref fields, _) => {
let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
self.handle_res(res);
- if let ty::Adt(ref adt, _) = self.typeck_results().expr_ty(expr).kind() {
- self.mark_as_used_if_union(adt, fields);
+ if let ty::Adt(adt, _) = self.typeck_results().expr_ty(expr).kind() {
+ self.mark_as_used_if_union(*adt, fields);
}
}
_ => (),
if let Some(original_def_id) = items.name_to_id.insert(name, item_def_id) {
if original_def_id != item_def_id {
let mut err = match tcx.hir().span_if_local(item_def_id) {
- Some(span) => tcx.sess.struct_span_err(
- span,
- &format!("duplicate diagnostic item found: `{}`.", name),
- ),
+ Some(span) => tcx
+ .sess
+ .struct_span_err(span, &format!("duplicate diagnostic item found: `{name}`.")),
None => tcx.sess.struct_err(&format!(
"duplicate diagnostic item in crate `{}`: `{}`.",
tcx.crate_name(item_def_id.krate),
} else if let Some((def_id, _)) = visitor.attr_main_fn {
Some((def_id.to_def_id(), EntryFnType::Main))
} else {
- if let Some(main_def) = tcx.resolutions(()).main_def {
- if let Some(def_id) = main_def.opt_fn_def_id() {
- // non-local main imports are handled below
- if let Some(def_id) = def_id.as_local() {
- if matches!(tcx.hir().find_by_def_id(def_id), Some(Node::ForeignItem(_))) {
- tcx.sess
- .struct_span_err(
- tcx.def_span(def_id),
- "the `main` function cannot be declared in an `extern` block",
- )
- .emit();
- return None;
- }
- }
-
- if main_def.is_import && !tcx.features().imported_main {
- let span = main_def.span;
- feature_err(
- &tcx.sess.parse_sess,
- sym::imported_main,
- span,
- "using an imported function as entry point `main` is experimental",
+ if let Some(main_def) = tcx.resolutions(()).main_def && let Some(def_id) = main_def.opt_fn_def_id() {
+ // non-local main imports are handled below
+ if let Some(def_id) = def_id.as_local() && matches!(tcx.hir().find_by_def_id(def_id), Some(Node::ForeignItem(_))) {
+ tcx.sess
+ .struct_span_err(
+ tcx.def_span(def_id),
+ "the `main` function cannot be declared in an `extern` block",
)
.emit();
- }
- return Some((def_id, EntryFnType::Main));
+ return None;
}
+
+ if main_def.is_import && !tcx.features().imported_main {
+ let span = main_def.span;
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::imported_main,
+ span,
+ "using an imported function as entry point `main` is experimental",
+ )
+ .emit();
+ }
+ return Some((def_id, EntryFnType::Main));
}
no_main_err(tcx, visitor);
None
err.note(¬e);
}
- if let Some(main_def) = tcx.resolutions(()).main_def {
- if main_def.opt_fn_def_id().is_none() {
- // There is something at `crate::main`, but it is not a function definition.
- err.span_label(main_def.span, "non-function item at `crate::main` is found");
- }
+ if let Some(main_def) = tcx.resolutions(()).main_def && main_def.opt_fn_def_id().is_none(){
+ // There is something at `crate::main`, but it is not a function definition.
+ err.span_label(main_def.span, "non-function item at `crate::main` is found");
}
if tcx.sess.teach(&err.get_code().unwrap()) {
fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty::Adt(def, substs) = *ty.kind() else { return ty };
- if def.variants.len() == 2 && !def.repr.c() && def.repr.int.is_none() {
+ if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
let data_idx;
let one = VariantIdx::new(1);
let zero = VariantIdx::new(0);
- if def.variants[zero].fields.is_empty() {
+ if def.variant(zero).fields.is_empty() {
data_idx = one;
- } else if def.variants[one].fields.is_empty() {
+ } else if def.variant(one).fields.is_empty() {
data_idx = zero;
} else {
return ty;
}
- if def.variants[data_idx].fields.len() == 1 {
- return def.variants[data_idx].fields[0].ty(tcx, substs);
+ if def.variant(data_idx).fields.len() == 1 {
+ return def.variant(data_idx).fields[0].ty(tcx, substs);
}
}
// Special-case transmuting from `typeof(function)` and
// `Option<typeof(function)>` to present a clearer error.
let from = unpack_option_like(self.tcx, from);
- if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) {
- if size_to == Pointer.size(&self.tcx) {
- struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type")
- .note(&format!("source type: {}", from))
- .note(&format!("target type: {}", to))
- .help("cast with `as` to a pointer instead")
- .emit();
- return;
- }
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&self.tcx) {
+ struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type")
+ .note(&format!("source type: {from}"))
+ .note(&format!("target type: {to}"))
+ .help("cast with `as` to a pointer instead")
+ .emit();
+ return;
}
}
// Try to display a sensible error with as much information as possible.
let skeleton_string = |ty: Ty<'tcx>, sk| match sk {
Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()),
- Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{}`", tail),
+ Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
Err(LayoutError::Unknown(bad)) => {
if bad == ty {
"this type does not have a fixed size".to_owned()
} else {
- format!("size can vary because of {}", bad)
+ format!("size can vary because of {bad}")
}
}
Err(err) => err.to_string(),
or dependently-sized types"
);
if from == to {
- err.note(&format!("`{}` does not have a fixed size", from));
+ err.note(&format!("`{from}` does not have a fixed size"));
} else {
err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
.note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if self.is_thin_ptr_ty(ty) => {
Some(asm_ty_isize)
}
- ty::Adt(adt, substs) if adt.repr.simd() => {
+ ty::Adt(adt, substs) if adt.repr().simd() => {
let fields = &adt.non_enum_variant().fields;
let elem_ty = fields[0].ty(self.tcx, substs);
match elem_ty.kind() {
_ => None,
};
let Some(asm_ty) = asm_ty else {
- let msg = &format!("cannot use value of type `{}` for inline assembly", ty);
+ let msg = &format!("cannot use value of type `{ty}` for inline assembly");
let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
err.note(
"only integers, floats, SIMD vectors, pointers and function pointers \
if !ty.is_copy_modulo_regions(self.tcx.at(DUMMY_SP), self.param_env) {
let msg = "arguments for inline assembly must be copyable";
let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
- err.note(&format!("`{}` does not implement the Copy trait", ty));
+ err.note(&format!("`{ty}` does not implement the Copy trait"));
err.emit();
}
in_expr.span,
&format!("type `{}`", self.typeck_results.expr_ty_adjusted(in_expr)),
);
- err.span_label(expr.span, &format!("type `{}`", ty));
+ err.span_label(expr.span, &format!("type `{ty}`"));
err.note(
"asm inout arguments must have the same type, \
unless they are both pointers or integers of the same size",
let reg_class = reg.reg_class();
let supported_tys = reg_class.supported_types(asm_arch);
let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
- let msg = &format!("type `{}` cannot be used with this register class", ty);
+ let msg = &format!("type `{ty}` cannot be used with this register class");
let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
let supported_tys: Vec<_> =
supported_tys.iter().map(|(t, _)| t.to_string()).collect();
let mut err = lint.build(msg);
err.span_label(expr.span, "for this argument");
err.help(&format!(
- "use the `{}` modifier to have the register formatted as `{}`",
- suggested_modifier, suggested_result,
+ "use the `{suggested_modifier}` modifier to have the register formatted as `{suggested_result}`",
));
err.help(&format!(
- "or use the `{}` modifier to keep the default formatting of `{}`",
- default_modifier, default_result,
+ "or use the `{default_modifier}` modifier to keep the default formatting of `{default_result}`",
));
err.emit();
},
match expr.kind {
hir::ExprKind::Path(ref qpath) => {
let res = self.typeck_results.qpath_res(qpath, expr.hir_id);
- if let Res::Def(DefKind::Fn, did) = res {
- if self.def_id_is_transmute(did) {
- let typ = self.typeck_results.node_type(expr.hir_id);
- let sig = typ.fn_sig(self.tcx);
- let from = sig.inputs().skip_binder()[0];
- let to = sig.output().skip_binder();
- self.check_transmute(expr.span, from, to);
- }
+ if let Res::Def(DefKind::Fn, did) = res
+ && self.def_id_is_transmute(did)
+ {
+ let typ = self.typeck_results.node_type(expr.hir_id);
+ let sig = typ.fn_sig(self.tcx);
+ let from = sig.inputs().skip_binder()[0];
+ let to = sig.output().skip_binder();
+ self.check_transmute(expr.span, from, to);
}
}
shorthands,
Applicability::MachineApplicable,
);
- err.emit()
+ err.emit();
},
);
} else {
non_shorthands,
Applicability::MachineApplicable,
);
- err.emit()
+ err.emit();
},
);
}
String::new(),
rustc_errors::Applicability::MachineApplicable,
)
- .emit()
+ .emit();
});
}
INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
item.hir_id(),
span,
- |lint| lint
+ |lint| {lint
.build("an `#[unstable]` annotation here has no effect")
.note("see issue #55436 <https://github.com/rust-lang/rust/issues/55436> for more information")
- .emit()
+ .emit();}
);
}
}
use rustc_ast::MacroDef;
use rustc_attr as attr;
use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::intern::Interned;
use rustc_errors::struct_span_err;
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
let tcx = self.def_id_visitor.tcx();
// InternalSubsts are not visited here because they are visited below in `super_visit_with`.
match *ty.kind() {
- ty::Adt(&ty::AdtDef { did: def_id, .. }, ..)
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), ..)
| ty::Foreign(def_id)
| ty::FnDef(def_id, ..)
| ty::Closure(def_id, ..)
&mut self,
use_ctxt: Span, // syntax context of the field name at the use site
span: Span, // span of the field pattern, e.g., `x: 0`
- def: &'tcx ty::AdtDef, // definition of the struct or enum
+ def: ty::AdtDef<'tcx>, // definition of the struct or enum
field: &'tcx ty::FieldDef,
in_update_syntax: bool,
) {
// definition of the field
let ident = Ident::new(kw::Empty, use_ctxt);
let hir_id = self.tcx.hir().local_def_id_to_hir_id(self.current_item);
- let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did, hir_id).1;
+ let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did(), hir_id).1;
if !field.vis.is_accessible_from(def_id, self.tcx) {
let label = if in_update_syntax {
format!("field `{}` is private", field.name)
"field `{}` of {} `{}` is private",
field.name,
def.variant_descr(),
- self.tcx.def_path_str(def.did)
+ self.tcx.def_path_str(def.did())
)
.span_label(span, label)
.emit();
descr,
self.tcx.crate_name(def_id.krate)
))
- .emit()
+ .emit();
},
);
}
lint::builtin::PRIVATE_IN_PUBLIC,
hir_id,
span,
- |lint| lint.build(&format!("{} (error {})", make_msg(), err_code)).emit(),
+ |lint| {
+ lint.build(&format!("{} (error {})", make_msg(), err_code)).emit();
+ },
);
}
}
[dependencies]
measureme = "10.0.0"
-rustc-rayon-core = "0.3.2"
+rustc-rayon-core = { version = "0.3.2", optional = true }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon-core", "rustc_query_system/rustc_use_parallel_compiler"]
[dependencies]
rustc_arena = { path = "../rustc_arena" }
tracing = "0.1"
-rustc-rayon-core = "0.3.2"
+rustc-rayon-core = { version = "0.3.2", optional = true }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_target = { path = "../rustc_target" }
parking_lot = "0.11"
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon-core"]
let handle = tcx.dep_context().sess().diagnostic();
- for diagnostic in side_effects.diagnostics {
- handle.emit_diagnostic(&diagnostic);
+ for mut diagnostic in side_effects.diagnostics {
+ handle.emit_diagnostic(&mut diagnostic);
}
}
}
use crate::imports::{Import, ImportKind};
use crate::macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
use crate::Namespace::{self, MacroNS, TypeNS, ValueNS};
-use crate::{CrateLint, Determinacy, ExternPreludeEntry, Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{Determinacy, ExternPreludeEntry, Finalize, Module, ModuleKind, ModuleOrUniformRoot};
use crate::{NameBinding, NameBindingKind, ParentScope, PathResult, PerNS, ResolutionError};
use crate::{Resolver, ResolverArenas, Segment, ToNameBinding, VisResolutionError};
impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
fn resolve_visibility(&mut self, vis: &ast::Visibility) -> ty::Visibility {
- self.resolve_visibility_speculative(vis, false).unwrap_or_else(|err| {
+ self.try_resolve_visibility(vis, true).unwrap_or_else(|err| {
self.r.report_vis_error(err);
ty::Visibility::Public
})
}
- fn resolve_visibility_speculative<'ast>(
+ fn try_resolve_visibility<'ast>(
&mut self,
vis: &'ast ast::Visibility,
- speculative: bool,
+ finalize: bool,
) -> Result<ty::Visibility, VisResolutionError<'ast>> {
let parent_scope = &self.parent_scope;
match vis.kind {
&segments,
Some(TypeNS),
parent_scope,
- !speculative,
- path.span,
- CrateLint::SimplePath(id),
+ if finalize { Finalize::SimplePath(id, path.span) } else { Finalize::No },
) {
PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
let res = module.res().expect("visibility resolved to unnamed block");
- if !speculative {
+ if finalize {
self.r.record_partial_res(id, PartialRes::new(res));
}
if module.is_normal() {
// correct visibilities for unnamed field placeholders specifically, so the
// constructor visibility should still be determined correctly.
let field_vis = self
- .resolve_visibility_speculative(&field.vis, true)
+ .try_resolve_visibility(&field.vis, false)
.unwrap_or(ty::Visibility::Public);
if ctor_vis.is_at_least(field_vis, &*self.r) {
ctor_vis = field_vis;
ident,
MacroNS,
&self.parent_scope,
- false,
- ident.span,
+ None,
);
if let Ok(binding) = result {
let import = macro_use_import(self, ident.span);
let vis = match item.kind {
// Visibilities must not be resolved non-speculatively twice
// and we already resolved this one as a `fn` item visibility.
- ItemKind::Fn(..) => self
- .resolve_visibility_speculative(&item.vis, true)
- .unwrap_or(ty::Visibility::Public),
+ ItemKind::Fn(..) => {
+ self.try_resolve_visibility(&item.vis, false).unwrap_or(ty::Visibility::Public)
+ }
_ => self.resolve_visibility(&item.vis),
};
if vis != ty::Visibility::Public {
use crate::imports::{Import, ImportKind, ImportResolver};
use crate::path_names_to_string;
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind};
-use crate::{
- BindingError, CrateLint, HasGenericParams, MacroRulesScope, Module, ModuleOrUniformRoot,
-};
-use crate::{NameBinding, NameBindingKind, PrivacyError, VisResolutionError};
+use crate::{BindingError, HasGenericParams, MacroRulesScope, Module, ModuleOrUniformRoot};
+use crate::{Finalize, NameBinding, NameBindingKind, PrivacyError, VisResolutionError};
use crate::{ParentScope, PathResult, ResolutionError, Resolver, Scope, ScopeSet, Segment};
type Res = def::Res<ast::NodeId>;
ident,
ScopeSet::All(ns, false),
&parent_scope,
+ None,
false,
- false,
- ident.span,
) {
let desc = match binding.res() {
Res::Def(DefKind::Macro(MacroKind::Bang), _) => {
_ => return None,
}
- self.make_missing_self_suggestion(span, path.clone(), parent_scope)
- .or_else(|| self.make_missing_crate_suggestion(span, path.clone(), parent_scope))
- .or_else(|| self.make_missing_super_suggestion(span, path.clone(), parent_scope))
- .or_else(|| self.make_external_crate_suggestion(span, path, parent_scope))
+ self.make_missing_self_suggestion(path.clone(), parent_scope)
+ .or_else(|| self.make_missing_crate_suggestion(path.clone(), parent_scope))
+ .or_else(|| self.make_missing_super_suggestion(path.clone(), parent_scope))
+ .or_else(|| self.make_external_crate_suggestion(path, parent_scope))
}
/// Suggest a missing `self::` if that resolves to an correct module.
/// ```
fn make_missing_self_suggestion(
&mut self,
- span: Span,
mut path: Vec<Segment>,
parent_scope: &ParentScope<'b>,
) -> Option<(Vec<Segment>, Vec<String>)> {
// Replace first ident with `self` and check if that is valid.
path[0].ident.name = kw::SelfLower;
- let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
+ let result = self.r.resolve_path(&path, None, parent_scope, Finalize::No);
debug!("make_missing_self_suggestion: path={:?} result={:?}", path, result);
if let PathResult::Module(..) = result { Some((path, Vec::new())) } else { None }
}
/// ```
fn make_missing_crate_suggestion(
&mut self,
- span: Span,
mut path: Vec<Segment>,
parent_scope: &ParentScope<'b>,
) -> Option<(Vec<Segment>, Vec<String>)> {
// Replace first ident with `crate` and check if that is valid.
path[0].ident.name = kw::Crate;
- let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
+ let result = self.r.resolve_path(&path, None, parent_scope, Finalize::No);
debug!("make_missing_crate_suggestion: path={:?} result={:?}", path, result);
if let PathResult::Module(..) = result {
Some((
/// ```
fn make_missing_super_suggestion(
&mut self,
- span: Span,
mut path: Vec<Segment>,
parent_scope: &ParentScope<'b>,
) -> Option<(Vec<Segment>, Vec<String>)> {
// Replace first ident with `crate` and check if that is valid.
path[0].ident.name = kw::Super;
- let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
+ let result = self.r.resolve_path(&path, None, parent_scope, Finalize::No);
debug!("make_missing_super_suggestion: path={:?} result={:?}", path, result);
if let PathResult::Module(..) = result { Some((path, Vec::new())) } else { None }
}
/// name as the first part of path.
fn make_external_crate_suggestion(
&mut self,
- span: Span,
mut path: Vec<Segment>,
parent_scope: &ParentScope<'b>,
) -> Option<(Vec<Segment>, Vec<String>)> {
for name in extern_crate_names.into_iter() {
// Replace first ident with a crate name and check if that is valid.
path[0].ident.name = name;
- let result = self.r.resolve_path(&path, None, parent_scope, false, span, CrateLint::No);
+ let result = self.r.resolve_path(&path, None, parent_scope, Finalize::No);
debug!(
"make_external_crate_suggestion: name={:?} path={:?} result={:?}",
name, path, result
use crate::{module_to_string, names_to_string};
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind};
use crate::{BindingKey, ModuleKind, ResolutionError, Resolver, Segment};
-use crate::{CrateLint, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet, Weak};
+use crate::{Finalize, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet, Weak};
use crate::{NameBinding, NameBindingKind, PathResult, PrivacyError, ToNameBinding};
use rustc_ast::NodeId;
_ => false,
}
}
-
- crate fn crate_lint(&self) -> CrateLint {
- CrateLint::UsePath { root_id: self.root_id, root_span: self.root_span }
- }
}
#[derive(Clone, Default, Debug)]
ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
- record_used: bool,
- path_span: Span,
+ finalize: Option<Span>,
) -> Result<&'a NameBinding<'a>, Determinacy> {
self.resolve_ident_in_module_unadjusted_ext(
module,
ns,
parent_scope,
false,
- record_used,
- path_span,
+ finalize,
)
.map_err(|(determinacy, _)| determinacy)
}
/// Attempts to resolve `ident` in namespaces `ns` of `module`.
- /// Invariant: if `record_used` is `Some`, expansion and import resolution must be complete.
+ /// Invariant: if `finalize` is `Some`, expansion and import resolution must be complete.
crate fn resolve_ident_in_module_unadjusted_ext(
&mut self,
module: ModuleOrUniformRoot<'a>,
ns: Namespace,
parent_scope: &ParentScope<'a>,
restricted_shadowing: bool,
- record_used: bool,
- path_span: Span,
+ finalize: Option<Span>,
) -> Result<&'a NameBinding<'a>, (Determinacy, Weak)> {
let module = match module {
ModuleOrUniformRoot::Module(module) => module,
ident,
ScopeSet::AbsolutePath(ns),
parent_scope,
- record_used,
- record_used,
- path_span,
+ finalize,
+ finalize.is_some(),
);
return binding.map_err(|determinacy| (determinacy, Weak::No));
}
assert!(!restricted_shadowing);
return if ns != TypeNS {
Err((Determined, Weak::No))
- } else if let Some(binding) = self.extern_prelude_get(ident, !record_used) {
+ } else if let Some(binding) = self.extern_prelude_get(ident, finalize.is_some()) {
Ok(binding)
} else if !self.graph_root.unexpanded_invocations.borrow().is_empty() {
// Macro-expanded `extern crate` items can add names to extern prelude.
ident,
scopes,
parent_scope,
- record_used,
- record_used,
- path_span,
+ finalize,
+ finalize.is_some(),
);
return binding.map_err(|determinacy| (determinacy, Weak::No));
}
let resolution =
self.resolution(module, key).try_borrow_mut().map_err(|_| (Determined, Weak::No))?; // This happens when there is a cycle of imports.
- if let Some(binding) = resolution.binding {
+ if let Some(binding) = resolution.binding && let Some(path_span) = finalize {
if !restricted_shadowing && binding.expansion != LocalExpnId::ROOT {
if let NameBindingKind::Res(_, true) = binding.kind {
self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
if usable { Ok(binding) } else { Err((Determined, Weak::No)) }
};
- if record_used {
+ if let Some(path_span) = finalize {
return resolution
.binding
.and_then(|binding| {
let ImportKind::Single { source: ident, .. } = single_import.kind else {
unreachable!();
};
- match self.resolve_ident_in_module(
- module,
- ident,
- ns,
- &single_import.parent_scope,
- false,
- path_span,
- ) {
+ match self.resolve_ident_in_module(module, ident, ns, &single_import.parent_scope, None)
+ {
Err(Determined) => continue,
Ok(binding)
if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) =>
ident,
ns,
adjusted_parent_scope,
- false,
- path_span,
+ None,
);
match result {
// For better failure detection, pretend that the import will
// not define any names while resolving its module path.
let orig_vis = import.vis.replace(ty::Visibility::Invisible);
- let path_res = self.r.resolve_path(
- &import.module_path,
- None,
- &import.parent_scope,
- false,
- import.span,
- import.crate_lint(),
- );
+ let path_res =
+ self.r.resolve_path(&import.module_path, None, &import.parent_scope, Finalize::No);
import.vis.set(orig_vis);
match path_res {
source,
ns,
&import.parent_scope,
- false,
- import.span,
+ None,
);
import.vis.set(orig_vis);
source_bindings[ns].set(binding);
_ => None,
};
let prev_ambiguity_errors_len = self.r.ambiguity_errors.len();
- let path_res = self.r.resolve_path(
- &import.module_path,
- None,
- &import.parent_scope,
- true,
- import.span,
- import.crate_lint(),
- );
+ let finalize = Finalize::UsePath {
+ root_id: import.root_id,
+ root_span: import.root_span,
+ path_span: import.span,
+ };
+ let path_res =
+ self.r.resolve_path(&import.module_path, None, &import.parent_scope, finalize);
let no_ambiguity = self.r.ambiguity_errors.len() == prev_ambiguity_errors_len;
if let Some(orig_unusable_binding) = orig_unusable_binding {
self.r.unusable_binding = orig_unusable_binding;
// 2 segments, so the `resolve_path` above won't trigger it.
let mut full_path = import.module_path.clone();
full_path.push(Segment::from_ident(Ident::empty()));
- self.r.lint_if_path_starts_with_module(
- import.crate_lint(),
- &full_path,
- import.span,
- None,
- );
+ self.r.lint_if_path_starts_with_module(finalize, &full_path, None);
}
if let ModuleOrUniformRoot::Module(module) = module {
ident,
ns,
&import.parent_scope,
- true,
- import.span,
+ Some(import.span),
);
this.last_import_segment = orig_last_import_segment;
this.unusable_binding = orig_unusable_binding;
ident,
ns,
&import.parent_scope,
- true,
- import.span,
+ Some(import.span),
);
if binding.is_ok() {
all_ns_failed = false;
full_path.push(Segment::from_ident(ident));
self.r.per_ns(|this, ns| {
if let Ok(binding) = source_bindings[ns].get() {
- this.lint_if_path_starts_with_module(
- import.crate_lint(),
- &full_path,
- import.span,
- Some(binding),
- );
+ this.lint_if_path_starts_with_module(finalize, &full_path, Some(binding));
}
});
}
target,
ScopeSet::All(ns, false),
&import.parent_scope,
+ None,
false,
- false,
- import.span,
) {
Ok(other_binding) => {
is_redundant[ns] = Some(
use RibKind::*;
-use crate::{path_names_to_string, BindingError, CrateLint, LexicalScopeBinding};
+use crate::{path_names_to_string, BindingError, Finalize, LexicalScopeBinding};
use crate::{Module, ModuleOrUniformRoot, ParentScope, PathResult};
use crate::{ResolutionError, Resolver, Segment, UseError};
TyKind::ImplicitSelf => {
let self_ty = Ident::with_dummy_span(kw::SelfUpper);
let res = self
- .resolve_ident_in_lexical_scope(self_ty, TypeNS, Some(ty.id), ty.span)
+ .resolve_ident_in_lexical_scope(
+ self_ty,
+ TypeNS,
+ Finalize::SimplePath(ty.id, ty.span),
+ )
.map_or(Res::Err, |d| d.res());
self.r.record_partial_res(ty.id, PartialRes::new(res));
}
self.resolve_ident_in_lexical_scope(
path.segments[0].ident,
ns,
- None,
- path.span,
+ Finalize::No,
)
.is_some()
};
&mut self,
ident: Ident,
ns: Namespace,
- record_used_id: Option<NodeId>,
- path_span: Span,
+ finalize: Finalize,
) -> Option<LexicalScopeBinding<'a>> {
self.r.resolve_ident_in_lexical_scope(
ident,
ns,
&self.parent_scope,
- record_used_id,
- path_span,
+ finalize,
&self.ribs[ns],
)
}
&mut self,
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
- record_used: bool,
- path_span: Span,
- crate_lint: CrateLint,
+ finalize: Finalize,
) -> PathResult<'a> {
- self.r.resolve_path_with_ribs(
- path,
- opt_ns,
- &self.parent_scope,
- record_used,
- path_span,
- crate_lint,
- Some(&self.ribs),
- )
+ self.r.resolve_path_with_ribs(path, opt_ns, &self.parent_scope, finalize, Some(&self.ribs))
}
// AST resolution
};
for &ns in nss {
- match self.resolve_ident_in_lexical_scope(ident, ns, None, use_tree.prefix.span) {
+ match self.resolve_ident_in_lexical_scope(ident, ns, Finalize::No) {
Some(LexicalScopeBinding::Res(..)) => {
report_error(self, ns);
}
Some(LexicalScopeBinding::Item(binding)) => {
let orig_unusable_binding =
replace(&mut self.r.unusable_binding, Some(binding));
- if let Some(LexicalScopeBinding::Res(..)) = self
- .resolve_ident_in_lexical_scope(ident, ns, None, use_tree.prefix.span)
+ if let Some(LexicalScopeBinding::Res(..)) =
+ self.resolve_ident_in_lexical_scope(ident, ns, Finalize::No)
{
report_error(self, ns);
}
}
for param in &generics.params {
- if let GenericParamKind::Lifetime { .. } = param.kind {
+ if let GenericParamKind::Lifetime = param.kind {
continue;
}
if let Some(trait_ref) = opt_trait_ref {
let path: Vec<_> = Segment::from_path(&trait_ref.path);
let res = self.smart_resolve_path_fragment(
- trait_ref.ref_id,
None,
&path,
- trait_ref.path.span,
PathSource::Trait(AliasPossibility::No),
- CrateLint::SimplePath(trait_ref.ref_id),
+ Finalize::SimplePath(trait_ref.ref_id, trait_ref.path.span),
);
- let res = res.base_res();
- if res != Res::Err {
- if let PathResult::Module(ModuleOrUniformRoot::Module(module)) = self.resolve_path(
- &path,
- Some(TypeNS),
- true,
- trait_ref.path.span,
- CrateLint::SimplePath(trait_ref.ref_id),
- ) {
- new_id = Some(res.def_id());
- new_val = Some((module, trait_ref.clone()));
- }
+ if let Some(def_id) = res.base_res().opt_def_id() {
+ new_id = Some(def_id);
+ new_val = Some((self.r.expect_module(def_id), trait_ref.clone()));
}
}
let original_trait_ref = replace(&mut self.current_trait_ref, new_val);
// then fall back to a fresh binding.
let has_sub = sub.is_some();
let res = self
- .try_resolve_as_non_binding(pat_src, pat, bmode, ident, has_sub)
+ .try_resolve_as_non_binding(pat_src, bmode, ident, has_sub)
.unwrap_or_else(|| self.fresh_binding(ident, pat.id, pat_src, bindings));
self.r.record_partial_res(pat.id, PartialRes::new(res));
self.r.record_pat_span(pat.id, pat.span);
fn try_resolve_as_non_binding(
&mut self,
pat_src: PatternSource,
- pat: &Pat,
bm: BindingMode,
ident: Ident,
has_sub: bool,
// also be interpreted as a path to e.g. a constant, variant, etc.
let is_syntactic_ambiguity = !has_sub && bm == BindingMode::ByValue(Mutability::Not);
- let ls_binding = self.resolve_ident_in_lexical_scope(ident, ValueNS, None, pat.span)?;
+ let ls_binding = self.resolve_ident_in_lexical_scope(ident, ValueNS, Finalize::No)?;
let (res, binding) = match ls_binding {
LexicalScopeBinding::Item(binding)
if is_syntactic_ambiguity && binding.is_ambiguity() =>
source: PathSource<'ast>,
) {
self.smart_resolve_path_fragment(
- id,
qself,
&Segment::from_path(path),
- path.span,
source,
- CrateLint::SimplePath(id),
+ Finalize::SimplePath(id, path.span),
);
}
fn smart_resolve_path_fragment(
&mut self,
- id: NodeId,
qself: Option<&QSelf>,
path: &[Segment],
- span: Span,
source: PathSource<'ast>,
- crate_lint: CrateLint,
+ finalize: Finalize,
) -> PartialRes {
tracing::debug!(
- "smart_resolve_path_fragment(id={:?}, qself={:?}, path={:?})",
- id,
+ "smart_resolve_path_fragment(qself={:?}, path={:?}, finalize={:?})",
qself,
- path
+ path,
+ finalize,
);
let ns = source.namespace();
+ let (id, path_span) =
+ finalize.node_id_and_path_span().expect("unexpected speculative resolution");
let report_errors = |this: &mut Self, res: Option<Res>| {
if this.should_report_errs() {
- let (err, candidates) = this.smart_resolve_report_errors(path, span, source, res);
+ let (err, candidates) =
+ this.smart_resolve_report_errors(path, path_span, source, res);
let def_id = this.parent_scope.module.nearest_parent_mod();
let instead = res.is_some();
};
let (mut err, candidates) =
- this.smart_resolve_report_errors(path, span, PathSource::Type, None);
+ this.smart_resolve_report_errors(path, path_span, PathSource::Type, None);
if candidates.is_empty() {
err.cancel();
};
let partial_res = match self.resolve_qpath_anywhere(
- id,
qself,
path,
ns,
- span,
+ path_span,
source.defer_to_typeck(),
- crate_lint,
+ finalize,
) {
Ok(Some(partial_res)) if partial_res.unresolved_segments() == 0 => {
if source.is_expected(partial_res.base_res()) || partial_res.base_res() == Res::Err
std_path.push(Segment::from_ident(Ident::with_dummy_span(sym::std)));
std_path.extend(path);
if let PathResult::Module(_) | PathResult::NonModule(_) =
- self.resolve_path(&std_path, Some(ns), false, span, CrateLint::No)
+ self.resolve_path(&std_path, Some(ns), Finalize::No)
{
// Check if we wrote `str::from_utf8` instead of `std::str::from_utf8`
let item_span =
- path.iter().last().map_or(span, |segment| segment.ident.span);
+ path.iter().last().map_or(path_span, |segment| segment.ident.span);
- self.r.confused_type_with_std_module.insert(item_span, span);
- self.r.confused_type_with_std_module.insert(span, span);
+ self.r.confused_type_with_std_module.insert(item_span, path_span);
+ self.r.confused_type_with_std_module.insert(path_span, path_span);
}
}
partial_res
}
- fn self_type_is_available(&mut self, span: Span) -> bool {
+ fn self_type_is_available(&mut self) -> bool {
let binding = self.resolve_ident_in_lexical_scope(
Ident::with_dummy_span(kw::SelfUpper),
TypeNS,
- None,
- span,
+ Finalize::No,
);
if let Some(LexicalScopeBinding::Res(res)) = binding { res != Res::Err } else { false }
}
- fn self_value_is_available(&mut self, self_span: Span, path_span: Span) -> bool {
+ fn self_value_is_available(&mut self, self_span: Span) -> bool {
let ident = Ident::new(kw::SelfLower, self_span);
- let binding = self.resolve_ident_in_lexical_scope(ident, ValueNS, None, path_span);
+ let binding = self.resolve_ident_in_lexical_scope(ident, ValueNS, Finalize::No);
if let Some(LexicalScopeBinding::Res(res)) = binding { res != Res::Err } else { false }
}
// Resolve in alternative namespaces if resolution in the primary namespace fails.
fn resolve_qpath_anywhere(
&mut self,
- id: NodeId,
qself: Option<&QSelf>,
path: &[Segment],
primary_ns: Namespace,
span: Span,
defer_to_typeck: bool,
- crate_lint: CrateLint,
+ finalize: Finalize,
) -> Result<Option<PartialRes>, Spanned<ResolutionError<'a>>> {
let mut fin_res = None;
for (i, &ns) in [primary_ns, TypeNS, ValueNS].iter().enumerate() {
if i == 0 || ns != primary_ns {
- match self.resolve_qpath(id, qself, path, ns, span, crate_lint)? {
+ match self.resolve_qpath(qself, path, ns, finalize)? {
Some(partial_res)
if partial_res.unresolved_segments() == 0 || defer_to_typeck =>
{
/// Handles paths that may refer to associated items.
fn resolve_qpath(
&mut self,
- id: NodeId,
qself: Option<&QSelf>,
path: &[Segment],
ns: Namespace,
- span: Span,
- crate_lint: CrateLint,
+ finalize: Finalize,
) -> Result<Option<PartialRes>, Spanned<ResolutionError<'a>>> {
debug!(
- "resolve_qpath(id={:?}, qself={:?}, path={:?}, ns={:?}, span={:?})",
- id, qself, path, ns, span,
+ "resolve_qpath(qself={:?}, path={:?}, ns={:?}, finalize={:?})",
+ qself, path, ns, finalize,
);
if let Some(qself) = qself {
// *actually* appears, so for the purposes of the crate
// lint we pass along information that this is the trait
// name from a fully qualified path, and this also
- // contains the full span (the `CrateLint::QPathTrait`).
+ // contains the full span (the `Finalize::QPathTrait`).
let ns = if qself.position + 1 == path.len() { ns } else { TypeNS };
let partial_res = self.smart_resolve_path_fragment(
- id,
None,
&path[..=qself.position],
- span,
PathSource::TraitItem(ns),
- CrateLint::QPathTrait { qpath_id: id, qpath_span: qself.path_span },
+ finalize.node_id_and_path_span().map_or(Finalize::No, |(qpath_id, path_span)| {
+ Finalize::QPathTrait { qpath_id, qpath_span: qself.path_span, path_span }
+ }),
);
// The remaining segments (the `C` in our example) will
)));
}
- let result = match self.resolve_path(&path, Some(ns), true, span, crate_lint) {
+ let result = match self.resolve_path(&path, Some(ns), finalize) {
PathResult::NonModule(path_res) => path_res,
PathResult::Module(ModuleOrUniformRoot::Module(module)) if !module.is_normal() => {
PartialRes::new(module.res().unwrap())
&& result.base_res() != Res::Err
&& path[0].ident.name != kw::PathRoot
&& path[0].ident.name != kw::DollarCrate
+ && let Some((id, path_span)) = finalize.node_id_and_path_span()
{
let unqualified_result = {
- match self.resolve_path(
- &[*path.last().unwrap()],
- Some(ns),
- false,
- span,
- CrateLint::No,
- ) {
+ match self.resolve_path(&[*path.last().unwrap()], Some(ns), Finalize::No) {
PathResult::NonModule(path_res) => path_res.base_res(),
PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
module.res().unwrap()
};
if result.base_res() == unqualified_result {
let lint = lint::builtin::UNUSED_QUALIFICATIONS;
- self.r.lint_buffer.buffer_lint(lint, id, span, "unnecessary qualification")
+ self.r.lint_buffer.buffer_lint(lint, id, path_span, "unnecessary qualification")
}
}
use crate::late::lifetimes::{ElisionFailureInfo, LifetimeContext};
use crate::late::{AliasPossibility, LateResolutionVisitor, RibKind};
use crate::path_names_to_string;
-use crate::{CrateLint, Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{Finalize, Module, ModuleKind, ModuleOrUniformRoot};
use crate::{PathResult, PathSource, Segment};
use rustc_ast::visit::FnKind;
(String::new(), "the crate root".to_string())
} else {
let mod_path = &path[..path.len() - 1];
- let mod_prefix =
- match self.resolve_path(mod_path, Some(TypeNS), false, span, CrateLint::No) {
- PathResult::Module(ModuleOrUniformRoot::Module(module)) => module.res(),
- _ => None,
- }
- .map_or_else(String::new, |res| format!("{} ", res.descr()));
+ let mod_prefix = match self.resolve_path(mod_path, Some(TypeNS), Finalize::No) {
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => module.res(),
+ _ => None,
+ }
+ .map_or_else(String::new, |res| format!("{} ", res.descr()));
(mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)))
};
(
_ => {}
}
- let is_assoc_fn = self.self_type_is_available(span);
+ let is_assoc_fn = self.self_type_is_available();
// Emit help message for fake-self from other languages (e.g., `this` in Javascript).
if ["this", "my"].contains(&item_str.as_str()) && is_assoc_fn {
err.span_suggestion_short(
"self".to_string(),
Applicability::MaybeIncorrect,
);
- if !self.self_value_is_available(path[0].ident.span, span) {
+ if !self.self_value_is_available(path[0].ident.span) {
if let Some((FnKind::Fn(_, _, sig, ..), fn_span)) =
&self.diagnostic_metadata.current_function
{
);
}
}
- if path.len() == 1 && self.self_type_is_available(span) {
+ if path.len() == 1 && self.self_type_is_available() {
if let Some(candidate) = self.lookup_assoc_candidate(ident, ns, is_expected) {
- let self_is_available = self.self_value_is_available(path[0].ident.span, span);
+ let self_is_available = self.self_value_is_available(path[0].ident.span);
match candidate {
AssocSuggestion::Field => {
if self_is_available {
}
// Try Levenshtein algorithm.
- let typo_sugg = self.lookup_typo_candidate(path, ns, is_expected, span);
+ let typo_sugg = self.lookup_typo_candidate(path, ns, is_expected);
// Try context-dependent help if relaxed lookup didn't work.
if let Some(res) = res {
if self.smart_resolve_context_dependent_help(
for sp in spans {
let msg = if sp == last_bound_span {
format!(
- "...because of {} bound{}",
- if bounds.len() <= 2 { "this" } else { "these" },
- if bounds.len() <= 2 { "" } else { "s" },
+ "...because of {these} bound{s}",
+ these = pluralize!("this", bounds.len() - 1),
+ s = pluralize!(bounds.len() - 1),
)
} else {
String::new()
}
// If the trait has a single item (which wasn't matched by Levenshtein), suggest it
- let suggestion = self.get_single_associated_item(&path, span, &source, is_expected);
+ let suggestion = self.get_single_associated_item(&path, &source, is_expected);
self.r.add_typo_suggestion(&mut err, suggestion, ident_span);
}
if fallback {
fn get_single_associated_item(
&mut self,
path: &[Segment],
- span: Span,
source: &PathSource<'_>,
filter_fn: &impl Fn(Res) -> bool,
) -> Option<TypoSuggestion> {
if let crate::PathSource::TraitItem(_) = source {
let mod_path = &path[..path.len() - 1];
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
- self.resolve_path(mod_path, None, false, span, CrateLint::No)
+ self.resolve_path(mod_path, None, Finalize::No)
{
let resolutions = self.r.resolutions(module).borrow();
let targets: Vec<_> =
) = &bounded_ty.kind
{
// use this to verify that ident is a type param.
- let Ok(Some(partial_res)) = self.resolve_qpath_anywhere(
- bounded_ty.id,
- None,
- &Segment::from_path(path),
- Namespace::TypeNS,
- span,
- true,
- CrateLint::No,
- ) else {
+ let Some(partial_res) = self.r.partial_res_map.get(&bounded_ty.id) else {
return false;
};
if !(matches!(
return false;
};
- if let ast::TyKind::Path(None, type_param_path) = &ty.peel_refs().kind {
+ let peeled_ty = ty.peel_refs();
+ if let ast::TyKind::Path(None, type_param_path) = &peeled_ty.kind {
// Confirm that the `SelfTy` is a type parameter.
- let Ok(Some(partial_res)) = self.resolve_qpath_anywhere(
- bounded_ty.id,
- None,
- &Segment::from_path(type_param_path),
- Namespace::TypeNS,
- span,
- true,
- CrateLint::No,
- ) else {
+ let Some(partial_res) = self.r.partial_res_map.get(&peeled_ty.id) else {
return false;
};
if !(matches!(
ident,
ns,
&self.parent_scope,
- false,
- module.span,
+ None,
) {
let res = binding.res();
if filter_fn(res) {
path: &[Segment],
ns: Namespace,
filter_fn: &impl Fn(Res) -> bool,
- span: Span,
) -> Option<TypoSuggestion> {
let mut names = Vec::new();
if path.len() == 1 {
// Search in module.
let mod_path = &path[..path.len() - 1];
if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
- self.resolve_path(mod_path, Some(TypeNS), false, span, CrateLint::No)
+ self.resolve_path(mod_path, Some(TypeNS), Finalize::No)
{
self.r.add_module_candidates(module, &mut names, &filter_fn);
}
Vec::new(),
&[],
);
- db.emit()
+ db.emit();
},
);
}
+// ignore-tidy-filelength
//! Name resolution for lifetimes.
//!
//! Name resolution for lifetimes follows *much* simpler rules than the
hir_id: hir::HirId,
s: ScopeRef<'a>,
+
+ /// In some cases not allowing late bounds allows us to avoid ICEs.
+ /// This is almost always set to true.
+ allow_late_bound: bool,
},
/// Lifetimes introduced by a fn are scoped to the call-site for that fn,
scope_type,
hir_id,
s: _,
+ allow_late_bound,
} => f
.debug_struct("Binder")
.field("lifetimes", lifetimes)
.field("scope_type", scope_type)
.field("hir_id", hir_id)
.field("s", &"..")
+ .field("allow_late_bound", allow_late_bound)
.finish(),
Scope::Body { id, s: _ } => {
f.debug_struct("Body").field("id", id).field("s", &"..").finish()
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: true,
};
self.with(scope, move |_old_scope, this| {
intravisit::walk_fn(this, fk, fd, b, s, hir_id)
track_lifetime_uses,
scope_type: BinderScopeType::Normal,
s: ROOT_SCOPE,
+ allow_late_bound: false,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: true,
};
self.with(scope, |old_scope, this| {
// a bare fn has no bounds, so everything
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: false,
};
this.with(scope, |_old_scope, this| {
this.visit_generics(generics);
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: false,
};
self.with(scope, |_old_scope, this| {
let scope = Scope::TraitRefBoundary { s: this.scope };
track_lifetime_uses: true,
opaque_type_parent: true,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: false,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
track_lifetime_uses: true,
opaque_type_parent: true,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: true,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: true,
};
this.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &bound_generic_params);
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type,
+ allow_late_bound: true,
};
self.with(scope, |_, this| {
intravisit::walk_param_bound(this, bound);
track_lifetime_uses: true,
opaque_type_parent: false,
scope_type,
+ allow_late_bound: true,
};
self.with(scope, |old_scope, this| {
this.check_lifetime_params(old_scope, &trait_ref.bound_generic_params);
if !matches!(opaque.origin, hir::OpaqueTyOrigin::AsyncFn(..)) {
continue 'lifetimes;
}
- // We want to do this only if the liftime identifier is already defined
+ // We want to do this only if the lifetime identifier is already defined
// in the async function that generated this. Otherwise it could be
// an opaque type defined by the developer and we still want this
// lint to fail compilation
opaque_type_parent: true,
track_lifetime_uses: false,
scope_type: BinderScopeType::Normal,
+ allow_late_bound: true,
};
self.with(scope, move |old_scope, this| {
this.check_lifetime_params(old_scope, &generics.params);
let mut scope = &*self.scope;
let hir_id = loop {
match scope {
- Scope::Binder { hir_id, .. } => {
+ Scope::Binder { hir_id, allow_late_bound: true, .. } => {
break *hir_id;
}
Scope::ObjectLifetimeDefault { ref s, .. }
| Scope::TraitRefBoundary { ref s, .. } => {
scope = *s;
}
- Scope::Root | Scope::Body { .. } => {
+ Scope::Root
+ | Scope::Body { .. }
+ | Scope::Binder { allow_late_bound: false, .. } => {
// See issues #83907 and #83693. Just bail out from looking inside.
+ // See issue #95023 for why late bound lifetimes are not allowed here.
self.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
"In fn_like_elision without appropriate scope above",
for bound in lifetime_i.bounds {
match bound {
hir::GenericBound::Outlives(ref lt) => match lt.name {
- hir::LifetimeName::Underscore => self.tcx.sess.delay_span_bug(
- lt.span,
- "use of `'_` in illegal place, but not caught by lowering",
- ),
+ hir::LifetimeName::Underscore => {
+ self.tcx.sess.delay_span_bug(
+ lt.span,
+ "use of `'_` in illegal place, but not caught by lowering",
+ );
+ }
hir::LifetimeName::Static => {
self.insert_lifetime(lt, Region::Static);
self.tcx
lt.span,
"lowering generated `ImplicitObjectLifetimeDefault` \
outside of an object type",
- )
+ );
}
hir::LifetimeName::Error => {
// No need to do anything, error already reported.
#![feature(drain_filter)]
#![feature(bool_to_option)]
#![feature(crate_visibility_modifier)]
+#![feature(let_chains)]
#![feature(let_else)]
#![feature(never_type)]
#![feature(nll)]
use rustc_metadata::creader::{CStore, CrateLoader};
use rustc_middle::metadata::ModChild;
use rustc_middle::middle::privacy::AccessLevels;
-use rustc_middle::span_bug;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, DefIdTree, MainDefinition, RegisteredTools, ResolverOutputs};
+use rustc_middle::{bug, span_bug};
use rustc_query_system::ich::StableHashingContext;
use rustc_session::cstore::{CrateStore, MetadataLoaderDyn};
use rustc_session::lint;
use smallvec::{smallvec, SmallVec};
use std::cell::{Cell, RefCell};
use std::collections::BTreeSet;
-use std::ops::ControlFlow;
-use std::{cmp, fmt, iter, mem, ptr};
+use std::{cmp, fmt, mem, ptr};
use tracing::debug;
use diagnostics::{extend_span_to_previous_binding, find_span_of_binding_until_next_binding};
}
}
+#[derive(Debug)]
struct UsePlacementFinder {
target_module: NodeId,
- span: Option<Span>,
- found_use: bool,
+ first_legal_span: Option<Span>,
+ first_use_span: Option<Span>,
}
impl UsePlacementFinder {
fn check(krate: &Crate, target_module: NodeId) -> (Option<Span>, bool) {
- let mut finder = UsePlacementFinder { target_module, span: None, found_use: false };
- if let ControlFlow::Continue(..) = finder.check_mod(&krate.items, CRATE_NODE_ID) {
- visit::walk_crate(&mut finder, krate);
- }
- (finder.span, finder.found_use)
- }
-
- fn check_mod(&mut self, items: &[P<ast::Item>], node_id: NodeId) -> ControlFlow<()> {
- if self.span.is_some() {
- return ControlFlow::Break(());
- }
- if node_id != self.target_module {
- return ControlFlow::Continue(());
- }
- // find a use statement
- for item in items {
- match item.kind {
- ItemKind::Use(..) => {
- // don't suggest placing a use before the prelude
- // import or other generated ones
- if !item.span.from_expansion() {
- self.span = Some(item.span.shrink_to_lo());
- self.found_use = true;
- return ControlFlow::Break(());
- }
- }
- // don't place use before extern crate
- ItemKind::ExternCrate(_) => {}
- // but place them before the first other item
- _ => {
- if self.span.map_or(true, |span| item.span < span)
- && !item.span.from_expansion()
- {
- self.span = Some(item.span.shrink_to_lo());
- // don't insert between attributes and an item
- // find the first attribute on the item
- // FIXME: This is broken for active attributes.
- for attr in &item.attrs {
- if !attr.span.is_dummy()
- && self.span.map_or(true, |span| attr.span < span)
- {
- self.span = Some(attr.span.shrink_to_lo());
- }
- }
- }
- }
+ let mut finder =
+ UsePlacementFinder { target_module, first_legal_span: None, first_use_span: None };
+ finder.visit_crate(krate);
+ if let Some(use_span) = finder.first_use_span {
+ (Some(use_span), true)
+ } else {
+ (finder.first_legal_span, false)
+ }
+ }
+}
+
+fn is_span_suitable_for_use_injection(s: Span) -> bool {
+ // don't suggest placing a use before the prelude
+ // import or other generated ones
+ !s.from_expansion()
+}
+
+fn search_for_any_use_in_items(items: &[P<ast::Item>]) -> Option<Span> {
+ for item in items {
+ if let ItemKind::Use(..) = item.kind {
+ if is_span_suitable_for_use_injection(item.span) {
+ return Some(item.span.shrink_to_lo());
}
}
- ControlFlow::Continue(())
}
+ return None;
}
impl<'tcx> Visitor<'tcx> for UsePlacementFinder {
+ fn visit_crate(&mut self, c: &Crate) {
+ if self.target_module == CRATE_NODE_ID {
+ let inject = c.spans.inject_use_span;
+ if is_span_suitable_for_use_injection(inject) {
+ self.first_legal_span = Some(inject);
+ }
+ self.first_use_span = search_for_any_use_in_items(&c.items);
+ return;
+ } else {
+ visit::walk_crate(self, c);
+ }
+ }
+
fn visit_item(&mut self, item: &'tcx ast::Item) {
- if let ItemKind::Mod(_, ModKind::Loaded(items, ..)) = &item.kind {
- if let ControlFlow::Break(..) = self.check_mod(items, item.id) {
+ if self.target_module == item.id {
+ if let ItemKind::Mod(_, ModKind::Loaded(items, _inline, mod_spans)) = &item.kind {
+ let inject = mod_spans.inject_use_span;
+ if is_span_suitable_for_use_injection(inject) {
+ self.first_legal_span = Some(inject);
+ }
+ self.first_use_span = search_for_any_use_in_items(items);
return;
}
+ } else {
+ visit::walk_item(self, item);
}
- visit::walk_item(self, item);
}
}
},
}
+impl<'a> PathResult<'a> {
+ fn failed(
+ span: Span,
+ is_error_from_last_segment: bool,
+ finalize: bool,
+ label_and_suggestion: impl FnOnce() -> (String, Option<Suggestion>),
+ ) -> PathResult<'a> {
+ let (label, suggestion) =
+ if finalize { label_and_suggestion() } else { (String::new(), None) };
+ PathResult::Failed { span, label, suggestion, is_error_from_last_segment }
+ }
+}
+
#[derive(Debug)]
enum ModuleKind {
/// An anonymous module; e.g., just a block.
None,
ModuleKind::Def(DefKind::Mod, root_def_id, kw::Empty),
ExpnId::root(),
- krate.span,
+ krate.spans.inner_span,
session.contains_name(&krate.attrs, sym::no_implicit_prelude),
&mut module_map,
);
&mut FxHashMap::default(),
);
- let definitions = Definitions::new(session.local_stable_crate_id(), krate.span);
+ let definitions = Definitions::new(session.local_stable_crate_id(), krate.spans.inner_span);
let root = definitions.get_root_def();
let mut visibilities = FxHashMap::default();
mut ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
- record_used_id: Option<NodeId>,
- path_span: Span,
+ finalize_full: Finalize,
ribs: &[Rib<'a>],
) -> Option<LexicalScopeBinding<'a>> {
assert!(ns == TypeNS || ns == ValueNS);
let normalized_ident = Ident { span: normalized_span, ..ident };
// Walk backwards up the ribs in scope.
- let record_used = record_used_id.is_some();
+ let finalize = finalize_full.path_span();
let mut module = self.graph_root;
for i in (0..ribs.len()).rev() {
debug!("walk rib\n{:?}", ribs[i].bindings);
i,
rib_ident,
*res,
- record_used,
- path_span,
+ finalize,
*original_rib_ident_def,
ribs,
)));
ident,
ns,
parent_scope,
- record_used,
- path_span,
+ finalize,
);
if let Ok(binding) = item {
// The ident resolves to an item.
}
self.early_resolve_ident_in_lexical_scope(
orig_ident,
- ScopeSet::Late(ns, module, record_used_id),
+ ScopeSet::Late(ns, module, finalize_full.node_id()),
parent_scope,
- record_used,
- record_used,
- path_span,
+ finalize,
+ finalize.is_some(),
)
.ok()
.map(LexicalScopeBinding::Item)
ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
- record_used: bool,
- path_span: Span,
+ finalize: Option<Span>,
) -> Result<&'a NameBinding<'a>, Determinacy> {
- self.resolve_ident_in_module_ext(module, ident, ns, parent_scope, record_used, path_span)
+ self.resolve_ident_in_module_ext(module, ident, ns, parent_scope, finalize)
.map_err(|(determinacy, _)| determinacy)
}
mut ident: Ident,
ns: Namespace,
parent_scope: &ParentScope<'a>,
- record_used: bool,
- path_span: Span,
+ finalize: Option<Span>,
) -> Result<&'a NameBinding<'a>, (Determinacy, Weak)> {
let tmp_parent_scope;
let mut adjusted_parent_scope = parent_scope;
ns,
adjusted_parent_scope,
false,
- record_used,
- path_span,
+ finalize,
)
}
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
parent_scope: &ParentScope<'a>,
- record_used: bool,
- path_span: Span,
- crate_lint: CrateLint,
+ finalize: Finalize,
) -> PathResult<'a> {
- self.resolve_path_with_ribs(
- path,
- opt_ns,
- parent_scope,
- record_used,
- path_span,
- crate_lint,
- None,
- )
+ self.resolve_path_with_ribs(path, opt_ns, parent_scope, finalize, None)
}
fn resolve_path_with_ribs(
path: &[Segment],
opt_ns: Option<Namespace>, // `None` indicates a module path in import
parent_scope: &ParentScope<'a>,
- record_used: bool,
- path_span: Span,
- crate_lint: CrateLint,
+ finalize_full: Finalize,
ribs: Option<&PerNS<Vec<Rib<'a>>>>,
) -> PathResult<'a> {
+ debug!("resolve_path(path={:?}, opt_ns={:?}, finalize={:?})", path, opt_ns, finalize_full);
+
+ let finalize = finalize_full.path_span();
let mut module = None;
let mut allow_super = true;
let mut second_binding = None;
- debug!(
- "resolve_path(path={:?}, opt_ns={:?}, record_used={:?}, \
- path_span={:?}, crate_lint={:?})",
- path, opt_ns, record_used, path_span, crate_lint,
- );
-
for (i, &Segment { ident, id, has_generic_args: _ }) in path.iter().enumerate() {
debug!("resolve_path ident {} {:?} {:?}", i, ident, id);
let record_segment_res = |this: &mut Self, res| {
- if record_used {
+ if finalize.is_some() {
if let Some(id) = id {
if !this.partial_res_map.contains_key(&id) {
assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
continue;
}
}
- let msg = "there are too many leading `super` keywords".to_string();
- return PathResult::Failed {
- span: ident.span,
- label: msg,
- suggestion: None,
- is_error_from_last_segment: false,
- };
+ return PathResult::failed(ident.span, false, finalize.is_some(), || {
+ ("there are too many leading `super` keywords".to_string(), None)
+ });
}
if i == 0 {
if name == kw::SelfLower {
// Report special messages for path segment keywords in wrong positions.
if ident.is_path_segment_keyword() && i != 0 {
- let name_str = if name == kw::PathRoot {
- "crate root".to_string()
- } else {
- format!("`{}`", name)
- };
- let label = if i == 1 && path[0].ident.name == kw::PathRoot {
- format!("global paths cannot start with {}", name_str)
- } else {
- format!("{} in paths can only be used in start position", name_str)
- };
- return PathResult::Failed {
- span: ident.span,
- label,
- suggestion: None,
- is_error_from_last_segment: false,
- };
+ return PathResult::failed(ident.span, false, finalize.is_some(), || {
+ let name_str = if name == kw::PathRoot {
+ "crate root".to_string()
+ } else {
+ format!("`{}`", name)
+ };
+ let label = if i == 1 && path[0].ident.name == kw::PathRoot {
+ format!("global paths cannot start with {}", name_str)
+ } else {
+ format!("{} in paths can only be used in start position", name_str)
+ };
+ (label, None)
+ });
}
enum FindBindingResult<'a> {
}
let find_binding_in_ns = |this: &mut Self, ns| {
let binding = if let Some(module) = module {
- this.resolve_ident_in_module(
- module,
- ident,
- ns,
- parent_scope,
- record_used,
- path_span,
- )
+ this.resolve_ident_in_module(module, ident, ns, parent_scope, finalize)
} else if ribs.is_none() || opt_ns.is_none() || opt_ns == Some(MacroNS) {
let scopes = ScopeSet::All(ns, opt_ns.is_none());
this.early_resolve_ident_in_lexical_scope(
ident,
scopes,
parent_scope,
- record_used,
- record_used,
- path_span,
+ finalize,
+ finalize.is_some(),
)
} else {
- let record_used_id = if record_used {
- crate_lint.node_id().or(Some(CRATE_NODE_ID))
- } else {
- None
- };
match this.resolve_ident_in_lexical_scope(
ident,
ns,
parent_scope,
- record_used_id,
- path_span,
+ finalize_full,
&ribs.unwrap()[ns],
) {
// we found a locally-imported or available item/module
PartialRes::with_unresolved_segments(res, path.len() - 1),
));
}
- _ => Err(Determinacy::determined(record_used)),
+ _ => Err(Determinacy::determined(finalize.is_some())),
}
};
FindBindingResult::Binding(binding)
} else if res == Res::Err {
return PathResult::NonModule(PartialRes::new(Res::Err));
} else if opt_ns.is_some() && (is_last || maybe_assoc) {
- self.lint_if_path_starts_with_module(
- crate_lint,
- path,
- path_span,
- second_binding,
- );
+ self.lint_if_path_starts_with_module(finalize_full, path, second_binding);
return PathResult::NonModule(PartialRes::with_unresolved_segments(
res,
path.len() - i - 1,
));
} else {
- let label = format!(
- "`{}` is {} {}, not a module",
- ident,
- res.article(),
- res.descr(),
- );
-
- return PathResult::Failed {
- span: ident.span,
- label,
- suggestion: None,
- is_error_from_last_segment: is_last,
- };
+ return PathResult::failed(ident.span, is_last, finalize.is_some(), || {
+ let label = format!(
+ "`{ident}` is {} {}, not a module",
+ res.article(),
+ res.descr()
+ );
+ (label, None)
+ });
}
}
Err(Undetermined) => return PathResult::Indeterminate,
));
}
}
- let module_res = match module {
- Some(ModuleOrUniformRoot::Module(module)) => module.res(),
- _ => None,
- };
- let (label, suggestion) = if module_res == self.graph_root.res() {
- let is_mod = |res| matches!(res, Res::Def(DefKind::Mod, _));
- // Don't look up import candidates if this is a speculative resolve
- let mut candidates = if record_used {
- self.lookup_import_candidates(ident, TypeNS, parent_scope, is_mod)
- } else {
- Vec::new()
- };
- candidates.sort_by_cached_key(|c| {
- (c.path.segments.len(), pprust::path_to_string(&c.path))
- });
- if let Some(candidate) = candidates.get(0) {
- (
- String::from("unresolved import"),
- Some((
- vec![(ident.span, pprust::path_to_string(&candidate.path))],
- String::from("a similar path exists"),
- Applicability::MaybeIncorrect,
- )),
- )
- } else if self.session.edition() == Edition::Edition2015 {
- (format!("maybe a missing crate `{}`?", ident), None)
- } else {
- (format!("could not find `{}` in the crate root", ident), None)
- }
- } else if i == 0 {
- if ident
- .name
- .as_str()
- .chars()
- .next()
- .map_or(false, |c| c.is_ascii_uppercase())
- {
- // Check whether the name refers to an item in the value namespace.
- let suggestion = if ribs.is_some() {
- let match_span = match self.resolve_ident_in_lexical_scope(
- ident,
- ValueNS,
- parent_scope,
- None,
- path_span,
- &ribs.unwrap()[ValueNS],
- ) {
- // Name matches a local variable. For example:
- // ```
- // fn f() {
- // let Foo: &str = "";
- // println!("{}", Foo::Bar); // Name refers to local
- // // variable `Foo`.
- // }
- // ```
- Some(LexicalScopeBinding::Res(Res::Local(id))) => {
- Some(*self.pat_span_map.get(&id).unwrap())
- }
- // Name matches item from a local name binding
- // created by `use` declaration. For example:
- // ```
- // pub Foo: &str = "";
- //
- // mod submod {
- // use super::Foo;
- // println!("{}", Foo::Bar); // Name refers to local
- // // binding `Foo`.
- // }
- // ```
- Some(LexicalScopeBinding::Item(name_binding)) => {
- Some(name_binding.span)
- }
- _ => None,
- };
-
- if let Some(span) = match_span {
+ return PathResult::failed(ident.span, is_last, finalize.is_some(), || {
+ let module_res = match module {
+ Some(ModuleOrUniformRoot::Module(module)) => module.res(),
+ _ => None,
+ };
+ if module_res == self.graph_root.res() {
+ let is_mod = |res| matches!(res, Res::Def(DefKind::Mod, _));
+ let mut candidates =
+ self.lookup_import_candidates(ident, TypeNS, parent_scope, is_mod);
+ candidates.sort_by_cached_key(|c| {
+ (c.path.segments.len(), pprust::path_to_string(&c.path))
+ });
+ if let Some(candidate) = candidates.get(0) {
+ (
+ String::from("unresolved import"),
Some((
- vec![(span, String::from(""))],
- format!("`{}` is defined here, but is not a type", ident),
+ vec![(ident.span, pprust::path_to_string(&candidate.path))],
+ String::from("a similar path exists"),
Applicability::MaybeIncorrect,
- ))
- } else {
- None
- }
+ )),
+ )
+ } else if self.session.edition() == Edition::Edition2015 {
+ (format!("maybe a missing crate `{}`?", ident), None)
} else {
- None
- };
+ (format!("could not find `{}` in the crate root", ident), None)
+ }
+ } else if i == 0 {
+ if ident
+ .name
+ .as_str()
+ .chars()
+ .next()
+ .map_or(false, |c| c.is_ascii_uppercase())
+ {
+ // Check whether the name refers to an item in the value namespace.
+ let suggestion = if ribs.is_some() {
+ let match_span = match self.resolve_ident_in_lexical_scope(
+ ident,
+ ValueNS,
+ parent_scope,
+ Finalize::No,
+ &ribs.unwrap()[ValueNS],
+ ) {
+ // Name matches a local variable. For example:
+ // ```
+ // fn f() {
+ // let Foo: &str = "";
+ // println!("{}", Foo::Bar); // Name refers to local
+ // // variable `Foo`.
+ // }
+ // ```
+ Some(LexicalScopeBinding::Res(Res::Local(id))) => {
+ Some(*self.pat_span_map.get(&id).unwrap())
+ }
+
+ // Name matches item from a local name binding
+ // created by `use` declaration. For example:
+ // ```
+ // pub Foo: &str = "";
+ //
+ // mod submod {
+ // use super::Foo;
+ // println!("{}", Foo::Bar); // Name refers to local
+ // // binding `Foo`.
+ // }
+ // ```
+ Some(LexicalScopeBinding::Item(name_binding)) => {
+ Some(name_binding.span)
+ }
+ _ => None,
+ };
- (format!("use of undeclared type `{}`", ident), suggestion)
- } else {
- (
- format!("use of undeclared crate or module `{}`", ident),
- if ident.name == sym::alloc {
- Some((
- vec![],
- String::from(
- "add `extern crate alloc` to use the `alloc` crate",
- ),
- Applicability::MaybeIncorrect,
- ))
+ if let Some(span) = match_span {
+ Some((
+ vec![(span, String::from(""))],
+ format!(
+ "`{}` is defined here, but is not a type",
+ ident
+ ),
+ Applicability::MaybeIncorrect,
+ ))
+ } else {
+ None
+ }
} else {
- self.find_similarly_named_module_or_crate(
- ident.name,
- &parent_scope.module,
- )
- .map(|sugg| {
- (
- vec![(ident.span, sugg.to_string())],
+ None
+ };
+
+ (format!("use of undeclared type `{}`", ident), suggestion)
+ } else {
+ (
+ format!("use of undeclared crate or module `{}`", ident),
+ if ident.name == sym::alloc {
+ Some((
+ vec![],
String::from(
- "there is a crate or module with a similar name",
+ "add `extern crate alloc` to use the `alloc` crate",
),
Applicability::MaybeIncorrect,
+ ))
+ } else {
+ self.find_similarly_named_module_or_crate(
+ ident.name,
+ &parent_scope.module,
)
- })
- },
- )
- }
- } else {
- let parent = path[i - 1].ident.name;
- let parent = match parent {
- // ::foo is mounted at the crate root for 2015, and is the extern
- // prelude for 2018+
- kw::PathRoot if self.session.edition() > Edition::Edition2015 => {
- "the list of imported crates".to_owned()
- }
- kw::PathRoot | kw::Crate => "the crate root".to_owned(),
- _ => {
- format!("`{}`", parent)
+ .map(|sugg| {
+ (
+ vec![(ident.span, sugg.to_string())],
+ String::from(
+ "there is a crate or module with a similar name",
+ ),
+ Applicability::MaybeIncorrect,
+ )
+ })
+ },
+ )
}
- };
-
- let mut msg = format!("could not find `{}` in {}", ident, parent);
- if ns == TypeNS || ns == ValueNS {
- let ns_to_try = if ns == TypeNS { ValueNS } else { TypeNS };
- if let FindBindingResult::Binding(Ok(binding)) =
- find_binding_in_ns(self, ns_to_try)
- {
- let mut found = |what| {
- msg = format!(
- "expected {}, found {} `{}` in {}",
- ns.descr(),
- what,
- ident,
- parent
- )
- };
- if binding.module().is_some() {
- found("module")
- } else {
- match binding.res() {
- def::Res::<NodeId>::Def(kind, id) => found(kind.descr(id)),
- _ => found(ns_to_try.descr()),
- }
+ } else {
+ let parent = path[i - 1].ident.name;
+ let parent = match parent {
+ // ::foo is mounted at the crate root for 2015, and is the extern
+ // prelude for 2018+
+ kw::PathRoot if self.session.edition() > Edition::Edition2015 => {
+ "the list of imported crates".to_owned()
+ }
+ kw::PathRoot | kw::Crate => "the crate root".to_owned(),
+ _ => {
+ format!("`{}`", parent)
}
};
+
+ let mut msg = format!("could not find `{}` in {}", ident, parent);
+ if ns == TypeNS || ns == ValueNS {
+ let ns_to_try = if ns == TypeNS { ValueNS } else { TypeNS };
+ if let FindBindingResult::Binding(Ok(binding)) =
+ find_binding_in_ns(self, ns_to_try)
+ {
+ let mut found = |what| {
+ msg = format!(
+ "expected {}, found {} `{}` in {}",
+ ns.descr(),
+ what,
+ ident,
+ parent
+ )
+ };
+ if binding.module().is_some() {
+ found("module")
+ } else {
+ match binding.res() {
+ def::Res::<NodeId>::Def(kind, id) => {
+ found(kind.descr(id))
+ }
+ _ => found(ns_to_try.descr()),
+ }
+ }
+ };
+ }
+ (msg, None)
}
- (msg, None)
- };
- return PathResult::Failed {
- span: ident.span,
- label,
- suggestion,
- is_error_from_last_segment: is_last,
- };
+ });
}
}
}
- self.lint_if_path_starts_with_module(crate_lint, path, path_span, second_binding);
+ self.lint_if_path_starts_with_module(finalize_full, path, second_binding);
PathResult::Module(match module {
Some(module) => module,
None if path.is_empty() => ModuleOrUniformRoot::CurrentScope,
- _ => span_bug!(path_span, "resolve_path: non-empty path `{:?}` has no module", path),
+ _ => bug!("resolve_path: non-empty path `{:?}` has no module", path),
})
}
fn lint_if_path_starts_with_module(
&mut self,
- crate_lint: CrateLint,
+ finalize: Finalize,
path: &[Segment],
- path_span: Span,
second_binding: Option<&NameBinding<'_>>,
) {
- let (diag_id, diag_span) = match crate_lint {
- CrateLint::No => return,
- CrateLint::SimplePath(id) => (id, path_span),
- CrateLint::UsePath { root_id, root_span } => (root_id, root_span),
- CrateLint::QPathTrait { qpath_id, qpath_span } => (qpath_id, qpath_span),
+ let (diag_id, diag_span) = match finalize {
+ Finalize::No => return,
+ Finalize::SimplePath(id, path_span) => (id, path_span),
+ Finalize::UsePath { root_id, root_span, .. } => (root_id, root_span),
+ Finalize::QPathTrait { qpath_id, qpath_span, .. } => (qpath_id, qpath_span),
};
let first_name = match path.get(0) {
rib_index: usize,
rib_ident: Ident,
mut res: Res,
- record_used: bool,
- span: Span,
+ finalize: Option<Span>,
original_rib_ident_def: Ident,
all_ribs: &[Rib<'a>],
) -> Res {
// An invalid forward use of a generic parameter from a previous default.
if let ForwardGenericParamBanRibKind = all_ribs[rib_index].kind {
- if record_used {
+ if let Some(span) = finalize {
let res_error = if rib_ident.name == kw::SelfUpper {
ResolutionError::SelfInGenericParamDefault
} else {
// This was an attempt to access an upvar inside a
// named function item. This is not allowed, so we
// report an error.
- if record_used {
+ if let Some(span) = finalize {
// We don't immediately trigger a resolve error, because
// we want certain other resolution errors (namely those
// emitted for `ConstantItemRibKind` below) to take
// precedence.
- res_err = Some(CannotCaptureDynamicEnvironmentInFnItem);
+ res_err = Some((span, CannotCaptureDynamicEnvironmentInFnItem));
}
}
ConstantItemRibKind(_, item) => {
// Still doesn't deal with upvars
- if record_used {
+ if let Some(span) = finalize {
let (span, resolution_error) =
if let Some((ident, constant_item_kind)) = item {
let kind_str = match constant_item_kind {
return Res::Err;
}
ConstParamTyRibKind => {
- if record_used {
+ if let Some(span) = finalize {
self.report_error(span, ParamInTyOfConstParam(rib_ident.name));
}
return Res::Err;
}
}
}
- if let Some(res_err) = res_err {
+ if let Some((span, res_err)) = res_err {
self.report_error(span, res_err);
return Res::Err;
}
if let Res::SelfTy { trait_, alias_to: Some((def, _)) } = res {
res = Res::SelfTy { trait_, alias_to: Some((def, true)) }
} else {
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::ParamInNonTrivialAnonConst {
is_type: true,
},
);
+ self.session.delay_span_bug(span, CG_BUG_STR);
}
- self.session.delay_span_bug(span, CG_BUG_STR);
return Res::Err;
}
}
ItemRibKind(has_generic_params) => has_generic_params,
FnItemRibKind => HasGenericParams::Yes,
ConstParamTyRibKind => {
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::ParamInTyOfConstParam(rib_ident.name),
}
};
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::GenericParamsFromOuterFunction(
let features = self.session.features_untracked();
// HACK(min_const_generics): We currently only allow `N` or `{ N }`.
if !(trivial || features.generic_const_exprs) {
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::ParamInNonTrivialAnonConst {
is_type: false,
},
);
+ self.session.delay_span_bug(span, CG_BUG_STR);
}
- self.session.delay_span_bug(span, CG_BUG_STR);
return Res::Err;
}
ItemRibKind(has_generic_params) => has_generic_params,
FnItemRibKind => HasGenericParams::Yes,
ConstParamTyRibKind => {
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::ParamInTyOfConstParam(rib_ident.name),
};
// This was an attempt to use a const parameter outside its scope.
- if record_used {
+ if let Some(span) = finalize {
self.report_error(
span,
ResolutionError::GenericParamsFromOuterFunction(
err.span_suggestion(span, message, String::new(), Applicability::MachineApplicable);
}
- fn extern_prelude_get(
- &mut self,
- ident: Ident,
- speculative: bool,
- ) -> Option<&'a NameBinding<'a>> {
+ fn extern_prelude_get(&mut self, ident: Ident, finalize: bool) -> Option<&'a NameBinding<'a>> {
if ident.is_path_segment_keyword() {
// Make sure `self`, `super` etc produce an error when passed to here.
return None;
}
self.extern_prelude.get(&ident.normalize_to_macros_2_0()).cloned().and_then(|entry| {
if let Some(binding) = entry.extern_crate_item {
- if !speculative && entry.introduced_by_item {
+ if finalize && entry.introduced_by_item {
self.record_use(ident, binding, false);
}
Some(binding)
} else {
- let crate_id = if !speculative {
+ let crate_id = if finalize {
let Some(crate_id) =
self.crate_loader.process_path_extern(ident.name, ident.span) else { return Some(self.dummy_binding); };
crate_id
})
}
- /// Rustdoc uses this to resolve things in a recoverable way. `ResolutionError<'a>`
+ /// Rustdoc uses this to resolve doc link paths in a recoverable way. `PathResult<'a>`
/// isn't something that can be returned because it can't be made to live that long,
/// and also it's a private type. Fortunately rustdoc doesn't need to know the error,
/// just that an error occurred.
- // FIXME(Manishearth): intra-doc links won't get warned of epoch changes.
- pub fn resolve_str_path_error(
+ pub fn resolve_rustdoc_path(
&mut self,
- span: Span,
path_str: &str,
ns: Namespace,
module_id: DefId,
- ) -> Result<(ast::Path, Res), ()> {
- let path = if path_str.starts_with("::") {
- ast::Path {
- span,
- segments: iter::once(Ident::with_dummy_span(kw::PathRoot))
- .chain(path_str.split("::").skip(1).map(Ident::from_str))
- .map(|i| self.new_ast_path_segment(i))
- .collect(),
- tokens: None,
- }
- } else {
- ast::Path {
- span,
- segments: path_str
- .split("::")
- .map(Ident::from_str)
- .map(|i| self.new_ast_path_segment(i))
- .collect(),
- tokens: None,
- }
- };
- let module = self.expect_module(module_id);
- let parent_scope = &ParentScope::module(module, self);
- let res = self.resolve_ast_path(&path, ns, parent_scope).map_err(|_| ())?;
- Ok((path, res))
- }
+ ) -> Option<Res> {
+ let mut segments =
+ Vec::from_iter(path_str.split("::").map(Ident::from_str).map(Segment::from_ident));
+ if path_str.starts_with("::") {
+ segments[0].ident.name = kw::PathRoot;
+ }
- // Resolve a path passed from rustdoc or HIR lowering.
- fn resolve_ast_path(
- &mut self,
- path: &ast::Path,
- ns: Namespace,
- parent_scope: &ParentScope<'a>,
- ) -> Result<Res, (Span, ResolutionError<'a>)> {
+ let module = self.expect_module(module_id);
match self.resolve_path(
- &Segment::from_path(path),
+ &segments,
Some(ns),
- parent_scope,
- false,
- path.span,
- CrateLint::No,
+ &ParentScope::module(module, self),
+ Finalize::No,
) {
- PathResult::Module(ModuleOrUniformRoot::Module(module)) => Ok(module.res().unwrap()),
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => Some(module.res().unwrap()),
PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
- Ok(path_res.base_res())
+ Some(path_res.base_res())
}
- PathResult::NonModule(..) => Err((
- path.span,
- ResolutionError::FailedToResolve {
- label: String::from("type-relative paths are not supported in this context"),
- suggestion: None,
- },
- )),
+ PathResult::NonModule(..) | PathResult::Failed { .. } => None,
PathResult::Module(..) | PathResult::Indeterminate => unreachable!(),
- PathResult::Failed { span, label, suggestion, .. } => {
- Err((span, ResolutionError::FailedToResolve { label, suggestion }))
- }
}
}
- fn new_ast_path_segment(&mut self, ident: Ident) -> ast::PathSegment {
- let mut seg = ast::PathSegment::from_ident(ident);
- seg.id = self.next_node_id();
- seg
- }
-
// For rustdoc.
pub fn graph_root(&self) -> Module<'a> {
self.graph_root
ident,
ValueNS,
parent_scope,
- false,
- DUMMY_SP,
+ None
) else {
return;
};
}
#[derive(Copy, Clone, Debug)]
-enum CrateLint {
+enum Finalize {
/// Do not issue the lint.
No,
/// This lint applies to some arbitrary path; e.g., `impl ::foo::Bar`.
/// In this case, we can take the span of that path.
- SimplePath(NodeId),
+ SimplePath(NodeId, Span),
/// This lint comes from a `use` statement. In this case, what we
/// care about really is the *root* `use` statement; e.g., if we
/// have nested things like `use a::{b, c}`, we care about the
/// `use a` part.
- UsePath { root_id: NodeId, root_span: Span },
+ UsePath { root_id: NodeId, root_span: Span, path_span: Span },
/// This is the "trait item" from a fully qualified path. For example,
/// we might be resolving `X::Y::Z` from a path like `<T as X::Y>::Z`.
/// The `path_span` is the span of the path to the trait itself (`X::Y`).
- QPathTrait { qpath_id: NodeId, qpath_span: Span },
+ QPathTrait { qpath_id: NodeId, qpath_span: Span, path_span: Span },
}
-impl CrateLint {
- fn node_id(&self) -> Option<NodeId> {
+impl Finalize {
+ fn node_id_and_path_span(&self) -> Option<(NodeId, Span)> {
match *self {
- CrateLint::No => None,
- CrateLint::SimplePath(id)
- | CrateLint::UsePath { root_id: id, .. }
- | CrateLint::QPathTrait { qpath_id: id, .. } => Some(id),
+ Finalize::No => None,
+ Finalize::SimplePath(id, path_span)
+ | Finalize::UsePath { root_id: id, path_span, .. }
+ | Finalize::QPathTrait { qpath_id: id, path_span, .. } => Some((id, path_span)),
}
}
+
+ fn node_id(&self) -> Option<NodeId> {
+ self.node_id_and_path_span().map(|(id, _)| id)
+ }
+
+ fn path_span(&self) -> Option<Span> {
+ self.node_id_and_path_span().map(|(_, path_span)| path_span)
+ }
}
pub fn provide(providers: &mut Providers) {
use crate::imports::ImportResolver;
use crate::Namespace::*;
use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BuiltinMacroState, Determinacy};
-use crate::{CrateLint, DeriveData, ParentScope, ResolutionError, Resolver, Scope, ScopeSet, Weak};
+use crate::{DeriveData, Finalize, ParentScope, ResolutionError, Resolver, Scope, ScopeSet, Weak};
use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment, ToNameBinding};
use rustc_ast::{self as ast, Inline, ItemKind, ModKind, NodeId};
use rustc_ast_lowering::ResolverAstLowering;
let mut indeterminate = false;
for ns in [TypeNS, ValueNS, MacroNS].iter().copied() {
- match self.resolve_path(path, Some(ns), &parent_scope, false, span, CrateLint::No) {
+ match self.resolve_path(path, Some(ns), &parent_scope, Finalize::No) {
PathResult::Module(ModuleOrUniformRoot::Module(_)) => return Ok(true),
PathResult::NonModule(partial_res) if partial_res.unresolved_segments() == 0 => {
return Ok(true);
}
let res = if path.len() > 1 {
- let res = match self.resolve_path(
- &path,
- Some(MacroNS),
- parent_scope,
- false,
- path_span,
- CrateLint::No,
- ) {
+ let res = match self.resolve_path(&path, Some(MacroNS), parent_scope, Finalize::No) {
PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
Ok(path_res.base_res())
}
path[0].ident,
scope_set,
parent_scope,
- false,
+ None,
force,
- path_span,
);
if let Err(Determinacy::Undetermined) = binding {
return Err(Determinacy::Undetermined);
orig_ident: Ident,
scope_set: ScopeSet<'a>,
parent_scope: &ParentScope<'a>,
- record_used: bool,
+ finalize: Option<Span>,
force: bool,
- path_span: Span,
) -> Result<&'a NameBinding<'a>, Determinacy> {
bitflags::bitflags! {
struct Flags: u8 {
}
}
- assert!(force || !record_used); // `record_used` implies `force`
+ assert!(force || finalize.is_none()); // `finalize` implies `force`
// Make sure `self`, `super` etc produce an error when passed to here.
if orig_ident.is_path_segment_keyword() {
ident,
ns,
parent_scope,
- record_used,
- path_span,
+ finalize,
);
match binding {
Ok(binding) => Ok((binding, Flags::MODULE | Flags::MISC_SUGGEST_CRATE)),
ns,
adjusted_parent_scope,
!matches!(scope_set, ScopeSet::Late(..)),
- record_used,
- path_span,
+ finalize,
);
match binding {
Ok(binding) => {
Err(Determinacy::Determined)
}
}
- Scope::ExternPrelude => match this.extern_prelude_get(ident, !record_used) {
- Some(binding) => Ok((binding, Flags::empty())),
- None => Err(Determinacy::determined(
- this.graph_root.unexpanded_invocations.borrow().is_empty(),
- )),
- },
+ Scope::ExternPrelude => {
+ match this.extern_prelude_get(ident, finalize.is_some()) {
+ Some(binding) => Ok((binding, Flags::empty())),
+ None => Err(Determinacy::determined(
+ this.graph_root.unexpanded_invocations.borrow().is_empty(),
+ )),
+ }
+ }
Scope::ToolPrelude => match this.registered_tools.get(&ident).cloned() {
Some(ident) => ok(Res::ToolMod, ident.span, this.arenas),
None => Err(Determinacy::Determined),
ident,
ns,
parent_scope,
- false,
- path_span,
+ None,
) {
if use_prelude || this.is_builtin_macro(binding.res()) {
result = Ok((binding, Flags::MISC_FROM_PRELUDE));
Ok((binding, flags))
if sub_namespace_match(binding.macro_kind(), macro_kind) =>
{
- if !record_used || matches!(scope_set, ScopeSet::Late(..)) {
+ if finalize.is_none() || matches!(scope_set, ScopeSet::Late(..)) {
return Some(Ok(binding));
}
&path,
Some(MacroNS),
&parent_scope,
- true,
- path_span,
- CrateLint::No,
+ Finalize::SimplePath(ast::CRATE_NODE_ID, path_span),
) {
PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
let res = path_res.base_res();
ident,
ScopeSet::Macro(kind),
&parent_scope,
+ Some(ident.span),
true,
- true,
- ident.span,
) {
Ok(binding) => {
let initial_res = initial_binding.map(|initial_binding| {
ident,
ScopeSet::Macro(MacroKind::Attr),
&parent_scope,
+ Some(ident.span),
true,
- true,
- ident.span,
);
}
}
Some(Data::RefData(Ref {
kind: RefKind::Type,
span,
- ref_id: id_from_def_id(def.did),
+ ref_id: id_from_def_id(def.did()),
}))
}
_ => {
pub fn set_actual_reuse(&self, cgu_name: &str, kind: CguReuse) {
if let Some(ref data) = self.data {
- debug!("set_actual_reuse({:?}, {:?})", cgu_name, kind);
+ debug!("set_actual_reuse({cgu_name:?}, {kind:?})");
let prev_reuse = data.lock().unwrap().actual_reuse.insert(cgu_name.to_string(), kind);
comparison_kind: ComparisonKind,
) {
if let Some(ref data) = self.data {
- debug!("set_expectation({:?}, {:?}, {:?})", cgu_name, expected_reuse, comparison_kind);
+ debug!("set_expectation({cgu_name:?}, {expected_reuse:?}, {comparison_kind:?})");
let mut data = data.lock().unwrap();
data.expected_reuse.insert(
if error {
let at_least = if at_least { "at least " } else { "" };
let msg = format!(
- "CGU-reuse for `{}` is `{:?}` but \
- should be {}`{:?}`",
- cgu_user_name, actual_reuse, at_least, expected_reuse
+ "CGU-reuse for `{cgu_user_name}` is `{actual_reuse:?}` but \
+ should be {at_least}`{expected_reuse:?}`"
);
diag.span_err(error_span.0, &msg);
}
} else {
let msg = format!(
- "CGU-reuse for `{}` (mangled: `{}`) was \
- not recorded",
- cgu_user_name, cgu_name
+ "CGU-reuse for `{cgu_user_name}` (mangled: `{cgu_name}`) was \
+ not recorded"
);
diag.span_fatal(error_span.0, &msg)
}
}
});
- for info in &sorted {
+ for info in sorted {
+ let TypeSizeInfo { type_description, overall_size, align, kind, variants, .. } = info;
println!(
- "print-type-size type: `{}`: {} bytes, alignment: {} bytes",
- info.type_description, info.overall_size, info.align
+ "print-type-size type: `{type_description}`: {overall_size} bytes, alignment: {align} bytes"
);
let indent = " ";
let discr_size = if let Some(discr_size) = info.opt_discr_size {
- println!("print-type-size {}discriminant: {} bytes", indent, discr_size);
+ println!("print-type-size {indent}discriminant: {discr_size} bytes");
discr_size
} else {
0
// to reflect the presence of the discriminant.
let mut max_variant_size = discr_size;
- let struct_like = match info.kind {
+ let struct_like = match kind {
DataTypeKind::Struct | DataTypeKind::Closure => true,
DataTypeKind::Enum | DataTypeKind::Union => false,
};
- for (i, variant_info) in info.variants.iter().enumerate() {
+ for (i, variant_info) in variants.into_iter().enumerate() {
let VariantInfo { ref name, kind: _, align: _, size, ref fields } = *variant_info;
let indent = if !struct_like {
let name = match name.as_ref() {
None => i.to_string(),
};
println!(
- "print-type-size {}variant `{}`: {} bytes",
- indent,
- name,
- size - discr_size
+ "print-type-size {indent}variant `{name}`: {diff} bytes",
+ diff = size - discr_size
);
" "
} else {
let mut fields = fields.clone();
fields.sort_by_key(|f| (f.offset, f.size));
- for field in fields.iter() {
- let FieldInfo { ref name, offset, size, align } = *field;
+ for field in fields {
+ let FieldInfo { ref name, offset, size, align } = field;
if offset > min_offset {
let pad = offset - min_offset;
- println!("print-type-size {}padding: {} bytes", indent, pad);
+ println!("print-type-size {indent}padding: {pad} bytes");
}
if offset < min_offset {
// If this happens it's probably a union.
println!(
- "print-type-size {}field `.{}`: {} bytes, \
- offset: {} bytes, \
- alignment: {} bytes",
- indent, name, size, offset, align
+ "print-type-size {indent}field `.{name}`: {size} bytes, \
+ offset: {offset} bytes, \
+ alignment: {align} bytes"
);
} else if info.packed || offset == min_offset {
- println!("print-type-size {}field `.{}`: {} bytes", indent, name, size);
+ println!("print-type-size {indent}field `.{name}`: {size} bytes");
} else {
// Include field alignment in output only if it caused padding injection
println!(
- "print-type-size {}field `.{}`: {} bytes, \
- alignment: {} bytes",
- indent, name, size, align
+ "print-type-size {indent}field `.{name}`: {size} bytes, \
+ alignment: {align} bytes"
);
}
}
}
- assert!(
- max_variant_size <= info.overall_size,
- "max_variant_size {} !<= {} overall_size",
- max_variant_size,
- info.overall_size
- );
- if max_variant_size < info.overall_size {
- println!(
- "print-type-size {}end padding: {} bytes",
- indent,
- info.overall_size - max_variant_size
- );
+ match overall_size.checked_sub(max_variant_size) {
+ None => panic!("max_variant_size {max_variant_size} > {overall_size} overall_size"),
+ Some(diff @ 1..) => println!("print-type-size {indent}end padding: {diff} bytes"),
+ Some(0) => {}
}
}
}
single_output_file,
temps_directory,
outputs,
- filestem: format!("{}{}", out_filestem, extra),
+ filestem: format!("{out_filestem}{extra}"),
}
}
let lint_cap = matches.opt_str("cap-lints").map(|cap| {
lint::Level::from_str(&cap)
- .unwrap_or_else(|| early_error(error_format, &format!("unknown lint level: `{}`", cap)))
+ .unwrap_or_else(|| early_error(error_format, &format!("unknown lint level: `{cap}`")))
});
(lint_opts, describe_lints, lint_cap)
ErrorOutputType::default(),
&format!(
"argument for `--color` must be auto, \
- always or never (instead was `{}`)",
- arg
+ always or never (instead was `{arg}`)"
),
),
}
"future-incompat" => json_future_incompat = true,
s => early_error(
ErrorOutputType::default(),
- &format!("unknown `--json` option `{}`", s),
+ &format!("unknown `--json` option `{s}`"),
),
}
}
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(color)),
&format!(
"argument for `--error-format` must be `human`, `json` or \
- `short` (instead was `{}`)",
- arg
+ `short` (instead was `{arg}`)"
),
),
}
ErrorOutputType::default(),
&format!(
"argument for `--edition` must be one of: \
- {}. (instead was `{}`)",
- EDITION_NAME_LIST, arg
+ {EDITION_NAME_LIST}. (instead was `{arg}`)"
),
)
}),
edition, LATEST_STABLE_EDITION
)
} else {
- format!("edition {} is unstable and only available with -Z unstable-options", edition)
+ format!("edition {edition} is unstable and only available with -Z unstable-options")
};
early_error(ErrorOutputType::default(), &msg)
}
early_error(
error_format,
&format!(
- "unknown emission type: `{}` - expected one of: {}",
- shorthand,
- OutputType::shorthands_display(),
+ "unknown emission type: `{shorthand}` - expected one of: {display}",
+ display = OutputType::shorthands_display(),
),
)
});
early_warn(
error_format,
&format!(
- "`--emit={}` with `-o` incompatible with \
+ "`--emit={ot}` with `-o` incompatible with \
`-C codegen-units=N` for N > 1",
- ot
),
);
}
}
}
"link-args" => PrintRequest::LinkArgs,
- req => early_error(error_format, &format!("unknown print request `{}`", req)),
+ req => early_error(error_format, &format!("unknown print request `{req}`")),
}));
prints
Some(target) if target.ends_with(".json") => {
let path = Path::new(&target);
TargetTriple::from_path(&path).unwrap_or_else(|_| {
- early_error(error_format, &format!("target file {:?} does not exist", path))
+ early_error(error_format, &format!("target file {path:?} does not exist"))
})
}
Some(target) => TargetTriple::TargetTriple(target),
error_format,
&format!(
"optimization level needs to be \
- between 0-3, s or z (instead was `{}`)",
- arg
+ between 0-3, s or z (instead was `{arg}`)"
),
);
}
error_format,
&format!(
"debug info level needs to be between \
- 0-2 (instead was `{}`)",
- arg
+ 0-2 (instead was `{arg}`)"
),
);
}
match opt_assertion {
Some(s) if s.as_str() == "loaded" => Some(IncrementalStateAssertion::Loaded),
Some(s) if s.as_str() == "not-loaded" => Some(IncrementalStateAssertion::NotLoaded),
- Some(s) => early_error(
- error_format,
- &format!("unexpected incremental state assertion value: {}", s),
- ),
+ Some(s) => {
+ early_error(error_format, &format!("unexpected incremental state assertion value: {s}"))
+ }
None => None,
}
}
}
s => early_error(
error_format,
- &format!("unknown library kind `{}`, expected one of dylib, framework, or static", s),
+ &format!("unknown library kind `{s}`, expected one of dylib, framework, or static"),
),
};
match modifiers {
_ => early_error(
error_format,
&format!(
- "unrecognized linking modifier `{}`, expected one \
- of: bundle, verbatim, whole-archive, as-needed",
- modifier
+ "unrecognized linking modifier `{modifier}`, expected one \
+ of: bundle, verbatim, whole-archive, as-needed"
),
),
}
match dopts.borrowck.as_ref() {
"migrate" => BorrowckMode::Migrate,
"mir" => BorrowckMode::Mir,
- m => early_error(error_format, &format!("unknown borrowck mode `{}`", m)),
+ m => early_error(error_format, &format!("unknown borrowck mode `{m}`")),
}
}
);
}
}
- _ => early_error(error_format, &format!("unknown --extern option `{}`", opt)),
+ _ => early_error(error_format, &format!("unknown --extern option `{opt}`")),
}
}
}
let loc = parts.next().unwrap_or_else(|| {
early_error(
error_format,
- &format!("`--extern-location`: specify location for extern crate `{}`", name),
+ &format!("`--extern-location`: specify location for extern crate `{name}`"),
)
});
let json = json::from_str(raw).unwrap_or_else(|_| {
early_error(
error_format,
- &format!("`--extern-location`: malformed json location `{}`", raw),
+ &format!("`--extern-location`: malformed json location `{raw}`"),
)
});
ExternDepSpec::Json(json)
}
[bad, ..] => early_error(
error_format,
- &format!("unknown location type `{}`: use `raw` or `json`", bad),
+ &format!("unknown location type `{bad}`: use `raw` or `json`"),
),
[] => early_error(error_format, "missing location specification"),
};
&& !target_triple.triple().contains("apple")
&& cg.split_debuginfo.is_some()
{
- {
- early_error(error_format, "`-Csplit-debuginfo` is unstable on this platform");
- }
+ early_error(error_format, "`-Csplit-debuginfo` is unstable on this platform");
}
// Try to find a directory containing the Rust `src`, for more details see
};
let working_dir = std::env::current_dir().unwrap_or_else(|e| {
- early_error(error_format, &format!("Current directory is invalid: {}", e));
+ early_error(error_format, &format!("Current directory is invalid: {e}"));
});
let (path, remapped) =
"argument to `unpretty` must be one of `normal`, `identified`, \
`expanded`, `expanded,identified`, `expanded,hygiene`, \
`ast-tree`, `ast-tree,expanded`, `hir`, `hir,identified`, \
- `hir,typed`, `hir-tree`, `thir-tree`, `mir` or `mir-cfg`; got {}",
- name
+ `hir,typed`, `hir-tree`, `thir-tree`, `mir` or `mir-cfg`; got {name}"
),
),
};
- tracing::debug!("got unpretty option: {:?}", first);
+ tracing::debug!("got unpretty option: {first:?}");
Some(first)
}
"cdylib" => CrateType::Cdylib,
"bin" => CrateType::Executable,
"proc-macro" => CrateType::ProcMacro,
- _ => return Err(format!("unknown crate type: `{}`", part)),
+ _ => return Err(format!("unknown crate type: `{part}`")),
};
if !crate_types.contains(&new_part) {
crate_types.push(new_part)
crate mod dep_tracking {
use super::{
BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, ErrorOutputType,
- InstrumentCoverage, LdImpl, LinkerPluginLto, LocationDetail, LtoCli, OptLevel, OutputType,
- OutputTypes, Passes, SourceFileHashAlgorithm, SwitchWithOptPath, SymbolManglingVersion,
- TrimmedDefPaths,
+ InstrumentCoverage, LdImpl, LinkerPluginLto, LocationDetail, LtoCli, OomStrategy, OptLevel,
+ OutputType, OutputTypes, Passes, SourceFileHashAlgorithm, SwitchWithOptPath,
+ SymbolManglingVersion, TrimmedDefPaths,
};
use crate::lint;
use crate::options::WasiExecModel;
RealFileName,
LocationDetail,
BranchProtection,
+ OomStrategy,
);
impl<T1, T2> DepTrackingHash for (T1, T2)
}
}
}
+
+/// Default behavior to use in out-of-memory situations.
+#[derive(Clone, Copy, PartialEq, Hash, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum OomStrategy {
+ /// Generate a panic that can be caught by `catch_unwind`.
+ Panic,
+
+ /// Abort the process immediately.
+ Abort,
+}
+
+impl OomStrategy {
+ pub const SYMBOL: &'static str = "__rust_alloc_error_handler_should_panic";
+
+ pub fn should_panic(self) -> u8 {
+ match self {
+ OomStrategy::Panic => 1,
+ OomStrategy::Abort => 0,
+ }
+ }
+}
p.pop();
p
}
- Err(e) => panic!("failed to get current_exe: {}", e),
+ Err(e) => panic!("failed to get current_exe: {e}"),
}
}
#![feature(crate_visibility_modifier)]
#![feature(derive_default_enum)]
+#![feature(let_chains)]
#![feature(let_else)]
#![feature(min_specialization)]
+#![feature(never_type)]
#![feature(once_cell)]
#![feature(option_get_or_insert_default)]
#![recursion_limit = "256"]
/// `true` if we're emitting a JSON blob containing the unused externs
json_unused_externs: bool [UNTRACKED],
- /// `true` if we're emitting a JSON job containg a future-incompat report for lints
+ /// `true` if we're emitting a JSON job containing a future-incompat report for lints
json_future_incompat: bool [TRACKED],
pretty: Option<PpMode> [UNTRACKED],
Some(value) => early_error(
error_format,
&format!(
- "incorrect value `{}` for {} option `{}` - {} was expected",
- value, outputname, key, type_desc
+ "incorrect value `{value}` for {outputname} option `{key}` - {type_desc} was expected"
),
),
}
}
}
- None => early_error(error_format, &format!("unknown {} option: `{}`", outputname, key)),
+ None => early_error(error_format, &format!("unknown {outputname} option: `{key}`")),
}
}
return op;
pub const parse_passes: &str = "a space-separated list of passes, or `all`";
pub const parse_panic_strategy: &str = "either `unwind` or `abort`";
pub const parse_opt_panic_strategy: &str = parse_panic_strategy;
+ pub const parse_oom_strategy: &str = "either `panic` or `abort`";
pub const parse_relro_level: &str = "one of: `full`, `partial`, or `off`";
pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `cfi`, `hwaddress`, `leak`, `memory`, `memtag`, or `thread`";
pub const parse_sanitizer_memory_track_origins: &str = "0, 1, or 2";
pub const parse_linker_plugin_lto: &str =
"either a boolean (`yes`, `no`, `on`, `off`, etc), or the path to the linker plugin";
pub const parse_location_detail: &str =
- "comma seperated list of location details to track: `file`, `line`, or `column`";
+ "comma separated list of location details to track: `file`, `line`, or `column`";
pub const parse_switch_with_opt_path: &str =
"an optional path to the profiling data output directory";
pub const parse_merge_functions: &str = "one of: `disabled`, `trampolines`, or `aliases`";
true
}
+ crate fn parse_oom_strategy(slot: &mut OomStrategy, v: Option<&str>) -> bool {
+ match v {
+ Some("panic") => *slot = OomStrategy::Panic,
+ Some("abort") => *slot = OomStrategy::Abort,
+ _ => return false,
+ }
+ true
+ }
+
crate fn parse_relro_level(slot: &mut Option<RelroLevel>, v: Option<&str>) -> bool {
match v {
Some(s) => match s.parse::<RelroLevel>() {
llvm_time_trace: bool = (false, parse_bool, [UNTRACKED],
"generate JSON tracing data file from LLVM data (default: no)"),
location_detail: LocationDetail = (LocationDetail::all(), parse_location_detail, [TRACKED],
- "comma seperated list of location details to be tracked when using caller_location \
+ "comma separated list of location details to be tracked when using caller_location \
valid options are `file`, `line`, and `column` (default: all)"),
ls: bool = (false, parse_bool, [UNTRACKED],
"list the symbols defined by a library crate (default: no)"),
"prevent automatic injection of the profiler_builtins crate"),
normalize_docs: bool = (false, parse_bool, [TRACKED],
"normalize associated items in rustdoc when generating documentation"),
+ oom: OomStrategy = (OomStrategy::Abort, parse_oom_strategy, [TRACKED],
+ "panic strategy for out-of-memory handling"),
osx_rpath_install_name: bool = (false, parse_bool, [TRACKED],
"pass `-install_name @rpath/...` to the macOS linker (default: no)"),
panic_abort_tests: bool = (false, parse_bool, [TRACKED],
if name.as_str() != s {
let msg = format!(
"`--crate-name` and `#[crate_name]` are \
- required to match, but `{}` != `{}`",
- s, name
+ required to match, but `{s}` != `{name}`"
);
sess.span_err(attr.span, &msg);
}
if s.starts_with('-') {
let msg = format!(
"crate names cannot start with a `-`, but \
- `{}` has a leading hyphen",
- s
+ `{s}` has a leading hyphen"
);
sess.err(&msg);
} else {
match sp {
Some(sp) => sess.span_err(sp, s),
None => sess.err(s),
- }
+ };
err_count += 1;
};
if s.is_empty() {
if c == '_' {
continue;
}
- say(&format!("invalid character `{}` in crate name: `{}`", c, s));
+ say(&format!("invalid character `{c}` in crate name: `{s}`"));
}
}
let out_filename = outputs
.single_output_file
.clone()
- .unwrap_or_else(|| outputs.out_directory.join(&format!("lib{}.rmeta", libname)));
+ .unwrap_or_else(|| outputs.out_directory.join(&format!("lib{libname}.rmeta")));
check_file_is_writeable(&out_filename, sess);
let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename);
match crate_type {
- CrateType::Rlib => outputs.out_directory.join(&format!("lib{}.rlib", libname)),
+ CrateType::Rlib => outputs.out_directory.join(&format!("lib{libname}.rlib")),
CrateType::Cdylib | CrateType::ProcMacro | CrateType::Dylib => {
let (prefix, suffix) = (&sess.target.dll_prefix, &sess.target.dll_suffix);
- outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix))
+ outputs.out_directory.join(&format!("{prefix}{libname}{suffix}"))
}
CrateType::Staticlib => {
let (prefix, suffix) = (&sess.target.staticlib_prefix, &sess.target.staticlib_suffix);
- outputs.out_directory.join(&format!("{}{}{}", prefix, libname, suffix))
+ outputs.out_directory.join(&format!("{prefix}{libname}{suffix}"))
}
CrateType::Executable => {
let suffix = &sess.target.exe_suffix;
/// Checks if target supports crate_type as output
pub fn invalid_output_for_target(sess: &Session, crate_type: CrateType) -> bool {
- match crate_type {
- CrateType::Cdylib | CrateType::Dylib | CrateType::ProcMacro => {
- if !sess.target.dynamic_linking {
- return true;
- }
- if sess.crt_static(Some(crate_type)) && !sess.target.crt_static_allows_dylibs {
- return true;
- }
+ if let CrateType::Cdylib | CrateType::Dylib | CrateType::ProcMacro = crate_type {
+ if !sess.target.dynamic_linking {
+ return true;
}
- _ => {}
- }
- if sess.target.only_cdylib {
- match crate_type {
- CrateType::ProcMacro | CrateType::Dylib => return true,
- _ => {}
+ if sess.crt_static(Some(crate_type)) && !sess.target.crt_static_allows_dylibs {
+ return true;
}
}
- if !sess.target.executables && crate_type == CrateType::Executable {
+ if let CrateType::ProcMacro | CrateType::Dylib = crate_type && sess.target.only_cdylib {
+ return true;
+ }
+ if let CrateType::Executable = crate_type && !sess.target.executables {
return true;
}
impl SymbolGallery {
/// Insert a symbol and its span into symbol gallery.
- /// If the symbol has occurred before, ignore the new occurance.
+ /// If the symbol has occurred before, ignore the new occurrence.
pub fn insert(&self, symbol: Symbol, span: Span) {
self.symbols.lock().entry(symbol).or_insert(span);
}
) {
if let Some(n) = find_feature_issue(feature, issue) {
err.note(&format!(
- "see issue #{} <https://github.com/rust-lang/rust/issues/{}> for more information",
- n, n,
+ "see issue #{n} <https://github.com/rust-lang/rust/issues/{n}> for more information"
));
}
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
if sess.unstable_features.is_nightly_build() {
- err.help(&format!("add `#![feature({})]` to the crate attributes to enable", feature));
+ err.help(&format!("add `#![feature({feature})]` to the crate attributes to enable"));
}
}
use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
use rustc_errors::json::JsonEmitter;
use rustc_errors::registry::Registry;
-use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed};
+use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed};
use rustc_macros::HashStable_Generic;
pub use rustc_span::def_id::StableCrateId;
use rustc_span::edition::Edition;
use std::env;
use std::fmt;
use std::io::Write;
-use std::num::NonZeroU32;
use std::ops::{Div, Mul};
use std::path::{Path, PathBuf};
use std::str::FromStr;
impl fmt::Display for Limit {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- write!(f, "{}", self.0)
+ self.0.fmt(f)
}
}
/// `None` means that there is no source file.
pub local_crate_source_file: Option<PathBuf>,
- /// Set of `(DiagnosticId, Option<Span>, message)` tuples tracking
- /// (sub)diagnostics that have been set once, but should not be set again,
- /// in order to avoid redundantly verbose output (Issue #24690, #44953).
- pub one_time_diagnostics: Lock<FxHashSet<(DiagnosticMessageId, Option<Span>, String)>>,
crate_types: OnceCell<Vec<CrateType>>,
/// The `stable_crate_id` is constructed out of the crate name and all the
/// `-C metadata` arguments passed to the compiler. Its value forms a unique
pub normalize_projection_ty: AtomicUsize,
}
-/// Enum to support dispatch of one-time diagnostics (in `Session.diag_once`).
-enum DiagnosticBuilderMethod {
- Note,
- SpanNote,
- // Add more variants as needed to support one-time diagnostics.
-}
-
/// Trait implemented by error types. This should not be implemented manually. Instead, use
/// `#[derive(SessionDiagnostic)]` -- see [rustc_macros::SessionDiagnostic].
pub trait SessionDiagnostic<'a> {
fn into_diagnostic(self, sess: &'a Session) -> DiagnosticBuilder<'a, ErrorGuaranteed>;
}
-/// Diagnostic message ID, used by `Session.one_time_diagnostics` to avoid
-/// emitting the same message more than once.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-pub enum DiagnosticMessageId {
- ErrorId(u16), // EXXXX error code as integer
- LintId(lint::LintId),
- StabilityId(Option<NonZeroU32>), // issue number
-}
-
-impl From<&'static lint::Lint> for DiagnosticMessageId {
- fn from(lint: &'static lint::Lint) -> Self {
- DiagnosticMessageId::LintId(lint::LintId::of(lint))
- }
-}
-
impl Session {
pub fn miri_unleashed_feature(&self, span: Span, feature_gate: Option<Symbol>) {
self.miri_unleashed_features.lock().push((span, feature_gate));
let mut diag = self.struct_warn("skipping const checks");
for &(span, feature_gate) in unleashed_features.iter() {
// FIXME: `span_label` doesn't do anything, so we use "help" as a hack.
- if let Some(feature_gate) = feature_gate {
- diag.span_help(span, &format!("skipping check for `{}` feature", feature_gate));
+ if let Some(gate) = feature_gate {
+ diag.span_help(span, &format!("skipping check for `{gate}` feature"));
// The unleash flag must *not* be used to just "hack around" feature gates.
must_err = true;
} else {
}
diag.emit();
// If we should err, make sure we did.
- if must_err && !self.has_errors() {
+ if must_err && self.has_errors().is_none() {
// We have skipped a feature gate, and not run into other errors... reject.
self.err(
"`-Zunleash-the-miri-inside-of-you` may not be used to circumvent feature \
&self,
sp: S,
msg: &str,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'_, !> {
self.diagnostic().struct_span_fatal(sp, msg)
}
pub fn struct_span_fatal_with_code<S: Into<MultiSpan>>(
sp: S,
msg: &str,
code: DiagnosticId,
- ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ ) -> DiagnosticBuilder<'_, !> {
self.diagnostic().struct_span_fatal_with_code(sp, msg, code)
}
- pub fn struct_fatal(&self, msg: &str) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ pub fn struct_fatal(&self, msg: &str) -> DiagnosticBuilder<'_, !> {
self.diagnostic().struct_fatal(msg)
}
self.span_err(sp, msg);
}
}
- pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
+ pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ErrorGuaranteed {
self.diagnostic().span_err(sp, msg)
}
pub fn span_err_with_code<S: Into<MultiSpan>>(&self, sp: S, msg: &str, code: DiagnosticId) {
self.diagnostic().span_err_with_code(sp, &msg, code)
}
- pub fn err(&self, msg: &str) {
+ pub fn err(&self, msg: &str) -> ErrorGuaranteed {
self.diagnostic().err(msg)
}
pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
pub fn err_count(&self) -> usize {
self.diagnostic().err_count()
}
- pub fn has_errors(&self) -> bool {
+ pub fn has_errors(&self) -> Option<ErrorGuaranteed> {
self.diagnostic().has_errors()
}
pub fn has_errors_or_delayed_span_bugs(&self) -> bool {
self.diagnostic().abort_if_errors();
}
pub fn compile_status(&self) -> Result<(), ErrorGuaranteed> {
- if self.diagnostic().has_errors_or_lint_errors() {
- self.diagnostic().emit_stashed_diagnostics();
- Err(ErrorGuaranteed)
+ if let Some(reported) = self.diagnostic().has_errors_or_lint_errors() {
+ let _ = self.diagnostic().emit_stashed_diagnostics();
+ Err(reported)
} else {
Ok(())
}
{
let old_count = self.err_count();
let result = f();
- if self.err_count() == old_count { Ok(result) } else { Err(ErrorGuaranteed) }
+ if self.err_count() == old_count {
+ Ok(result)
+ } else {
+ Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ }
}
pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
self.diagnostic().span_warn(sp, msg)
}
/// Delay a span_bug() call until abort_if_errors()
#[track_caller]
- pub fn delay_span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
+ pub fn delay_span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ErrorGuaranteed {
self.diagnostic().delay_span_bug(sp, msg)
}
&self.parse_sess.span_diagnostic
}
- /// Analogous to calling methods on the given `DiagnosticBuilder`, but
- /// deduplicates on lint ID, span (if any), and message for this `Session`
- fn diag_once(
- &self,
- diag: &mut Diagnostic,
- method: DiagnosticBuilderMethod,
- msg_id: DiagnosticMessageId,
- message: &str,
- span_maybe: Option<Span>,
- ) {
- let id_span_message = (msg_id, span_maybe, message.to_owned());
- let fresh = self.one_time_diagnostics.borrow_mut().insert(id_span_message);
- if fresh {
- match method {
- DiagnosticBuilderMethod::Note => {
- diag.note(message);
- }
- DiagnosticBuilderMethod::SpanNote => {
- let span = span_maybe.expect("`span_note` needs a span");
- diag.span_note(span, message);
- }
- }
- }
- }
-
- pub fn diag_span_note_once(
- &self,
- diag: &mut Diagnostic,
- msg_id: DiagnosticMessageId,
- span: Span,
- message: &str,
- ) {
- self.diag_once(diag, DiagnosticBuilderMethod::SpanNote, msg_id, message, Some(span));
- }
-
- pub fn diag_note_once(
- &self,
- diag: &mut Diagnostic,
- msg_id: DiagnosticMessageId,
- message: &str,
- ) {
- self.diag_once(diag, DiagnosticBuilderMethod::Note, msg_id, message, None);
- }
-
#[inline]
pub fn source_map(&self) -> &SourceMap {
self.parse_sess.source_map()
let target_cfg = config::build_target_config(&sopts, target_override, &sysroot);
let host_triple = TargetTriple::from_triple(config::host_triple());
let (host, target_warnings) = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| {
- early_error(sopts.error_format, &format!("Error loading host specification: {}", e))
+ early_error(sopts.error_format, &format!("Error loading host specification: {e}"))
});
for warning in target_warnings.warning_messages() {
early_warn(sopts.error_format, &warning)
match profiler {
Ok(profiler) => Some(Arc::new(profiler)),
Err(e) => {
- early_warn(sopts.error_format, &format!("failed to create profiler: {}", e));
+ early_warn(sopts.error_format, &format!("failed to create profiler: {e}"));
None
}
}
parse_sess,
sysroot,
local_crate_source_file,
- one_time_diagnostics: Default::default(),
crate_types: OnceCell::new(),
stable_crate_id: OnceCell::new(),
features: OnceCell::new(),
let unsupported_sanitizers = sess.opts.debugging_opts.sanitizer - supported_sanitizers;
match unsupported_sanitizers.into_iter().count() {
0 => {}
- 1 => sess
- .err(&format!("{} sanitizer is not supported for this target", unsupported_sanitizers)),
- _ => sess.err(&format!(
- "{} sanitizers are not supported for this target",
- unsupported_sanitizers
- )),
+ 1 => {
+ sess.err(&format!(
+ "{} sanitizer is not supported for this target",
+ unsupported_sanitizers
+ ));
+ }
+ _ => {
+ sess.err(&format!(
+ "{} sanitizers are not supported for this target",
+ unsupported_sanitizers
+ ));
+ }
}
// Cannot mix and match sanitizers.
let mut sanitizer_iter = sess.opts.debugging_opts.sanitizer.into_iter();
if let (Some(first), Some(second)) = (sanitizer_iter.next(), sanitizer_iter.next()) {
- sess.err(&format!("`-Zsanitizer={}` is incompatible with `-Zsanitizer={}`", first, second));
+ sess.err(&format!("`-Zsanitizer={first}` is incompatible with `-Zsanitizer={second}`"));
}
// Cannot enable crt-static with sanitizers on Linux
InvalidBecauseOfErrors { session_directory: PathBuf },
}
-pub fn early_error_no_abort(output: config::ErrorOutputType, msg: &str) {
+fn early_error_handler(output: config::ErrorOutputType) -> rustc_errors::Handler {
let emitter: Box<dyn Emitter + sync::Send> = match output {
config::ErrorOutputType::HumanReadable(kind) => {
let (short, color_config) = kind.unzip();
Box::new(JsonEmitter::basic(pretty, json_rendered, None, false))
}
};
- let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
- handler.struct_fatal(msg).emit();
+ rustc_errors::Handler::with_emitter(true, None, emitter)
+}
+
+pub fn early_error_no_abort(output: config::ErrorOutputType, msg: &str) -> ErrorGuaranteed {
+ early_error_handler(output).struct_err(msg).emit()
}
pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! {
- early_error_no_abort(output, msg);
- rustc_errors::FatalError.raise();
+ early_error_handler(output).struct_fatal(msg).emit()
}
pub fn early_warn(output: config::ErrorOutputType, msg: &str) {
- let emitter: Box<dyn Emitter + sync::Send> = match output {
- config::ErrorOutputType::HumanReadable(kind) => {
- let (short, color_config) = kind.unzip();
- Box::new(EmitterWriter::stderr(color_config, None, short, false, None, false))
- }
- config::ErrorOutputType::Json { pretty, json_rendered } => {
- Box::new(JsonEmitter::basic(pretty, json_rendered, None, false))
- }
- };
- let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
- handler.struct_warn(msg).emit();
+ early_error_handler(output).struct_warn(msg).emit()
}
/// Dynamic library (e.g. `libfoo.so` on Linux)
/// or an import library corresponding to a dynamic library (e.g. `foo.lib` on Windows/MSVC).
Dylib {
- /// Whether the dynamic library will be linked only if it satifies some undefined symbols
+ /// Whether the dynamic library will be linked only if it satisfies some undefined symbols
as_needed: Option<bool>,
},
/// Dynamic library (e.g. `foo.dll` on Windows) without a corresponding import library.
RawDylib,
/// A macOS-specific kind of dynamic libraries.
Framework {
- /// Whether the framework will be linked only if it satifies some undefined symbols
+ /// Whether the framework will be linked only if it satisfies some undefined symbols
as_needed: Option<bool>,
},
/// The library kind wasn't specified, `Dylib` is currently used as a default.
impl std::fmt::Display for FatalError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- write!(f, "parser fatal error")
+ write!(f, "fatal error")
}
}
_task_context,
a32,
aarch64_target_feature,
+ aarch64_ver_target_feature,
abi,
abi_amdgpu_kernel,
abi_avr_interrupt,
delay_span_bug_from_inside_query,
deny,
deprecated,
+ deprecated_safe,
deprecated_suggestion,
deref,
deref_method,
deref_target,
derive,
derive_default_enum,
+ destruct,
destructuring_assignment,
diagnostic,
direct,
store,
str,
str_alloc,
+ str_split_whitespace,
+ str_trim,
+ str_trim_end,
+ str_trim_start,
stringify,
stringify_macro,
struct_field_attributes,
// If we're dealing with an instance of a function that's inlined from
// another crate but we're marking it as globally shared to our
- // compliation (aka we're not making an internal copy in each of our
+ // compilation (aka we're not making an internal copy in each of our
// codegen units) then this symbol may become an exported (but hidden
// visibility) symbol. This means that multiple crates may do the same
// and we want to be sure to avoid any symbol conflicts here.
use rustc_data_structures::base_n;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::intern::Interned;
use rustc_hir as hir;
use rustc_hir::def::CtorKind;
use rustc_hir::def_id::{CrateNum, DefId};
}
// Mangle all nominal types as paths.
- ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
| ty::FnDef(def_id, substs)
| ty::Opaque(def_id, substs)
| ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
ty::Adt(def, substs) => {
let variant_idx =
contents.variant.expect("destructed const of adt without variant idx");
- let variant_def = &def.variants[variant_idx];
+ let variant_def = &def.variant(variant_idx);
self.push("V");
self = self.print_def_path(variant_def.def_id, substs)?;
// Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
// Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
- // This is accomplished by prefering enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
+ // This is accomplished by preferring enums with 2 variants(`count==1`) and always taking the shortest path to niche zero.
// Having `None` in niche zero can enable some special optimizations.
//
// Bound selection criteria:
match self {
Self::reg => types! { _: I8, I16, I32, I64, F32, F64; },
Self::vreg | Self::vreg_low16 => types! {
- fp: I8, I16, I32, I64, F32, F64,
+ neon: I8, I16, I32, I64, F32, F64,
VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF32(2), VecF64(1),
VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
},
+++ /dev/null
-use crate::spec::Target;
-
-pub fn target() -> Target {
- let mut base = super::hermit_kernel_base::opts();
- base.max_atomic_width = Some(128);
- base.abi = "softfloat".to_string();
- base.features = "+strict-align,-neon,-fp-armv8".to_string();
-
- Target {
- llvm_target: "aarch64-unknown-hermit".to_string(),
- pointer_width: 64,
- data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".to_string(),
- arch: "aarch64".to_string(),
- options: base,
- }
-}
pre_link_args,
exe_suffix: ".elf".to_string(),
no_default_libraries: false,
+ has_thread_local: true,
..Default::default()
},
}
+++ /dev/null
-use crate::spec::{LinkArgs, LinkerFlavor, LldFlavor, PanicStrategy, TargetOptions};
-
-pub fn opts() -> TargetOptions {
- let mut pre_link_args = LinkArgs::new();
- pre_link_args.insert(
- LinkerFlavor::Lld(LldFlavor::Ld),
- vec!["--build-id".to_string(), "--hash-style=gnu".to_string(), "--Bstatic".to_string()],
- );
-
- TargetOptions {
- linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
- disable_redzone: true,
- linker: Some("rust-lld".to_owned()),
- executables: true,
- pre_link_args,
- panic_strategy: PanicStrategy::Abort,
- position_independent_executables: true,
- static_position_independent_executables: true,
- ..Default::default()
- }
-}
mod fuchsia_base;
mod haiku_base;
mod hermit_base;
-mod hermit_kernel_base;
mod illumos_base;
mod l4re_base;
mod linux_base;
("aarch64-unknown-hermit", aarch64_unknown_hermit),
("x86_64-unknown-hermit", x86_64_unknown_hermit),
- ("aarch64-unknown-none-hermitkernel", aarch64_unknown_none_hermitkernel),
- ("x86_64-unknown-none-hermitkernel", x86_64_unknown_none_hermitkernel),
-
("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
+ ("riscv32im-unknown-none-elf", riscv32im_unknown_none_elf),
("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
("riscv32imc-esp-espidf", riscv32imc_esp_espidf),
("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
--- /dev/null
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".to_string(),
+ llvm_target: "riscv32".to_string(),
+ pointer_width: 32,
+ arch: "riscv32".to_string(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".to_string()),
+ cpu: "generic-rv32".to_string(),
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ features: "+m".to_string(),
+ executables: true,
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
+++ /dev/null
-use crate::spec::{StackProbeType, Target};
-
-pub fn target() -> Target {
- let mut base = super::hermit_kernel_base::opts();
- base.cpu = "x86-64".to_string();
- base.max_atomic_width = Some(64);
- base.features =
- "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float"
- .to_string();
- // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
- base.stack_probes = StackProbeType::Call;
-
- Target {
- llvm_target: "x86_64-unknown-hermit".to_string(),
- pointer_width: 64,
- data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
- .to_string(),
- arch: "x86_64".to_string(),
- options: base,
- }
-}
use rustc_infer::infer::InferCtxt;
use rustc_middle::ty::{self, TraitRef, Ty, TyCtxt};
use rustc_middle::ty::{ToPredicate, TypeFoldable};
-use rustc_session::{DiagnosticMessageId, Limit};
+use rustc_session::Limit;
use rustc_span::def_id::LOCAL_CRATE;
use rustc_span::Span;
Limit(0) => Limit(2),
limit => limit * 2,
};
- let msg = format!("reached the recursion limit while auto-dereferencing `{:?}`", ty);
- let error_id = (DiagnosticMessageId::ErrorId(55), Some(span), msg);
- let fresh = tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id);
- if fresh {
- struct_span_err!(
- tcx.sess,
- span,
- E0055,
- "reached the recursion limit while auto-dereferencing `{:?}`",
- ty
- )
- .span_label(span, "deref recursion limit reached")
- .help(&format!(
- "consider increasing the recursion limit by adding a \
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0055,
+ "reached the recursion limit while auto-dereferencing `{:?}`",
+ ty
+ )
+ .span_label(span, "deref recursion limit reached")
+ .help(&format!(
+ "consider increasing the recursion limit by adding a \
`#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
- suggested_limit,
- tcx.crate_name(LOCAL_CRATE),
- ))
- .emit();
- }
+ suggested_limit,
+ tcx.crate_name(LOCAL_CRATE),
+ ))
+ .emit();
}
let trait_pred = ty::Binder::dummy(trait_ref);
let bail_out = tcx.infer_ctxt().enter(|infcx| {
- let mut selcx = SelectionContext::with_negative(&infcx, true);
+ let mut selcx = SelectionContext::new(&infcx);
let result = selcx.select(&Obligation::new(
ObligationCause::dummy(),
orig_env,
trait_pred.to_poly_trait_predicate(),
));
+ match result {
+ Ok(Some(ImplSource::UserDefined(_))) => {
+ debug!(
+ "find_auto_trait_generics({:?}): \
+ manual impl found, bailing out",
+ trait_ref
+ );
+ return true;
+ }
+ _ => {}
+ }
+
+ let result = selcx.select(&Obligation::new(
+ ObligationCause::dummy(),
+ orig_env,
+ trait_pred.to_poly_trait_predicate_negative_polarity(),
+ ));
+
match result {
Ok(Some(ImplSource::UserDefined(_))) => {
debug!(
fresh_preds.insert(self.clean_pred(infcx, predicate));
}
- let mut select = SelectionContext::with_negative(&infcx, true);
+ let mut select = SelectionContext::new(&infcx);
let mut already_visited = FxHashSet::default();
let mut predicates = VecDeque::new();
// leading to an ambiguous result. So report this as an
// overflow bug, since I believe this is the only case
// where ambiguity can result.
- infcx.tcx.sess.delay_span_bug(
+ let reported = infcx.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
&format!(
"encountered ambiguity selecting `{:?}` during codegen, presuming due to \
trait_ref
),
);
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
Err(Unimplemented) => {
// This can trigger when we probe for the source of a `'static` lifetime requirement
// on a trait object: `impl Foo for dyn Trait {}` has an implicit `'static` bound.
// This can also trigger when we have a global bound that is not actually satisfied,
// but was included during typeck due to the trivial_bounds feature.
- infcx.tcx.sess.delay_span_bug(
+ let guar = infcx.tcx.sess.delay_span_bug(
rustc_span::DUMMY_SP,
&format!(
"Encountered error `Unimplemented` selecting `{:?}` during codegen",
trait_ref
),
);
- return Err(ErrorGuaranteed);
+ return Err(guar);
}
Err(e) => {
bug!("Encountered error `{:?}` selecting `{:?}` during codegen", e, trait_ref)
use crate::infer::outlives::env::OutlivesEnvironment;
use crate::infer::{CombinedSnapshot, InferOk, RegionckMode};
use crate::traits::select::IntercrateAmbiguityCause;
-use crate::traits::util::impl_trait_ref_and_oblig;
+use crate::traits::util::impl_subject_and_oblig;
use crate::traits::SkipLeakCheck;
use crate::traits::{
self, FulfillmentContext, Normalized, Obligation, ObligationCause, PredicateObligation,
use rustc_errors::Diagnostic;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::CRATE_HIR_ID;
-use rustc_infer::infer::TyCtxtInferExt;
-use rustc_infer::traits::TraitEngine;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::{util, TraitEngine};
use rustc_middle::traits::specialization_graph::OverlapMode;
use rustc_middle::ty::fast_reject::{self, TreatParams};
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{self, ImplSubject, Ty, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::DUMMY_SP;
+use std::fmt::Debug;
use std::iter;
/// Whether we do the orphan check relative to this crate or
debug!("negative_impl(impl1_def_id={:?}, impl2_def_id={:?})", impl1_def_id, impl2_def_id);
let tcx = selcx.infcx().tcx;
- // create a parameter environment corresponding to a (placeholder) instantiation of impl1
- let impl1_env = tcx.param_env(impl1_def_id);
- let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap();
-
// Create an infcx, taking the predicates of impl1 as assumptions:
tcx.infer_ctxt().enter(|infcx| {
- // Normalize the trait reference. The WF rules ought to ensure
- // that this always succeeds.
- let impl1_trait_ref = match traits::fully_normalize(
+ // create a parameter environment corresponding to a (placeholder) instantiation of impl1
+ let impl_env = tcx.param_env(impl1_def_id);
+ let subject1 = match traits::fully_normalize(
&infcx,
FulfillmentContext::new(),
ObligationCause::dummy(),
- impl1_env,
- impl1_trait_ref,
+ impl_env,
+ tcx.impl_subject(impl1_def_id),
) {
- Ok(impl1_trait_ref) => impl1_trait_ref,
- Err(err) => {
- bug!("failed to fully normalize {:?}: {:?}", impl1_trait_ref, err);
- }
+ Ok(s) => s,
+ Err(err) => bug!("failed to fully normalize {:?}: {:?}", impl1_def_id, err),
};
// Attempt to prove that impl2 applies, given all of the above.
let selcx = &mut SelectionContext::new(&infcx);
let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id);
- let (impl2_trait_ref, obligations) =
- impl_trait_ref_and_oblig(selcx, impl1_env, impl2_def_id, impl2_substs);
-
- // do the impls unify? If not, not disjoint.
- let Ok(InferOk { obligations: more_obligations, .. }) = infcx
- .at(&ObligationCause::dummy(), impl1_env)
- .eq(impl1_trait_ref, impl2_trait_ref)
- else {
- debug!(
- "explicit_disjoint: {:?} does not unify with {:?}",
- impl1_trait_ref, impl2_trait_ref
- );
- return false;
- };
+ let (subject2, obligations) =
+ impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_substs);
- let opt_failing_obligation = obligations
- .into_iter()
- .chain(more_obligations)
- .find(|o| negative_impl_exists(selcx, impl1_env, impl1_def_id, o));
-
- if let Some(failing_obligation) = opt_failing_obligation {
- debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
- true
- } else {
- false
- }
+ !equate(&infcx, impl_env, impl1_def_id, subject1, subject2, obligations)
})
}
+fn equate<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ impl_env: ty::ParamEnv<'tcx>,
+ impl1_def_id: DefId,
+ subject1: ImplSubject<'tcx>,
+ subject2: ImplSubject<'tcx>,
+ obligations: impl Iterator<Item = PredicateObligation<'tcx>>,
+) -> bool {
+ // do the impls unify? If not, not disjoint.
+ let Ok(InferOk { obligations: more_obligations, .. }) =
+ infcx.at(&ObligationCause::dummy(), impl_env).eq(subject1, subject2)
+ else {
+ debug!("explicit_disjoint: {:?} does not unify with {:?}", subject1, subject2);
+ return true;
+ };
+
+ let selcx = &mut SelectionContext::new(&infcx);
+ let opt_failing_obligation = obligations
+ .into_iter()
+ .chain(more_obligations)
+ .find(|o| negative_impl_exists(selcx, impl_env, impl1_def_id, o));
+
+ if let Some(failing_obligation) = opt_failing_obligation {
+ debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
+ false
+ } else {
+ true
+ }
+}
+
+/// Try to prove that a negative impl exists for the given obligation and its super predicates.
+#[instrument(level = "debug", skip(selcx))]
fn negative_impl_exists<'cx, 'tcx>(
selcx: &SelectionContext<'cx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
o: &PredicateObligation<'tcx>,
) -> bool {
let infcx = &selcx.infcx().fork();
+
+ if resolve_negative_obligation(infcx, param_env, region_context, o) {
+ return true;
+ }
+
+ // Try to prove a negative obligation exists for super predicates
+ for o in util::elaborate_predicates(infcx.tcx, iter::once(o.predicate)) {
+ if resolve_negative_obligation(infcx, param_env, region_context, &o) {
+ return true;
+ }
+ }
+
+ false
+}
+
+#[instrument(level = "debug", skip(infcx))]
+fn resolve_negative_obligation<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ region_context: DefId,
+ o: &PredicateObligation<'tcx>,
+) -> bool {
let tcx = infcx.tcx;
- o.flip_polarity(tcx)
- .map(|o| {
- let mut fulfillment_cx = FulfillmentContext::new();
- fulfillment_cx.register_predicate_obligation(infcx, o);
-
- let errors = fulfillment_cx.select_all_or_error(infcx);
- if !errors.is_empty() {
- return false;
- }
- let mut outlives_env = OutlivesEnvironment::new(param_env);
- // FIXME -- add "assumed to be well formed" types into the `outlives_env`
-
- // "Save" the accumulated implied bounds into the outlives environment
- // (due to the FIXME above, there aren't any, but this step is still needed).
- // The "body id" is given as `CRATE_HIR_ID`, which is the same body-id used
- // by the "dummy" causes elsewhere (body-id is only relevant when checking
- // function bodies with closures).
- outlives_env.save_implied_bounds(CRATE_HIR_ID);
-
- infcx.process_registered_region_obligations(
- outlives_env.region_bound_pairs_map(),
- Some(tcx.lifetimes.re_root_empty),
- param_env,
- );
-
- let errors =
- infcx.resolve_regions(region_context, &outlives_env, RegionckMode::default());
- if !errors.is_empty() {
- return false;
- }
+ let Some(o) = o.flip_polarity(tcx) else {
+ return false;
+ };
- true
- })
- .unwrap_or(false)
+ let mut fulfillment_cx = FulfillmentContext::new();
+ fulfillment_cx.register_predicate_obligation(infcx, o);
+
+ let errors = fulfillment_cx.select_all_or_error(infcx);
+
+ if !errors.is_empty() {
+ return false;
+ }
+
+ let mut outlives_env = OutlivesEnvironment::new(param_env);
+ // FIXME -- add "assumed to be well formed" types into the `outlives_env`
+
+ // "Save" the accumulated implied bounds into the outlives environment
+ // (due to the FIXME above, there aren't any, but this step is still needed).
+ // The "body id" is given as `CRATE_HIR_ID`, which is the same body-id used
+ // by the "dummy" causes elsewhere (body-id is only relevant when checking
+ // function bodies with closures).
+ outlives_env.save_implied_bounds(CRATE_HIR_ID);
+
+ infcx.process_registered_region_obligations(
+ outlives_env.region_bound_pairs_map(),
+ Some(tcx.lifetimes.re_root_empty),
+ param_env,
+ );
+
+ let errors = infcx.resolve_regions(region_context, &outlives_env, RegionckMode::default());
+
+ if !errors.is_empty() {
+ return false;
+ }
+
+ true
}
pub fn trait_ref_is_knowable<'tcx>(
match types.next() {
None => {
tcx.sess.span_err(
- tcx.def_span(def.did),
+ tcx.def_span(def.did()),
"`#[fundamental]` requires at least one type parameter",
);
InCrate::Remote => true,
},
- ty::Adt(def, _) => def_id_is_local(def.did, in_crate),
+ ty::Adt(def, _) => def_id_is_local(def.did(), in_crate),
ty::Foreign(did) => def_id_is_local(did, in_crate),
ty::Opaque(..) => {
// This merits some explanation.
use rustc_index::vec::IndexVec;
use rustc_infer::infer::InferCtxt;
use rustc_middle::mir;
-use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::interpret::{
+ ConstValue, ErrorHandled, LitToConstError, LitToConstInput, Scalar,
+};
use rustc_middle::thir;
use rustc_middle::thir::abstract_const::{self, Node, NodeId, NotConstEvaluatable};
use rustc_middle::ty::subst::{Subst, SubstsRef};
-use rustc_middle::ty::{self, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, DelaySpanBugEmitted, TyCtxt, TypeFoldable};
use rustc_session::lint;
use rustc_span::def_id::LocalDefId;
use rustc_span::Span;
use std::ops::ControlFlow;
/// Check if a given constant can be evaluated.
+#[instrument(skip(infcx), level = "debug")]
pub fn is_const_evaluatable<'cx, 'tcx>(
infcx: &InferCtxt<'cx, 'tcx>,
uv: ty::Unevaluated<'tcx, ()>,
param_env: ty::ParamEnv<'tcx>,
span: Span,
) -> Result<(), NotConstEvaluatable> {
- debug!("is_const_evaluatable({:?})", uv);
let tcx = infcx.tcx;
if tcx.features().generic_const_exprs {
"#![feature(generic_const_exprs)]\n".to_string(),
rustc_errors::Applicability::MaybeIncorrect,
)
- .emit();
- rustc_errors::FatalError.raise();
+ .emit()
}
debug!(?concrete, "is_const_evaluatable");
false => NotConstEvaluatable::MentionsParam,
}),
Err(ErrorHandled::Linted) => {
- infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
- Err(NotConstEvaluatable::Error(ErrorGuaranteed))
+ let reported =
+ infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
+ Err(NotConstEvaluatable::Error(reported))
}
Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)),
Ok(_) => Ok(()),
}
}
+#[instrument(skip(tcx), level = "debug")]
fn satisfied_from_param_env<'tcx>(
tcx: TyCtxt<'tcx>,
ct: AbstractConst<'tcx>,
match pred.kind().skip_binder() {
ty::PredicateKind::ConstEvaluatable(uv) => {
if let Some(b_ct) = AbstractConst::new(tcx, uv)? {
+ let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
+
// Try to unify with each subtree in the AbstractConst to allow for
// `N + 1` being const evaluatable even if theres only a `ConstEvaluatable`
// predicate for `(N + 1) * 2`
- let result =
- walk_abstract_const(tcx, b_ct, |b_ct| match try_unify(tcx, ct, b_ct) {
+ let result = walk_abstract_const(tcx, b_ct, |b_ct| {
+ match const_unify_ctxt.try_unify(ct, b_ct) {
true => ControlFlow::BREAK,
false => ControlFlow::CONTINUE,
- });
+ }
+ });
if let ControlFlow::Break(()) = result {
debug!("is_const_evaluatable: abstract_const ~~> ok");
) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
match ct.val() {
ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()),
- ty::ConstKind::Error(_) => Err(ErrorGuaranteed),
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => Err(reported),
_ => Ok(None),
}
}
}
fn error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
- self.tcx
+ let reported = self
+ .tcx
.sess
.struct_span_err(self.root_span(), "overly complex generic constant")
.span_label(span, msg)
.help("consider moving this anonymous constant into a `const` function")
.emit();
- Err(ErrorGuaranteed)
+ Err(reported)
}
fn maybe_supported_error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
- self.tcx
+ let reported = self
+ .tcx
.sess
.struct_span_err(self.root_span(), "overly complex generic constant")
.span_label(span, msg)
.note("this operation may be supported in the future")
.emit();
- Err(ErrorGuaranteed)
+ Err(reported)
}
+ #[instrument(skip(tcx, body, body_id), level = "debug")]
fn new(
tcx: TyCtxt<'tcx>,
(body, body_id): (&'a thir::Thir<'tcx>, thir::ExprId),
thir: &'a thir::Thir<'tcx>,
}
+ use crate::rustc_middle::thir::visit::Visitor;
use thir::visit;
- impl<'a, 'tcx: 'a> visit::Visitor<'a, 'tcx> for IsThirPolymorphic<'a, 'tcx> {
+
+ impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
+ fn expr_is_poly(&mut self, expr: &thir::Expr<'tcx>) -> bool {
+ if expr.ty.has_param_types_or_consts() {
+ return true;
+ }
+
+ match expr.kind {
+ thir::ExprKind::NamedConst { substs, .. } => substs.has_param_types_or_consts(),
+ thir::ExprKind::ConstParam { .. } => true,
+ thir::ExprKind::Repeat { value, count } => {
+ self.visit_expr(&self.thir()[value]);
+ count.has_param_types_or_consts()
+ }
+ _ => false,
+ }
+ }
+
+ fn pat_is_poly(&mut self, pat: &thir::Pat<'tcx>) -> bool {
+ if pat.ty.has_param_types_or_consts() {
+ return true;
+ }
+
+ match pat.kind.as_ref() {
+ thir::PatKind::Constant { value } => value.has_param_types_or_consts(),
+ thir::PatKind::Range(thir::PatRange { lo, hi, .. }) => {
+ lo.has_param_types_or_consts() || hi.has_param_types_or_consts()
+ }
+ _ => false,
+ }
+ }
+ }
+
+ impl<'a, 'tcx> visit::Visitor<'a, 'tcx> for IsThirPolymorphic<'a, 'tcx> {
fn thir(&self) -> &'a thir::Thir<'tcx> {
&self.thir
}
+ #[instrument(skip(self), level = "debug")]
fn visit_expr(&mut self, expr: &thir::Expr<'tcx>) {
- self.is_poly |= expr.ty.has_param_types_or_consts();
+ self.is_poly |= self.expr_is_poly(expr);
if !self.is_poly {
visit::walk_expr(self, expr)
}
}
+ #[instrument(skip(self), level = "debug")]
fn visit_pat(&mut self, pat: &thir::Pat<'tcx>) {
- self.is_poly |= pat.ty.has_param_types_or_consts();
+ self.is_poly |= self.pat_is_poly(pat);
if !self.is_poly {
visit::walk_pat(self, pat);
}
}
-
- fn visit_const(&mut self, ct: ty::Const<'tcx>) {
- self.is_poly |= ct.has_param_types_or_consts();
- }
}
let mut is_poly_vis = IsThirPolymorphic { is_poly: false, thir: body };
fn recurse_build(&mut self, node: thir::ExprId) -> Result<NodeId, ErrorGuaranteed> {
use thir::ExprKind;
let node = &self.body.exprs[node];
- debug!("recurse_build: node={:?}", node);
Ok(match &node.kind {
// I dont know if handling of these 3 is correct
&ExprKind::Scope { value, .. } => self.recurse_build(value)?,
&ExprKind::PlaceTypeAscription { source, .. }
| &ExprKind::ValueTypeAscription { source, .. } => self.recurse_build(source)?,
+ &ExprKind::Literal { lit, neg} => {
+ let sp = node.span;
+ let constant =
+ match self.tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => {
+ self.tcx.const_error(node.ty)
+ }
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in lit_to_const")
+ }
+ };
+
+ self.nodes.push(Node::Leaf(constant))
+ }
+ &ExprKind::NonHirLiteral { lit , user_ty: _} => {
+ // FIXME Construct a Valtree from this ScalarInt when introducing Valtrees
+ let const_value = ConstValue::Scalar(Scalar::Int(lit));
+ self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, const_value, node.ty)))
+ }
+ &ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
+ let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+
+ let constant = self.tcx.mk_const(ty::ConstS {
+ val: ty::ConstKind::Unevaluated(uneval),
+ ty: node.ty,
+ });
+
+ self.nodes.push(Node::Leaf(constant))
+ }
- // subtle: associated consts are literals this arm handles
- // `<T as Trait>::ASSOC` as well as `12`
- &ExprKind::Literal { literal, .. } => self.nodes.push(Node::Leaf(literal)),
+ ExprKind::ConstParam {param, ..} => {
+ let const_param = self.tcx.mk_const(ty::ConstS {
+ val: ty::ConstKind::Param(*param),
+ ty: node.ty,
+ });
+ self.nodes.push(Node::Leaf(const_param))
+ }
ExprKind::Call { fun, args, .. } => {
let fun = self.recurse_build(*fun)?;
_ => return Ok(None),
}
- let body = tcx.thir_body(def);
- if body.0.borrow().exprs.is_empty() {
- // type error in constant, there is no thir
- return Err(ErrorGuaranteed);
- }
+ let body = tcx.thir_body(def)?;
AbstractConstBuilder::new(tcx, (&*body.0.borrow(), body.1))?
.map(AbstractConstBuilder::build)
pub(super) fn try_unify_abstract_consts<'tcx>(
tcx: TyCtxt<'tcx>,
(a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>),
+ param_env: ty::ParamEnv<'tcx>,
) -> bool {
(|| {
if let Some(a) = AbstractConst::new(tcx, a)? {
if let Some(b) = AbstractConst::new(tcx, b)? {
- return Ok(try_unify(tcx, a, b));
+ let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
+ return Ok(const_unify_ctxt.try_unify(a, b));
}
}
Ok(false)
})()
- .unwrap_or_else(|ErrorGuaranteed| true)
+ .unwrap_or_else(|_: ErrorGuaranteed| true)
// FIXME(generic_const_exprs): We should instead have this
// method return the resulting `ty::Const` and return `ConstKind::Error`
// on `ErrorGuaranteed`.
}
+#[instrument(skip(tcx, f), level = "debug")]
pub fn walk_abstract_const<'tcx, R, F>(
tcx: TyCtxt<'tcx>,
ct: AbstractConst<'tcx>,
where
F: FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
{
+ #[instrument(skip(tcx, f), level = "debug")]
fn recurse<'tcx, R>(
tcx: TyCtxt<'tcx>,
ct: AbstractConst<'tcx>,
) -> ControlFlow<R> {
f(ct)?;
let root = ct.root(tcx);
+ debug!(?root);
match root {
Node::Leaf(_) => ControlFlow::CONTINUE,
Node::Binop(_, l, r) => {
recurse(tcx, ct, &mut f)
}
-/// Tries to unify two abstract constants using structural equality.
-pub(super) fn try_unify<'tcx>(
+struct ConstUnifyCtxt<'tcx> {
tcx: TyCtxt<'tcx>,
- mut a: AbstractConst<'tcx>,
- mut b: AbstractConst<'tcx>,
-) -> bool {
- // We substitute generics repeatedly to allow AbstractConsts to unify where a
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> ConstUnifyCtxt<'tcx> {
+ // Substitutes generics repeatedly to allow AbstractConsts to unify where a
// ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g.
// Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])]
- while let Node::Leaf(a_ct) = a.root(tcx) {
- match AbstractConst::from_const(tcx, a_ct) {
- Ok(Some(a_act)) => a = a_act,
- Ok(None) => break,
- Err(_) => return true,
- }
- }
- while let Node::Leaf(b_ct) = b.root(tcx) {
- match AbstractConst::from_const(tcx, b_ct) {
- Ok(Some(b_act)) => b = b_act,
- Ok(None) => break,
- Err(_) => return true,
+ #[inline]
+ #[instrument(skip(self), level = "debug")]
+ fn try_replace_substs_in_root(
+ &self,
+ mut abstr_const: AbstractConst<'tcx>,
+ ) -> Option<AbstractConst<'tcx>> {
+ while let Node::Leaf(ct) = abstr_const.root(self.tcx) {
+ match AbstractConst::from_const(self.tcx, ct) {
+ Ok(Some(act)) => abstr_const = act,
+ Ok(None) => break,
+ Err(_) => return None,
+ }
}
- }
- match (a.root(tcx), b.root(tcx)) {
- (Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
- if a_ct.ty() != b_ct.ty() {
- return false;
- }
+ Some(abstr_const)
+ }
- match (a_ct.val(), b_ct.val()) {
- // We can just unify errors with everything to reduce the amount of
- // emitted errors here.
- (ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
- (ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
- a_param == b_param
+ /// Tries to unify two abstract constants using structural equality.
+ #[instrument(skip(self), level = "debug")]
+ fn try_unify(&self, a: AbstractConst<'tcx>, b: AbstractConst<'tcx>) -> bool {
+ let a = if let Some(a) = self.try_replace_substs_in_root(a) {
+ a
+ } else {
+ return true;
+ };
+
+ let b = if let Some(b) = self.try_replace_substs_in_root(b) {
+ b
+ } else {
+ return true;
+ };
+
+ let a_root = a.root(self.tcx);
+ let b_root = b.root(self.tcx);
+ debug!(?a_root, ?b_root);
+
+ match (a_root, b_root) {
+ (Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
+ let a_ct = a_ct.eval(self.tcx, self.param_env);
+ debug!("a_ct evaluated: {:?}", a_ct);
+ let b_ct = b_ct.eval(self.tcx, self.param_env);
+ debug!("b_ct evaluated: {:?}", b_ct);
+
+ if a_ct.ty() != b_ct.ty() {
+ return false;
}
- (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
- // If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
- // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
- // means that we only allow inference variables if they are equal.
- (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
- // We expand generic anonymous constants at the start of this function, so this
- // branch should only be taking when dealing with associated constants, at
- // which point directly comparing them seems like the desired behavior.
- //
- // FIXME(generic_const_exprs): This isn't actually the case.
- // We also take this branch for concrete anonymous constants and
- // expand generic anonymous constants with concrete substs.
- (ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
- a_uv == b_uv
+
+ match (a_ct.val(), b_ct.val()) {
+ // We can just unify errors with everything to reduce the amount of
+ // emitted errors here.
+ (ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
+ (ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
+ a_param == b_param
+ }
+ (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
+ // If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
+ // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
+ // means that we only allow inference variables if they are equal.
+ (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
+ // We expand generic anonymous constants at the start of this function, so this
+ // branch should only be taking when dealing with associated constants, at
+ // which point directly comparing them seems like the desired behavior.
+ //
+ // FIXME(generic_const_exprs): This isn't actually the case.
+ // We also take this branch for concrete anonymous constants and
+ // expand generic anonymous constants with concrete substs.
+ (ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
+ a_uv == b_uv
+ }
+ // FIXME(generic_const_exprs): We may want to either actually try
+ // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like
+ // this, for now we just return false here.
+ _ => false,
}
- // FIXME(generic_const_exprs): We may want to either actually try
- // to evaluate `a_ct` and `b_ct` if they are are fully concrete or something like
- // this, for now we just return false here.
- _ => false,
}
+ (Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
+ self.try_unify(a.subtree(al), b.subtree(bl))
+ && self.try_unify(a.subtree(ar), b.subtree(br))
+ }
+ (Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
+ self.try_unify(a.subtree(av), b.subtree(bv))
+ }
+ (Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
+ if a_args.len() == b_args.len() =>
+ {
+ self.try_unify(a.subtree(a_f), b.subtree(b_f))
+ && iter::zip(a_args, b_args)
+ .all(|(&an, &bn)| self.try_unify(a.subtree(an), b.subtree(bn)))
+ }
+ (Node::Cast(a_kind, a_operand, a_ty), Node::Cast(b_kind, b_operand, b_ty))
+ if (a_ty == b_ty) && (a_kind == b_kind) =>
+ {
+ self.try_unify(a.subtree(a_operand), b.subtree(b_operand))
+ }
+ // use this over `_ => false` to make adding variants to `Node` less error prone
+ (Node::Cast(..), _)
+ | (Node::FunctionCall(..), _)
+ | (Node::UnaryOp(..), _)
+ | (Node::Binop(..), _)
+ | (Node::Leaf(..), _) => false,
}
- (Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
- try_unify(tcx, a.subtree(al), b.subtree(bl))
- && try_unify(tcx, a.subtree(ar), b.subtree(br))
- }
- (Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
- try_unify(tcx, a.subtree(av), b.subtree(bv))
- }
- (Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
- if a_args.len() == b_args.len() =>
- {
- try_unify(tcx, a.subtree(a_f), b.subtree(b_f))
- && iter::zip(a_args, b_args)
- .all(|(&an, &bn)| try_unify(tcx, a.subtree(an), b.subtree(bn)))
- }
- (Node::Cast(a_kind, a_operand, a_ty), Node::Cast(b_kind, b_operand, b_ty))
- if (a_ty == b_ty) && (a_kind == b_kind) =>
- {
- try_unify(tcx, a.subtree(a_operand), b.subtree(b_operand))
- }
- // use this over `_ => false` to make adding variants to `Node` less error prone
- (Node::Cast(..), _)
- | (Node::FunctionCall(..), _)
- | (Node::UnaryOp(..), _)
- | (Node::Binop(..), _)
- | (Node::Leaf(..), _) => false,
}
}
use rustc_hir::Item;
use rustc_hir::Node;
use rustc_middle::thir::abstract_const::NotConstEvaluatable;
+use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::error::ExpectedFound;
use rustc_middle::ty::fold::TypeFolder;
use rustc_middle::ty::{
self, SubtypePredicate, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable,
};
-use rustc_session::DiagnosticMessageId;
use rustc_span::symbol::{kw, sym};
use rustc_span::{ExpnKind, MultiSpan, Span, DUMMY_SP};
use std::fmt;
errors: &[FulfillmentError<'tcx>],
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
- );
+ ) -> ErrorGuaranteed;
fn report_overflow_error<T>(
&self,
errors: &[FulfillmentError<'tcx>],
body_id: Option<hir::BodyId>,
fallback_has_occurred: bool,
- ) {
+ ) -> ErrorGuaranteed {
#[derive(Debug)]
struct ErrorDescriptor<'tcx> {
predicate: ty::Predicate<'tcx>,
self.report_fulfillment_error(error, body_id, fallback_has_occurred);
}
}
+
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "expected fullfillment errors")
}
/// Reports that an overflow has occurred and halts compilation. We
let predicate_is_const = ty::BoundConstness::ConstIfConst
== trait_predicate.skip_binder().constness;
- if self.tcx.sess.has_errors() && trait_predicate.references_error() {
+ if self.tcx.sess.has_errors().is_some()
+ && trait_predicate.references_error()
+ {
return;
}
let trait_ref = trait_predicate.to_poly_trait_ref();
(true, Some(None)) => {
Some(format!("{cannot_do_this} in const contexts"))
}
- // overriden post message
+ // overridden post message
(true, Some(Some(post_message))) => {
Some(format!("{cannot_do_this}{post_message}"))
}
| ty::Foreign(did)
| ty::FnDef(did, _)
| ty::Generator(did, ..) => Some(did),
- ty::Adt(def, _) => Some(def.did),
+ ty::Adt(def, _) => Some(def.did()),
_ => None,
};
}
// Already reported in the query.
- SelectionError::NotConstEvaluatable(NotConstEvaluatable::Error(ErrorGuaranteed)) => {
+ SelectionError::NotConstEvaluatable(NotConstEvaluatable::Error(_)) => {
// FIXME(eddyb) remove this once `ErrorGuaranteed` becomes a proof token.
self.tcx.sess.delay_span_bug(span, "`ErrorGuaranteed` without an error");
return;
}
-
- Overflow => {
+ // Already reported.
+ Overflow(OverflowError::Error(_)) => {
+ self.tcx.sess.delay_span_bug(span, "`OverflowError` has been reported");
+ return;
+ }
+ Overflow(_) => {
bug!("overflow should be handled before the `report_selection_error` path");
}
SelectionError::ErrorReporting => {
}
}
- let msg = format!("type mismatch resolving `{}`", predicate);
- let error_id = (DiagnosticMessageId::ErrorId(271), Some(obligation.cause.span), msg);
- let fresh = self.tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id);
- if fresh {
- let mut diag = struct_span_err!(
- self.tcx.sess,
- obligation.cause.span,
- E0271,
- "type mismatch resolving `{}`",
- predicate
- );
- let secondary_span = match predicate.kind().skip_binder() {
- ty::PredicateKind::Projection(proj) => self
- .tcx
- .opt_associated_item(proj.projection_ty.item_def_id)
- .and_then(|trait_assoc_item| {
+ let mut diag = struct_span_err!(
+ self.tcx.sess,
+ obligation.cause.span,
+ E0271,
+ "type mismatch resolving `{}`",
+ predicate
+ );
+ let secondary_span = match predicate.kind().skip_binder() {
+ ty::PredicateKind::Projection(proj) => self
+ .tcx
+ .opt_associated_item(proj.projection_ty.item_def_id)
+ .and_then(|trait_assoc_item| {
+ self.tcx
+ .trait_of_item(proj.projection_ty.item_def_id)
+ .map(|id| (trait_assoc_item, id))
+ })
+ .and_then(|(trait_assoc_item, id)| {
+ let trait_assoc_ident = trait_assoc_item.ident(self.tcx);
+ self.tcx.find_map_relevant_impl(id, proj.projection_ty.self_ty(), |did| {
self.tcx
- .trait_of_item(proj.projection_ty.item_def_id)
- .map(|id| (trait_assoc_item, id))
- })
- .and_then(|(trait_assoc_item, id)| {
- let trait_assoc_ident = trait_assoc_item.ident(self.tcx);
- self.tcx.find_map_relevant_impl(
- id,
- proj.projection_ty.self_ty(),
- |did| {
- self.tcx
- .associated_items(did)
- .in_definition_order()
- .find(|assoc| assoc.ident(self.tcx) == trait_assoc_ident)
- },
- )
+ .associated_items(did)
+ .in_definition_order()
+ .find(|assoc| assoc.ident(self.tcx) == trait_assoc_ident)
})
- .and_then(|item| match self.tcx.hir().get_if_local(item.def_id) {
- Some(
- hir::Node::TraitItem(hir::TraitItem {
- kind: hir::TraitItemKind::Type(_, Some(ty)),
- ..
- })
- | hir::Node::ImplItem(hir::ImplItem {
- kind: hir::ImplItemKind::TyAlias(ty),
- ..
- }),
- ) => {
- Some((ty.span, format!("type mismatch resolving `{}`", predicate)))
- }
- _ => None,
- }),
- _ => None,
- };
- self.note_type_err(&mut diag, &obligation.cause, secondary_span, values, err, true);
- self.note_obligation_cause(&mut diag, obligation);
- diag.emit();
- }
+ })
+ .and_then(|item| match self.tcx.hir().get_if_local(item.def_id) {
+ Some(
+ hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Type(_, Some(ty)),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::TyAlias(ty),
+ ..
+ }),
+ ) => Some((ty.span, format!("type mismatch resolving `{}`", predicate))),
+ _ => None,
+ }),
+ _ => None,
+ };
+ self.note_type_err(&mut diag, &obligation.cause, secondary_span, values, err, true);
+ self.note_obligation_cause(&mut diag, obligation);
+ diag.emit();
});
}
ty::Bool => Some(0),
ty::Char => Some(1),
ty::Str => Some(2),
- ty::Adt(def, _) if tcx.is_diagnostic_item(sym::String, def.did) => Some(2),
+ ty::Adt(def, _) if tcx.is_diagnostic_item(sym::String, def.did()) => Some(2),
ty::Int(..)
| ty::Uint(..)
| ty::Float(..)
// Same hacky approach as above to avoid deluging user
// with error messages.
if arg.references_error()
- || self.tcx.sess.has_errors()
+ || self.tcx.sess.has_errors().is_some()
|| self.is_tainted_by_errors()
{
return;
ty::PredicateKind::Subtype(data) => {
if data.references_error()
- || self.tcx.sess.has_errors()
+ || self.tcx.sess.has_errors().is_some()
|| self.is_tainted_by_errors()
{
// no need to overload user in such cases
}
_ => {
- if self.tcx.sess.has_errors() || self.is_tainted_by_errors() {
+ if self.tcx.sess.has_errors().is_some() || self.is_tainted_by_errors() {
return;
}
let mut err = struct_span_err!(
if self.is_tainted_by_errors()
&& crate_names.len() == 1
- && crate_names[0] == "`core`"
+ && ["`core`", "`alloc`", "`std`"].contains(&crate_names[0].as_str())
&& spans.len() == 0
{
// Avoid complaining about other inference issues for expressions like
if let Some(def) = self_ty.ty_adt_def() {
// We also want to be able to select self's original
// signature with no type arguments resolved
- flags.push((sym::_Self, Some(self.tcx.type_of(def.did).to_string())));
+ flags.push((sym::_Self, Some(self.tcx.type_of(def.did()).to_string())));
}
for param in generics.params.iter() {
if let Some(def) = param_ty.ty_adt_def() {
// We also want to be able to select the parameter's
// original signature with no type arguments resolved
- flags.push((name, Some(self.tcx.type_of(def.did).to_string())));
+ flags.push((name, Some(self.tcx.type_of(def.did()).to_string())));
}
}
}
- if let Some(true) = self_ty.ty_adt_def().map(|def| def.did.is_local()) {
+ if let Some(true) = self_ty.ty_adt_def().map(|def| def.did().is_local()) {
flags.push((sym::crate_local, None));
}
if let Some(def) = aty.ty_adt_def() {
// We also want to be able to select the array's type's original
// signature with no type arguments resolved
- let type_string = self.tcx.type_of(def.did).to_string();
+ let type_string = self.tcx.type_of(def.did()).to_string();
flags.push((sym::_Self, Some(format!("[{}]", type_string))));
let len =
let body_id = obligation.cause.body_id;
let span = obligation.cause.span;
let real_trait_pred = match &*code {
- ObligationCauseCode::ImplDerivedObligation(cause)
- | ObligationCauseCode::DerivedObligation(cause)
+ ObligationCauseCode::ImplDerivedObligation(cause) => cause.derived.parent_trait_pred,
+ ObligationCauseCode::DerivedObligation(cause)
| ObligationCauseCode::BuiltinDerivedObligation(cause) => cause.parent_trait_pred,
_ => trait_pred,
};
return false;
};
- if let ObligationCauseCode::ImplDerivedObligation(obligation) = code {
- try_borrowing(obligation.parent_trait_pred, &[])
+ if let ObligationCauseCode::ImplDerivedObligation(cause) = &*code {
+ try_borrowing(cause.derived.parent_trait_pred, &[])
} else if let ObligationCauseCode::BindingObligation(_, _)
| ObligationCauseCode::ItemObligation(_) = code
{
ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } => {
next_code = Some(parent_code.as_ref());
}
+ ObligationCauseCode::ImplDerivedObligation(cause) => {
+ let ty = cause.derived.parent_trait_pred.skip_binder().self_ty();
+ debug!(
+ "maybe_note_obligation_cause_for_async_await: ImplDerived \
+ parent_trait_ref={:?} self_ty.kind={:?}",
+ cause.derived.parent_trait_pred,
+ ty.kind()
+ );
+
+ match *ty.kind() {
+ ty::Generator(did, ..) => {
+ generator = generator.or(Some(did));
+ outer_generator = Some(did);
+ }
+ ty::GeneratorWitness(..) => {}
+ ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
+ // By introducing a tuple of upvar types into the chain of obligations
+ // of a generator, the first non-generator item is now the tuple itself,
+ // we shall ignore this.
+
+ seen_upvar_tys_infer_tuple = true;
+ }
+ _ if generator.is_none() => {
+ trait_ref = Some(cause.derived.parent_trait_pred.skip_binder());
+ target_ty = Some(ty);
+ }
+ _ => {}
+ }
+
+ next_code = Some(cause.derived.parent_code.as_ref());
+ }
ObligationCauseCode::DerivedObligation(derived_obligation)
- | ObligationCauseCode::BuiltinDerivedObligation(derived_obligation)
- | ObligationCauseCode::ImplDerivedObligation(derived_obligation) => {
+ | ObligationCauseCode::BuiltinDerivedObligation(derived_obligation) => {
let ty = derived_obligation.parent_trait_pred.skip_binder().self_ty();
debug!(
"maybe_note_obligation_cause_for_async_await: \
- parent_trait_ref={:?} self_ty.kind={:?}",
+ parent_trait_ref={:?} self_ty.kind={:?}",
derived_obligation.parent_trait_pred,
ty.kind()
);
if !is_upvar_tys_infer_tuple {
let msg = format!("required because it appears within the type `{}`", ty);
match ty.kind() {
- ty::Adt(def, _) => match self.tcx.opt_item_name(def.did) {
+ ty::Adt(def, _) => match self.tcx.opt_item_name(def.did()) {
Some(ident) => err.span_note(ident.span, &msg),
None => err.note(&msg),
},
}
}
ObligationCauseCode::ImplDerivedObligation(ref data) => {
- let mut parent_trait_pred = self.resolve_vars_if_possible(data.parent_trait_pred);
+ let mut parent_trait_pred =
+ self.resolve_vars_if_possible(data.derived.parent_trait_pred);
parent_trait_pred.remap_constness_diag(param_env);
let parent_def_id = parent_trait_pred.def_id();
let msg = format!(
parent_trait_pred.print_modifiers_and_trait_path(),
parent_trait_pred.skip_binder().self_ty()
);
- let mut candidates = vec![];
- self.tcx.for_each_relevant_impl(
- parent_def_id,
- parent_trait_pred.self_ty().skip_binder(),
- |impl_def_id| match self.tcx.hir().get_if_local(impl_def_id) {
- Some(Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { .. }),
- ..
- })) => {
- candidates.push(impl_def_id);
- }
- _ => {}
- },
- );
- match &candidates[..] {
- [def_id] => match self.tcx.hir().get_if_local(*def_id) {
- Some(Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
- ..
- })) => {
- let mut spans = Vec::with_capacity(2);
- if let Some(trait_ref) = of_trait {
- spans.push(trait_ref.path.span);
- }
- spans.push(self_ty.span);
- err.span_note(spans, &msg)
+ let mut is_auto_trait = false;
+ match self.tcx.hir().get_if_local(data.impl_def_id) {
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(is_auto, ..),
+ ident,
+ ..
+ })) => {
+ // FIXME: we should do something else so that it works even on crate foreign
+ // auto traits.
+ is_auto_trait = matches!(is_auto, hir::IsAuto::Yes);
+ err.span_note(ident.span, &msg)
+ }
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ ..
+ })) => {
+ let mut spans = Vec::with_capacity(2);
+ if let Some(trait_ref) = of_trait {
+ spans.push(trait_ref.path.span);
}
- _ => err.note(&msg),
- },
+ spans.push(self_ty.span);
+ err.span_note(spans, &msg)
+ }
_ => err.note(&msg),
};
let mut parent_predicate = parent_trait_pred.to_predicate(tcx);
- let mut data = data;
+ let mut data = &data.derived;
let mut count = 0;
seen_requirements.insert(parent_def_id);
+ if is_auto_trait {
+ // We don't want to point at the ADT saying "required because it appears within
+ // the type `X`", like we would otherwise do in test `supertrait-auto-trait.rs`.
+ while let ObligationCauseCode::BuiltinDerivedObligation(derived) =
+ &*data.parent_code
+ {
+ let child_trait_ref =
+ self.resolve_vars_if_possible(derived.parent_trait_pred);
+ let child_def_id = child_trait_ref.def_id();
+ if seen_requirements.insert(child_def_id) {
+ break;
+ }
+ data = derived;
+ parent_predicate = child_trait_ref.to_predicate(tcx);
+ parent_trait_pred = child_trait_ref;
+ }
+ }
while let ObligationCauseCode::ImplDerivedObligation(child) = &*data.parent_code {
// Skip redundant recursive obligation notes. See `ui/issue-20413.rs`.
- let child_trait_pred = self.resolve_vars_if_possible(child.parent_trait_pred);
+ let child_trait_pred =
+ self.resolve_vars_if_possible(child.derived.parent_trait_pred);
let child_def_id = child_trait_pred.def_id();
if seen_requirements.insert(child_def_id) {
break;
}
count += 1;
- data = child;
+ data = &child.derived;
parent_predicate = child_trait_pred.to_predicate(tcx);
parent_trait_pred = child_trait_pred;
}
use rustc_data_structures::obligation_forest::ProcessResult;
use rustc_data_structures::obligation_forest::{Error, ForestObligation, Outcome};
use rustc_data_structures::obligation_forest::{ObligationForest, ObligationProcessor};
-use rustc_errors::ErrorGuaranteed;
use rustc_infer::traits::ProjectionCacheKey;
use rustc_infer::traits::{SelectionError, TraitEngine, TraitEngineExt as _, TraitObligation};
use rustc_middle::mir::interpret::ErrorHandled;
if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
(c1.val(), c2.val())
{
- if infcx.try_unify_abstract_consts(a.shrink(), b.shrink()) {
+ if infcx.try_unify_abstract_consts(
+ a.shrink(),
+ b.shrink(),
+ obligation.param_env,
+ ) {
return ProcessResult::Changed(vec![]);
}
}
),
}
}
- (Err(ErrorHandled::Reported(ErrorGuaranteed)), _)
- | (_, Err(ErrorHandled::Reported(ErrorGuaranteed))) => {
- ProcessResult::Error(CodeSelectionError(
- SelectionError::NotConstEvaluatable(NotConstEvaluatable::Error(
- ErrorGuaranteed,
- )),
- ))
- }
+ (Err(ErrorHandled::Reported(reported)), _)
+ | (_, Err(ErrorHandled::Reported(reported))) => ProcessResult::Error(
+ CodeSelectionError(SelectionError::NotConstEvaluatable(
+ NotConstEvaluatable::Error(reported),
+ )),
+ ),
(Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
span_bug!(
obligation.cause.span(self.selcx.tcx()),
#[derive(Clone)]
pub enum CopyImplementationError<'tcx> {
- InfrigingFields(Vec<&'tcx ty::FieldDef>),
+ InfrigingFields(Vec<(&'tcx ty::FieldDef, Ty<'tcx>)>),
NotAnAdt,
HasDestructor,
}
};
let mut infringing = Vec::new();
- for variant in &adt.variants {
+ for variant in adt.variants() {
for field in &variant.fields {
let ty = field.ty(tcx, substs);
if ty.references_error() {
// If it does not, then this field probably doesn't normalize
// to begin with, and point to the bad field's span instead.
let cause = if field
- .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did))
+ .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did()))
.has_param_types_or_consts()
{
cause.clone()
match traits::fully_normalize(&infcx, ctx, cause, param_env, ty) {
Ok(ty) => {
if !infcx.type_is_copy_modulo_regions(param_env, ty, span) {
- infringing.push(field);
+ infringing.push((field, ty));
}
}
Err(errors) => {
match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, predicates) {
Ok(predicates) => predicates,
Err(errors) => {
- infcx.report_fulfillment_errors(&errors, None, false);
- return Err(ErrorGuaranteed);
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
}
};
// represents a legitimate failure due to some kind of
// unconstrained variable, and it seems better not to ICE,
// all things considered.
- tcx.sess.span_err(span, &fixup_err.to_string());
- return Err(ErrorGuaranteed);
+ let reported = tcx.sess.span_err(span, &fixup_err.to_string());
+ return Err(reported);
}
};
if predicates.needs_infer() {
- tcx.sess.delay_span_bug(span, "encountered inference variables after `fully_resolve`");
- Err(ErrorGuaranteed)
+ let reported = tcx
+ .sess
+ .delay_span_bug(span, "encountered inference variables after `fully_resolve`");
+ Err(reported)
} else {
Ok(predicates)
}
// the main traversal loop:
// basically we want to cut the inheritance directed graph into a few non-overlapping slices of nodes
- // that each node is emited after all its descendents have been emitted.
+ // that each node is emitted after all its descendents have been emitted.
// so we convert the directed graph into a tree by skipping all previously visted nodes using a visited set.
// this is done on the fly.
// Each loop run emits a slice - it starts by find a "childless" unvisited node, backtracking upwards, and it
// Starting point 0 stack [D]
// Loop run #0: Stack after diving in is [D B A], A is "childless"
// after this point, all newly visited nodes won't have a vtable that equals to a prefix of this one.
- // Loop run #0: Emiting the slice [B A] (in reverse order), B has a next-sibling node, so this slice stops here.
+ // Loop run #0: Emitting the slice [B A] (in reverse order), B has a next-sibling node, so this slice stops here.
// Loop run #0: Stack after exiting out is [D C], C is the next starting point.
// Loop run #1: Stack after diving in is [D C], C is "childless", since its child A is skipped(already emitted).
- // Loop run #1: Emiting the slice [D C] (in reverse order). No one has a next-sibling node.
+ // Loop run #1: Emitting the slice [D C] (in reverse order). No one has a next-sibling node.
// Loop run #1: Stack after exiting out is []. Now the function exits.
loop {
ty::WithOptConstParam { did, const_param_did: Some(param_did) },
)
},
- try_unify_abstract_consts: const_evaluatable::try_unify_abstract_consts,
+ try_unify_abstract_consts: |tcx, param_env_and| {
+ let (param_env, (a, b)) = param_env_and.into_parts();
+ const_evaluatable::try_unify_abstract_consts(tcx, (a, b), param_env)
+ },
..*providers
};
}
use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
use rustc_parse_format::{ParseMode, Parser, Piece, Position};
use rustc_span::symbol::{kw, sym, Symbol};
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
#[derive(Clone, Debug)]
pub struct OnUnimplementedFormatString(Symbol);
if let Some(note) = note {
diag.note(note);
}
- diag.emit();
- ErrorGuaranteed
+ diag.emit()
}
impl<'tcx> OnUnimplementedDirective {
span: Span,
is_root: bool,
) -> Result<Self, ErrorGuaranteed> {
- let mut errored = false;
+ let mut errored = None;
let mut item_iter = items.iter();
let parse_value = |value_str| {
)
})?;
attr::eval_condition(cond, &tcx.sess.parse_sess, Some(tcx.features()), &mut |item| {
- if let Some(symbol) = item.value_str() && parse_value(symbol).is_err() {
- errored = true;
+ if let Some(symbol) = item.value_str() && let Err(guar) = parse_value(symbol) {
+ errored = Some(guar);
}
true
});
&& note.is_none()
{
if let Some(items) = item.meta_item_list() {
- if let Ok(subcommand) =
- Self::parse(tcx, item_def_id, &items, item.span(), false)
- {
- subcommands.push(subcommand);
- } else {
- errored = true;
- }
+ match Self::parse(tcx, item_def_id, &items, item.span(), false) {
+ Ok(subcommand) => subcommands.push(subcommand),
+ Err(reported) => errored = Some(reported),
+ };
continue;
}
} else if item.has_name(sym::append_const_msg) && append_const_msg.is_none() {
);
}
- if errored {
- Err(ErrorGuaranteed)
+ if let Some(reported) = errored {
+ Err(reported)
} else {
Ok(OnUnimplementedDirective {
condition,
append_const_msg: None,
}))
} else {
- return Err(ErrorGuaranteed);
+ let reported =
+ tcx.sess.delay_span_bug(DUMMY_SP, "of_item: neither meta_item_list nor value_str");
+ return Err(reported);
};
debug!("of_item({:?}) = {:?}", item_def_id, result);
result
match generics.params.iter().find(|param| param.name == s) {
Some(_) => (),
None => {
- struct_span_err!(
+ let reported = struct_span_err!(
tcx.sess,
span,
E0230,
}
)
.emit();
- result = Err(ErrorGuaranteed);
+ result = Err(reported);
}
}
}
// `{:1}` and `{}` are not to be used
Position::ArgumentIs(_) | Position::ArgumentImplicitlyIs(_) => {
- struct_span_err!(
+ let reported = struct_span_err!(
tcx.sess,
span,
E0231,
"only named substitution parameters are allowed"
)
.emit();
- result = Err(ErrorGuaranteed);
+ result = Err(reported);
}
},
}
use rustc_hir::def_id::DefId;
use rustc_hir::lang_items::LangItem;
use rustc_infer::infer::resolve::OpportunisticRegionResolver;
+use rustc_middle::traits::select::OverflowError;
use rustc_middle::ty::fold::{TypeFoldable, TypeFolder};
use rustc_middle::ty::subst::Subst;
use rustc_middle::ty::{self, Term, ToPredicate, Ty, TyCtxt};
if !selcx.tcx().recursion_limit().value_within_limit(obligation.recursion_depth) {
// This should really be an immediate error, but some existing code
// relies on being able to recover from this.
- return Err(ProjectionError::TraitSelectionError(SelectionError::Overflow));
+ return Err(ProjectionError::TraitSelectionError(SelectionError::Overflow(
+ OverflowError::Canonical,
+ )));
}
if obligation.predicate.references_error() {
// `rustc_ty_utils::instance::resolve_associated_item()`.
let node_item =
assoc_def(selcx, impl_data.impl_def_id, obligation.predicate.item_def_id)
- .map_err(|ErrorGuaranteed| ())?;
+ .map_err(|ErrorGuaranteed { .. }| ())?;
if node_item.is_final() {
// Non-specializable items are always projectable.
let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
let tail = selcx.tcx().struct_tail_with_normalize(self_ty, |ty| {
+ // We throw away any obligations we get from this, since we normalize
+ // and confirm these obligations once again during confirmation
normalize_with_depth(
selcx,
obligation.param_env,
| ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
- | ty::Foreign(_)
| ty::Str
| ty::Array(..)
| ty::Slice(_)
| ty::Generator(..)
| ty::GeneratorWitness(..)
| ty::Never
+ // Extern types have unit metadata, according to RFC 2850
+ | ty::Foreign(_)
// If returned by `struct_tail_without_normalization` this is a unit struct
// without any fields, or not a struct, and therefore is Sized.
| ty::Adt(..)
// Integers and floats are always Sized, and so have unit type metadata.
| ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true,
- ty::Projection(..)
+ // type parameters, opaques, and unnormalized projections have pointer
+ // metadata if they're known (e.g. by the param_env) to be sized
+ ty::Param(_) | ty::Projection(..) | ty::Opaque(..)
+ if tail.is_sized(selcx.tcx().at(obligation.cause.span), obligation.param_env) =>
+ {
+ true
+ }
+
+ // FIXME(compiler-errors): are Bound and Placeholder types ever known sized?
+ ty::Param(_)
+ | ty::Projection(..)
| ty::Opaque(..)
- | ty::Param(..)
| ty::Bound(..)
| ty::Placeholder(..)
| ty::Infer(..)
candidate_set.mark_ambiguous();
}
false
- },
+ }
}
}
super::ImplSource::Param(..) => {
super::ImplSource::AutoImpl(..)
| super::ImplSource::Builtin(..)
| super::ImplSource::TraitUpcasting(_)
- | super::ImplSource::ConstDrop(_) => {
+ | super::ImplSource::ConstDestruct(_) => {
// These traits have no associated types.
selcx.tcx().sess.delay_span_bug(
obligation.cause.span,
| super::ImplSource::Builtin(..)
| super::ImplSource::TraitUpcasting(_)
| super::ImplSource::TraitAlias(..)
- | super::ImplSource::ConstDrop(_) => {
+ | super::ImplSource::ConstDestruct(_) => {
// we don't create Select candidates with this kind of resolution
span_bug!(
obligation.cause.span,
let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
let mut obligations = vec![];
- let metadata_ty = self_ty.ptr_metadata_ty(tcx, |ty| {
+ let (metadata_ty, check_is_sized) = self_ty.ptr_metadata_ty(tcx, |ty| {
normalize_with_depth_to(
selcx,
obligation.param_env,
&mut obligations,
)
});
+ if check_is_sized {
+ let sized_predicate = ty::Binder::dummy(ty::TraitRef::new(
+ tcx.require_lang_item(LangItem::Sized, None),
+ tcx.mk_substs_trait(self_ty, &[]),
+ ))
+ .without_const()
+ .to_predicate(tcx);
+ obligations.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ sized_predicate,
+ ));
+ }
let substs = tcx.mk_substs([self_ty.into()].iter());
let metadata_def_id = tcx.require_lang_item(LangItem::Metadata, None);
use rustc_middle::ty::subst::GenericArg;
use rustc_middle::ty::{self, Ty, TyCtxt};
-pub use rustc_middle::traits::query::{DropckOutlivesResult, DtorckConstraint};
+pub use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult};
pub trait AtExt<'tcx> {
fn dropck_outlives(&self, ty: Ty<'tcx>) -> InferOk<'tcx, Vec<GenericArg<'tcx>>>;
}
ty::Adt(def, _) => {
- if Some(def.did) == tcx.lang_items().manually_drop() {
+ if Some(def.did()) == tcx.lang_items().manually_drop() {
// `ManuallyDrop` never has a dtor.
true
} else {
)
}
OverflowError::ErrorReporting => EvaluationResult::EvaluatedToErr,
+ OverflowError::Error(_) => EvaluationResult::EvaluatedToErr,
})
}
Err(OverflowError::ErrorReporting) => EvaluationResult::EvaluatedToErr,
+ Err(OverflowError::Error(_)) => EvaluationResult::EvaluatedToErr,
}
}
}
//! candidates. See the [rustc dev guide] for more details.
//!
//! [rustc dev guide]:https://rustc-dev-guide.rust-lang.org/traits/resolution.html#candidate-assembly
+use hir::LangItem;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_infer::traits::TraitEngine;
Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
}
Ok(_) => Ok(None),
- Err(OverflowError::Canonical) => Err(Overflow),
+ Err(OverflowError::Canonical) => Err(Overflow(OverflowError::Canonical)),
Err(OverflowError::ErrorReporting) => Err(ErrorReporting),
+ Err(OverflowError::Error(e)) => Err(Overflow(OverflowError::Error(e))),
})
.flat_map(Result::transpose)
.collect::<Result<Vec<_>, _>>()?;
} else if lang_items.drop_trait() == Some(def_id)
&& obligation.predicate.is_const_if_const()
{
- self.assemble_const_drop_candidates(obligation, &mut candidates);
+ // holds to make it easier to transition
+ // FIXME(fee1-dead): add a note for selection error of `~const Drop`
+ // when beta is bumped
+ // FIXME: remove this when beta is bumped
+ #[cfg(bootstrap)]
+ {}
+
+ candidates.vec.push(SelectionCandidate::ConstDestructCandidate(None))
+ } else if lang_items.destruct_trait() == Some(def_id) {
+ self.assemble_const_destruct_candidates(obligation, &mut candidates);
} else {
if lang_items.clone_trait() == Some(def_id) {
// Same builtin conditions as `Copy`, i.e., every type which has builtin support
}
}
- fn assemble_const_drop_candidates(
+ fn assemble_const_destruct_candidates(
&mut self,
obligation: &TraitObligation<'tcx>,
candidates: &mut SelectionCandidateSet<'tcx>,
) {
- // If the predicate is `~const Drop` in a non-const environment, we don't actually need
+ // If the predicate is `~const Destruct` in a non-const environment, we don't actually need
// to check anything. We'll short-circuit checking any obligations in confirmation, too.
- if obligation.param_env.constness() == hir::Constness::NotConst {
- candidates.vec.push(ConstDropCandidate(None));
+ if !obligation.is_const() {
+ candidates.vec.push(ConstDestructCandidate(None));
return;
}
| ty::Param(_)
| ty::Placeholder(_)
| ty::Projection(_) => {
- // We don't know if these are `~const Drop`, at least
+ // We don't know if these are `~const Destruct`, at least
// not structurally... so don't push a candidate.
}
| ty::Generator(..)
| ty::Tuple(_)
| ty::GeneratorWitness(_) => {
- // These are built-in, and cannot have a custom `impl const Drop`.
- candidates.vec.push(ConstDropCandidate(None));
+ // These are built-in, and cannot have a custom `impl const Destruct`.
+ candidates.vec.push(ConstDestructCandidate(None));
}
ty::Adt(..) => {
// Find a custom `impl Drop` impl, if it exists
let relevant_impl = self.tcx().find_map_relevant_impl(
- obligation.predicate.def_id(),
+ self.tcx().require_lang_item(LangItem::Drop, None),
obligation.predicate.skip_binder().trait_ref.self_ty(),
Some,
);
if let Some(impl_def_id) = relevant_impl {
// Check that `impl Drop` is actually const, if there is a custom impl
if self.tcx().impl_constness(impl_def_id) == hir::Constness::Const {
- candidates.vec.push(ConstDropCandidate(Some(impl_def_id)));
+ candidates.vec.push(ConstDestructCandidate(Some(impl_def_id)));
}
} else {
// Otherwise check the ADT like a built-in type (structurally)
- candidates.vec.push(ConstDropCandidate(None));
+ candidates.vec.push(ConstDestructCandidate(None));
}
}
//! https://rustc-dev-guide.rust-lang.org/traits/resolution.html#confirmation
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_hir::lang_items::LangItem;
-use rustc_hir::Constness;
use rustc_index::bit_set::GrowableBitSet;
use rustc_infer::infer::InferOk;
use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType;
use crate::traits::project::{normalize_with_depth, normalize_with_depth_to};
use crate::traits::select::TraitObligationExt;
-use crate::traits::util;
-use crate::traits::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
-use crate::traits::ImplSource;
-use crate::traits::Normalized;
-use crate::traits::OutputTypeParameterMismatch;
-use crate::traits::Selection;
-use crate::traits::TraitNotObjectSafe;
-use crate::traits::VtblSegment;
-use crate::traits::{BuiltinDerivedObligation, ImplDerivedObligation};
+use crate::traits::util::{self, closure_trait_ref_and_return_type, predicate_for_trait_def};
use crate::traits::{
- ImplSourceAutoImplData, ImplSourceBuiltinData, ImplSourceClosureData, ImplSourceConstDropData,
- ImplSourceDiscriminantKindData, ImplSourceFnPointerData, ImplSourceGeneratorData,
- ImplSourceObjectData, ImplSourcePointeeData, ImplSourceTraitAliasData,
- ImplSourceTraitUpcastingData, ImplSourceUserDefinedData,
+ BuiltinDerivedObligation, DerivedObligationCause, ImplDerivedObligation,
+ ImplDerivedObligationCause, ImplSource, ImplSourceAutoImplData, ImplSourceBuiltinData,
+ ImplSourceClosureData, ImplSourceConstDestructData, ImplSourceDiscriminantKindData,
+ ImplSourceFnPointerData, ImplSourceGeneratorData, ImplSourceObjectData, ImplSourcePointeeData,
+ ImplSourceTraitAliasData, ImplSourceTraitUpcastingData, ImplSourceUserDefinedData, Normalized,
+ ObjectCastObligation, Obligation, ObligationCause, OutputTypeParameterMismatch,
+ PredicateObligation, Selection, SelectionError, TraitNotObjectSafe, TraitObligation,
+ Unimplemented, VtblSegment,
};
-use crate::traits::{ObjectCastObligation, PredicateObligation, TraitObligation};
-use crate::traits::{Obligation, ObligationCause};
-use crate::traits::{SelectionError, Unimplemented};
use super::BuiltinImplConditions;
use super::SelectionCandidate::{self, *};
Ok(ImplSource::TraitUpcasting(data))
}
- ConstDropCandidate(def_id) => {
- let data = self.confirm_const_drop_candidate(obligation, def_id)?;
- Ok(ImplSource::ConstDrop(data))
+ ConstDestructCandidate(def_id) => {
+ let data = self.confirm_const_destruct_candidate(obligation, def_id)?;
+ Ok(ImplSource::ConstDestruct(data))
}
}
}
debug!(?nested, "vtable_auto_impl");
ensure_sufficient_stack(|| {
let cause = obligation.derived_cause(BuiltinDerivedObligation);
- let mut obligations = self.collect_predicates_for_types(
- obligation.param_env,
- cause,
- obligation.recursion_depth + 1,
- trait_def_id,
- nested,
- );
let trait_obligations: Vec<PredicateObligation<'_>> =
self.infcx.commit_unconditionally(|_| {
let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
let trait_ref = self.infcx.replace_bound_vars_with_placeholders(poly_trait_ref);
- let cause = obligation.derived_cause(ImplDerivedObligation);
self.impl_or_trait_obligations(
- cause,
+ &cause,
obligation.recursion_depth + 1,
obligation.param_env,
trait_def_id,
&trait_ref.substs,
+ obligation.predicate,
)
});
+ let mut obligations = self.collect_predicates_for_types(
+ obligation.param_env,
+ cause,
+ obligation.recursion_depth + 1,
+ trait_def_id,
+ nested,
+ );
+
// Adds the predicates from the trait. Note that this contains a `Self: Trait`
// predicate as usual. It won't have any effect since auto traits are coinductive.
obligations.extend(trait_obligations);
self.infcx.commit_unconditionally(|_| {
let substs = self.rematch_impl(impl_def_id, obligation);
debug!(?substs, "impl substs");
- let cause = obligation.derived_cause(ImplDerivedObligation);
ensure_sufficient_stack(|| {
self.vtable_impl(
impl_def_id,
substs,
- cause,
+ &obligation.cause,
obligation.recursion_depth + 1,
obligation.param_env,
+ obligation.predicate,
)
})
})
&mut self,
impl_def_id: DefId,
substs: Normalized<'tcx, SubstsRef<'tcx>>,
- cause: ObligationCause<'tcx>,
+ cause: &ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
+ parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
debug!(?impl_def_id, ?substs, ?recursion_depth, "vtable_impl");
param_env,
impl_def_id,
&substs.value,
+ parent_trait_pred,
);
debug!(?impl_obligations, "vtable_impl");
.predicates
.into_iter()
{
- if let ty::PredicateKind::Trait(..) = super_trait.kind().skip_binder() {
- let normalized_super_trait = normalize_with_depth_to(
- self,
- obligation.param_env,
- obligation.cause.clone(),
- obligation.recursion_depth + 1,
- super_trait,
- &mut nested,
- );
- nested.push(Obligation::new(
- obligation.cause.clone(),
- obligation.param_env,
- normalized_super_trait,
- ));
- }
+ let normalized_super_trait = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ super_trait,
+ &mut nested,
+ );
+ nested.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ normalized_super_trait,
+ ));
}
let assoc_types: Vec<_> = tcx
let substs = trait_ref.substs;
let trait_obligations = self.impl_or_trait_obligations(
- obligation.cause.clone(),
+ &obligation.cause,
obligation.recursion_depth,
obligation.param_env,
trait_def_id,
&substs,
+ obligation.predicate,
);
debug!(?trait_def_id, ?trait_obligations, "trait alias obligations");
// TraitA+Kx+'a -> TraitB+Ky+'b (trait upcasting coercion).
(&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See `assemble_candidates_for_unsizing` for more info.
- // We already checked the compatiblity of auto traits within `assemble_candidates_for_unsizing`.
+ // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
let principal_a = data_a.principal().unwrap();
source_trait_ref = principal_a.with_self_ty(tcx, source);
upcast_trait_ref = util::supertraits(tcx, source_trait_ref).nth(idx).unwrap();
// Trait+Kx+'a -> Trait+Ky+'b (auto traits and lifetime subtyping).
(&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
// See `assemble_candidates_for_unsizing` for more info.
- // We already checked the compatiblity of auto traits within `assemble_candidates_for_unsizing`.
+ // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
let iter = data_a
.principal()
.map(|b| b.map_bound(ty::ExistentialPredicate::Trait))
Ok(ImplSourceBuiltinData { nested })
}
- fn confirm_const_drop_candidate(
+ fn confirm_const_destruct_candidate(
&mut self,
obligation: &TraitObligation<'tcx>,
impl_def_id: Option<DefId>,
- ) -> Result<ImplSourceConstDropData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
- // `~const Drop` in a non-const environment is always trivially true, since our type is `Drop`
- if obligation.param_env.constness() == Constness::NotConst {
- return Ok(ImplSourceConstDropData { nested: vec![] });
+ ) -> Result<ImplSourceConstDestructData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ // `~const Destruct` in a non-const environment is always trivially true, since our type is `Drop`
+ if !obligation.is_const() {
+ return Ok(ImplSourceConstDestructData { nested: vec![] });
+ }
+
+ let drop_trait = self.tcx().require_lang_item(LangItem::Drop, None);
+ // FIXME: remove if statement below when beta is bumped
+ #[cfg(bootstrap)]
+ {}
+
+ if obligation.predicate.skip_binder().def_id() == drop_trait {
+ return Ok(ImplSourceConstDestructData { nested: vec![] });
}
let tcx = self.tcx();
let cause = obligation.derived_cause(BuiltinDerivedObligation);
// If we have a custom `impl const Drop`, then
- // first check it like a regular impl candidate
+ // first check it like a regular impl candidate.
+ // This is copied from confirm_impl_candidate but remaps the predicate to `~const Drop` beforehand.
if let Some(impl_def_id) = impl_def_id {
- nested.extend(self.confirm_impl_candidate(obligation, impl_def_id).nested);
+ let obligations = self.infcx.commit_unconditionally(|_| {
+ let mut new_obligation = obligation.clone();
+ new_obligation.predicate = new_obligation.predicate.map_bound(|mut trait_pred| {
+ trait_pred.trait_ref.def_id = drop_trait;
+ trait_pred
+ });
+ let substs = self.rematch_impl(impl_def_id, &new_obligation);
+ debug!(?substs, "impl substs");
+
+ let derived = DerivedObligationCause {
+ parent_trait_pred: obligation.predicate,
+ parent_code: obligation.cause.clone_code(),
+ };
+ let derived_code = ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
+ derived,
+ impl_def_id,
+ span: obligation.cause.span,
+ }));
+
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ derived_code,
+ );
+ ensure_sufficient_stack(|| {
+ self.vtable_impl(
+ impl_def_id,
+ substs,
+ &cause,
+ new_obligation.recursion_depth + 1,
+ new_obligation.param_env,
+ obligation.predicate,
+ )
+ })
+ });
+ nested.extend(obligations.nested);
}
// We want to confirm the ADT's fields if we have an ADT
| ty::Foreign(_) => {}
// These types are built-in, so we can fast-track by registering
- // nested predicates for their constituient type(s)
+ // nested predicates for their constituent type(s)
ty::Array(ty, _) | ty::Slice(ty) => {
stack.push(ty);
}
self_ty
.rebind(ty::TraitPredicate {
trait_ref: ty::TraitRef {
- def_id: self.tcx().require_lang_item(LangItem::Drop, None),
+ def_id: self
+ .tcx()
+ .require_lang_item(LangItem::Destruct, None),
substs: self.tcx().mk_substs_trait(nested_ty, &[]),
},
constness: ty::BoundConstness::ConstIfConst,
let predicate = self_ty
.rebind(ty::TraitPredicate {
trait_ref: ty::TraitRef {
- def_id: self.tcx().require_lang_item(LangItem::Drop, None),
+ def_id: self.tcx().require_lang_item(LangItem::Destruct, None),
substs: self.tcx().mk_substs_trait(nested_ty, &[]),
},
constness: ty::BoundConstness::ConstIfConst,
}
}
- Ok(ImplSourceConstDropData { nested })
+ Ok(ImplSourceConstDestructData { nested })
}
}
use super::util;
use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
use super::wf;
-use super::DerivedObligationCause;
-use super::Normalized;
-use super::Obligation;
-use super::ObligationCauseCode;
-use super::Selection;
-use super::SelectionResult;
-use super::TraitQueryMode;
-use super::{ErrorReporting, Overflow, SelectionError};
-use super::{ObligationCause, PredicateObligation, TraitObligation};
+use super::{
+ DerivedObligationCause, ErrorReporting, ImplDerivedObligation, ImplDerivedObligationCause,
+ Normalized, Obligation, ObligationCause, ObligationCauseCode, Overflow, PredicateObligation,
+ Selection, SelectionError, SelectionResult, TraitObligation, TraitQueryMode,
+};
use crate::infer::{InferCtxt, InferOk, TypeFreshener};
use crate::traits::error_reporting::InferCtxtExt;
intercrate_ambiguity_causes: Option<Vec<IntercrateAmbiguityCause>>,
- /// Controls whether or not to filter out negative impls when selecting.
- /// This is used in librustdoc to distinguish between the lack of an impl
- /// and a negative impl
- allow_negative_impls: bool,
-
/// The mode that trait queries run in, which informs our error handling
/// policy. In essence, canonicalized queries need their errors propagated
/// rather than immediately reported because we do not have accurate spans.
freshener: infcx.freshener_keep_static(),
intercrate: false,
intercrate_ambiguity_causes: None,
- allow_negative_impls: false,
query_mode: TraitQueryMode::Standard,
}
}
freshener: infcx.freshener_keep_static(),
intercrate: true,
intercrate_ambiguity_causes: None,
- allow_negative_impls: false,
- query_mode: TraitQueryMode::Standard,
- }
- }
-
- pub fn with_negative(
- infcx: &'cx InferCtxt<'cx, 'tcx>,
- allow_negative_impls: bool,
- ) -> SelectionContext<'cx, 'tcx> {
- debug!(?allow_negative_impls, "with_negative");
- SelectionContext {
- infcx,
- freshener: infcx.freshener_keep_static(),
- intercrate: false,
- intercrate_ambiguity_causes: None,
- allow_negative_impls,
query_mode: TraitQueryMode::Standard,
}
}
freshener: infcx.freshener_keep_static(),
intercrate: false,
intercrate_ambiguity_causes: None,
- allow_negative_impls: false,
query_mode,
}
}
obligation: &TraitObligation<'tcx>,
) -> SelectionResult<'tcx, Selection<'tcx>> {
let candidate = match self.select_from_obligation(obligation) {
- Err(SelectionError::Overflow) => {
+ Err(SelectionError::Overflow(OverflowError::Canonical)) => {
// In standard mode, overflow must have been caught and reported
// earlier.
assert!(self.query_mode == TraitQueryMode::Canonical);
- return Err(SelectionError::Overflow);
+ return Err(SelectionError::Overflow(OverflowError::Canonical));
}
Err(SelectionError::Ambiguous(_)) => {
return Ok(None);
};
match self.confirm_candidate(obligation, candidate) {
- Err(SelectionError::Overflow) => {
+ Err(SelectionError::Overflow(OverflowError::Canonical)) => {
assert!(self.query_mode == TraitQueryMode::Canonical);
- Err(SelectionError::Overflow)
+ Err(SelectionError::Overflow(OverflowError::Canonical))
}
Err(e) => Err(e),
Ok(candidate) => {
match project::poly_project_and_unify_type(self, &project_obligation) {
Ok(Ok(Some(mut subobligations))) => {
'compute_res: {
- // If we've previously marked this projection as 'complete', thne
+ // If we've previously marked this projection as 'complete', then
// use the final cached result (either `EvaluatedToOk` or
// `EvaluatedToOkModuloRegions`), and skip re-evaluating the
// sub-obligations.
if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
(c1.val(), c2.val())
{
- if self.infcx.try_unify_abstract_consts(a.shrink(), b.shrink()) {
+ if self.infcx.try_unify_abstract_consts(
+ a.shrink(),
+ b.shrink(),
+ obligation.param_env,
+ ) {
return Ok(EvaluatedToOk);
}
}
Err(_) => Ok(EvaluatedToErr),
}
}
- (Err(ErrorHandled::Reported(ErrorGuaranteed)), _)
- | (_, Err(ErrorHandled::Reported(ErrorGuaranteed))) => Ok(EvaluatedToErr),
+ (Err(ErrorHandled::Reported(_)), _)
+ | (_, Err(ErrorHandled::Reported(_))) => Ok(EvaluatedToErr),
(Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
span_bug!(
obligation.cause.span(self.tcx()),
Ok(Some(c)) => self.evaluate_candidate(stack, &c),
Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig),
Ok(None) => Ok(EvaluatedToAmbig),
- Err(Overflow) => Err(OverflowError::Canonical),
+ Err(Overflow(OverflowError::Canonical)) => Err(OverflowError::Canonical),
Err(ErrorReporting) => Err(OverflowError::ErrorReporting),
Err(..) => Ok(EvaluatedToErr),
}
match self.query_mode {
TraitQueryMode::Standard => {
if self.infcx.is_tainted_by_errors() {
- return Err(OverflowError::ErrorReporting);
+ return Err(OverflowError::Error(
+ ErrorGuaranteed::unchecked_claim_error_was_emitted(),
+ ));
}
self.infcx.report_overflow_error(error_obligation, true);
}
GeneratorCandidate => {}
// FnDef where the function is const
FnPointerCandidate { is_const: true } => {}
- ConstDropCandidate(_) => {}
+ ConstDestructCandidate(_) => {}
_ => {
// reject all other types of candidates
continue;
if let ImplCandidate(def_id) = candidate {
if ty::ImplPolarity::Reservation == tcx.impl_polarity(def_id)
|| obligation.polarity() == tcx.impl_polarity(def_id)
- || self.allow_negative_impls
{
result.push(candidate);
}
}
if self.can_use_global_caches(param_env) {
- if let Err(Overflow) = candidate {
+ if let Err(Overflow(OverflowError::Canonical)) = candidate {
// Don't cache overflow globally; we only produce this in certain modes.
} else if !pred.needs_infer() {
if !candidate.needs_infer() {
};
// (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
- // `DiscriminantKindCandidate`, and `ConstDropCandidate` to anything else.
+ // `DiscriminantKindCandidate`, and `ConstDestructCandidate` to anything else.
//
// This is a fix for #53123 and prevents winnowing from accidentally extending the
// lifetime of a variable.
BuiltinCandidate { has_nested: false }
| DiscriminantKindCandidate
| PointeeCandidate
- | ConstDropCandidate(_),
+ | ConstDestructCandidate(_),
_,
) => true,
(
BuiltinCandidate { has_nested: false }
| DiscriminantKindCandidate
| PointeeCandidate
- | ConstDropCandidate(_),
+ | ConstDestructCandidate(_),
) => false,
(ParamCandidate(other), ParamCandidate(victim)) => {
#[tracing::instrument(level = "debug", skip(self, cause, param_env))]
fn impl_or_trait_obligations(
&mut self,
- cause: ObligationCause<'tcx>,
+ cause: &ObligationCause<'tcx>,
recursion_depth: usize,
param_env: ty::ParamEnv<'tcx>,
def_id: DefId, // of impl or trait
substs: SubstsRef<'tcx>, // for impl or trait
+ parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
) -> Vec<PredicateObligation<'tcx>> {
let tcx = self.tcx();
debug!(?predicates);
assert_eq!(predicates.parent, None);
let mut obligations = Vec::with_capacity(predicates.predicates.len());
- for (predicate, _) in predicates.predicates {
- debug!(?predicate);
+ let parent_code = cause.clone_code();
+ for (predicate, span) in predicates.predicates {
+ let span = *span;
+ let derived =
+ DerivedObligationCause { parent_trait_pred, parent_code: parent_code.clone() };
+ let code = ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
+ derived,
+ impl_def_id: def_id,
+ span,
+ }));
+ let cause = ObligationCause::new(cause.span, cause.body_id, code);
let predicate = normalize_with_depth_to(
self,
param_env,
predicate.subst(tcx, substs),
&mut obligations,
);
- obligations.push(Obligation {
- cause: cause.clone(),
- recursion_depth,
- param_env,
- predicate,
- });
+ obligations.push(Obligation { cause, recursion_depth, param_env, predicate });
}
obligations
use crate::traits::select::IntercrateAmbiguityCause;
use crate::traits::{self, coherence, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine};
use rustc_data_structures::fx::FxHashSet;
-use rustc_errors::struct_span_err;
+use rustc_errors::{struct_span_err, EmissionGuarantee};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::lint::LintDiagnosticBuilder;
use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
-use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::ty::{self, ImplSubject, TyCtxt};
use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
use rustc_session::lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS;
-use rustc_span::DUMMY_SP;
+use rustc_span::{Span, DUMMY_SP};
-use super::util::impl_trait_ref_and_oblig;
+use super::util;
use super::{FulfillmentContext, SelectionContext};
/// Information pertinent to an overlapping impl error.
param_env, source_trait_ref, target_impl
);
+ let source_trait = ImplSubject::Trait(source_trait_ref);
+
let selcx = &mut SelectionContext::new(&infcx);
let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl);
- let (target_trait_ref, obligations) =
- impl_trait_ref_and_oblig(selcx, param_env, target_impl, target_substs);
+ let (target_trait, obligations) =
+ util::impl_subject_and_oblig(selcx, param_env, target_impl, target_substs);
// do the impls unify? If not, no specialization.
let Ok(InferOk { obligations: more_obligations, .. }) =
- infcx.at(&ObligationCause::dummy(), param_env).eq(source_trait_ref, target_trait_ref)
+ infcx.at(&ObligationCause::dummy(), param_env).eq(source_trait, target_trait)
else {
debug!(
"fulfill_implication: {:?} does not unify with {:?}",
- source_trait_ref, target_trait_ref
+ source_trait, target_trait
);
return Err(());
};
[] => {
debug!(
"fulfill_implication: an impl for {:?} specializes {:?}",
- source_trait_ref, target_trait_ref
+ source_trait, target_trait
);
// Now resolve the *substitution* we built for the target earlier, replacing
debug!(
"fulfill_implication: for impls on {:?} and {:?}, \
could not fulfill: {:?} given {:?}",
- source_trait_ref,
- target_trait_ref,
+ source_trait,
+ target_trait,
errors,
param_env.caller_bounds()
);
}
}
- sg.has_errored = true;
- err.emit();
+ sg.has_errored = Some(err.emit());
}
fn report_conflicting_impls(
// Work to be done after we've built the DiagnosticBuilder. We have to define it
// now because the struct_lint methods don't return back the DiagnosticBuilder
// that's passed in.
- let decorate = |err: LintDiagnosticBuilder<'_>| {
+ fn decorate<G: EmissionGuarantee>(
+ tcx: TyCtxt<'_>,
+ overlap: OverlapError,
+ used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
+ impl_span: Span,
+ err: LintDiagnosticBuilder<'_, G>,
+ ) -> G {
let msg = format!(
"conflicting implementations of trait `{}`{}{}",
overlap.trait_desc,
coherence::add_placeholder_note(&mut err);
}
err.emit()
- };
+ }
match used_to_be_allowed {
None => {
- sg.has_errored = true;
- if overlap.with_impl.is_local() || !tcx.orphan_check_crate(()).contains(&impl_def_id) {
+ let reported = if overlap.with_impl.is_local()
+ || !tcx.orphan_check_crate(()).contains(&impl_def_id)
+ {
let err = struct_span_err!(tcx.sess, impl_span, E0119, "");
- decorate(LintDiagnosticBuilder::new(err.forget_guarantee()));
+ Some(decorate(
+ tcx,
+ overlap,
+ used_to_be_allowed,
+ impl_span,
+ LintDiagnosticBuilder::new(err),
+ ))
} else {
- tcx.sess.delay_span_bug(impl_span, "impl should have failed the orphan check");
- }
+ Some(tcx.sess.delay_span_bug(impl_span, "impl should have failed the orphan check"))
+ };
+ sg.has_errored = reported;
}
Some(kind) => {
let lint = match kind {
lint,
tcx.hir().local_def_id_to_hir_id(impl_def_id),
impl_span,
- decorate,
- )
+ |ldb| {
+ decorate(tcx, overlap, used_to_be_allowed, impl_span, ldb);
+ },
+ );
}
};
}
#[derive(Debug)]
pub enum NonStructuralMatchTy<'tcx> {
- Adt(&'tcx AdtDef),
+ Adt(AdtDef<'tcx>),
Param,
Dynamic,
Foreign,
}
};
- if !self.seen.insert(adt_def.did) {
+ if !self.seen.insert(adt_def.did()) {
debug!("Search already seen adt_def: {:?}", adt_def);
return ControlFlow::CONTINUE;
}
if !self.type_marked_structural(ty) {
debug!("Search found ty: {:?}", ty);
- return ControlFlow::Break(NonStructuralMatchTy::Adt(&adt_def));
+ return ControlFlow::Break(NonStructuralMatchTy::Adt(adt_def));
}
// structural-match does not care about the
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::subst::{GenericArg, Subst, SubstsRef};
-use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable};
+use rustc_middle::ty::{self, ImplSubject, ToPredicate, Ty, TyCtxt, TypeFoldable};
use super::{Normalized, Obligation, ObligationCause, PredicateObligation, SelectionContext};
pub use rustc_infer::traits::{self, util::*};
// Other
///////////////////////////////////////////////////////////////////////////
-/// Instantiate all bound parameters of the impl with the given substs,
-/// returning the resulting trait ref and all obligations that arise.
+/// Instantiate all bound parameters of the impl subject with the given substs,
+/// returning the resulting subject and all obligations that arise.
/// The obligations are closed under normalization.
-pub fn impl_trait_ref_and_oblig<'a, 'tcx>(
+pub fn impl_subject_and_oblig<'a, 'tcx>(
selcx: &mut SelectionContext<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
impl_def_id: DefId,
impl_substs: SubstsRef<'tcx>,
-) -> (ty::TraitRef<'tcx>, impl Iterator<Item = PredicateObligation<'tcx>>) {
- let impl_trait_ref = selcx.tcx().impl_trait_ref(impl_def_id).unwrap();
- let impl_trait_ref = impl_trait_ref.subst(selcx.tcx(), impl_substs);
- let Normalized { value: impl_trait_ref, obligations: normalization_obligations1 } =
- super::normalize(selcx, param_env, ObligationCause::dummy(), impl_trait_ref);
+) -> (ImplSubject<'tcx>, impl Iterator<Item = PredicateObligation<'tcx>>) {
+ let subject = selcx.tcx().impl_subject(impl_def_id);
+ let subject = subject.subst(selcx.tcx(), impl_substs);
+ let Normalized { value: subject, obligations: normalization_obligations1 } =
+ super::normalize(selcx, param_env, ObligationCause::dummy(), subject);
let predicates = selcx.tcx().predicates_of(impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
.chain(normalization_obligations1.into_iter())
.chain(normalization_obligations2.into_iter());
- (impl_trait_ref, impl_obligations)
+ (subject, impl_obligations)
}
pub fn predicates_for_generics<'tcx>(
ty::Adt(def, substs) => {
// WfNominalType
- let obligations = self.nominal_obligations(def.did, substs);
+ let obligations = self.nominal_obligations(def.did(), substs);
self.out.extend(obligations);
}
rustc_index = { path = "../rustc_index" }
rustc_ast = { path = "../rustc_ast" }
rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.76.0"
-chalk-engine = "0.76.0"
-chalk-solve = "0.76.0"
+chalk-ir = "0.80.0"
+chalk-engine = "0.80.0"
+chalk-solve = "0.80.0"
smallvec = { version = "1.6.1", features = ["union", "may_dangle"] }
rustc_infer = { path = "../rustc_infer" }
rustc_trait_selection = { path = "../rustc_trait_selection" }
&self,
trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
) -> Arc<chalk_solve::rust_ir::TraitDatum<RustInterner<'tcx>>> {
+ use chalk_solve::rust_ir::WellKnownTrait::*;
+
let def_id = trait_id.0;
let trait_def = self.interner.tcx.trait_def(def_id);
let lang_items = self.interner.tcx.lang_items();
let well_known = if lang_items.sized_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Sized)
+ Some(Sized)
} else if lang_items.copy_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Copy)
+ Some(Copy)
} else if lang_items.clone_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Clone)
+ Some(Clone)
} else if lang_items.drop_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Drop)
+ Some(Drop)
} else if lang_items.fn_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Fn)
+ Some(Fn)
} else if lang_items.fn_once_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::FnOnce)
+ Some(FnOnce)
} else if lang_items.fn_mut_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::FnMut)
+ Some(FnMut)
} else if lang_items.unsize_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Unsize)
+ Some(Unsize)
} else if lang_items.unpin_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::Unpin)
+ Some(Unpin)
} else if lang_items.coerce_unsized_trait() == Some(def_id) {
- Some(chalk_solve::rust_ir::WellKnownTrait::CoerceUnsized)
+ Some(CoerceUnsized)
+ } else if lang_items.dispatch_from_dyn_trait() == Some(def_id) {
+ Some(DispatchFromDyn)
} else {
None
};
) -> Arc<chalk_solve::rust_ir::AdtDatum<RustInterner<'tcx>>> {
let adt_def = adt_id.0;
- let bound_vars = bound_vars_for_item(self.interner.tcx, adt_def.did);
+ let bound_vars = bound_vars_for_item(self.interner.tcx, adt_def.did());
let binders = binders_for(self.interner, bound_vars);
- let where_clauses = self.where_clauses_for(adt_def.did, bound_vars);
+ let where_clauses = self.where_clauses_for(adt_def.did(), bound_vars);
let variants: Vec<_> = adt_def
- .variants
+ .variants()
.iter()
.map(|variant| chalk_solve::rust_ir::AdtVariantDatum {
fields: variant
chalk_solve::rust_ir::AdtDatumBound { variants, where_clauses },
),
flags: chalk_solve::rust_ir::AdtFlags {
- upstream: !adt_def.did.is_local(),
+ upstream: !adt_def.did().is_local(),
fundamental: adt_def.is_fundamental(),
phantom_data: adt_def.is_phantom_data(),
},
let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i)).intern(self.interner);
let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i)).intern(self.interner);
Arc::new(chalk_solve::rust_ir::AdtRepr {
- c: adt_def.repr.c(),
- packed: adt_def.repr.packed(),
- int: adt_def.repr.int.map(|i| match i {
+ c: adt_def.repr().c(),
+ packed: adt_def.repr().packed(),
+ int: adt_def.repr().int.map(|i| match i {
attr::IntType::SignedInt(ty) => match ty {
ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
})
}
+ fn adt_size_align(
+ &self,
+ adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AdtSizeAlign> {
+ let tcx = self.interner.tcx;
+ let did = adt_id.0.did();
+
+ // Grab the ADT and the param we might need to calculate its layout
+ let param_env = tcx.param_env(did);
+ let adt_ty = tcx.type_of(did);
+
+ // The ADT is a 1-zst if it's a ZST and its alignment is 1.
+ // Mark the ADT as _not_ a 1-zst if there was a layout error.
+ let one_zst = if let Ok(layout) = tcx.layout_of(param_env.and(adt_ty)) {
+ layout.is_zst() && layout.align.abi.bytes() == 1
+ } else {
+ false
+ };
+
+ Arc::new(chalk_solve::rust_ir::AdtSizeAlign::from_one_zst(one_zst))
+ }
+
fn fn_def_datum(
&self,
fn_def_id: chalk_ir::FnDefId<RustInterner<'tcx>>,
let trait_ref = self.interner.tcx.impl_trait_ref(impl_def_id).unwrap();
let self_ty = trait_ref.self_ty();
let provides = match (self_ty.kind(), chalk_ty) {
- (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did == id.0.did,
+ (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did() == id.0.did(),
(_, AssociatedType(_ty_id, ..)) => {
// FIXME(chalk): See https://github.com/rust-lang/rust/pull/77152#discussion_r494484774
false
Unpin => lang_items.unpin_trait(),
CoerceUnsized => lang_items.coerce_unsized_trait(),
DiscriminantKind => lang_items.discriminant_kind_trait(),
+ DispatchFromDyn => lang_items.dispatch_from_dyn_trait(),
};
def_id.map(chalk_ir::TraitId)
}
&self,
adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
) -> chalk_ir::Variances<RustInterner<'tcx>> {
- let variances = self.interner.tcx.variances_of(adt_id.0.did);
+ let variances = self.interner.tcx.variances_of(adt_id.0.did());
chalk_ir::Variances::from_iter(
self.interner,
variances.iter().map(|v| v.lower_into(self.interner)),
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_trait_selection::traits::query::dropck_outlives::trivial_dropck_outlives;
use rustc_trait_selection::traits::query::dropck_outlives::{
- DropckOutlivesResult, DtorckConstraint,
+ DropckConstraint, DropckOutlivesResult,
};
use rustc_trait_selection::traits::query::normalize::AtExt;
use rustc_trait_selection::traits::query::{CanonicalTyGoal, NoSolution};
let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
let cause = ObligationCause::dummy();
- let mut constraints = DtorckConstraint::empty();
+ let mut constraints = DropckConstraint::empty();
while let Some((ty, depth)) = ty_stack.pop() {
debug!(
"{} kinds, {} overflows, {} ty_stack",
for_ty: Ty<'tcx>,
depth: usize,
ty: Ty<'tcx>,
- constraints: &mut DtorckConstraint<'tcx>,
+ constraints: &mut DropckConstraint<'tcx>,
) -> Result<(), NoSolution> {
debug!("dtorck_constraint_for_ty({:?}, {:?}, {:?}, {:?})", span, for_ty, depth, ty);
}
ty::Adt(def, substs) => {
- let DtorckConstraint { dtorck_types, outlives, overflows } =
- tcx.at(span).adt_dtorck_constraint(def.did)?;
+ let DropckConstraint { dtorck_types, outlives, overflows } =
+ tcx.at(span).adt_dtorck_constraint(def.did())?;
// FIXME: we can try to recursively `dtorck_constraint_on_ty`
// there, but that needs some way to handle cycles.
constraints.dtorck_types.extend(dtorck_types.iter().map(|t| t.subst(tcx, substs)));
crate fn adt_dtorck_constraint(
tcx: TyCtxt<'_>,
def_id: DefId,
-) -> Result<&DtorckConstraint<'_>, NoSolution> {
+) -> Result<&DropckConstraint<'_>, NoSolution> {
let def = tcx.adt_def(def_id);
let span = tcx.def_span(def_id);
debug!("dtorck_constraint: {:?}", def);
// `PhantomData`.
let substs = InternalSubsts::identity_for_item(tcx, def_id);
assert_eq!(substs.len(), 1);
- let result = DtorckConstraint {
+ let result = DropckConstraint {
outlives: vec![],
dtorck_types: vec![substs.type_at(0)],
overflows: vec![],
return Ok(tcx.arena.alloc(result));
}
- let mut result = DtorckConstraint::empty();
+ let mut result = DropckConstraint::empty();
for field in def.all_fields() {
let fty = tcx.type_of(field.did);
dtorck_constraint_for_ty(tcx, span, fty, 0, fty, &mut result)?;
Ok(tcx.arena.alloc(result))
}
-fn dedup_dtorck_constraint(c: &mut DtorckConstraint<'_>) {
+fn dedup_dtorck_constraint(c: &mut DropckConstraint<'_>) {
let mut outlives = FxHashSet::default();
let mut dtorck_types = FxHashSet::default();
resolved_ty,
);
let span = tcx.def_span(leaf_def.item.def_id);
- tcx.sess.delay_span_bug(span, &msg);
+ let reported = tcx.sess.delay_span_bug(span, &msg);
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
}
| traits::ImplSource::DiscriminantKind(..)
| traits::ImplSource::Pointee(..)
| traits::ImplSource::TraitUpcasting(_)
- | traits::ImplSource::ConstDrop(_) => None,
+ | traits::ImplSource::ConstDestruct(_) => None,
})
}
// parameter without a `Copy` bound, then we conservatively return that it
// needs drop.
let adt_has_dtor =
- |adt_def: &ty::AdtDef| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ |adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
let res =
drop_tys_helper(tcx, query.value, query.param_env, adt_has_dtor, false).next().is_some();
impl<'tcx, F, I> Iterator for NeedsDropTypes<'tcx, F>
where
- F: Fn(&ty::AdtDef, SubstsRef<'tcx>) -> NeedsDropResult<I>,
+ F: Fn(ty::AdtDef<'tcx>, SubstsRef<'tcx>) -> NeedsDropResult<I>,
I: Iterator<Item = Ty<'tcx>>,
{
type Item = NeedsDropResult<Ty<'tcx>>;
/// "significant" / "insignificant".
Insignificant,
- /// Type has a `Drop` implentation.
+ /// Type has a `Drop` implementation.
Significant,
}
// This is a helper function for `adt_drop_tys` and `adt_significant_drop_tys`.
-// Depending on the implentation of `adt_has_dtor`, it is used to check if the
+// Depending on the implementation of `adt_has_dtor`, it is used to check if the
// ADT has a destructor or if the ADT only has a significant destructor. For
// understanding significant destructor look at `adt_significant_drop_tys`.
fn drop_tys_helper<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
param_env: rustc_middle::ty::ParamEnv<'tcx>,
- adt_has_dtor: impl Fn(&ty::AdtDef) -> Option<DtorType>,
+ adt_has_dtor: impl Fn(ty::AdtDef<'tcx>) -> Option<DtorType>,
only_significant: bool,
) -> impl Iterator<Item = NeedsDropResult<Ty<'tcx>>> {
fn with_query_cache<'tcx>(
iter.into_iter().try_fold(Vec::new(), |mut vec, subty| {
match subty.kind() {
ty::Adt(adt_id, subst) => {
- for subty in tcx.adt_drop_tys(adt_id.did)? {
+ for subty in tcx.adt_drop_tys(adt_id.did())? {
vec.push(subty.subst(tcx, subst));
}
}
})
}
- let adt_components = move |adt_def: &ty::AdtDef, substs: SubstsRef<'tcx>| {
+ let adt_components = move |adt_def: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>| {
if adt_def.is_manually_drop() {
debug!("drop_tys_helper: `{:?}` is manually drop", adt_def);
Ok(Vec::new())
fn adt_consider_insignificant_dtor<'tcx>(
tcx: TyCtxt<'tcx>,
-) -> impl Fn(&ty::AdtDef) -> Option<DtorType> + 'tcx {
- move |adt_def: &ty::AdtDef| {
- let is_marked_insig = tcx.has_attr(adt_def.did, sym::rustc_insignificant_dtor);
+) -> impl Fn(ty::AdtDef<'tcx>) -> Option<DtorType> + 'tcx {
+ move |adt_def: ty::AdtDef<'tcx>| {
+ let is_marked_insig = tcx.has_attr(adt_def.did(), sym::rustc_insignificant_dtor);
if is_marked_insig {
// In some cases like `std::collections::HashMap` where the struct is a wrapper around
// a type that is a Drop type, and the wrapped type (eg: `hashbrown::HashMap`) lies
}
}
-fn adt_drop_tys(tcx: TyCtxt<'_>, def_id: DefId) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
+fn adt_drop_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> Result<&ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
// This is for the "adt_drop_tys" query, that considers all `Drop` impls, therefore all dtors are
// significant.
let adt_has_dtor =
- |adt_def: &ty::AdtDef| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ |adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
// `tcx.type_of(def_id)` identical to `tcx.make_adt(def, identity_substs)`
drop_tys_helper(tcx, tcx.type_of(def_id), tcx.param_env(def_id), adt_has_dtor, false)
.collect::<Result<Vec<_>, _>>()
.map(|components| tcx.intern_type_list(&components))
}
// If `def_id` refers to a generic ADT, the queries above and below act as if they had been handed
-// a `tcx.make_ty(def, identity_substs)` and as such it is legal to substitue the generic parameters
+// a `tcx.make_ty(def, identity_substs)` and as such it is legal to substitute the generic parameters
// of the ADT into the outputted `ty`s.
fn adt_significant_drop_tys(
tcx: TyCtxt<'_>,
// cleared when recursing to check A, but `shadow_seen` won't, so that we
// can catch cases of mutual recursion where A also contains B).
let mut seen: Vec<Ty<'_>> = Vec::new();
- let mut shadow_seen: Vec<&'tcx ty::AdtDef> = Vec::new();
+ let mut shadow_seen: Vec<ty::AdtDef<'tcx>> = Vec::new();
let mut representable_cache = FxHashMap::default();
let mut force_result = false;
let r = is_type_structurally_recursive(
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
force_result: &mut bool,
.take(shadow_seen.len() - 1)
.any(|seen_def| seen_def == def)
{
- let adt_def_id = def.did;
+ let adt_def_id = def.did();
let raw_adt_ty = tcx.type_of(adt_def_id);
debug!("are_inner_types_recursive: checking nested type: {:?}", raw_adt_ty);
}
}
-fn same_adt<'tcx>(ty: Ty<'tcx>, def: &'tcx ty::AdtDef) -> bool {
+fn same_adt<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
match *ty.kind() {
ty::Adt(ty_def, _) => ty_def == def,
_ => false,
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
force_result: &mut bool,
tcx: TyCtxt<'tcx>,
sp: Span,
seen: &mut Vec<Ty<'tcx>>,
- shadow_seen: &mut Vec<&'tcx ty::AdtDef>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
ty: Ty<'tcx>,
force_result: &mut bool,
// For structs and enums, track all previously seen types by pushing them
// onto the 'seen' stack.
seen.push(ty);
- shadow_seen.push(def);
+ shadow_seen.push(*def);
let out = are_inner_types_recursive(
tcx,
sp,
fn sized_constraint_for_ty<'tcx>(
tcx: TyCtxt<'tcx>,
- adtdef: &ty::AdtDef,
+ adtdef: ty::AdtDef<'tcx>,
ty: Ty<'tcx>,
) -> Vec<Ty<'tcx>> {
use ty::TyKind::*;
})
.without_const()
.to_predicate(tcx);
- let predicates = tcx.predicates_of(adtdef.did).predicates;
+ let predicates = tcx.predicates_of(adtdef.did()).predicates;
if predicates.iter().any(|(p, _)| *p == sized_predicate) { vec![] } else { vec![ty] }
}
let def = tcx.adt_def(def_id);
let result = tcx.mk_type_list(
- def.variants
+ def.variants()
.iter()
.flat_map(|v| v.fields.last())
.flat_map(|f| sized_constraint_for_ty(tcx, def, tcx.type_of(f.did))),
// (a) It has no variants (i.e. an empty `enum`);
// (b) Each of its variants (a single one in the case of a `struct`) has at least
// one uninhabited field.
- def.variants.iter().all(|var| {
+ def.variants().iter().all(|var| {
var.fields.iter().any(|field| {
let ty = tcx.type_of(field.did).subst(tcx, substs);
tcx.conservative_is_privately_uninhabited(param_env.and(ty))
use crate::astconv::AstConv;
use rustc_data_structures::fx::FxHashMap;
-use rustc_errors::{pluralize, struct_span_err, Applicability};
+use rustc_errors::{pluralize, struct_span_err, Applicability, ErrorGuaranteed};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::ty;
"the precise format of `Fn`-family traits' type parameters is subject to change",
);
// Do not suggest the other syntax if we are in trait impl:
- // the desugaring would contain an associated type constrait.
+ // the desugaring would contain an associated type constraint.
if !is_impl {
let args = trait_segment
.args
ty_param_name: &str,
assoc_name: Ident,
span: Span,
- ) where
+ ) -> ErrorGuaranteed
+ where
I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
{
// The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a
err.span_label(span, format!("associated type `{}` not found", assoc_name));
}
- err.emit();
+ err.emit()
}
/// When there are any missing associated types, emit an E0191 error and attempt to supply a
use crate::errors::AssocTypeBindingNotAllowed;
use crate::structured_errors::{GenericArgsInfo, StructuredDiagnostic, WrongNumberOfGenericArgs};
use rustc_ast::ast::ParamKindOrd;
-use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorGuaranteed};
+use rustc_errors::{struct_span_err, Applicability, Diagnostic};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::GenericArg;
+use rustc_infer::infer::TyCtxtInferExt;
use rustc_middle::ty::{
self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, Ty, TyCtxt,
};
if let Some(param_local_id) = param.def_id.as_local() {
let param_hir_id = tcx.hir().local_def_id_to_hir_id(param_local_id);
let param_name = tcx.hir().ty_param_name(param_hir_id);
- let param_type = tcx.type_of(param.def_id);
+ let param_type = tcx.infer_ctxt().enter(|infcx| {
+ infcx.resolve_numeric_literals_with_default(tcx.type_of(param.def_id))
+ });
if param_type.is_suggestable() {
err.span_suggestion(
tcx.def_span(src_def_id),
let param_counts = gen_params.own_counts();
// Subtracting from param count to ensure type params synthesized from `impl Trait`
- // cannot be explictly specified even with `explicit_generic_args_with_impl_trait`
+ // cannot be explicitly specified even with `explicit_generic_args_with_impl_trait`
// feature enabled.
let synth_type_param_count = if tcx.features().explicit_generic_args_with_impl_trait {
gen_params
let mut invalid_args = vec![];
- let mut check_lifetime_args = |min_expected_args: usize,
- max_expected_args: usize,
- provided_args: usize,
- late_bounds_ignore: bool|
- -> bool {
- if (min_expected_args..=max_expected_args).contains(&provided_args) {
- return true;
- }
+ let mut check_lifetime_args =
+ |min_expected_args: usize,
+ max_expected_args: usize,
+ provided_args: usize,
+ late_bounds_ignore: bool| {
+ if (min_expected_args..=max_expected_args).contains(&provided_args) {
+ return Ok(());
+ }
- if late_bounds_ignore {
- return true;
- }
+ if late_bounds_ignore {
+ return Ok(());
+ }
- if provided_args > max_expected_args {
- invalid_args.extend(
- gen_args.args[max_expected_args..provided_args].iter().map(|arg| arg.span()),
- );
- };
+ if provided_args > max_expected_args {
+ invalid_args.extend(
+ gen_args.args[max_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ };
- let gen_args_info = if provided_args > min_expected_args {
- invalid_args.extend(
- gen_args.args[min_expected_args..provided_args].iter().map(|arg| arg.span()),
- );
- let num_redundant_args = provided_args - min_expected_args;
- GenericArgsInfo::ExcessLifetimes { num_redundant_args }
- } else {
- let num_missing_args = min_expected_args - provided_args;
- GenericArgsInfo::MissingLifetimes { num_missing_args }
+ let gen_args_info = if provided_args > min_expected_args {
+ invalid_args.extend(
+ gen_args.args[min_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ let num_redundant_args = provided_args - min_expected_args;
+ GenericArgsInfo::ExcessLifetimes { num_redundant_args }
+ } else {
+ let num_missing_args = min_expected_args - provided_args;
+ GenericArgsInfo::MissingLifetimes { num_missing_args }
+ };
+
+ let reported = WrongNumberOfGenericArgs::new(
+ tcx,
+ gen_args_info,
+ seg,
+ gen_params,
+ has_self as usize,
+ gen_args,
+ def_id,
+ )
+ .diagnostic()
+ .emit();
+
+ Err(reported)
};
- WrongNumberOfGenericArgs::new(
- tcx,
- gen_args_info,
- seg,
- gen_params,
- has_self as usize,
- gen_args,
- def_id,
- )
- .diagnostic()
- .emit();
-
- false
- };
-
let min_expected_lifetime_args = if infer_lifetimes { 0 } else { param_counts.lifetimes };
let max_expected_lifetime_args = param_counts.lifetimes;
let num_provided_lifetime_args = gen_args.num_lifetime_params();
"check_types_and_consts"
);
if (expected_min..=expected_max).contains(&provided) {
- return true;
+ return Ok(());
}
let num_default_params = expected_max - expected_min;
debug!(?gen_args_info);
- WrongNumberOfGenericArgs::new(
+ let reported = WrongNumberOfGenericArgs::new(
tcx,
gen_args_info,
seg,
.diagnostic()
.emit_unless(gen_args.has_err());
- false
+ Err(reported)
};
let args_correct = {
GenericArgCountResult {
explicit_late_bound,
- correct: if lifetimes_correct && args_correct {
- Ok(())
- } else {
- Err(GenericArgCountMismatch { reported: Some(ErrorGuaranteed), invalid_args })
- },
+ correct: lifetimes_correct.and(args_correct).map_err(|reported| {
+ GenericArgCountMismatch { reported: Some(reported), invalid_args }
+ }),
}
}
LATE_BOUND_LIFETIME_ARGUMENTS,
args.args[0].id(),
multispan,
- |lint| lint.build(msg).emit(),
+ |lint| {
+ lint.build(msg).emit();
+ },
);
}
type_str: &str,
trait_str: &str,
name: Symbol,
- ) {
+ ) -> ErrorGuaranteed {
let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type");
if let (true, Ok(snippet)) = (
self.tcx()
Applicability::HasPlaceholders,
);
}
- err.emit();
+ err.emit()
}
// Search for a bound on a type parameter which includes the associated item
(Some(bound), _) => (bound, matching_candidates.next()),
(None, Some(bound)) => (bound, const_candidates.next()),
(None, None) => {
- self.complain_about_assoc_type_not_found(
+ let reported = self.complain_about_assoc_type_not_found(
all_candidates,
&ty_param_name(),
assoc_name,
span,
);
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
};
debug!("one_bound_for_assoc_type: bound = {:?}", bound);
where_bounds.join(",\n"),
));
}
- err.emit();
+ let reported = err.emit();
if !where_bounds.is_empty() {
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
}
if let ty::Adt(adt_def, _) = qself_ty.kind() {
if adt_def.is_enum() {
let variant_def = adt_def
- .variants
+ .variants()
.iter()
- .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did));
+ .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did()));
if let Some(variant_def) = variant_def {
if permit_variants {
tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None);
// trait reference.
let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else {
// A cycle error occurred, most likely.
- return Err(ErrorGuaranteed);
+ let guar = tcx.sess.delay_span_bug(span, "expected cycle error");
+ return Err(guar);
};
self.one_bound_for_assoc_type(
| Res::Def(DefKind::TyParam, param_did),
) => self.find_bound_for_assoc_item(param_did.expect_local(), assoc_ident, span)?,
_ => {
- if variant_resolution.is_some() {
+ let reported = if variant_resolution.is_some() {
// Variant in type position
let msg = format!("expected type, found variant `{}`", assoc_ident);
- tcx.sess.span_err(span, &msg);
+ tcx.sess.span_err(span, &msg)
} else if qself_ty.is_enum() {
let mut err = struct_span_err!(
tcx.sess,
let adt_def = qself_ty.ty_adt_def().expect("enum is not an ADT");
if let Some(suggested_name) = find_best_match_for_name(
&adt_def
- .variants
+ .variants()
.iter()
.map(|variant| variant.name)
.collect::<Vec<Symbol>>(),
);
}
- if let Some(sp) = tcx.hir().span_if_local(adt_def.did) {
+ if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) {
let sp = tcx.sess.source_map().guess_head_span(sp);
err.span_label(sp, format!("variant `{}` not found here", assoc_ident));
}
- err.emit();
- } else if !qself_ty.references_error() {
+ err.emit()
+ } else if let Some(reported) = qself_ty.error_reported() {
+ reported
+ } else {
// Don't print `TyErr` to the user.
self.report_ambiguous_associated_type(
span,
&qself_ty.to_string(),
"Trait",
assoc_ident.name,
- );
- }
- return Err(ErrorGuaranteed);
+ )
+ };
+ return Err(reported);
}
};
// but it was used in a type position.
let Some(item) = item else {
let msg = format!("found associated const `{assoc_ident}` when type was expected");
- tcx.sess.struct_span_err(span, &msg).emit();
- return Err(ErrorGuaranteed);
+ let guar = tcx.sess.struct_span_err(span, &msg).emit();
+ return Err(guar);
};
let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, assoc_segment, bound);
let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap());
let (generics_def_id, index) = if let Some(adt_def) = adt_def {
debug_assert!(adt_def.is_enum());
- (adt_def.did, last)
+ (adt_def.did(), last)
} else if last >= 1 && segments[last - 1].args.is_some() {
// Everything but the penultimate segment should have no
// parameters at all.
sugg,
Applicability::MachineApplicable,
)
- .emit()
+ .emit();
},
);
}
let t = self.resolve_vars_if_possible(t);
- if t.references_error() {
- return Err(ErrorGuaranteed);
+ if let Some(reported) = t.error_reported() {
+ return Err(reported);
}
if self.type_is_known_to_be_sized_modulo_regions(t, span) {
| ty::Adt(..)
| ty::Never
| ty::Error(_) => {
- self.tcx
+ let reported = self
+ .tcx
.sess
.delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t));
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
})
}
}
impl From<ErrorGuaranteed> for CastError {
- fn from(ErrorGuaranteed: ErrorGuaranteed) -> Self {
+ fn from(_: ErrorGuaranteed) -> Self {
CastError::ErrorGuaranteed
}
}
// inference is more completely known.
match cast_ty.kind() {
ty::Dynamic(..) | ty::Slice(..) => {
- check.report_cast_to_unsized_type(fcx);
- Err(ErrorGuaranteed)
+ let reported = check.report_cast_to_unsized_type(fcx);
+ Err(reported)
}
_ => Ok(check),
}
}
}
- fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) {
- if self.cast_ty.references_error() || self.expr_ty.references_error() {
- return;
+ fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) -> ErrorGuaranteed {
+ if let Some(reported) =
+ self.cast_ty.error_reported().or_else(|| self.expr_ty.error_reported())
+ {
+ return reported;
}
let tstr = fcx.ty_to_string(self.cast_ty);
err.span_help(self.expr.span, "consider using a box or reference as appropriate");
}
}
- err.emit();
+ err.emit()
}
fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
}
None => {
tcx.struct_span_lint_hir(UNSUPPORTED_CALLING_CONVENTIONS, hir_id, span, |lint| {
- lint.build("use of calling convention not supported on this target").emit()
+ lint.build("use of calling convention not supported on this target").emit();
});
}
}
};
if let Some(header) = item {
- tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple")
+ tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple");
}
};
let arg_is_panic_info = match *inputs[0].kind() {
ty::Ref(region, ty, mutbl) => match *ty.kind() {
ty::Adt(ref adt, _) => {
- adt.did == panic_info_did
+ adt.did() == panic_info_did
&& mutbl == hir::Mutability::Not
&& !region.is_static()
}
let span = hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_alloc_layout = match inputs[0].kind() {
- ty::Adt(ref adt, _) => adt.did == alloc_layout_did,
+ ty::Adt(ref adt, _) => adt.did() == alloc_layout_did,
_ => false,
};
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
- if def.repr.simd() {
+ if def.repr().simd() {
check_simd(tcx, span, def_id);
}
origin: &hir::OpaqueTyOrigin,
) -> Result<(), ErrorGuaranteed> {
if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
- match origin {
+ let reported = match origin {
hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
_ => opaque_type_cycle_error(tcx, def_id, span),
- }
- Err(ErrorGuaranteed)
+ };
+ Err(reported)
} else {
Ok(())
}
trace!(?hidden_type);
match infcx.at(&misc_cause, param_env).eq(opaque_defn.concrete_ty, hidden_type) {
Ok(infer_ok) => inh.register_infer_ok_obligations(infer_ok),
- Err(ty_err) => tcx.sess.delay_span_bug(
- span,
- &format!(
- "could not check bounds on revealed type `{}`:\n{}",
- hidden_type, ty_err,
- ),
- ),
+ Err(ty_err) => {
+ tcx.sess.delay_span_bug(
+ span,
+ &format!(
+ "could not check bounds on revealed type `{}`:\n{}",
+ hidden_type, ty_err,
+ ),
+ );
+ }
}
}
}
let len = if let ty::Array(_ty, c) = e.kind() {
- c.try_eval_usize(tcx, tcx.param_env(def.did))
+ c.try_eval_usize(tcx, tcx.param_env(def.did()))
} else {
Some(fields.len() as u64)
};
}
}
-pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: &ty::AdtDef) {
- let repr = def.repr;
+pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
+ let repr = def.repr();
if repr.packed() {
- for attr in tcx.get_attrs(def.did).iter() {
+ for attr in tcx.get_attrs(def.did()).iter() {
for r in attr::find_repr_attrs(&tcx.sess, attr) {
if let attr::ReprPacked(pack) = r
&& let Some(repr_pack) = repr.pack
)
.emit();
} else {
- if let Some(def_spans) = check_packed_inner(tcx, def.did, &mut vec![]) {
+ if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) {
let mut err = struct_span_err!(
tcx.sess,
sp,
&if first {
format!(
"`{}` contains a field of type `{}`",
- tcx.type_of(def.did),
+ tcx.type_of(def.did()),
ident
)
} else {
) -> Option<Vec<(DefId, Span)>> {
if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() {
if def.is_struct() || def.is_union() {
- if def.repr.align.is_some() {
- return Some(vec![(def.did, DUMMY_SP)]);
+ if def.repr().align.is_some() {
+ return Some(vec![(def.did(), DUMMY_SP)]);
}
stack.push(def_id);
for field in &def.non_enum_variant().fields {
if let ty::Adt(def, _) = field.ty(tcx, substs).kind() {
- if !stack.contains(&def.did) {
- if let Some(mut defs) = check_packed_inner(tcx, def.did, stack) {
- defs.push((def.did, field.ident(tcx).span));
+ if !stack.contains(&def.did()) {
+ if let Some(mut defs) = check_packed_inner(tcx, def.did(), stack) {
+ defs.push((def.did(), field.ident(tcx).span));
return Some(defs);
}
}
None
}
-pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: &'tcx ty::AdtDef) {
- if !adt.repr.transparent() {
+pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) {
+ if !adt.repr().transparent() {
return;
}
let sp = tcx.sess.source_map().guess_head_span(sp);
.emit();
}
- if adt.variants.len() != 1 {
- bad_variant_count(tcx, adt, sp, adt.did);
- if adt.variants.is_empty() {
+ if adt.variants().len() != 1 {
+ bad_variant_count(tcx, adt, sp, adt.did());
+ if adt.variants().is_empty() {
// Don't bother checking the fields. No variants (and thus no fields) exist.
return;
}
}
}
- let repr_type_ty = def.repr.discr_type().to_ty(tcx);
+ let repr_type_ty = def.repr().discr_type().to_ty(tcx);
if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
if !tcx.features().repr128 {
feature_err(
}
}
- if tcx.adt_def(def_id).repr.int.is_none() && tcx.features().arbitrary_enum_discriminant {
+ if tcx.adt_def(def_id).repr().int.is_none() && tcx.features().arbitrary_enum_discriminant {
let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..));
let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some();
for ((_, discr), v) in iter::zip(def.discriminants(tcx), vs) {
// Check for duplicate discriminant values
if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) {
- let variant_did = def.variants[VariantIdx::new(i)].def_id;
+ let variant_did = def.variant(VariantIdx::new(i)).def_id;
let variant_i_hir_id = tcx.hir().local_def_id_to_hir_id(variant_did.expect_local());
let variant_i = tcx.hir().expect_variant(variant_i_hir_id);
let i_span = match variant_i.disr_expr {
if ty.references_error() {
// If there is already another error, do not emit
// an error for not using a type parameter.
- assert!(tcx.sess.has_errors());
+ assert!(tcx.sess.has_errors().is_some());
return;
}
pub(super) use wfcheck::check_impl_item as check_impl_item_well_formed;
-fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) {
+fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
.span_label(span, "recursive `async fn`")
.note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
.note(
"consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
)
- .emit();
+ .emit()
}
/// Emit an error for recursive opaque types.
///
/// If all the return expressions evaluate to `!`, then we explain that the error will go away
/// after changing it. This can happen when a user uses `panic!()` or similar as a placeholder.
-fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) {
+fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed {
let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type");
let mut label = false;
if !label {
err.span_label(span, "cannot resolve opaque type");
}
- err.emit();
+ err.emit()
}
let impl_m_span = tcx.sess.source_map().guess_head_span(impl_m_span);
- if let Err(ErrorGuaranteed) =
- compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
- {
+ if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) {
return;
}
- if let Err(ErrorGuaranteed) =
- compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
- {
+ if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) {
return;
}
- if let Err(ErrorGuaranteed) =
+ if let Err(_) =
compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
{
return;
}
- if let Err(ErrorGuaranteed) = compare_synthetic_generics(tcx, impl_m, trait_m) {
+ if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) {
return;
}
- if let Err(ErrorGuaranteed) =
- compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
+ if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
{
return;
}
- if let Err(ErrorGuaranteed) = compare_const_param_types(tcx, impl_m, trait_m, trait_item_span) {
+ if let Err(_) = compare_const_param_types(tcx, impl_m, trait_m, trait_item_span) {
return;
}
}
traits::normalize(&mut selcx, param_env, normalize_cause, predicate);
inh.register_predicates(obligations);
- let mut cause = cause.clone();
- cause.span = span;
+ let cause = ObligationCause::new(
+ span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplMethodObligation {
+ impl_item_def_id: impl_m.def_id,
+ trait_item_def_id: trait_m.def_id,
+ },
+ );
inh.register_predicate(traits::Obligation::new(cause, param_env, predicate));
}
&terr,
false,
);
- diag.emit();
- return Err(ErrorGuaranteed);
+
+ return Err(diag.emit());
}
// Check that all obligations are satisfied by the implementation's
// version.
let errors = inh.fulfillment_cx.borrow_mut().select_all_or_error(&infcx);
if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- return Err(ErrorGuaranteed);
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
}
// Finally, resolve all regions. This catches wily misuses of
.map_or(def_sp, |g| g.span)
});
- tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
+ let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
span,
item_kind,
ident: impl_m.ident(tcx),
generics_span,
});
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
Ok(())
} else {
err.note_trait_signature(trait_m.name.to_string(), trait_m.signature(tcx));
}
- err.emit();
- return Err(ErrorGuaranteed);
+ let reported = err.emit();
+ return Err(reported);
}
(true, false) => {
} else {
err.note_trait_signature(trait_m.name.to_string(), trait_m.signature(tcx));
}
- err.emit();
- return Err(ErrorGuaranteed);
+ let reported = err.emit();
+ return Err(reported);
}
}
let item_kind = assoc_item_kind_str(impl_);
- let mut err_occurred = false;
+ let mut err_occurred = None;
for (kind, trait_count, impl_count) in matchings {
if impl_count != trait_count {
- err_occurred = true;
-
let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() {
let trait_item = tcx.hir().expect_trait_item(def_id);
if trait_item.generics.params.is_empty() {
err.span_label(*span, "`impl Trait` introduces an implicit type parameter");
}
- err.emit();
+ let reported = err.emit();
+ err_occurred = Some(reported);
}
}
- if err_occurred { Err(ErrorGuaranteed) } else { Ok(()) }
+ if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) }
}
fn compare_number_of_method_arguments<'tcx>(
impl_number_args
),
);
- err.emit();
- return Err(ErrorGuaranteed);
+ let reported = err.emit();
+ return Err(reported);
}
Ok(())
// 2. Explanation as to what is going on
// If we get here, we already have the same number of generics, so the zip will
// be okay.
- let mut error_found = false;
+ let mut error_found = None;
let impl_m_generics = tcx.generics_of(impl_m.def_id);
let trait_m_generics = tcx.generics_of(trait_m.def_id);
let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind {
}
_ => unreachable!(),
}
- err.emit();
- error_found = true;
+ let reported = err.emit();
+ error_found = Some(reported);
}
}
- if error_found { Err(ErrorGuaranteed) } else { Ok(()) }
+ if let Some(reported) = error_found { Err(reported) } else { Ok(()) }
}
fn compare_const_param_types<'tcx>(
trait_ty
),
);
- err.emit();
- return Err(ErrorGuaranteed);
+ let reported = err.emit();
+ return Err(reported);
}
}
// version.
let errors = inh.fulfillment_cx.borrow_mut().select_all_or_error(&infcx);
if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- return Err(ErrorGuaranteed);
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
}
// Finally, resolve all regions. This catches wily misuses of
// version.
let errors = inh.fulfillment_cx.borrow_mut().select_all_or_error(&infcx);
if !errors.is_empty() {
- infcx.report_fulfillment_errors(&errors, None, false);
- return Err(ErrorGuaranteed);
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
}
// Finally, resolve all regions. This catches wily misuses of
{
if e.hir_id == id {
if let Some(span) = expr.span.find_ancestor_inside(block_span) {
- let return_suggestions =
- if self.tcx.is_diagnostic_item(sym::Result, expected_adt.did) {
- vec!["Ok(())".to_string()]
- } else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did)
- {
- vec!["None".to_string(), "Some(())".to_string()]
- } else {
- return;
- };
+ let return_suggestions = if self
+ .tcx
+ .is_diagnostic_item(sym::Result, expected_adt.did())
+ {
+ vec!["Ok(())".to_string()]
+ } else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) {
+ vec!["None".to_string(), "Some(())".to_string()]
+ } else {
+ return;
+ };
if let Some(indent) =
self.tcx.sess.source_map().indentation_before(span.shrink_to_lo())
{
}
let compatible_variants: Vec<String> = expected_adt
- .variants
+ .variants()
.iter()
.filter(|variant| variant.fields.len() == 1)
.filter_map(|variant| {
err.multipart_suggestions(
&format!(
"try wrapping the expression in a variant of `{}`",
- self.tcx.def_path_str(expected_adt.did)
+ self.tcx.def_path_str(expected_adt.did())
),
compatible_variants.into_iter().map(|variant| {
vec![
tcx,
drop_impl_did.expect_local(),
dtor_self_type,
- adt_def.did,
+ adt_def.did(),
)?;
ensure_drop_predicates_are_implied_by_item_defn(
tcx,
dtor_predicates,
- adt_def.did.expect_local(),
+ adt_def.did().expect_local(),
self_to_impl_substs,
)
}
// already checked by coherence, but compilation may
// not have been terminated.
let span = tcx.def_span(drop_impl_did);
- tcx.sess.delay_span_bug(
+ let reported = tcx.sess.delay_span_bug(
span,
&format!("should have been rejected by coherence check: {}", dtor_self_type),
);
- Err(ErrorGuaranteed)
+ Err(reported)
}
}
}
Err(_) => {
let item_span = tcx.def_span(self_type_did);
let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
- struct_span_err!(
+ let reported = struct_span_err!(
tcx.sess,
drop_impl_span,
E0366,
),
)
.emit();
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
}
let errors = fulfillment_cx.select_all_or_error(&infcx);
if !errors.is_empty() {
// this could be reached when we get lazy normalization
- infcx.report_fulfillment_errors(&errors, None, false);
- return Err(ErrorGuaranteed);
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
}
// NB. It seems a bit... suspicious to use an empty param-env
(
ty::PredicateKind::ConstEvaluatable(a),
ty::PredicateKind::ConstEvaluatable(b),
- ) => tcx.try_unify_abstract_consts((a, b)),
+ ) => tcx.try_unify_abstract_consts(self_param_env.and((a, b))),
(
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)),
ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)),
if !assumptions_in_impl_context.iter().copied().any(predicate_matches_closure) {
let item_span = tcx.def_span(self_type_did);
let self_descr = tcx.def_kind(self_type_did).descr(self_type_did.to_def_id());
- struct_span_err!(
+ let reported = struct_span_err!(
tcx.sess,
predicate_sp,
E0367,
)
.span_note(item_span, "the implementor must specify the same requirement")
.emit();
- result = Err(ErrorGuaranteed);
+ result = Err(reported);
}
}
use rustc_hir::def::{CtorKind, DefKind, Res};
use rustc_hir::def_id::DefId;
use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
use rustc_hir::{ExprKind, HirId, QPath};
use rustc_infer::infer;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
// else an error would have been flagged by the
// `loops` pass for using break with an expression
// where you are not supposed to.
- assert!(expr_opt.is_none() || self.tcx.sess.has_errors());
+ assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some());
}
// If we encountered a `break`, then (no surprise) it may be possible to break from the
deferred_cast_checks.push(cast_check);
t_cast
}
- Err(ErrorGuaranteed) => self.tcx.ty_error(),
+ Err(_) => self.tcx.ty_error(),
}
}
}
// Prohibit struct expressions when non-exhaustive flag is set.
let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
- if !adt.did.is_local() && variant.is_field_list_non_exhaustive() {
+ if !adt.did().is_local() && variant.is_field_list_non_exhaustive() {
self.tcx
.sess
.emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() });
if inaccessible_remaining_fields {
self.report_inaccessible_fields(adt_ty, span);
} else {
- self.report_missing_fields(adt_ty, span, remaining_fields);
+ self.report_missing_fields(
+ adt_ty,
+ span,
+ remaining_fields,
+ variant,
+ ast_fields,
+ substs,
+ );
}
}
}
adt_ty: Ty<'tcx>,
span: Span,
remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ substs: SubstsRef<'tcx>,
) {
let len = remaining_fields.len();
}
};
- struct_span_err!(
+ let mut err = struct_span_err!(
self.tcx.sess,
span,
E0063,
remaining_fields_names,
truncated_fields_error,
adt_ty
- )
- .span_label(span, format!("missing {}{}", remaining_fields_names, truncated_fields_error))
- .emit();
+ );
+ err.span_label(
+ span,
+ format!("missing {}{}", remaining_fields_names, truncated_fields_error),
+ );
+
+ // If the last field is a range literal, but it isn't supposed to be, then they probably
+ // meant to use functional update syntax.
+ //
+ // I don't use 'is_range_literal' because only double-sided, half-open ranges count.
+ if let Some((
+ last,
+ ExprKind::Struct(
+ QPath::LangItem(LangItem::Range, ..),
+ &[ref range_start, ref range_end],
+ _,
+ ),
+ )) = ast_fields.last().map(|last| (last, &last.expr.kind)) &&
+ let variant_field =
+ variant.fields.iter().find(|field| field.ident(self.tcx) == last.ident) &&
+ let range_def_id = self.tcx.lang_items().range_struct() &&
+ variant_field
+ .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
+ .map(|adt| adt.did())
+ != range_def_id
+ {
+ let instead = self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(range_end.expr.span)
+ .map(|s| format!(" from `{s}`"))
+ .unwrap_or(String::new());
+ err.span_suggestion(
+ range_start.span.shrink_to_hi(),
+ &format!("to set the remaining fields{instead}, separate the last named field with a comma"),
+ ",".to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ err.emit();
}
/// Report an error for a struct field expression when there are invisible fields.
ty::Adt(base_def, substs) if !base_def.is_enum() => {
debug!("struct named {:?}", base_t);
let (ident, def_scope) =
- self.tcx.adjust_ident_and_get_scope(field, base_def.did, self.body_id);
+ self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id);
let fields = &base_def.non_enum_variant().fields;
if let Some(index) = fields
.iter()
self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
return field_ty;
}
- private_candidate = Some((adjustments, base_def.did, field_ty));
+ private_candidate = Some((adjustments, base_def.did(), field_ty));
}
}
ty::Tuple(tys) => {
if let ty::RawPtr(ty_and_mut) = expr_t.kind() {
if let ty::Adt(adt_def, _) = ty_and_mut.ty.kind() {
- if adt_def.variants.len() == 1
+ if adt_def.variants().len() == 1
&& adt_def
- .variants
+ .variants()
.iter()
.next()
.unwrap()
fn suggest_fields_on_recordish(
&self,
err: &mut Diagnostic,
- def: &'tcx ty::AdtDef,
+ def: ty::AdtDef<'tcx>,
field: Ident,
access_span: Span,
) {
}
// FIXME: currently we never try to compose autoderefs
// and ReifyFnPointer/UnsafeFnPointer, but we could.
- _ => self.tcx.sess.delay_span_bug(
- expr.span,
- &format!(
- "while adjusting {:?}, can't compose {:?} and {:?}",
- expr,
- entry.get(),
- adj
- ),
- ),
- };
+ _ => {
+ self.tcx.sess.delay_span_bug(
+ expr.span,
+ &format!(
+ "while adjusting {:?}, can't compose {:?} and {:?}",
+ expr,
+ entry.get(),
+ adj
+ ),
+ );
+ }
+ }
*entry.get_mut() = adj;
}
}
.or_else(|error| {
let result = match error {
method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)),
- _ => Err(ErrorGuaranteed),
+ _ => Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()),
};
// If we have a path like `MyTrait::missing_method`, then don't register
if let Some(self_ty) = self_ty =>
{
let adt_def = self_ty.ty_adt_def().unwrap();
- user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did, self_ty });
+ user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did(), self_ty });
is_alias_variant_ctor = true;
}
Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => {
return None;
}
Res::Def(DefKind::Variant, _) => match ty.kind() {
- ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did, substs)),
+ ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)),
_ => bug!("unexpected type: {:?}", ty),
},
Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
| Res::SelfTy { .. } => match ty.kind() {
ty::Adt(adt, substs) if !adt.is_enum() => {
- Some((adt.non_enum_variant(), adt.did, substs))
+ Some((adt.non_enum_variant(), adt.did(), substs))
}
_ => None,
},
let mut result_code = code.clone();
loop {
let parent = match &*code {
+ ObligationCauseCode::ImplDerivedObligation(c) => {
+ c.derived.parent_code.clone()
+ }
ObligationCauseCode::BuiltinDerivedObligation(c)
- | ObligationCauseCode::ImplDerivedObligation(c)
| ObligationCauseCode::DerivedObligation(c) => c.parent_code.clone(),
_ => break,
};
}
let self_: ty::subst::GenericArg<'_> = match &*unpeel_to_top(error.obligation.cause.clone_code()) {
ObligationCauseCode::BuiltinDerivedObligation(code) |
- ObligationCauseCode::ImplDerivedObligation(code) |
ObligationCauseCode::DerivedObligation(code) => {
code.parent_trait_pred.self_ty().skip_binder().into()
}
+ ObligationCauseCode::ImplDerivedObligation(code) => {
+ code.derived.parent_trait_pred.self_ty().skip_binder().into()
+ }
_ if let ty::PredicateKind::Trait(predicate) =
error.obligation.predicate.kind().skip_binder() => {
predicate.self_ty().into()
let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap();
let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap();
match expected.kind() {
- ty::Adt(def, _) if Some(def.did) == pin_did => {
+ ty::Adt(def, _) if Some(def.did()) == pin_did => {
if self.can_coerce(pin_box_found, expected) {
debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected);
match found.kind() {
can_suggest: bool,
fn_id: hir::HirId,
) -> bool {
+ let found =
+ self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found));
// Only suggest changing the return type for methods that
// haven't set a return type at all (and aren't `fn main()` or an impl).
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) {
err.span_suggestion(
span,
"try adding a return type",
- format!("-> {} ", self.resolve_vars_with_obligations(found)),
+ format!("-> {} ", found),
Applicability::MachineApplicable,
);
true
}
(&hir::FnRetTy::DefaultReturn(span), false, true, true) => {
- err.span_label(span, "possibly return type missing here?");
+ // FIXME: if `found` could be `impl Iterator` or `impl Fn*`, we should suggest
+ // that.
+ err.span_suggestion(
+ span,
+ "a return type might be missing here",
+ "-> _ ".to_string(),
+ Applicability::HasPlaceholders,
+ );
true
}
(&hir::FnRetTy::DefaultReturn(span), _, false, true) => {
//! This calculates the types which has storage which lives across a suspension point in a
//! generator from the perspective of typeck. The actual types used at runtime
-//! is calculated in `rustc_const_eval::transform::generator` and may be a subset of the
+//! is calculated in `rustc_mir_transform::generator` and may be a subset of the
//! types computed here.
use self::drop_ranges::DropRanges;
use rustc_hir::hir_id::HirIdSet;
use rustc_hir::intravisit::{self, Visitor};
use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
-use rustc_middle::middle::region::{self, YieldData};
+use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::symbol::sym;
use rustc_span::Span;
self.expr_count += 1;
- let scope = self.region_scope_tree.temporary_scope(expr.hir_id.local_id);
+ debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
+
+ // Typically, the value produced by an expression is consumed by its parent in some way,
+ // so we only have to check if the parent contains a yield (note that the parent may, for
+ // example, store the value into a local variable, but then we already consider local
+ // variables to be live across their scope).
+ //
+ // However, in the case of temporary values, we are going to store the value into a
+ // temporary on the stack that is live for the current temporary scope and then return a
+ // reference to it. That value may be live across the entire temporary scope.
+ let scope = if self.drop_ranges.is_borrowed_temporary(expr) {
+ self.region_scope_tree.temporary_scope(expr.hir_id.local_id)
+ } else {
+ debug!("parent_node: {:?}", self.fcx.tcx.hir().find_parent_node(expr.hir_id));
+ match self.fcx.tcx.hir().find_parent_node(expr.hir_id) {
+ Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
+ None => self.region_scope_tree.temporary_scope(expr.hir_id.local_id),
+ }
+ };
// If there are adjustments, then record the final type --
// this is the actual value that is being produced.
}
// Returns whether it emitted a diagnostic or not
-// Note that this fn and the proceding one are based on the code
+// Note that this fn and the proceeding one are based on the code
// for creating must_use diagnostics
//
// Note that this technique was chosen over things like a `Suspend` marker trait
-// as it is simpler and has precendent in the compiler
+// as it is simpler and has precedent in the compiler
pub fn check_must_not_suspend_ty<'tcx>(
fcx: &FnCtxt<'_, 'tcx>,
ty: Ty<'tcx>,
let descr_pre = &format!("{}boxed ", data.descr_pre);
check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
}
- ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did, hir_id, data),
+ ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
// FIXME: support adding the attribute to TAITs
ty::Opaque(def, _) => {
let mut has_emitted = false;
use hir::def_id::DefId;
use hir::{Body, HirId, HirIdMap, Node};
use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_set::FxHashSet;
use rustc_hir as hir;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;
let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
- let mut drop_ranges = build_control_flow_graph(
+ let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
fcx.tcx.hir(),
fcx.tcx,
&fcx.typeck_results.borrow(),
drop_ranges.propagate_to_fixpoint();
- DropRanges { tracked_value_map: drop_ranges.tracked_value_map, nodes: drop_ranges.nodes }
+ debug!("borrowed_temporaries = {borrowed_temporaries:?}");
+ DropRanges {
+ tracked_value_map: drop_ranges.tracked_value_map,
+ nodes: drop_ranges.nodes,
+ borrowed_temporaries: Some(borrowed_temporaries),
+ }
} else {
// If drop range tracking is not enabled, skip all the analysis and produce an
// empty set of DropRanges.
- DropRanges { tracked_value_map: FxHashMap::default(), nodes: IndexVec::new() }
+ DropRanges {
+ tracked_value_map: FxHashMap::default(),
+ nodes: IndexVec::new(),
+ borrowed_temporaries: None,
+ }
}
}
pub struct DropRanges {
tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
nodes: IndexVec<PostOrderId, NodeInfo>,
+ borrowed_temporaries: Option<FxHashSet<HirId>>,
}
impl DropRanges {
})
}
+ pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
+ if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
+ }
+
/// Returns a reference to the NodeInfo for a node, panicking if it does not exist
fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
&self.nodes[id]
intravisit::{self, Visitor},
Body, Expr, ExprKind, Guard, HirId, LoopIdError,
};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::{fx::FxHashMap, stable_set::FxHashSet};
use rustc_hir as hir;
use rustc_index::vec::IndexVec;
use rustc_middle::{
consumed_borrowed_places: ConsumedAndBorrowedPlaces,
body: &'tcx Body<'tcx>,
num_exprs: usize,
-) -> DropRangesBuilder {
+) -> (DropRangesBuilder, FxHashSet<HirId>) {
let mut drop_range_visitor =
DropRangeVisitor::new(hir, tcx, typeck_results, consumed_borrowed_places, num_exprs);
intravisit::walk_body(&mut drop_range_visitor, body);
drop_range_visitor.drop_ranges.process_deferred_edges();
- drop_range_visitor.drop_ranges
+ (drop_range_visitor.drop_ranges, drop_range_visitor.places.borrowed_temporaries)
}
/// This struct is used to gather the information for `DropRanges` to determine the regions of the
use hir::{def_id::DefId, Body, HirId, HirIdMap};
use rustc_data_structures::stable_set::FxHashSet;
use rustc_hir as hir;
+use rustc_middle::hir::place::{PlaceBase, Projection, ProjectionKind};
use rustc_middle::ty::{ParamEnv, TyCtxt};
pub(super) fn find_consumed_and_borrowed<'a, 'tcx>(
/// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is
/// not considered a drop of `x`, although it would be a drop of `x.y`.
pub(super) consumed: HirIdMap<FxHashSet<TrackedValue>>,
+
/// A set of hir-ids of values or variables that are borrowed at some point within the body.
pub(super) borrowed: FxHashSet<TrackedValue>,
+
+ /// A set of hir-ids of temporary values (rvalues) that are borrowed at some point within the body.
+ pub(super) borrowed_temporaries: FxHashSet<HirId>,
}
/// Works with ExprUseVisitor to find interesting values for the drop range analysis.
places: ConsumedAndBorrowedPlaces {
consumed: <_>::default(),
borrowed: <_>::default(),
+ borrowed_temporaries: <_>::default(),
},
}
}
&mut self,
place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
diag_expr_id: HirId,
- _bk: rustc_middle::ty::BorrowKind,
+ bk: rustc_middle::ty::BorrowKind,
+ ) {
+ debug!(
+ "borrow: place_with_id = {place_with_id:?}, diag_expr_id={diag_expr_id:?}, \
+ borrow_kind={bk:?}"
+ );
+
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+ // Ordinarily a value is consumed by its parent, but in the special case of a
+ // borrowed RValue, we create a reference that lives as long as the temporary scope
+ // for that expression (typically, the innermost statement, but sometimes the enclosing
+ // block). We record this fact here so that later in generator_interior
+ // we can use the correct scope.
+ //
+ // We special case borrows through a dereference (`&*x`, `&mut *x` where `x` is
+ // some rvalue expression), since these are essentially a copy of a pointer.
+ // In other words, this borrow does not refer to the
+ // temporary (`*x`), but to the referent (whatever `x` is a borrow of).
+ //
+ // We were considering that we might encounter problems down the line if somehow,
+ // some part of the compiler were to look at this result and try to use it to
+ // drive a borrowck-like analysis (this does not currently happen, as of this writing).
+ // But even this should be fine, because the lifetime of the dereferenced reference
+ // found in the rvalue is only significant as an intermediate 'link' to the value we
+ // are producing, and we separately track whether that value is live over a yield.
+ // Example:
+ //
+ // ```notrust
+ // fn identity<T>(x: &mut T) -> &mut T { x }
+ // let a: A = ...;
+ // let y: &'y mut A = &mut *identity(&'a mut a);
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^ the borrow we are talking about
+ // ```
+ //
+ // The expression `*identity(...)` is a deref of an rvalue,
+ // where the `identity(...)` (the rvalue) produces a return type
+ // of `&'rv mut A`, where `'a: 'rv`. We then assign this result to
+ // `'y`, resulting in (transitively) `'a: 'y` (i.e., while `y` is in use,
+ // `a` will be considered borrowed). Other parts of the code will ensure
+ // that if `y` is live over a yield, `&'y mut A` appears in the generator
+ // state. If `'y` is live, then any sound region analysis must conclude
+ // that `'a` is also live. So if this causes a bug, blame some other
+ // part of the code!
+ let is_deref = place_with_id
+ .place
+ .projections
+ .iter()
+ .any(|Projection { kind, .. }| *kind == ProjectionKind::Deref);
+
+ if let (false, PlaceBase::Rvalue) = (is_deref, place_with_id.place.base) {
+ self.places.borrowed_temporaries.insert(place_with_id.hir_id);
+ }
+ }
+
+ fn copy(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ _diag_expr_id: HirId,
) {
- debug!("borrow {:?}; diag_expr_id={:?}", place_with_id, diag_expr_id);
+ debug!("copy: place_with_id = {place_with_id:?}");
+
self.places
.borrowed
.insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+ // For copied we treat this mostly like a borrow except that we don't add the place
+ // to borrowed_temporaries because the copy is consumed.
}
fn mutate(
if let ty::Adt(adt_def, _) = self_ty.kind() {
if adt_def.is_enum() {
let variant_def = adt_def
- .variants
+ .variants()
.iter()
- .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did));
+ .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did()));
if let Some(variant_def) = variant_def {
// Braced variants generate unusable names in value namespace (reserved for
// possible future use), so variants resolved as associated items may refer to
// for a "<" in `self_ty_name`.
if !self_ty_name.contains('<') {
if let Adt(def, _) = self_ty.kind() {
- let generics = self.tcx.generics_of(def.did);
+ let generics = self.tcx.generics_of(def.did());
if !generics.params.is_empty() {
let counts = generics.own_counts();
self_ty_name += &format!(
}
/// Creates a string version of the `expr` that includes explicit adjustments.
- /// Returns the string and also a bool indicating whther this is a *precise*
+ /// Returns the string and also a bool indicating whether this is a *precise*
/// suggestion.
fn adjust_expr(
&self,
lint::builtin::TYVAR_BEHIND_RAW_POINTER,
scope_expr_id,
span,
- |lint| lint.build("type annotations needed").emit(),
+ |lint| {
+ lint.build("type annotations needed").emit();
+ },
);
}
} else {
self.assemble_inherent_impl_candidates_for_type(p.def_id());
}
ty::Adt(def, _) => {
- self.assemble_inherent_impl_candidates_for_type(def.did);
+ self.assemble_inherent_impl_candidates_for_type(def.did());
}
ty::Foreign(did) => {
self.assemble_inherent_impl_candidates_for_type(did);
let selcx = &mut traits::SelectionContext::new(self);
let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let mut parent_pred = None;
+
// If so, impls may carry other conditions (e.g., where
// clauses) that must be considered. Make sure that those
// match as well (or at least may match, sometimes we
}
let predicate =
ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx);
+ parent_pred = Some(predicate);
let obligation = traits::Obligation::new(cause, self.param_env, predicate);
if !self.predicate_may_hold(&obligation) {
result = ProbeResult::NoMatch;
let o = self.resolve_vars_if_possible(o);
if !self.predicate_may_hold(&o) {
result = ProbeResult::NoMatch;
- possibly_unsatisfied_predicates.push((o.predicate, None, Some(o.cause)));
+ possibly_unsatisfied_predicates.push((o.predicate, parent_pred, Some(o.cause)));
}
}
use rustc_middle::ty::print::with_crate_prefix;
use rustc_middle::ty::ToPolyTraitRef;
use rustc_middle::ty::{self, DefIdTree, ToPredicate, Ty, TyCtxt, TypeFoldable};
-use rustc_span::lev_distance;
use rustc_span::symbol::{kw, sym, Ident};
-use rustc_span::{source_map, FileName, MultiSpan, Span};
+use rustc_span::{lev_distance, source_map, ExpnKind, FileName, MacroKind, MultiSpan, Span};
use rustc_trait_selection::traits::error_reporting::on_unimplemented::InferCtxtExt as _;
use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
use rustc_trait_selection::traits::{
let candidate_found = autoderef.any(|(ty, _)| {
if let ty::Adt(adt_deref, _) = ty.kind() {
self.tcx
- .inherent_impls(adt_deref.did)
+ .inherent_impls(adt_deref.did())
.iter()
.filter_map(|def_id| {
self.associated_value(*def_id, item_name)
}
if let Some(def) = actual.ty_adt_def() {
- if let Some(full_sp) = tcx.hir().span_if_local(def.did) {
+ if let Some(full_sp) = tcx.hir().span_if_local(def.did()) {
let def_sp = tcx.sess.source_map().guess_head_span(full_sp);
err.span_label(
def_sp,
// Pick out the list of unimplemented traits on the receiver.
// This is used for custom error messages with the `#[rustc_on_unimplemented]` attribute.
let mut unimplemented_traits = FxHashMap::default();
+ let mut unimplemented_traits_only = true;
for (predicate, _parent_pred, cause) in &unsatisfied_predicates {
if let (ty::PredicateKind::Trait(p), Some(cause)) =
(predicate.kind().skip_binder(), cause.as_ref())
}
}
+ // Make sure that, if any traits other than the found ones were involved,
+ // we don't report an unimplemented trait.
+ // We don't want to say that `iter::Cloned` is not an iterator, just
+ // because of some non-Clone item being iterated over.
+ for (predicate, _parent_pred, _cause) in &unsatisfied_predicates {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(p)
+ if unimplemented_traits.contains_key(&p.trait_ref.def_id) => {}
+ _ => {
+ unimplemented_traits_only = false;
+ break;
+ }
+ }
+ }
+
let mut collect_type_param_suggestions =
|self_ty: Ty<'tcx>, parent_pred: ty::Predicate<'tcx>, obligation: &str| {
// We don't care about regions here, so it's fine to skip the binder here.
.get(self.tcx.hir().local_def_id_to_hir_id(did)),
)
}
- ty::Adt(def, _) => def.did.as_local().map(|def_id| {
+ ty::Adt(def, _) => def.did().as_local().map(|def_id| {
self.tcx
.hir()
.get(self.tcx.hir().local_def_id_to_hir_id(def_id))
);
match &self_ty.kind() {
// Point at the type that couldn't satisfy the bound.
- ty::Adt(def, _) => bound_spans.push((def_span(def.did), msg)),
+ ty::Adt(def, _) => bound_spans.push((def_span(def.did()), msg)),
// Point at the trait object that couldn't satisfy the bound.
ty::Dynamic(preds, _) => {
for pred in preds.iter() {
// Find all the requirements that come from a local `impl` block.
let mut skip_list: FxHashSet<_> = Default::default();
let mut spanned_predicates: FxHashMap<MultiSpan, _> = Default::default();
- for (data, p, parent_p) in unsatisfied_predicates
+ for (data, p, parent_p, impl_def_id, cause_span) in unsatisfied_predicates
.iter()
.filter_map(|(p, parent, c)| c.as_ref().map(|c| (p, parent, c)))
.filter_map(|(p, parent, c)| match c.code() {
ObligationCauseCode::ImplDerivedObligation(ref data) => {
- Some((data, p, parent))
+ Some((&data.derived, p, parent, data.impl_def_id, data.span))
}
_ => None,
})
{
let parent_trait_ref = data.parent_trait_pred;
- let parent_def_id = parent_trait_ref.def_id();
let path = parent_trait_ref.print_modifiers_and_trait_path();
let tr_self_ty = parent_trait_ref.skip_binder().self_ty();
- let mut candidates = vec![];
- self.tcx.for_each_relevant_impl(
- parent_def_id,
- parent_trait_ref.self_ty().skip_binder(),
- |impl_def_id| match self.tcx.hir().get_if_local(impl_def_id) {
- Some(Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { .. }),
- ..
- })) => {
- candidates.push(impl_def_id);
+ let unsatisfied_msg = "unsatisfied trait bound introduced here".to_string();
+ let derive_msg =
+ "unsatisfied trait bound introduced in this `derive` macro";
+ match self.tcx.hir().get_if_local(impl_def_id) {
+ // Unmet obligation comes from a `derive` macro, point at it once to
+ // avoid multiple span labels pointing at the same place.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ ..
+ })) if matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ let span = ident.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg.to_string());
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ ..
+ })) if matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) || matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ let span = self_ty.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg.to_string());
+ let entry = spanned_predicates.entry(spans.into());
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from a `trait`.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ span: item_span,
+ ..
+ })) if !matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
}
- _ => {}
- },
- );
- if let [def_id] = &candidates[..] {
- match self.tcx.hir().get_if_local(*def_id) {
- Some(Node::Item(hir::Item {
- kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
- ..
- })) => {
- if let Some(pred) = parent_p {
- // Done to add the "doesn't satisfy" `span_label`.
- let _ = format_pred(*pred);
- }
- skip_list.insert(p);
+ skip_list.insert(p);
+ let mut spans = if cause_span != *item_span {
+ let mut spans: MultiSpan = cause_span.into();
+ spans.push_span_label(cause_span, unsatisfied_msg);
+ spans
+ } else {
+ ident.span.into()
+ };
+ spans.push_span_label(ident.span, "in this trait".to_string());
+ let entry = spanned_predicates.entry(spans.into());
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from an `impl`.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ span: item_span,
+ ..
+ })) if !matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) && !matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
+ }
+ skip_list.insert(p);
+ let mut spans = if cause_span != *item_span {
+ let mut spans: MultiSpan = cause_span.into();
+ spans.push_span_label(cause_span, unsatisfied_msg);
+ spans
+ } else {
let mut spans = Vec::with_capacity(2);
if let Some(trait_ref) = of_trait {
spans.push(trait_ref.path.span);
}
spans.push(self_ty.span);
- let entry = spanned_predicates.entry(spans.into());
- entry
- .or_insert_with(|| (path, tr_self_ty, Vec::new()))
- .2
- .push(p);
+ spans.into()
+ };
+ if let Some(trait_ref) = of_trait {
+ spans.push_span_label(trait_ref.path.span, String::new());
}
- _ => {}
+ spans.push_span_label(self_ty.span, String::new());
+
+ let entry = spanned_predicates.entry(spans.into());
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
}
+ _ => {}
}
}
- for (span, (path, self_ty, preds)) in spanned_predicates {
- err.span_note(
- span,
- &format!(
- "the following trait bounds were not satisfied because of the \
- requirements of the implementation of `{}` for `{}`:\n{}",
- path,
- self_ty,
- preds
- .into_iter()
- // .map(|pred| format!("{:?}", pred))
- .filter_map(|pred| format_pred(*pred))
- .map(|(p, _)| format!("`{}`", p))
- .collect::<Vec<_>>()
- .join("\n"),
- ),
- );
+ let mut spanned_predicates: Vec<_> = spanned_predicates.into_iter().collect();
+ spanned_predicates.sort_by_key(|(span, (_, _, _))| span.primary_span());
+ for (span, (_path, _self_ty, preds)) in spanned_predicates {
+ let mut preds: Vec<_> = preds
+ .into_iter()
+ .filter_map(|pred| format_pred(*pred))
+ .map(|(p, _)| format!("`{}`", p))
+ .collect();
+ preds.sort();
+ preds.dedup();
+ let msg = if let [pred] = &preds[..] {
+ format!("trait bound {} was not satisfied", pred)
+ } else {
+ format!(
+ "the following trait bounds were not satisfied:\n{}",
+ preds.join("\n"),
+ )
+ };
+ err.span_note(span, &msg);
+ unsatisfied_bounds = true;
}
// The requirements that didn't have an `impl` span to show.
let mut bound_list = unsatisfied_predicates
.iter()
- .filter(|(pred, _, _parent_pred)| !skip_list.contains(&pred))
.filter_map(|(pred, parent_pred, _cause)| {
format_pred(*pred).map(|(p, self_ty)| {
collect_type_param_suggestions(self_ty, *pred, &p);
- match parent_pred {
- None => format!("`{}`", &p),
- Some(parent_pred) => match format_pred(*parent_pred) {
+ (
+ match parent_pred {
None => format!("`{}`", &p),
- Some((parent_p, _)) => {
- collect_type_param_suggestions(
- self_ty,
- *parent_pred,
- &p,
- );
- format!("`{}`\nwhich is required by `{}`", p, parent_p)
- }
+ Some(parent_pred) => match format_pred(*parent_pred) {
+ None => format!("`{}`", &p),
+ Some((parent_p, _)) => {
+ collect_type_param_suggestions(
+ self_ty,
+ *parent_pred,
+ &p,
+ );
+ format!(
+ "`{}`\nwhich is required by `{}`",
+ p, parent_p
+ )
+ }
+ },
},
- }
+ *pred,
+ )
})
})
+ .filter(|(_, pred)| !skip_list.contains(&pred))
+ .map(|(t, _)| t)
.enumerate()
.collect::<Vec<(usize, String)>>();
.join("\n");
let actual_prefix = actual.prefix_string(self.tcx);
info!("unimplemented_traits.len() == {}", unimplemented_traits.len());
- let (primary_message, label) = if unimplemented_traits.len() == 1 {
+ let (primary_message, label) = if unimplemented_traits.len() == 1
+ && unimplemented_traits_only
+ {
unimplemented_traits
.into_iter()
.next()
ty.is_str()
|| matches!(
ty.kind(),
- ty::Adt(adt, _) if self.tcx.is_diagnostic_item(sym::String, adt.did)
+ ty::Adt(adt, _) if self.tcx.is_diagnostic_item(sym::String, adt.did())
)
}
- ty::Adt(adt, _) => self.tcx.is_diagnostic_item(sym::String, adt.did),
+ ty::Adt(adt, _) => self.tcx.is_diagnostic_item(sym::String, adt.did()),
_ => false,
};
if is_string_or_ref_str && item_name.name == sym::iter {
if let ty::Adt(adt, _) = rcvr_ty.kind() {
let mut inherent_impls_candidate = self
.tcx
- .inherent_impls(adt.did)
+ .inherent_impls(adt.did())
.iter()
.copied()
.filter(|def_id| {
if unsatisfied_predicates.is_empty() && actual.is_enum() {
let adt_def = actual.ty_adt_def().expect("enum is not an ADT");
if let Some(suggestion) = lev_distance::find_best_match_for_name(
- &adt_def.variants.iter().map(|s| s.name).collect::<Vec<_>>(),
+ &adt_def.variants().iter().map(|s| s.name).collect::<Vec<_>>(),
item_name.name,
None,
) {
let all_local_types_needing_impls =
errors.iter().all(|e| match e.obligation.predicate.kind().skip_binder() {
ty::PredicateKind::Trait(pred) => match pred.self_ty().kind() {
- ty::Adt(def, _) => def.did.is_local(),
+ ty::Adt(def, _) => def.did().is_local(),
_ => false,
},
_ => false,
let def_ids = preds
.iter()
.filter_map(|pred| match pred.self_ty().kind() {
- ty::Adt(def, _) => Some(def.did),
+ ty::Adt(def, _) => Some(def.did()),
_ => None,
})
.collect::<FxHashSet<_>>();
match pred.self_ty().kind() {
ty::Adt(def, _) => {
spans.push_span_label(
- sm.guess_head_span(self.tcx.def_span(def.did)),
+ sm.guess_head_span(self.tcx.def_span(def.did())),
format!("must implement `{}`", pred.trait_ref.print_only_trait_path()),
);
}
for (pred, _, _) in unsatisfied_predicates {
let ty::PredicateKind::Trait(trait_pred) = pred.kind().skip_binder() else { continue };
let adt = match trait_pred.self_ty().ty_adt_def() {
- Some(adt) if adt.did.is_local() => adt,
+ Some(adt) if adt.did().is_local() => adt,
_ => continue,
};
if let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) {
};
if can_derive {
let self_name = trait_pred.self_ty().to_string();
- let self_span = self.tcx.def_span(adt.did);
+ let self_span = self.tcx.def_span(adt.did());
if let Some(poly_trait_ref) = pred.to_opt_poly_trait_pred() {
for super_trait in supertraits(self.tcx, poly_trait_ref.to_poly_trait_ref())
{
/// Print out the type for use in value namespace.
fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String {
match ty.kind() {
- ty::Adt(def, substs) => format!("{}", ty::Instance::new(def.did, substs)),
+ ty::Adt(def, substs) => format!("{}", ty::Instance::new(def.did(), substs)),
_ => self.ty_to_string(ty),
}
}
) -> bool {
fn is_local(ty: Ty<'_>) -> bool {
match ty.kind() {
- ty::Adt(def, _) => def.did.is_local(),
+ ty::Adt(def, _) => def.did().is_local(),
ty::Foreign(did) => did.is_local(),
ty::Dynamic(tr, ..) => tr.principal().map_or(false, |d| d.def_id().is_local()),
ty::Param(_) => true,
}
fn find_use_placement<'tcx>(tcx: TyCtxt<'tcx>, target_module: LocalDefId) -> (Option<Span>, bool) {
+ // FIXME(#94854): this code uses an out-of-date method for inferring a span
+ // to suggest. It would be better to thread the ModSpans from the AST into
+ // the HIR, and then use that to drive the suggestion here.
+
let mut span = None;
let mut found_use = false;
let (module, _, _) = tcx.hir().get_module(target_module);
}
/// Emit an error when encountering two or more variants in a transparent enum.
-fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: &'tcx ty::AdtDef, sp: Span, did: DefId) {
+fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) {
let variant_spans: Vec<_> = adt
- .variants
+ .variants()
.iter()
.map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap())
.collect();
- let msg = format!("needs exactly one variant, but has {}", adt.variants.len(),);
+ let msg = format!("needs exactly one variant, but has {}", adt.variants().len(),);
let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {}", msg);
err.span_label(sp, &msg);
if let [start @ .., end] = &*variant_spans {
/// enum.
fn bad_non_zero_sized_fields<'tcx>(
tcx: TyCtxt<'tcx>,
- adt: &'tcx ty::AdtDef,
+ adt: ty::AdtDef<'tcx>,
field_count: usize,
field_spans: impl Iterator<Item = Span>,
sp: Span,
let string_type = self.tcx.get_diagnostic_item(sym::String);
let is_std_string = |ty: Ty<'tcx>| match ty.ty_adt_def() {
- Some(ty_def) => Some(ty_def.did) == string_type,
+ Some(ty_def) => Some(ty_def.did()) == string_type,
None => false,
};
ex.span,
format!("cannot apply unary operator `{}`", op.as_str()),
);
+ let missing_trait = match op {
+ hir::UnOp::Deref => unreachable!("check unary op `-` or `!` only"),
+ hir::UnOp::Not => "std::ops::Not",
+ hir::UnOp::Neg => "std::ops::Neg",
+ };
+ let mut visitor = TypeParamVisitor(vec![]);
+ visitor.visit_ty(operand_ty);
+ if let [ty] = &visitor.0[..] {
+ if let ty::Param(p) = *operand_ty.kind() {
+ suggest_constraining_param(
+ self.tcx,
+ self.body_id,
+ &mut err,
+ *ty,
+ operand_ty,
+ missing_trait,
+ p,
+ true,
+ );
+ }
+ }
let sp = self.tcx.sess.source_map().start_point(ex.span);
if let Some(sp) =
use rustc_hir::{HirId, Pat, PatKind};
use rustc_infer::infer;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::middle::stability::EvalResult;
use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeFoldable};
use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
use rustc_span::hygiene::DesugaringKind;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::source_map::{Span, Spanned};
-use rustc_span::symbol::{sym, Ident};
+use rustc_span::symbol::{kw, sym, Ident};
use rustc_span::{BytePos, MultiSpan, DUMMY_SP};
use rustc_trait_selection::autoderef::Autoderef;
use rustc_trait_selection::traits::{ObligationCause, Pattern};
_ => {
let (type_def_id, item_def_id) = match pat_ty.kind() {
Adt(def, _) => match res {
- Res::Def(DefKind::Const, def_id) => (Some(def.did), Some(def_id)),
+ Res::Def(DefKind::Const, def_id) => (Some(def.did()), Some(def_id)),
_ => (None, None),
},
_ => (None, None),
self.field_ty(span, f, substs)
})
.unwrap_or_else(|| {
- inexistent_fields.push(field.ident);
+ inexistent_fields.push(field);
no_field_errors = false;
tcx.ty_error()
})
.filter(|(_, ident)| !used_fields.contains_key(ident))
.collect::<Vec<_>>();
- let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered()) {
+ let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered())
+ && !inexistent_fields.iter().any(|field| field.ident.name == kw::Underscore)
+ {
Some(self.error_inexistent_fields(
adt.variant_descr(),
&inexistent_fields,
&mut unmentioned_fields,
variant,
+ substs,
))
} else {
None
};
// Require `..` if struct has non_exhaustive attribute.
- let non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did.is_local();
+ let non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
if non_exhaustive && !has_rest_pat {
self.error_foreign_non_exhaustive_spat(pat, adt.variant_descr(), fields.is_empty());
}
.copied()
.filter(|(field, _)| {
field.vis.is_accessible_from(tcx.parent_module(pat.hir_id).to_def_id(), tcx)
+ && !matches!(
+ tcx.eval_stability(field.did, None, DUMMY_SP, None),
+ EvalResult::Deny { .. }
+ )
+ // We only want to report the error if it is hidden and not local
+ && !(tcx.is_doc_hidden(field.did) && !field.did.is_local())
})
.collect();
fn error_inexistent_fields(
&self,
kind_name: &str,
- inexistent_fields: &[Ident],
- unmentioned_fields: &mut Vec<(&ty::FieldDef, Ident)>,
+ inexistent_fields: &[&hir::PatField<'tcx>],
+ unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>,
variant: &ty::VariantDef,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
let tcx = self.tcx;
let (field_names, t, plural) = if inexistent_fields.len() == 1 {
- (format!("a field named `{}`", inexistent_fields[0]), "this", "")
+ (format!("a field named `{}`", inexistent_fields[0].ident), "this", "")
} else {
(
format!(
"fields named {}",
inexistent_fields
.iter()
- .map(|ident| format!("`{}`", ident))
+ .map(|field| format!("`{}`", field.ident))
.collect::<Vec<String>>()
.join(", ")
),
"s",
)
};
- let spans = inexistent_fields.iter().map(|ident| ident.span).collect::<Vec<_>>();
+ let spans = inexistent_fields.iter().map(|field| field.ident.span).collect::<Vec<_>>();
let mut err = struct_span_err!(
tcx.sess,
spans,
tcx.def_path_str(variant.def_id),
field_names
);
- if let Some(ident) = inexistent_fields.last() {
+ if let Some(pat_field) = inexistent_fields.last() {
err.span_label(
- ident.span,
+ pat_field.ident.span,
format!(
"{} `{}` does not have {} field{}",
kind_name,
if unmentioned_fields.len() == 1 {
let input =
unmentioned_fields.iter().map(|(_, field)| field.name).collect::<Vec<_>>();
- let suggested_name = find_best_match_for_name(&input, ident.name, None);
+ let suggested_name = find_best_match_for_name(&input, pat_field.ident.name, None);
if let Some(suggested_name) = suggested_name {
err.span_suggestion(
- ident.span,
+ pat_field.ident.span,
"a field with a similar name exists",
suggested_name.to_string(),
Applicability::MaybeIncorrect,
unmentioned_fields.retain(|&(_, x)| x.name != suggested_name);
}
} else if inexistent_fields.len() == 1 {
- let unmentioned_field = unmentioned_fields[0].1.name;
- err.span_suggestion_short(
- ident.span,
- &format!(
- "`{}` has a field named `{}`",
- tcx.def_path_str(variant.def_id),
- unmentioned_field
- ),
- unmentioned_field.to_string(),
- Applicability::MaybeIncorrect,
- );
+ match pat_field.pat.kind {
+ PatKind::Lit(expr)
+ if !self.can_coerce(
+ self.typeck_results.borrow().expr_ty(expr),
+ self.field_ty(
+ unmentioned_fields[0].1.span,
+ unmentioned_fields[0].0,
+ substs,
+ ),
+ ) => {}
+ _ => {
+ let unmentioned_field = unmentioned_fields[0].1.name;
+ err.span_suggestion_short(
+ pat_field.ident.span,
+ &format!(
+ "`{}` has a field named `{}`",
+ tcx.def_path_str(variant.def_id),
+ unmentioned_field
+ ),
+ unmentioned_field.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
}
}
}
sp,
&format!(
"include the missing field{} in the pattern{}",
- if len == 1 { "" } else { "s" },
+ pluralize!(len),
if have_inaccessible_fields { " and ignore the inaccessible fields" } else { "" }
),
format!(
err.span_suggestion(
sp,
&format!(
- "if you don't care about {} missing field{}, you can explicitly ignore {}",
- if len == 1 { "this" } else { "these" },
- if len == 1 { "" } else { "s" },
- if len == 1 { "it" } else { "them" },
+ "if you don't care about {these} missing field{s}, you can explicitly ignore {them}",
+ these = pluralize!("this", len),
+ s = pluralize!(len),
+ them = if len == 1 { "it" } else { "them" },
),
format!("{}..{}", prefix, postfix),
Applicability::MachineApplicable,
.find_map(|(ty, _)| {
match ty.kind() {
ty::Adt(adt_def, _)
- if self.tcx.is_diagnostic_item(sym::Option, adt_def.did)
- || self.tcx.is_diagnostic_item(sym::Result, adt_def.did) =>
+ if self.tcx.is_diagnostic_item(sym::Option, adt_def.did())
+ || self.tcx.is_diagnostic_item(sym::Result, adt_def.did()) =>
{
// Slicing won't work here, but `.as_deref()` might (issue #91328).
err.span_suggestion(
use crate::check::method::MethodCallee;
use crate::check::{has_expected_num_generic_args, FnCtxt, PlaceOp};
use rustc_ast as ast;
+use rustc_data_structures::intern::Interned;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
) = index_expr.kind
{
match adjusted_ty.kind() {
- ty::Adt(ty::AdtDef { did, .. }, _)
+ ty::Adt(ty::AdtDef(Interned(ty::AdtDefData { did, .. }, _)), _)
if self.tcx.is_diagnostic_item(sym::Vec, *did) =>
{
return self.negative_index(adjusted_ty, index_expr.span, base_expr);
// represents the case of the path being completely captured by the variable.
//
// eg. If `a.b` is captured and we are processing `a.b`, then we can't have the closure also
- // capture `a.b.c`, because that voilates min capture.
+ // capture `a.b.c`, because that violates min capture.
let is_completely_captured = captured_by_move_projs.iter().any(|projs| projs.is_empty());
assert!(!is_completely_captured || (captured_by_move_projs.len() == 1));
ty::RawPtr(..) => unreachable!(),
ty::Adt(def, substs) => {
- // Multi-varaint enums are captured in entirety,
+ // Multi-variant enums are captured in entirety,
// which would've been handled in the case of single empty slice in `captured_by_move_projs`.
- assert_eq!(def.variants.len(), 1);
+ assert_eq!(def.variants().len(), 1);
// Only Field projections can be applied to a non-box Adt.
assert!(
ProjectionKind::Field(..)
))
);
- def.variants.get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any(
+ def.variants().get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any(
|(i, field)| {
let paths_using_field = captured_by_move_projs
.iter()
// Return true for fields of packed structs, unless those fields have alignment 1.
match p.kind {
ProjectionKind::Field(..) => match ty.kind() {
- ty::Adt(def, _) if def.repr.packed() => {
+ ty::Adt(def, _) if def.repr().packed() => {
// We erase regions here because they cannot be hashed
match tcx.layout_of(param_env.and(tcx.erase_regions(p.ty))) {
Ok(layout) if layout.align.abi.bytes() == 1 => {
}
}
-/// Reduces the precision of the captured place when the precision doesn't yeild any benefit from
-/// borrow checking prespective, allowing us to save us on the size of the capture.
+/// Reduces the precision of the captured place when the precision doesn't yield any benefit from
+/// borrow checking perspective, allowing us to save on the size of the capture.
///
///
/// Fields that are read through a shared reference will always be read via a shared ref or a copy,
{
for_item(tcx, item).with_fcx(|fcx| {
let variants = lookup_fields(fcx);
- let packed = tcx.adt_def(item.def_id).repr.packed();
+ let packed = tcx.adt_def(item.def_id).repr().packed();
for variant in &variants {
// For DST, or when drop needs to copy things around, all
},
)
}));
- // Manually call `normalize_assocaited_types_in` on the other types
+ // Manually call `normalize_associated_types_in` on the other types
// in `FnSig`. This ensures that if the types of these fields
// ever change to include projections, we will start normalizing
// them automatically.
if self.is_tainted_by_errors() {
// FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
- wbcx.typeck_results.tainted_by_errors = Some(ErrorGuaranteed);
+ wbcx.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
}
debug!("writeback: typeck results for {:?} are {:#?}", item_def_id, wbcx.typeck_results);
if !errors_buffer.is_empty() {
errors_buffer.sort_by_key(|diag| diag.span.primary_span());
- for diag in errors_buffer.drain(..) {
- self.tcx().sess.diagnostic().emit_diagnostic(&diag);
+ for mut diag in errors_buffer.drain(..) {
+ self.tcx().sess.diagnostic().emit_diagnostic(&mut diag);
}
}
}
// users of the typeck results don't produce extra errors, or worse, ICEs.
if resolver.replaced_with_error {
// FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
- self.typeck_results.tainted_by_errors = Some(ErrorGuaranteed);
+ self.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
}
x
}
fn report_type_error(&self, t: Ty<'tcx>) {
- if !self.tcx.sess.has_errors() {
+ if !self.tcx.sess.has_errors().is_some() {
self.infcx
.emit_inference_failure_err(
Some(self.body.id()),
}
fn report_const_error(&self, c: ty::Const<'tcx>) {
- if !self.tcx.sess.has_errors() {
+ if self.tcx.sess.has_errors().is_none() {
self.infcx
.emit_inference_failure_err(
Some(self.body.id()),
E0204,
"the trait `Copy` may not be implemented for this type"
);
- for span in fields.iter().map(|f| tcx.def_span(f.did)) {
- err.span_label(span, "this field does not implement `Copy`");
+ for (field, ty) in fields {
+ let field_span = tcx.def_span(field.did);
+ err.span_label(field_span, "this field does not implement `Copy`");
+ // Spin up a new FulfillmentContext, so we can get the _precise_ reason
+ // why this field does not implement Copy. This is useful because sometimes
+ // it is not immediately clear why Copy is not implemented for a field, since
+ // all we point at is the field itself.
+ tcx.infer_ctxt().enter(|infcx| {
+ let mut fulfill_cx = traits::FulfillmentContext::new_ignoring_regions();
+ fulfill_cx.register_bound(
+ &infcx,
+ param_env,
+ ty,
+ tcx.lang_items().copy_trait().unwrap(),
+ traits::ObligationCause::dummy_with_span(field_span),
+ );
+ for error in fulfill_cx.select_all_or_error(&infcx) {
+ let error_predicate = error.obligation.predicate;
+ // Only note if it's not the root obligation, otherwise it's trivial and
+ // should be self-explanatory (i.e. a field literally doesn't implement Copy).
+
+ // FIXME: This error could be more descriptive, especially if the error_predicate
+ // contains a foreign type or if it's a deeply nested type...
+ if error_predicate != error.root_obligation.predicate {
+ err.span_note(
+ error.obligation.cause.span,
+ &format!(
+ "the `Copy` impl for `{}` requires that `{}`",
+ ty, error_predicate
+ ),
+ );
+ }
+ }
+ });
}
err.emit();
}
if def_a.is_struct() && def_b.is_struct() =>
{
if def_a != def_b {
- let source_path = tcx.def_path_str(def_a.did);
- let target_path = tcx.def_path_str(def_b.did);
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
create_err(&format!(
"the trait `DispatchFromDyn` may only be implemented \
return;
}
- if def_a.repr.c() || def_a.repr.packed() {
+ if def_a.repr().c() || def_a.repr().packed() {
create_err(
"structs implementing `DispatchFromDyn` may not have \
`#[repr(packed)]` or `#[repr(C)]`",
if def_a.is_struct() && def_b.is_struct() =>
{
if def_a != def_b {
- let source_path = tcx.def_path_str(def_a.did);
- let target_path = tcx.def_path_str(def_b.did);
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
struct_span_err!(
tcx.sess,
span,
//! `tcx.inherent_impls(def_id)`). That value, however,
//! is computed by selecting an idea from this table.
-use rustc_errors::struct_span_err;
+use rustc_errors::{pluralize, struct_span_err};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_hir::itemlikevisit::ItemLikeVisitor;
let lang_items = self.tcx.lang_items();
match *self_ty.kind() {
ty::Adt(def, _) => {
- self.check_def_id(item, def.did);
+ self.check_def_id(item, def.did());
}
ty::Foreign(did) => {
self.check_def_id(item, did);
let to_implement = if assoc_items.is_empty() {
String::new()
} else {
- let plural = assoc_items.len() > 1;
let assoc_items_kind = {
let item_types = assoc_items.iter().map(|x| x.kind);
if item_types.clone().all(|x| x == hir::AssocItemKind::Const) {
format!(
" to implement {} {}{}",
- if plural { "these" } else { "this" },
+ pluralize!("this", assoc_items.len()),
assoc_items_kind,
- if plural { "s" } else { "" }
+ pluralize!(assoc_items.len()),
)
};
for &impl_of_trait in impls_of_trait {
match orphan_check_impl(tcx, impl_of_trait) {
Ok(()) => {}
- Err(ErrorGuaranteed) => errors.push(impl_of_trait),
+ Err(_) => errors.push(impl_of_trait),
}
}
if tcx.trait_is_auto(trait_def_id) && !trait_def_id.is_local() {
let self_ty = trait_ref.self_ty();
let opt_self_def_id = match *self_ty.kind() {
- ty::Adt(self_def, _) => Some(self_def.did),
+ ty::Adt(self_def, _) => Some(self_def.did()),
ty::Foreign(did) => Some(did),
_ => None,
};
};
if let Some((msg, label)) = msg {
- struct_span_err!(tcx.sess, sp, E0321, "{}", msg).span_label(sp, label).emit();
- return Err(ErrorGuaranteed);
+ let reported =
+ struct_span_err!(tcx.sess, sp, E0321, "{}", msg).span_label(sp, label).emit();
+ return Err(reported);
}
}
if let ty::Opaque(def_id, _) = *trait_ref.self_ty().kind() {
- tcx.sess
+ let reported = tcx
+ .sess
.struct_span_err(sp, "cannot implement trait on type alias impl trait")
.span_note(tcx.def_span(def_id), "type alias impl trait defined here")
.emit();
- return Err(ErrorGuaranteed);
+ return Err(reported);
}
Ok(())
// That way if we had `Vec<MyType>`, we will properly attribute the
// problem to `Vec<T>` and avoid confusing the user if they were to see
// `MyType` in the error.
- ty::Adt(def, _) => tcx.mk_adt(def, ty::List::empty()),
+ ty::Adt(def, _) => tcx.mk_adt(*def, ty::List::empty()),
_ => ty,
};
let this = "this".to_string();
_ => ControlFlow::Break(NotUniqueParam::NotParam(t.into())),
}
}
- fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
- match *r {
- ty::ReEarlyBound(p) => {
- if self.seen.insert(p.index) {
- ControlFlow::CONTINUE
- } else {
- ControlFlow::Break(NotUniqueParam::DuplicateParam(r.into()))
- }
- }
- _ => ControlFlow::Break(NotUniqueParam::NotParam(r.into())),
- }
+ fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // We don't drop candidates during candidate assembly because of region
+ // constraints, so the behavior for impls only constrained by regions
+ // will not change.
+ ControlFlow::CONTINUE
}
fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
match c.val() {
assert_eq!(trait_ref.substs.len(), 1);
let self_ty = trait_ref.self_ty();
let (self_type_did, substs) = match self_ty.kind() {
- ty::Adt(def, substs) => (def.did, substs),
+ ty::Adt(def, substs) => (def.did(), substs),
_ => {
// FIXME: should also lint for stuff like `&i32` but
// considering that auto traits are unstable, that
// by only visiting each `DefId` once.
//
// This will be incorrect in subtle cases, but I don't care :)
- if self.seen.insert(def.did) {
+ if self.seen.insert(def.did()) {
for ty in def.all_fields().map(|field| field.ty(tcx, substs)) {
ty.visit_with(self)?;
}
}
let self_ty_root = match self_ty.kind() {
- ty::Adt(def, _) => tcx.mk_adt(def, InternalSubsts::identity_for_item(tcx, def.did)),
+ ty::Adt(def, _) => tcx.mk_adt(*def, InternalSubsts::identity_for_item(tcx, def.did())),
_ => unimplemented!("unexpected self ty {:?}", self_ty),
};
fn convert_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId, variants: &[hir::Variant<'_>]) {
let def = tcx.adt_def(def_id);
- let repr_type = def.repr.discr_type();
+ let repr_type = def.repr().discr_type();
let initial = repr_type.initial_discriminant(tcx);
let mut prev_discr = None::<Discr<'_>>;
)
}
-fn adt_def(tcx: TyCtxt<'_>, def_id: DefId) -> &ty::AdtDef {
+fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
use rustc_hir::*;
let def_id = def_id.expect_local();
// Only allow features whose feature gates have been enabled.
let allowed = match feature_gate.as_ref().copied() {
Some(sym::arm_target_feature) => rust_features.arm_target_feature,
- Some(sym::aarch64_target_feature) => rust_features.aarch64_target_feature,
Some(sym::hexagon_target_feature) => rust_features.hexagon_target_feature,
Some(sym::powerpc_target_feature) => rust_features.powerpc_target_feature,
Some(sym::mips_target_feature) => rust_features.mips_target_feature,
Some(sym::tbm_target_feature) => rust_features.tbm_target_feature,
Some(sym::wasm_target_feature) => rust_features.wasm_target_feature,
Some(sym::cmpxchg16b_target_feature) => rust_features.cmpxchg16b_target_feature,
- Some(sym::adx_target_feature) => rust_features.adx_target_feature,
Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
Some(sym::f16c_target_feature) => rust_features.f16c_target_feature,
Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
+ Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
Some(name) => bug!("unknown target feature gate {}", name),
None => true,
};
-use rustc_errors::{Applicability, ErrorGuaranteed, StashKey};
+use rustc_errors::{Applicability, StashKey};
use rustc_hir as hir;
use rustc_hir::def::Res;
use rustc_hir::def_id::{DefId, LocalDefId};
owner, def_id,
),
);
- if let Some(ErrorGuaranteed) =
+ if let Some(_) =
tcx.typeck(owner).tainted_by_errors
{
// Some error in the
Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => tcx
.adt_def(tcx.hir().get_parent_item(hir_id))
- .repr
+ .repr()
.discr_type()
.to_ty(tcx),
bk: ty::BorrowKind,
);
+ /// The value found at `place` is being copied.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn copy(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ // In most cases, copying data from `x` is equivalent to doing `*&x`, so by default
+ // we treat a copy of `x` as a borrow of `x`.
+ self.borrow(place_with_id, diag_expr_id, ty::BorrowKind::ImmBorrow)
+ }
+
/// The path at `assignee_place` is being assigned to.
/// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
// struct; however, when EUV is run during typeck, it
// may not. This will generate an error earlier in typeck,
// so we can just ignore it.
- if !self.tcx().sess.has_errors() {
+ if !self.tcx().sess.has_errors().is_some() {
span_bug!(with_expr.span, "with expression doesn't evaluate to a struct");
}
}
match mode {
ConsumeMode::Move => delegate.consume(place_with_id, diag_expr_id),
- ConsumeMode::Copy => {
- delegate.borrow(place_with_id, diag_expr_id, ty::BorrowKind::ImmBorrow)
- }
+ ConsumeMode::Copy => delegate.copy(place_with_id, diag_expr_id),
}
}
}
AdtKind::Enum => def.is_variant_list_non_exhaustive(),
};
- def.variants.len() > 1 || (!def.did.is_local() && is_non_exhaustive)
+ def.variants().len() > 1 || (!def.did().is_local() && is_non_exhaustive)
} else {
false
}
#![allow(rustc::potential_query_instability)]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
#![feature(bool_to_option)]
+#![feature(box_patterns)]
#![feature(control_flow_enum)]
#![feature(crate_visibility_modifier)]
#![feature(hash_drain_filter)]
check_unused::check_crate(tcx);
check_for_entry_fn(tcx);
- if tcx.sess.err_count() == 0 { Ok(()) } else { Err(ErrorGuaranteed) }
+ if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
}
/// A quasi-deprecated helper used in rustdoc and clippy to get
) -> McResult<usize> {
let ty = self.typeck_results.node_type(pat_hir_id);
match ty.kind() {
- ty::Adt(adt_def, _) => Ok(adt_def.variants[variant_index].fields.len()),
+ ty::Adt(adt_def, _) => Ok(adt_def.variant(variant_index).fields.len()),
_ => {
self.tcx()
.sess
// `['b => 'a, U => T]` and thus get the requirement that `T:
// 'a` holds for `Foo`.
debug!("Adt");
- if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did) {
+ if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did()) {
for (unsubstituted_predicate, &span) in unsubstituted_predicates {
// `unsubstituted_predicate` is `U: 'b` in the
// example above. So apply the substitution to
// let _: () = substs.region_at(0);
check_explicit_predicates(
tcx,
- def.did,
+ def.did(),
substs,
required_predicates,
explicit_map,
}
fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let mut err = self.sess.struct_span_fatal_with_code(
+ let mut err = self.sess.struct_span_err_with_code(
self.span,
&format!("can't pass `{}` to variadic function", self.ty),
self.code(),
}
fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
- let mut err = self.sess.struct_span_fatal_with_code(
+ let mut err = self.sess.struct_span_err_with_code(
self.span,
&format!(
"cannot cast thin pointer `{}` to fat pointer `{}`",
.skip(self.params_offset + self.num_provided_type_or_const_args())
.take(num_params_to_take)
.map(|param| match param.kind {
- // This is being infered from the item's inputs, no need to set it.
+ // This is being inferred from the item's inputs, no need to set it.
ty::GenericParamDefKind::Type { .. } if is_used_in_input(param.def_id) => {
"_".to_string()
}
let num_redundant_lt_args = lt_arg_spans.len() - self.num_expected_lifetime_args();
let msg_lifetimes = format!(
- "remove {} {} argument{}",
- if num_redundant_lt_args == 1 { "this" } else { "these" },
- "lifetime",
- pluralize!(num_redundant_lt_args),
+ "remove {these} lifetime argument{s}",
+ these = pluralize!("this", num_redundant_lt_args),
+ s = pluralize!(num_redundant_lt_args),
);
err.span_suggestion(
let num_redundant_gen_args =
gen_arg_spans.len() - self.num_expected_type_or_const_args();
let msg_types_or_consts = format!(
- "remove {} {} argument{}",
- if num_redundant_gen_args == 1 { "this" } else { "these" },
- "generic",
- pluralize!(num_redundant_type_or_const_args),
+ "remove {these} generic argument{s}",
+ these = pluralize!("this", num_redundant_gen_args),
+ s = pluralize!(num_redundant_gen_args),
);
err.span_suggestion(
}
ty::Adt(def, substs) => {
- self.add_constraints_from_substs(current, def.did, substs, variance);
+ self.add_constraints_from_substs(current, def.did(), substs, variance);
}
ty::Projection(ref data) => {
# Link libstdc++ statically into the rustc_llvm instead of relying on a
# dynamic version to be available.
-#static-libstdcpp = false
+#static-libstdcpp = true
# Whether to use Ninja to build LLVM. This runs much faster than make.
#ninja = true
#[doc(inline)]
pub use core::alloc::*;
+use core::marker::Destruct;
+
#[cfg(test)]
mod tests;
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Drop>(
+pub(crate) const unsafe fn box_free<
+ T: ?Sized,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
+>(
ptr: Unique<T>,
alloc: A,
) {
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
where
B::Owned: ~const Borrow<B>,
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::iter::{FusedIterator, Iterator};
-use core::marker::{Unpin, Unsize};
+use core::marker::{Destruct, Unpin, Unsize};
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Drop,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Drop,
- A: ~const Allocator + ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Drop,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Drop,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Drop,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Drop,
+ A: ~const Allocator + ~const Drop + ~const Destruct,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Drop,
+ A: 'static + ~const Allocator + ~const Drop + ~const Destruct,
{
Self::into_pin(Self::new_in(x, alloc))
}
#[unstable(feature = "box_into_inner", issue = "80437")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn into_inner(boxed: Self) -> T
where
- Self: ~const Drop,
+ Self: ~const Drop + ~const Destruct,
{
*boxed
}
use crate::collections::TryReserveError;
use crate::slice;
-use crate::vec::{self, AsIntoIter, Vec};
+use crate::vec::{self, AsVecIntoIter, Vec};
use super::SpecExtend;
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
+// In addition to the SAFETY invariants of the following three unsafe traits
+// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T> SourceIter for IntoIter<T> {
#[doc(hidden)]
unsafe impl<I> InPlaceIterable for IntoIter<I> {}
-impl<I> AsIntoIter for IntoIter<I> {
+unsafe impl<I> AsVecIntoIter for IntoIter<I> {
type Item = I;
fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
let mut out_tree = clone_subtree(internal.first_edge().descend());
{
- let out_root = BTreeMap::ensure_is_owned(&mut out_tree.root);
+ let out_root = out_tree.root.as_mut().unwrap();
let mut out_node = out_root.push_internal_level();
let mut in_edge = internal.first_edge();
while let Ok(kv) = in_edge.right_kv() {
fn replace(&mut self, key: K) -> Option<K> {
let (map, dormant_map) = DormantMutRef::new(self);
- let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut();
+ let root_node = map.root.get_or_insert_with(Root::new).borrow_mut();
match root_node.search_tree::<K>(&key) {
Found(mut kv) => Some(mem::replace(kv.key_mut(), key)),
GoDown(handle) => {
- VacantEntry { key, handle, dormant_map, _marker: PhantomData }.insert(());
+ VacantEntry { key, handle: Some(handle), dormant_map, _marker: PhantomData }
+ .insert(());
None
}
}
let self_iter = mem::take(self).into_iter();
let other_iter = mem::take(other).into_iter();
- let root = BTreeMap::ensure_is_owned(&mut self.root);
+ let root = self.root.get_or_insert_with(Root::new);
root.append_from_sorted_iters(self_iter, other_iter, &mut self.length)
}
where
K: Ord,
{
- // FIXME(@porglezomp) Avoid allocating if we don't insert
let (map, dormant_map) = DormantMutRef::new(self);
- let root_node = Self::ensure_is_owned(&mut map.root).borrow_mut();
- match root_node.search_tree(&key) {
- Found(handle) => Occupied(OccupiedEntry { handle, dormant_map, _marker: PhantomData }),
- GoDown(handle) => {
- Vacant(VacantEntry { key, handle, dormant_map, _marker: PhantomData })
- }
+ match map.root {
+ None => Vacant(VacantEntry { key, handle: None, dormant_map, _marker: PhantomData }),
+ Some(ref mut root) => match root.borrow_mut().search_tree(&key) {
+ Found(handle) => {
+ Occupied(OccupiedEntry { handle, dormant_map, _marker: PhantomData })
+ }
+ GoDown(handle) => Vacant(VacantEntry {
+ key,
+ handle: Some(handle),
+ dormant_map,
+ _marker: PhantomData,
+ }),
+ },
}
}
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
-
- /// If the root node is the empty (non-allocated) root node, allocate our
- /// own node. Is an associated function to avoid borrowing the entire BTreeMap.
- fn ensure_is_owned(root: &mut Option<Root<K, V>>) -> &mut Root<K, V> {
- root.get_or_insert_with(Root::new)
- }
}
#[cfg(test)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VacantEntry<'a, K: 'a, V: 'a> {
pub(super) key: K,
- pub(super) handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>,
+ /// `None` for a (empty) map without root
+ pub(super) handle: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V>>,
// Be invariant in `K` and `V`
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(self, value: V) -> &'a mut V {
- let out_ptr = match self.handle.insert_recursing(self.key, value) {
- (None, val_ptr) => {
- // SAFETY: We have consumed self.handle and the handle returned.
- let map = unsafe { self.dormant_map.awaken() };
- map.length += 1;
- val_ptr
- }
- (Some(ins), val_ptr) => {
- drop(ins.left);
+ let out_ptr = match self.handle {
+ None => {
// SAFETY: We have consumed self.handle and the reference returned.
let map = unsafe { self.dormant_map.awaken() };
- let root = map.root.as_mut().unwrap();
- root.push_internal_level().push(ins.kv.0, ins.kv.1, ins.right);
- map.length += 1;
+ let mut root = NodeRef::new_leaf();
+ let val_ptr = root.borrow_mut().push(self.key, value) as *mut V;
+ map.root = Some(root.forget_type());
+ map.length = 1;
val_ptr
}
+ Some(handle) => match handle.insert_recursing(self.key, value) {
+ (None, val_ptr) => {
+ // SAFETY: We have consumed self.handle and the handle returned.
+ let map = unsafe { self.dormant_map.awaken() };
+ map.length += 1;
+ val_ptr
+ }
+ (Some(ins), val_ptr) => {
+ drop(ins.left);
+ // SAFETY: We have consumed self.handle and the reference returned.
+ let map = unsafe { self.dormant_map.awaken() };
+ let root = map.root.as_mut().unwrap();
+ root.push_internal_level().push(ins.kv.0, ins.kv.1, ins.right);
+ map.length += 1;
+ val_ptr
+ }
+ },
};
// Now that we have finished growing the tree using borrowed references,
// dereference the pointer to a part of it, that we picked up along the way.
K: Ord,
{
let iter = mem::take(self).into_iter();
- let root = BTreeMap::ensure_is_owned(&mut self.root);
- root.bulk_push(iter, &mut self.length);
+ if !iter.is_empty() {
+ self.root.insert(Root::new()).bulk_push(iter, &mut self.length);
+ }
}
}
fn empty() {
let mut map: BTreeMap<i32, i32> = BTreeMap::new();
map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
- assert!(map.is_empty());
+ assert_eq!(map.height(), None);
map.check();
}
assert_eq!(map.len(), len);
map.clear();
map.check();
- assert!(map.is_empty());
+ assert_eq!(map.height(), None);
}
}
let mut a = BTreeMap::new();
let key = "hello there";
let value = "value goes here";
- assert!(a.is_empty());
+ assert_eq!(a.height(), None);
a.insert(key, value);
assert_eq!(a.len(), 1);
assert_eq!(a[key], value);
let key = "hello there";
let value = "value goes here";
- assert!(a.is_empty());
+ assert_eq!(a.height(), None);
match a.entry(key) {
- Occupied(_) => panic!(),
+ Occupied(_) => unreachable!(),
Vacant(e) => {
assert_eq!(key, *e.key());
e.insert(value);
a.check();
}
+#[test]
+fn test_vacant_entry_no_insert() {
+ let mut a = BTreeMap::<&str, ()>::new();
+ let key = "hello there";
+
+ // Non-allocated
+ assert_eq!(a.height(), None);
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the tree has no root.
+ assert_eq!(a.height(), None);
+ a.check();
+
+ // Allocated but still empty
+ a.insert(key, ());
+ a.remove(&key);
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the allocated root is not changed.
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ a.check();
+}
+
#[test]
fn test_first_last_entry() {
let mut a = BTreeMap::new();
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
- fn new_leaf() -> Self {
+ pub fn new_leaf() -> Self {
Self::from_new_leaf(LeafNode::new())
}
}
impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
- /// Adds a key-value pair to the end of the node.
- pub fn push(&mut self, key: K, val: V) {
+ /// Adds a key-value pair to the end of the node, and returns
+ /// the mutable reference of the inserted value.
+ pub fn push(&mut self, key: K, val: V) -> &mut V {
let len = self.len_mut();
let idx = usize::from(*len);
assert!(idx < CAPACITY);
*len += 1;
unsafe {
self.key_area_mut(idx).write(key);
- self.val_area_mut(idx).write(val);
+ self.val_area_mut(idx).write(val)
}
}
}
#[allow(dead_code)]
// Check that the member-like functions conditionally provided by #[derive()]
-// are not overriden by genuine member functions with a different signature.
+// are not overridden by genuine member functions with a different signature.
fn assert_derives() {
fn hash<T: Hash, H: Hasher>(v: BTreeSet<T>, state: &mut H) {
v.hash(state);
use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::mem::MaybeUninit;
use core::ops::Try;
use super::{count, wrap_index, RingSlices};
/// [`iter`]: super::VecDeque::iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
- pub(crate) ring: &'a [T],
+ pub(crate) ring: &'a [MaybeUninit<T>],
pub(crate) tail: usize,
pub(crate) head: usize,
}
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- f.debug_tuple("Iter").field(&front).field(&back).finish()
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ f.debug_tuple("Iter")
+ .field(&MaybeUninit::slice_assume_init_ref(front))
+ .field(&MaybeUninit::slice_assume_init_ref(back))
+ .finish()
+ }
}
}
}
let tail = self.tail;
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
- unsafe { Some(self.ring.get_unchecked(tail)) }
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
}
#[inline]
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- accum = front.iter().fold(accum, &mut f);
- back.iter().fold(accum, &mut f)
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
+ }
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
{
let (mut iter, final_res);
if self.tail <= self.head {
- // single slice self.ring[self.tail..self.head]
- iter = self.ring[self.tail..self.head].iter();
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
+ .iter();
final_res = iter.try_fold(init, &mut f);
} else {
- // two slices: self.ring[self.tail..], self.ring[..self.head]
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
- let mut back_iter = back.iter();
+
+ let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
let res = back_iter.try_fold(init, &mut f);
let len = self.ring.len();
self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
- iter = front[..self.head].iter();
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
final_res = iter.try_fold(res?, &mut f);
}
self.tail = self.head - iter.len();
// that is in bounds.
unsafe {
let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
- self.ring.get_unchecked(idx)
+ self.ring.get_unchecked(idx).assume_init_ref()
}
}
}
return None;
}
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
- unsafe { Some(self.ring.get_unchecked(self.head)) }
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
}
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
- accum = back.iter().rfold(accum, &mut f);
- front.iter().rfold(accum, &mut f)
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
+ }
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
{
let (mut iter, final_res);
if self.tail <= self.head {
- // single slice self.ring[self.tail..self.head]
- iter = self.ring[self.tail..self.head].iter();
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe {
+ MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
+ };
final_res = iter.try_rfold(init, &mut f);
} else {
- // two slices: self.ring[self.tail..], self.ring[..self.head]
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
- let mut front_iter = front[..self.head].iter();
+
+ let mut front_iter =
+ unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
let res = front_iter.try_rfold(init, &mut f);
self.head = front_iter.len();
- iter = back.iter();
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
final_res = iter.try_rfold(res?, &mut f);
}
self.head = self.tail + iter.len();
use core::hash::{Hash, Hasher};
use core::iter::{repeat_with, FromIterator};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice;
}
}
- /// Turn ptr into a slice
+ /// Turn ptr into a slice, since the elements of the backing buffer may be uninitialized,
+ /// we will return a slice of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
#[inline]
- unsafe fn buffer_as_slice(&self) -> &[T] {
- unsafe { slice::from_raw_parts(self.ptr(), self.cap()) }
+ unsafe fn buffer_as_slice(&self) -> &[MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
}
- /// Turn ptr into a mut slice
+ /// Turn ptr into a mut slice, since the elements of the backing buffer may be uninitialized,
+ /// we will return a slice of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
#[inline]
- unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
- unsafe { slice::from_raw_parts_mut(self.ptr(), self.cap()) }
+ unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
}
/// Moves an element out of the buffer
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
let buf = self.buffer_as_slice();
- RingSlices::ring_slices(buf, self.head, self.tail)
+ let (front, back) = RingSlices::ring_slices(buf, self.head, self.tail);
+ (MaybeUninit::slice_assume_init_ref(front), MaybeUninit::slice_assume_init_ref(back))
}
}
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
let head = self.head;
let tail = self.tail;
let buf = self.buffer_as_mut_slice();
- RingSlices::ring_slices(buf, head, tail)
+ let (front, back) = RingSlices::ring_slices(buf, head, tail);
+ (MaybeUninit::slice_assume_init_mut(front), MaybeUninit::slice_assume_init_mut(back))
}
}
if self.is_contiguous() {
let tail = self.tail;
let head = self.head;
- return unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 };
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ return unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ };
}
let buf = self.buf.ptr();
let tail = self.tail;
let head = self.head;
- unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0 }
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ }
}
/// Rotates the double-ended queue `mid` places to the left.
// Reverse the offset to find the original RcBox.
let rc_ptr =
- unsafe { (ptr as *mut RcBox<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) };
+ unsafe { (ptr as *mut u8).offset(-offset).with_metadata_of(ptr as *mut RcBox<T>) };
unsafe { Self::from_ptr(rc_ptr) }
}
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
- |mem| (ptr as *mut RcBox<T>).set_ptr_value(mem),
+ |mem| mem.with_metadata_of(ptr as *mut RcBox<T>),
)
}
}
/// assert!(empty.upgrade().is_none());
/// ```
#[stable(feature = "downgraded_weak", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
#[must_use]
- pub fn new() -> Weak<T> {
- Weak { ptr: NonNull::new(usize::MAX as *mut RcBox<T>).expect("MAX is not 0") }
+ pub const fn new() -> Weak<T> {
+ Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut RcBox<T>) } }
}
}
let offset = unsafe { data_offset(ptr) };
// Thus, we reverse the offset to get the whole RcBox.
// SAFETY: the pointer originated from a Weak, so this offset is safe.
- unsafe { (ptr as *mut RcBox<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) }
+ unsafe { (ptr as *mut u8).offset(-offset).with_metadata_of(ptr as *mut RcBox<T>) }
};
// SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
}
/// Tries to reserve the minimum capacity for exactly `additional` more elements to
- /// be inserted in the given `String`. After calling `reserve_exact`,
+ /// be inserted in the given `String`. After calling `try_reserve_exact`,
/// capacity will be greater than or equal to `self.len() + additional`.
/// Does nothing if the capacity is already sufficient.
///
let offset = data_offset(ptr);
// Reverse the offset to find the original ArcInner.
- let arc_ptr = (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset));
+ let arc_ptr =
+ (ptr as *mut u8).offset(-offset).with_metadata_of(ptr as *mut ArcInner<T>);
Self::from_ptr(arc_ptr)
}
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
- |mem| (ptr as *mut ArcInner<T>).set_ptr_value(mem) as *mut ArcInner<T>,
+ |mem| mem.with_metadata_of(ptr as *mut ArcInner<T>),
)
}
}
/// assert!(empty.upgrade().is_none());
/// ```
#[stable(feature = "downgraded_weak", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
#[must_use]
- pub fn new() -> Weak<T> {
- Weak { ptr: NonNull::new(usize::MAX as *mut ArcInner<T>).expect("MAX is not 0") }
+ pub const fn new() -> Weak<T> {
+ Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut ArcInner<T>) } }
}
}
let offset = unsafe { data_offset(ptr) };
// Thus, we reverse the offset to get the whole RcBox.
// SAFETY: the pointer originated from a Weak, so this offset is safe.
- unsafe { (ptr as *mut ArcInner<T>).set_ptr_value((ptr as *mut u8).offset(-offset)) }
+ unsafe { (ptr as *mut u8).offset(-offset).with_metadata_of(ptr as *mut ArcInner<T>) }
};
// SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
--- /dev/null
+//! Inplace iterate-and-collect specialization for `Vec`
+//!
+//! Note: This documents Vec internals, some of the following sections explain implementation
+//! details and are best read together with the source of this module.
+//!
+//! The specialization in this module applies to iterators in the shape of
+//! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
+//! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
+//! or [`BinaryHeap<T>`], the adapters each consume one or more items per step
+//! (represented by [`InPlaceIterable`]), provide transitive access to `source` (via [`SourceIter`])
+//! and thus the underlying allocation. And finally the layouts of `T` and `U` must
+//! have the same size and alignment, this is currently ensured via const eval instead of trait bounds
+//! in the specialized [`SpecFromIter`] implementation.
+//!
+//! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
+//! [box]: crate::boxed::Box
+//!
+//! By extension some other collections which use `collect::<Vec<_>>()` internally in their
+//! `FromIterator` implementation benefit from this too.
+//!
+//! Access to the underlying source goes through a further layer of indirection via the private
+//! trait [`AsVecIntoIter`] to hide the implementation detail that other collections may use
+//! `vec::IntoIter` internally.
+//!
+//! In-place iteration depends on the interaction of several unsafe traits, implementation
+//! details of multiple parts in the iterator pipeline and often requires holistic reasoning
+//! across multiple structs since iterators are executed cooperatively rather than having
+//! a central evaluator/visitor struct executing all iterator components.
+//!
+//! # Reading from and writing to the same allocation
+//!
+//! By its nature collecting in place means that the reader and writer side of the iterator
+//! use the same allocation. Since `try_fold()` (used in [`SpecInPlaceCollect`]) takes a
+//! reference to the iterator for the duration of the iteration that means we can't interleave
+//! the step of reading a value and getting a reference to write to. Instead raw pointers must be
+//! used on the reader and writer side.
+//!
+//! That writes never clobber a yet-to-be-read item is ensured by the [`InPlaceIterable`] requirements.
+//!
+//! # Layout constraints
+//!
+//! [`Allocator`] requires that `allocate()` and `deallocate()` have matching alignment and size.
+//! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
+//! avoid and it would make pointer arithmetic more difficult.
+//!
+//! [`Allocator`]: core::alloc::Allocator
+//!
+//! # Drop- and panic-safety
+//!
+//! Iteration can panic, requiring dropping the already written parts but also the remainder of
+//! the source. Iteration can also leave some source items unconsumed which must be dropped.
+//! All those drops in turn can panic which then must either leak the allocation or abort to avoid
+//! double-drops.
+//!
+//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
+//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
+//!
+//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
+//!
+//! # O(1) collect
+//!
+//! The main iteration itself is further specialized when the iterator implements
+//! [`TrustedRandomAccessNoCoerce`] to let the optimizer see that it is a counted loop with a single
+//! [induction variable]. This can turn some iterators into a noop, i.e. it reduces them from O(n) to
+//! O(1). This particular optimization is quite fickle and doesn't always work, see [#79308]
+//!
+//! [#79308]: https://github.com/rust-lang/rust/issues/79308
+//! [induction variable]: https://en.wikipedia.org/wiki/Induction_variable
+//!
+//! Since unchecked accesses through that trait do not advance the read pointer of `IntoIter`
+//! this would interact unsoundly with the requirements about dropping the tail described above.
+//! But since the normal `Drop` implementation of `IntoIter` would suffer from the same problem it
+//! is only correct for `TrustedRandomAccessNoCoerce` to be implemented when the items don't
+//! have a destructor. Thus that implicit requirement also makes the specialization safe to use for
+//! in-place collection.
+//! Note that this safety concern is about the correctness of `impl Drop for IntoIter`,
+//! not the guarantees of `InPlaceIterable`.
+//!
+//! # Adapter implementations
+//!
+//! The invariants for adapters are documented in [`SourceIter`] and [`InPlaceIterable`], but
+//! getting them right can be rather subtle for multiple, sometimes non-local reasons.
+//! For example `InPlaceIterable` would be valid to implement for [`Peekable`], except
+//! that it is stateful, cloneable and `IntoIter`'s clone implementation shortens the underlying
+//! allocation which means if the iterator has been peeked and then gets cloned there no longer is
+//! enough room, thus breaking an invariant ([#85322]).
+//!
+//! [#85322]: https://github.com/rust-lang/rust/issues/85322
+//! [`Peekable`]: core::iter::Peekable
+//!
+//!
+//! # Examples
+//!
+//! Some cases that are optimized by this specialization, more can be found in the `Vec`
+//! benchmarks:
+//!
+//! ```rust
+//! # #[allow(dead_code)]
+//! /// Converts a usize vec into an isize one.
+//! pub fn cast(vec: Vec<usize>) -> Vec<isize> {
+//! // Does not allocate, free or panic. On optlevel>=2 it does not loop.
+//! // Of course this particular case could and should be written with `into_raw_parts` and
+//! // `from_raw_parts` instead.
+//! vec.into_iter().map(|u| u as isize).collect()
+//! }
+//! ```
+//!
+//! ```rust
+//! # #[allow(dead_code)]
+//! /// Drops remaining items in `src` and if the layouts of `T` and `U` match it
+//! /// returns an empty Vec backed by the original allocation. Otherwise it returns a new
+//! /// empty vec.
+//! pub fn recycle_allocation<T, U>(src: Vec<T>) -> Vec<U> {
+//! src.into_iter().filter_map(|_| None).collect()
+//! }
+//! ```
+//!
+//! ```rust
+//! let vec = vec![13usize; 1024];
+//! let _ = vec.into_iter()
+//! .enumerate()
+//! .filter_map(|(idx, val)| if idx % 2 == 0 { Some(val+idx) } else {None})
+//! .collect::<Vec<_>>();
+//!
+//! // is equivalent to the following, but doesn't require bounds checks
+//!
+//! let mut vec = vec![13usize; 1024];
+//! let mut write_idx = 0;
+//! for idx in 0..vec.len() {
+//! if idx % 2 == 0 {
+//! vec[write_idx] = vec[idx] + idx;
+//! write_idx += 1;
+//! }
+//! }
+//! vec.truncate(write_idx);
+//! ```
+use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
+use core::mem::{self, ManuallyDrop};
+use core::ptr::{self};
+
+use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
+
+/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
+/// source allocation, i.e. executing the pipeline in place.
+#[rustc_unsafe_specialization_marker]
+pub(super) trait InPlaceIterableMarker {}
+
+impl<T> InPlaceIterableMarker for T where T: InPlaceIterable {}
+
+impl<T, I> SpecFromIter<T, I> for Vec<T>
+where
+ I: Iterator<Item = T> + SourceIter<Source: AsVecIntoIter> + InPlaceIterableMarker,
+{
+ default fn from_iter(mut iterator: I) -> Self {
+ // See "Layout constraints" section in the module documentation. We rely on const
+ // optimization here since these conditions currently cannot be expressed as trait bounds
+ if mem::size_of::<T>() == 0
+ || mem::size_of::<T>()
+ != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
+ || mem::align_of::<T>()
+ != mem::align_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
+ {
+ // fallback to more generic implementations
+ return SpecFromIterNested::from_iter(iterator);
+ }
+
+ let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+ let inner = iterator.as_inner().as_into_iter();
+ (
+ inner.buf.as_ptr(),
+ inner.ptr,
+ inner.buf.as_ptr() as *mut T,
+ inner.end as *const T,
+ inner.cap,
+ )
+ };
+
+ let len = SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf, dst_end);
+
+ let src = unsafe { iterator.as_inner().as_into_iter() };
+ // check if SourceIter contract was upheld
+ // caveat: if they weren't we might not even make it to this point
+ debug_assert_eq!(src_buf, src.buf.as_ptr());
+ // check InPlaceIterable contract. This is only possible if the iterator advanced the
+ // source pointer at all. If it uses unchecked access via TrustedRandomAccess
+ // then the source pointer will stay in its initial position and we can't use it as reference
+ if src.ptr != src_ptr {
+ debug_assert!(
+ unsafe { dst_buf.add(len) as *const _ } <= src.ptr,
+ "InPlaceIterable contract violation, write pointer advanced beyond read pointer"
+ );
+ }
+
+ // Drop any remaining values at the tail of the source but prevent drop of the allocation
+ // itself once IntoIter goes out of scope.
+ // If the drop panics then we also leak any elements collected into dst_buf.
+ //
+ // Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce
+ // contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
+ // module documenttation why this is ok anyway.
+ src.forget_allocation_drop_remaining();
+
+ let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
+
+ vec
+ }
+}
+
+fn write_in_place_with_drop<T>(
+ src_end: *const T,
+) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
+ move |mut sink, item| {
+ unsafe {
+ // the InPlaceIterable contract cannot be verified precisely here since
+ // try_fold has an exclusive reference to the source pointer
+ // all we can do is check if it's still in range
+ debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
+ ptr::write(sink.dst, item);
+ // Since this executes user code which can panic we have to bump the pointer
+ // after each step.
+ sink.dst = sink.dst.add(1);
+ }
+ Ok(sink)
+ }
+}
+
+/// Helper trait to hold specialized implementations of the in-place iterate-collect loop
+trait SpecInPlaceCollect<T, I>: Iterator<Item = T> {
+ /// Collects an iterator (`self`) into the destination buffer (`dst`) and returns the number of items
+ /// collected. `end` is the last writable element of the allocation and used for bounds checks.
+ ///
+ /// This method is specialized and one of its implementations makes use of
+ /// `Iterator::__iterator_get_unchecked` calls with a `TrustedRandomAccessNoCoerce` bound
+ /// on `I` which means the caller of this method must take the safety conditions
+ /// of that trait into consideration.
+ fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
+}
+
+impl<T, I> SpecInPlaceCollect<T, I> for I
+where
+ I: Iterator<Item = T>,
+{
+ #[inline]
+ default fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
+ // use try-fold since
+ // - it vectorizes better for some iterator adapters
+ // - unlike most internal iteration methods, it only takes a &mut self
+ // - it lets us thread the write pointer through its innards and get it back in the end
+ let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+ let sink =
+ self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)).unwrap();
+ // iteration succeeded, don't drop head
+ unsafe { ManuallyDrop::new(sink).dst.offset_from(dst_buf) as usize }
+ }
+}
+
+impl<T, I> SpecInPlaceCollect<T, I> for I
+where
+ I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
+{
+ #[inline]
+ fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
+ let len = self.size();
+ let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+ for i in 0..len {
+ // Safety: InplaceIterable contract guarantees that for every element we read
+ // one slot in the underlying storage will have been freed up and we can immediately
+ // write back the result.
+ unsafe {
+ let dst = dst_buf.offset(i as isize);
+ debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
+ ptr::write(dst, self.__iterator_get_unchecked(i));
+ // Since this executes user code which can panic we have to bump the pointer
+ // after each step.
+ drop_guard.dst = dst.add(1);
+ }
+ }
+ mem::forget(drop_guard);
+ len
+ }
+}
+
+/// Internal helper trait for in-place iteration specialization.
+///
+/// Currently this is only implemented by [`vec::IntoIter`] - returning a reference to itself - and
+/// [`binary_heap::IntoIter`] which returns a reference to its inner representation.
+///
+/// Since this is an internal trait it hides the implementation detail `binary_heap::IntoIter`
+/// uses `vec::IntoIter` internally.
+///
+/// [`vec::IntoIter`]: super::IntoIter
+/// [`binary_heap::IntoIter`]: crate::collections::binary_heap::IntoIter
+///
+/// # Safety
+///
+/// In-place iteration relies on implementation details of `vec::IntoIter`, most importantly that
+/// it does not create references to the whole allocation during iteration, only raw pointers
+#[rustc_specialization_trait]
+pub(crate) unsafe trait AsVecIntoIter {
+ type Item;
+ fn as_into_iter(&mut self) -> &mut super::IntoIter<Self::Item>;
+}
+#[cfg(not(no_global_oom_handling))]
+use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use core::fmt;
/// (&mut into_iter).for_each(core::mem::drop);
/// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
/// ```
+ ///
+ /// This method is used by in-place iteration, refer to the vec::in_place_collect
+ /// documentation for an overview.
#[cfg(not(no_global_oom_handling))]
pub(super) fn forget_allocation_drop_remaining(&mut self) {
let remaining = self.as_raw_mut_slice();
}
}
+// In addition to the SAFETY invariants of the following three unsafe traits
+// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
}
}
-// internal helper trait for in-place iteration specialization.
-#[rustc_specialization_trait]
-pub(crate) trait AsIntoIter {
- type Item;
- fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>;
-}
-
-impl<T> AsIntoIter for IntoIter<T> {
+#[cfg(not(no_global_oom_handling))]
+unsafe impl<T> AsVecIntoIter for IntoIter<T> {
type Item = T;
fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
mod cow;
#[cfg(not(no_global_oom_handling))]
-pub(crate) use self::into_iter::AsIntoIter;
+pub(crate) use self::in_place_collect::AsVecIntoIter;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::into_iter::IntoIter;
mod is_zero;
#[cfg(not(no_global_oom_handling))]
-mod source_iter_marker;
+mod in_place_collect;
mod partial_eq;
///
/// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
/// (at least, it's highly likely to be incorrect if it wasn't).
- /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
/// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (ie. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
- /// * `capacity` needs to be the capacity that the pointer was allocated with.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is **not** safe
/// It's also not safe to build one from a `Vec<u16>` and its length, because
/// the allocator cares about the alignment, and these two types have different
/// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
- /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1. To avoid
+ /// these issues, it is often preferable to do casting/transmuting using
+ /// [`slice::from_raw_parts`] instead.
///
/// The ownership of `ptr` is effectively transferred to the
/// `Vec<T>` which may then deallocate, reallocate or change the
}
}
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array_ref", since = "1.61.0")]
+impl<T: Clone, const N: usize> From<&[T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(b"raw"), vec![b'r', b'a', b'w']);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &[T; N]) -> Vec<T> {
+ s.to_vec()
+ }
+
+ #[cfg(test)]
+ fn from(s: &[T; N]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array_ref", since = "1.61.0")]
+impl<T: Clone, const N: usize> From<&mut [T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &mut [T; N]) -> Vec<T> {
+ s.to_vec()
+ }
+
+ #[cfg(test)]
+ fn from(s: &mut [T; N]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
where
}
}
-__impl_slice_eq1! { [A: Allocator] Vec<T, A>, Vec<U, A>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+++ /dev/null
-use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
-use core::mem::{self, ManuallyDrop};
-use core::ptr::{self};
-
-use super::{AsIntoIter, InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
-
-/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
-/// source allocation, i.e. executing the pipeline in place.
-#[rustc_unsafe_specialization_marker]
-pub(super) trait InPlaceIterableMarker {}
-
-impl<T> InPlaceIterableMarker for T where T: InPlaceIterable {}
-
-impl<T, I> SpecFromIter<T, I> for Vec<T>
-where
- I: Iterator<Item = T> + SourceIter<Source: AsIntoIter> + InPlaceIterableMarker,
-{
- default fn from_iter(mut iterator: I) -> Self {
- // Additional requirements which cannot expressed via trait bounds. We rely on const eval
- // instead:
- // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
- // b) size match as required by Alloc contract
- // c) alignments match as required by Alloc contract
- if mem::size_of::<T>() == 0
- || mem::size_of::<T>()
- != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
- || mem::align_of::<T>()
- != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
- {
- // fallback to more generic implementations
- return SpecFromIterNested::from_iter(iterator);
- }
-
- let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
- let inner = iterator.as_inner().as_into_iter();
- (
- inner.buf.as_ptr(),
- inner.ptr,
- inner.buf.as_ptr() as *mut T,
- inner.end as *const T,
- inner.cap,
- )
- };
-
- let len = SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf, dst_end);
-
- let src = unsafe { iterator.as_inner().as_into_iter() };
- // check if SourceIter contract was upheld
- // caveat: if they weren't we might not even make it to this point
- debug_assert_eq!(src_buf, src.buf.as_ptr());
- // check InPlaceIterable contract. This is only possible if the iterator advanced the
- // source pointer at all. If it uses unchecked access via TrustedRandomAccess
- // then the source pointer will stay in its initial position and we can't use it as reference
- if src.ptr != src_ptr {
- debug_assert!(
- unsafe { dst_buf.add(len) as *const _ } <= src.ptr,
- "InPlaceIterable contract violation, write pointer advanced beyond read pointer"
- );
- }
-
- // drop any remaining values at the tail of the source
- // but prevent drop of the allocation itself once IntoIter goes out of scope
- // if the drop panics then we also leak any elements collected into dst_buf
- //
- // FIXME: Since `SpecInPlaceCollect::collect_in_place` above might use
- // `__iterator_get_unchecked` internally, this call might be operating on
- // a `vec::IntoIter` with incorrect internal state regarding which elements
- // have already been “consumed”. However, the `TrustedRandomIteratorNoCoerce`
- // implementation of `vec::IntoIter` is only present if the `Vec` elements
- // don’t have a destructor, so it doesn’t matter if elements are “dropped multiple times”
- // in this case.
- // This argument technically currently lacks justification from the `# Safety` docs for
- // `SourceIter`/`InPlaceIterable` and/or `TrustedRandomAccess`, so it might be possible that
- // someone could inadvertently create new library unsoundness
- // involving this `.forget_allocation_drop_remaining()` call.
- src.forget_allocation_drop_remaining();
-
- let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
-
- vec
- }
-}
-
-fn write_in_place_with_drop<T>(
- src_end: *const T,
-) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
- move |mut sink, item| {
- unsafe {
- // the InPlaceIterable contract cannot be verified precisely here since
- // try_fold has an exclusive reference to the source pointer
- // all we can do is check if it's still in range
- debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
- ptr::write(sink.dst, item);
- // Since this executes user code which can panic we have to bump the pointer
- // after each step.
- sink.dst = sink.dst.add(1);
- }
- Ok(sink)
- }
-}
-
-/// Helper trait to hold specialized implementations of the in-place iterate-collect loop
-trait SpecInPlaceCollect<T, I>: Iterator<Item = T> {
- /// Collects an iterator (`self`) into the destination buffer (`dst`) and returns the number of items
- /// collected. `end` is the last writable element of the allocation and used for bounds checks.
- ///
- /// This method is specialized and one of its implementations makes use of
- /// `Iterator::__iterator_get_unchecked` calls with a `TrustedRandomAccessNoCoerce` bound
- /// on `I` which means the caller of this method must take the safety conditions
- /// of that trait into consideration.
- fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
-}
-
-impl<T, I> SpecInPlaceCollect<T, I> for I
-where
- I: Iterator<Item = T>,
-{
- #[inline]
- default fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
- // use try-fold since
- // - it vectorizes better for some iterator adapters
- // - unlike most internal iteration methods, it only takes a &mut self
- // - it lets us thread the write pointer through its innards and get it back in the end
- let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
- let sink =
- self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)).unwrap();
- // iteration succeeded, don't drop head
- unsafe { ManuallyDrop::new(sink).dst.offset_from(dst_buf) as usize }
- }
-}
-
-impl<T, I> SpecInPlaceCollect<T, I> for I
-where
- I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
-{
- #[inline]
- fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
- let len = self.size();
- let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
- for i in 0..len {
- // Safety: InplaceIterable contract guarantees that for every element we read
- // one slot in the underlying storage will have been freed up and we can immediately
- // write back the result.
- unsafe {
- let dst = dst_buf.offset(i as isize);
- debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
- ptr::write(dst, self.__iterator_get_unchecked(i));
- // Since this executes user code which can panic we have to bump the pointer
- // after each step.
- drop_guard.dst = dst.add(1);
- }
- }
- mem::forget(drop_guard);
- len
- }
-}
t!(format!("{:?}", "true"), "\"true\"");
t!(format!("{:?}", "foo\nbar"), "\"foo\\nbar\"");
t!(format!("{:?}", "foo\n\"bar\"\r\n\'baz\'\t\\qux\\"), r#""foo\n\"bar\"\r\n'baz'\t\\qux\\""#);
- t!(format!("{:?}", "foo\0bar\x01baz\u{7f}q\u{75}x"), r#""foo\u{0}bar\u{1}baz\u{7f}qux""#);
+ t!(format!("{:?}", "foo\0bar\x01baz\u{7f}q\u{75}x"), r#""foo\0bar\u{1}baz\u{7f}qux""#);
t!(format!("{:o}", 10_usize), "12");
t!(format!("{:x}", 10_usize), "a");
t!(format!("{:X}", 10_usize), "A");
#![feature(const_intrinsic_copy)]
#![feature(const_mut_refs)]
#![feature(const_nonnull_slice_from_raw_parts)]
-#![feature(const_ptr_offset)]
#![feature(const_ptr_write)]
#![feature(const_try)]
#![feature(core_intrinsics)]
assert_eq!("abc".escape_debug().to_string(), "abc");
assert_eq!("a c".escape_debug().to_string(), "a c");
assert_eq!("éèê".escape_debug().to_string(), "éèê");
- assert_eq!("\r\n\t".escape_debug().to_string(), "\\r\\n\\t");
+ assert_eq!("\0\r\n\t".escape_debug().to_string(), "\\0\\r\\n\\t");
assert_eq!("'\"\\".escape_debug().to_string(), "\\'\\\"\\\\");
assert_eq!("\u{7f}\u{ff}".escape_debug().to_string(), "\\u{7f}\u{ff}");
assert_eq!("\u{100}\u{ffff}".escape_debug().to_string(), "\u{100}\\u{ffff}");
b'\'' => ([b'\\', b'\'', 0, 0], 2),
b'"' => ([b'\\', b'"', 0, 0], 2),
b'\x20'..=b'\x7e' => ([c, 0, 0, 0], 1),
- _ => ([b'\\', b'x', hexify(c >> 4), hexify(c & 0xf)], 4),
+ _ => {
+ let hex_digits: &[u8; 16] = b"0123456789abcdef";
+ ([b'\\', b'x', hex_digits[(c >> 4) as usize], hex_digits[(c & 0xf) as usize]], 4)
+ }
};
return EscapeDefault { range: 0..len, data };
-
- fn hexify(b: u8) -> u8 {
- match b {
- 0..=9 => b'0' + b,
- _ => b'a' + b - 10,
- }
- }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Iterator for EscapeDefault {
type Item = u8;
+
+ #[inline]
fn next(&mut self) -> Option<u8> {
self.range.next().map(|i| self.data[i as usize])
}
/// [impl]: index.html#implementing-async-iterator
#[unstable(feature = "async_iterator", issue = "79024")]
#[must_use = "async iterators do nothing unless polled"]
+#[doc(alias = "Stream")]
pub trait AsyncIterator {
/// The type of items yielded by the async iterator.
type Item;
//! impl bool {}
+use crate::marker::Destruct;
+
#[lang = "bool"]
impl bool {
/// Returns `Some(t)` if the `bool` is [`true`](../std/keyword.true.html),
#[unstable(feature = "bool_to_option", issue = "80967")]
#[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
#[inline]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn then_some<T>(self, t: T) -> Option<T>
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
if self { Some(t) } else { None }
}
#[stable(feature = "lazy_bool_to_option", since = "1.50.0")]
#[rustc_const_unstable(feature = "const_bool_to_option", issue = "91917")]
#[inline]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn then<T, F>(self, f: F) -> Option<T>
where
F: ~const FnOnce() -> T,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
if self { Some(f()) } else { None }
}
}
/// An error which can be returned when parsing a char.
+///
+/// This `struct` is created when using the [`char::from_str`] method.
#[stable(feature = "char_from_str", since = "1.20.0")]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ParseCharError {
}
}
-/// The error type returned when a conversion from u32 to char fails.
+/// The error type returned when a conversion from [`prim@u32`] to [`prim@char`] fails.
+///
+/// This `struct` is created by the [`char::try_from<u32>`](char#impl-TryFrom<u32>) method.
+/// See its documentation for more.
#[stable(feature = "try_from", since = "1.34.0")]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct CharTryFromError(());
None => self.iter.next()?,
};
- if u < 0xD800 || 0xDFFF < u {
+ if !u.is_utf16_surrogate() {
// SAFETY: not a surrogate
Some(Ok(unsafe { from_u32_unchecked(u as u32) }))
} else if u >= 0xDC00 {
// buf is empty, no additional elements from it.
None => (0, 0),
// `u` is a non surrogate, so it's always an additional character.
- Some(u) if u < 0xD800 || 0xDFFF < u => (1, 1),
+ Some(u) if !u.is_utf16_surrogate() => (1, 1),
// `u` is a leading surrogate (it can never be a trailing surrogate and
// it's a surrogate due to the previous branch) and `self.iter` is empty.
//
#[inline]
pub(crate) fn escape_debug_ext(self, args: EscapeDebugExtArgs) -> EscapeDebug {
let init_state = match self {
+ '\0' => EscapeDefaultState::Backslash('0'),
'\t' => EscapeDefaultState::Backslash('t'),
'\r' => EscapeDefaultState::Backslash('r'),
'\n' => EscapeDefaultState::Backslash('n'),
#![stable(feature = "rust1", since = "1.0.0")]
+use crate::marker::Destruct;
+
/// A common trait for the ability to explicitly duplicate an object.
///
/// Differs from [`Copy`] in that [`Copy`] is implicit and an inexpensive bit-wise copy, while
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[default_method_body_is_const]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
fn clone_from(&mut self, source: &Self)
where
- Self: ~const Drop,
+ Self: ~const Drop + ~const Destruct,
{
*self = source.clone()
}
/// let result = 2.cmp(&1);
/// assert_eq!(Ordering::Greater, result);
/// ```
-#[derive(Clone, Copy, PartialEq, Debug, Hash)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[stable(feature = "rust1", since = "1.0.0")]
#[repr(i8)]
pub enum Ordering {
/* compiler built-in */
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl Eq for Ordering {}
-
#[stable(feature = "rust1", since = "1.0.0")]
impl Ord for Ordering {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "AsRef")]
pub trait AsRef<T: ?Sized> {
- /// Performs the conversion.
+ /// Converts this type into a shared reference of the (usually inferred) input type.
#[stable(feature = "rust1", since = "1.0.0")]
fn as_ref(&self) -> &T;
}
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "AsMut")]
pub trait AsMut<T: ?Sized> {
- /// Performs the conversion.
+ /// Converts this type into a mutable reference of the (usually inferred) input type.
#[stable(feature = "rust1", since = "1.0.0")]
fn as_mut(&mut self) -> &mut T;
}
#[rustc_diagnostic_item = "Into"]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait Into<T>: Sized {
- /// Performs the conversion.
+ /// Converts this type into the (usually inferred) input type.
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
fn into(self) -> T;
note = "to coerce a `{T}` into a `{Self}`, use `&*` as a prefix",
))]
pub trait From<T>: Sized {
- /// Performs the conversion.
+ /// Converts to this type from the input type.
#[lang = "from"]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
where
T: ~const AsRef<U>,
{
+ #[inline]
fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
where
T: ~const AsRef<U>,
{
+ #[inline]
fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
where
T: ~const AsMut<U>,
{
+ #[inline]
fn as_mut(&mut self) -> &mut U {
(*self).as_mut()
}
}
(fmt_prec.saturating_sub(prec), prec.saturating_sub(fmt_prec))
}
- None => (0,0)
+ None => (0, 0)
};
for _ in 1..subtracted_precision {
- n/=10;
+ n /= 10;
exponent += 1;
}
if subtracted_precision != 0 {
// SAFETY: In either case, `exp_buf` is written within bounds and `exp_ptr[..len]`
// is contained within `exp_buf` since `len <= 3`.
let exp_slice = unsafe {
- *exp_ptr.offset(0) = if upper {b'E'} else {b'e'};
+ *exp_ptr.offset(0) = if upper { b'E' } else { b'e' };
let len = if exponent < 10 {
*exp_ptr.offset(1) = (exponent as u8) + b'0';
2
/// Which kind of future are we turning this into?
#[unstable(feature = "into_future", issue = "67644")]
- type Future: Future<Output = Self::Output>;
+ type IntoFuture: Future<Output = Self::Output>;
/// Creates a future from a value.
#[unstable(feature = "into_future", issue = "67644")]
#[lang = "into_future"]
- fn into_future(self) -> Self::Future;
+ fn into_future(self) -> Self::IntoFuture;
}
#[unstable(feature = "into_future", issue = "67644")]
impl<F: Future> IntoFuture for F {
type Output = F::Output;
- type Future = F;
+ type IntoFuture = F;
- fn into_future(self) -> Self::Future {
+ fn into_future(self) -> Self::IntoFuture {
self
}
}
)]
#![allow(missing_docs)]
-use crate::marker::DiscriminantKind;
+use crate::marker::{Destruct, DiscriminantKind};
use crate::mem;
// These imports are used for simplifying intra-doc links
///
/// The stabilized version of this intrinsic is [`pointer::offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
pub fn offset<T>(dst: *const T, offset: isize) -> *const T;
/// Calculates the offset from a pointer, potentially wrapping.
///
/// The stabilized version of this intrinsic is [`pointer::wrapping_offset`].
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
pub fn arith_offset<T>(dst: *const T, offset: isize) -> *const T;
/// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
#[lang = "const_eval_select"]
#[rustc_do_not_const_check]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const unsafe fn const_eval_select<ARG, F, G, RET>(
arg: ARG,
_called_in_const: F,
) -> RET
where
F: ~const FnOnce<ARG, Output = RET>,
- G: FnOnce<ARG, Output = RET> + ~const Drop,
+ G: FnOnce<ARG, Output = RET> + ~const Drop + ~const Destruct,
{
called_at_rt.call_once(arg)
}
)]
#[rustc_const_unstable(feature = "const_eval_select", issue = "none")]
#[lang = "const_eval_select_ct"]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const unsafe fn const_eval_select_ct<ARG, F, G, RET>(
arg: ARG,
called_in_const: F,
) -> RET
where
F: ~const FnOnce<ARG, Output = RET>,
- G: FnOnce<ARG, Output = RET> + ~const Drop,
+ G: FnOnce<ARG, Output = RET> + ~const Drop + ~const Destruct,
{
called_in_const.call_once(arg)
}
--- /dev/null
+use crate::ops::Try;
+
+/// Like `Iterator::by_ref`, but requiring `Sized` so it can forward generics.
+///
+/// Ideally this will no longer be required, eventually, but as can be seen in
+/// the benchmarks (as of Feb 2022 at least) `by_ref` can have a performance cost.
+pub(crate) struct ByRefSized<'a, I>(pub &'a mut I);
+
+impl<I: Iterator> Iterator for ByRefSized<'_, I> {
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.0.size_hint()
+ }
+
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ self.0.advance_by(n)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.0.nth(n)
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.0.fold(init, f)
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ self.0.try_fold(init, f)
+ }
+}
use crate::iter::{InPlaceIterable, Iterator};
use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, NeverShortCircuit, Residual, Try};
+mod by_ref_sized;
mod chain;
mod cloned;
mod copied;
scan::Scan, skip::Skip, skip_while::SkipWhile, take::Take, take_while::TakeWhile, zip::Zip,
};
+pub(crate) use self::by_ref_sized::ByRefSized;
+
#[stable(feature = "iter_cloned", since = "1.1.0")]
pub use self::cloned::Cloned;
/// The trait is unsafe because implementers must uphold additional safety properties.
/// See [`as_inner`] for details.
///
+/// The primary use of this trait is in-place iteration. Refer to the [`vec::in_place_collect`]
+/// module documentation for more information.
+///
+/// [`vec::in_place_collect`]: ../../../../alloc/vec/in_place_collect/index.html
+///
/// # Examples
///
/// Retrieving a partially consumed source:
//! ```
//!
//! While many collections offer `iter()`, not all offer `iter_mut()`. For
-//! example, mutating the keys of a [`HashSet<T>`] or [`HashMap<K, V>`] could
-//! put the collection into an inconsistent state if the key hashes change, so
-//! these collections only offer `iter()`.
+//! example, mutating the keys of a [`HashSet<T>`] could put the collection
+//! into an inconsistent state if the key hashes change, so this collection
+//! only offers `iter()`.
//!
//! [`into_iter()`]: IntoIterator::into_iter
//! [`HashSet<T>`]: ../../std/collections/struct.HashSet.html
-//! [`HashMap<K, V>`]: ../../std/collections/struct.HashMap.html
//!
//! # Adapters
//!
#[unstable(feature = "iter_intersperse", reason = "recently added", issue = "79524")]
pub use self::adapters::{Intersperse, IntersperseWith};
-pub(crate) use self::adapters::try_process;
+pub(crate) use self::adapters::{try_process, ByRefSized};
mod adapters;
mod range;
use crate::ops::{ChangeOutputType, ControlFlow, FromResidual, Residual, Try};
use super::super::try_process;
+use super::super::ByRefSized;
use super::super::TrustedRandomAccessNoCoerce;
use super::super::{Chain, Cloned, Copied, Cycle, Enumerate, Filter, FilterMap, Fuse};
use super::super::{FlatMap, Flatten};
<<Self as Iterator>::Item as Try>::Residual: Residual<B>,
B: FromIterator<<Self::Item as Try>::Output>,
{
- try_process(self, |i| i.collect())
+ try_process(ByRefSized(self), |i| i.collect())
}
/// Collects all the items from an iterator into a collection.
/// This is useful when you have an iterator over `&T`, but you need an
/// iterator over `T`.
///
+ /// There is no guarantee whatsoever about the `clone` method actually
+ /// being called *or* optimized away. So code should not depend on
+ /// either.
+ ///
/// [`clone`]: Clone::clone
///
/// # Examples
/// assert_eq!(v_cloned, vec![1, 2, 3]);
/// assert_eq!(v_map, vec![1, 2, 3]);
/// ```
+ ///
+ /// To get the best performance, try to clone late:
+ ///
+ /// ```
+ /// let a = [vec![0_u8, 1, 2], vec![3, 4], vec![23]];
+ /// // don't do this:
+ /// let slower: Vec<_> = a.iter().cloned().filter(|s| s.len() == 1).collect();
+ /// assert_eq!(&[vec![23]], &slower[..]);
+ /// // instead call `cloned` late
+ /// let faster: Vec<_> = a.iter().filter(|s| s.len() == 1).cloned().collect();
+ /// assert_eq!(&[vec![23]], &faster[..]);
+ /// ```
#[stable(feature = "rust1", since = "1.0.0")]
fn cloned<'a, T: 'a>(self) -> Cloned<Self>
where
/// in its place, assuming structural constraints of the source allow such an insertion.
/// In other words this trait indicates that an iterator pipeline can be collected in place.
///
+/// The primary use of this trait is in-place iteration. Refer to the [`vec::in_place_collect`]
+/// module documentation for more information.
+///
+/// [`vec::in_place_collect`]: ../../../../alloc/vec/in_place_collect/index.html
/// [`SourceIter`]: crate::iter::SourceIter
/// [`next()`]: Iterator::next
/// [`try_fold()`]: Iterator::try_fold
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
//
-// Library features for const fns:
+// Library features:
#![feature(const_align_offset)]
#![feature(const_align_of_val)]
#![feature(const_alloc_layout)]
#![feature(const_pin)]
#![feature(const_replace)]
#![feature(const_ptr_is_null)]
-#![feature(const_ptr_offset)]
#![feature(const_ptr_offset_from)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
#![feature(ptr_metadata)]
#![feature(slice_ptr_get)]
#![feature(str_internals)]
+#![feature(utf16_extra)]
+#![feature(utf16_extra_const)]
#![feature(variant_count)]
#![feature(const_array_from_ref)]
#![feature(const_slice_from_ref)]
#![feature(asm_const)]
//
// Target features:
-#![feature(aarch64_target_feature)]
-#![feature(adx_target_feature)]
+#![cfg_attr(bootstrap, feature(aarch64_target_feature))]
#![feature(arm_target_feature)]
#![feature(avx512_target_feature)]
#![feature(cmpxchg16b_target_feature)]
#![feature(sse4a_target_feature)]
#![feature(tbm_target_feature)]
#![feature(wasm_target_feature)]
+#![cfg_attr(bootstrap, feature(adx_target_feature))]
// allow using `core::` in intra-doc links
#[allow(unused_extern_crates)]
#[cfg_attr(not(test), rustc_diagnostic_item = "assert_eq_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! assert_eq {
- ($left:expr, $right:expr $(,)?) => ({
+ ($left:expr, $right:expr $(,)?) => {
match (&$left, &$right) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
}
}
}
- });
- ($left:expr, $right:expr, $($arg:tt)+) => ({
+ };
+ ($left:expr, $right:expr, $($arg:tt)+) => {
match (&$left, &$right) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
}
}
}
- });
+ };
}
/// Asserts that two expressions are not equal to each other (using [`PartialEq`]).
#[cfg_attr(not(test), rustc_diagnostic_item = "assert_ne_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! assert_ne {
- ($left:expr, $right:expr $(,)?) => ({
+ ($left:expr, $right:expr $(,)?) => {
match (&$left, &$right) {
(left_val, right_val) => {
if *left_val == *right_val {
}
}
}
- });
- ($left:expr, $right:expr, $($arg:tt)+) => ({
+ };
+ ($left:expr, $right:expr, $($arg:tt)+) => {
match (&($left), &($right)) {
(left_val, right_val) => {
if *left_val == *right_val {
}
}
}
- });
+ };
}
/// Asserts that an expression matches any of the given patterns.
#[allow_internal_unstable(core_panic)]
#[rustc_macro_transparency = "semitransparent"]
pub macro assert_matches {
- ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => ({
+ ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )? $(,)?) => {
match $left {
$( $pattern )|+ $( if $guard )? => {}
ref left_val => {
);
}
}
- }),
- ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $($arg:tt)+) => ({
+ },
+ ($left:expr, $(|)? $( $pattern:pat_param )|+ $( if $guard: expr )?, $($arg:tt)+) => {
match $left {
$( $pattern )|+ $( if $guard )? => {}
ref left_val => {
);
}
}
- }),
+ },
}
/// Asserts that a boolean expression is `true` at runtime.
#[rustc_diagnostic_item = "debug_assert_macro"]
#[allow_internal_unstable(edition_panic)]
macro_rules! debug_assert {
- ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert!($($arg)*); })
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert!($($arg)*);
+ }
+ };
}
/// Asserts that two expressions are equal to each other.
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_eq_macro")]
macro_rules! debug_assert_eq {
- ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_eq!($($arg)*); })
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_eq!($($arg)*);
+ }
+ };
}
/// Asserts that two expressions are not equal to each other.
#[stable(feature = "assert_ne", since = "1.13.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "debug_assert_ne_macro")]
macro_rules! debug_assert_ne {
- ($($arg:tt)*) => (if $crate::cfg!(debug_assertions) { $crate::assert_ne!($($arg)*); })
+ ($($arg:tt)*) => {
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_ne!($($arg)*);
+ }
+ };
}
/// Asserts that an expression matches any of the given patterns.
#[allow_internal_unstable(assert_matches)]
#[rustc_macro_transparency = "semitransparent"]
pub macro debug_assert_matches($($arg:tt)*) {
- if $crate::cfg!(debug_assertions) { $crate::assert_matches::assert_matches!($($arg)*); }
+ if $crate::cfg!(debug_assertions) {
+ $crate::assert_matches::assert_matches!($($arg)*);
+ }
}
/// Returns whether the given expression matches any of the given patterns.
$( $pattern )|+ $( if $guard )? => true,
_ => false
}
- }
+ };
}
/// Unwraps a result or propagates its error.
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "write_macro")]
macro_rules! write {
- ($dst:expr, $($arg:tt)*) => ($dst.write_fmt($crate::format_args!($($arg)*)))
+ ($dst:expr, $($arg:tt)*) => {
+ $dst.write_fmt($crate::format_args!($($arg)*))
+ };
}
/// Write formatted data into a buffer, with a newline appended.
#[cfg_attr(not(test), rustc_diagnostic_item = "writeln_macro")]
#[allow_internal_unstable(format_args_nl)]
macro_rules! writeln {
- ($dst:expr $(,)?) => (
+ ($dst:expr $(,)?) => {
$crate::write!($dst, "\n")
- );
- ($dst:expr, $($arg:tt)*) => (
+ };
+ ($dst:expr, $($arg:tt)*) => {
$dst.write_fmt($crate::format_args_nl!($($arg)*))
- );
+ };
}
/// Indicates unreachable code.
#[cfg_attr(not(test), rustc_diagnostic_item = "unimplemented_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! unimplemented {
- () => ($crate::panicking::panic("not implemented"));
- ($($arg:tt)+) => ($crate::panic!("not implemented: {}", $crate::format_args!($($arg)+)));
+ () => {
+ $crate::panicking::panic("not implemented")
+ };
+ ($($arg:tt)+) => {
+ $crate::panic!("not implemented: {}", $crate::format_args!($($arg)+))
+ };
}
/// Indicates unfinished code.
#[cfg_attr(not(test), rustc_diagnostic_item = "todo_macro")]
#[allow_internal_unstable(core_panic)]
macro_rules! todo {
- () => ($crate::panicking::panic("not yet implemented"));
- ($($arg:tt)+) => ($crate::panic!("not yet implemented: {}", $crate::format_args!($($arg)+)));
+ () => {
+ $crate::panicking::panic("not yet implemented")
+ };
+ ($($arg:tt)+) => {
+ $crate::panic!("not yet implemented: {}", $crate::format_args!($($arg)+))
+ };
}
/// Definitions of built-in macros.
#[stable(feature = "pin_raw", since = "1.38.0")]
impl<T: ?Sized> Unpin for *mut T {}
+/// A marker for types that can be dropped.
+///
+/// This should be used for `~const` bounds,
+/// as non-const bounds will always hold for every type.
+#[unstable(feature = "const_trait_impl", issue = "67792")]
+#[cfg_attr(not(bootstrap), lang = "destruct")]
+#[cfg_attr(
+ not(bootstrap),
+ rustc_on_unimplemented(message = "can't drop `{Self}`", append_const_msg,)
+)]
+pub trait Destruct {}
+
+#[cfg(bootstrap)]
+#[unstable(feature = "const_trait_impl", issue = "67792")]
+impl<T: ?Sized> const Destruct for T {}
+
/// Implementations of `Copy` for primitive types.
///
/// Implementations that cannot be described in Rust
/// A wrapper to inhibit compiler from automatically calling `T`’s destructor.
/// This wrapper is 0-cost.
///
-/// `ManuallyDrop<T>` is subject to the same layout optimizations as `T`.
-/// As a consequence, it has *no effect* on the assumptions that the compiler makes
-/// about its contents. For example, initializing a `ManuallyDrop<&mut T>`
-/// with [`mem::zeroed`] is undefined behavior.
-/// If you need to handle uninitialized data, use [`MaybeUninit<T>`] instead.
+/// `ManuallyDrop<T>` is guaranteed to have the same layout as `T`, and is subject
+/// to the same layout optimizations as `T`. As a consequence, it has *no effect*
+/// on the assumptions that the compiler makes about its contents. For example,
+/// initializing a `ManuallyDrop<&mut T>` with [`mem::zeroed`] is undefined
+/// behavior. If you need to handle uninitialized data, use [`MaybeUninit<T>`]
+/// instead.
///
/// Note that accessing the value inside a `ManuallyDrop<T>` is safe.
/// This means that a `ManuallyDrop<T>` whose content has been dropped must not
/// * '2.5E-10'
/// * '5.'
/// * '.5', or, equivalently, '0.5'
- /// * 'inf', '-inf', 'NaN'
+ /// * 'inf', '-inf', '+infinity', 'NaN'
+ ///
+ /// Note that alphabetical characters are not case-sensitive.
///
/// Leading and trailing whitespace represent an error.
///
/// # Grammar
///
- /// All strings that adhere to the following [EBNF] grammar
- /// will result in an [`Ok`] being returned:
+ /// All strings that adhere to the following [EBNF] grammar when
+ /// lowercased will result in an [`Ok`] being returned:
///
/// ```txt
- /// Float ::= Sign? ( 'inf' | 'NaN' | Number )
+ /// Float ::= Sign? ( 'inf' | 'infinity' | 'nan' | Number )
/// Number ::= ( Digit+ |
+ /// '.' Digit* |
/// Digit+ '.' Digit* |
/// Digit* '.' Digit+ ) Exp?
- /// Exp ::= [eE] Sign? Digit+
+ /// Exp ::= 'e' Sign? Digit+
/// Sign ::= [+-]
/// Digit ::= [0-9]
/// ```
let r = try_opt!(self.checked_rem(rhs));
let m = if (r > 0 && rhs < 0) || (r < 0 && rhs > 0) {
- try_opt!(r.checked_add(rhs))
+ // r + rhs cannot overflow because they have opposite signs
+ r + rhs
} else {
r
};
if m == 0 {
Some(self)
} else {
- self.checked_add(try_opt!(rhs.checked_sub(m)))
+ // rhs - m cannot overflow because m has the same sign as rhs
+ self.checked_add(rhs - m)
}
}
ascii::escape_default(self)
}
+ #[inline]
pub(crate) const fn is_utf8_char_boundary(self) -> bool {
// This is bit magic equivalent to: b < 128 || b >= 192
(self as i8) >= -0x40
uint_impl! { u16, u16, i16, NonZeroU16, 16, 65535, 4, "0xa003", "0x3a", "0x1234", "0x3412", "0x2c48",
"[0x34, 0x12]", "[0x12, 0x34]", "", "" }
widening_impl! { u16, u32, 16, unsigned }
+
+ /// Checks if the value is a Unicode surrogate code point, which is a disallowed value for [`char`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(utf16_extra)]
+ ///
+ /// let low_non_surrogate = 0xA000u16;
+ /// let low_surrogate = 0xD800u16;
+ /// let high_surrogate = 0xDC00u16;
+ /// let high_non_surrogate = 0xE000u16;
+ ///
+ /// assert!(!low_non_surrogate.is_utf16_surrogate());
+ /// assert!(low_surrogate.is_utf16_surrogate());
+ /// assert!(high_surrogate.is_utf16_surrogate());
+ /// assert!(!high_non_surrogate.is_utf16_surrogate());
+ /// ```
+ #[must_use]
+ #[unstable(feature = "utf16_extra", issue = "94919")]
+ #[rustc_const_unstable(feature = "utf16_extra_const", issue = "94919")]
+ #[inline]
+ pub const fn is_utf16_surrogate(self) -> bool {
+ matches!(self, 0xD800..=0xDFFF)
+ }
}
#[lang = "u32"]
without modifying the original"]
#[inline]
pub const fn log2(self) -> u32 {
- <$Int>::BITS - 1 - self.leading_zeros()
+ Self::BITS - 1 - self.leading_zeros()
}
/// Returns the base 10 logarithm of the number, rounded down.
NonZeroI128(i128);
NonZeroIsize(isize);
}
+
+macro_rules! nonzero_bits {
+ ( $( $Ty: ident($Int: ty); )+ ) => {
+ $(
+ impl $Ty {
+ /// The size of this non-zero integer type in bits.
+ ///
+ #[doc = concat!("This value is equal to [`", stringify!($Int), "::BITS`].")]
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(nonzero_bits)]
+ #[doc = concat!("# use std::num::", stringify!($Ty), ";")]
+ ///
+ #[doc = concat!("assert_eq!(", stringify!($Ty), "::BITS, ", stringify!($Int), "::BITS);")]
+ /// ```
+ #[unstable(feature = "nonzero_bits", issue = "94881")]
+ pub const BITS: u32 = <$Int>::BITS;
+ }
+ )+
+ }
+}
+
+nonzero_bits! {
+ NonZeroU8(u8);
+ NonZeroI8(i8);
+ NonZeroU16(u16);
+ NonZeroI16(i16);
+ NonZeroU32(u32);
+ NonZeroI32(i32);
+ NonZeroU64(u64);
+ NonZeroI64(i64);
+ NonZeroU128(u128);
+ NonZeroI128(i128);
+ NonZeroUsize(usize);
+ NonZeroIsize(isize);
+}
pub const fn checked_next_multiple_of(self, rhs: Self) -> Option<Self> {
match try_opt!(self.checked_rem(rhs)) {
0 => Some(self),
- r => self.checked_add(try_opt!(rhs.checked_sub(r)))
+ // rhs - r cannot overflow because r is smaller than rhs
+ r => self.checked_add(rhs - r)
}
}
#[doc = concat!("assert_eq!(3", stringify!($SelfT), ".wrapping_next_power_of_two(), 4);")]
#[doc = concat!("assert_eq!(", stringify!($SelfT), "::MAX.wrapping_next_power_of_two(), 0);")]
/// ```
+ #[inline]
#[unstable(feature = "wrapping_next_power_of_two", issue = "32463",
reason = "needs decision on wrapping behaviour")]
#[rustc_const_unstable(feature = "wrapping_next_power_of_two", issue = "32463")]
//! * [`Box<U>`]
//! * `&U`
//! * `&mut U`
-//! * `fn`, `extern "C" fn`
+//! * `fn`, `extern "C" fn`[^extern_fn]
//! * [`num::NonZero*`]
//! * [`ptr::NonNull<U>`]
//! * `#[repr(transparent)]` struct around one of the types in this list.
//!
+//! [^extern_fn]: this remains true for any other ABI: `extern "abi" fn` (_e.g._, `extern "system" fn`)
+//!
//! [`Box<U>`]: ../../std/boxed/struct.Box.html
//! [`num::NonZero*`]: crate::num
//! [`ptr::NonNull<U>`]: crate::ptr::NonNull
#![stable(feature = "rust1", since = "1.0.0")]
use crate::iter::{self, FromIterator, FusedIterator, TrustedLen};
+use crate::marker::Destruct;
use crate::panicking::{panic, panic_str};
use crate::pin::Pin;
use crate::{
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn unwrap_or(self, default: T) -> T
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
match self {
Some(x) => x,
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn unwrap_or_else<F>(self, f: F) -> T
where
F: ~const FnOnce() -> T,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(x) => x,
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn map<U, F>(self, f: F) -> Option<U>
where
F: ~const FnOnce(T) -> U,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(x) => Some(f(x)),
#[inline]
#[unstable(feature = "result_option_inspect", issue = "91345")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn inspect<F>(self, f: F) -> Self
where
F: ~const FnOnce(&T),
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
if let Some(ref x) = self {
f(x);
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: ~const FnOnce(T) -> U,
- F: ~const Drop,
- U: ~const Drop,
+ F: ~const Drop + ~const Destruct,
+ U: ~const Drop + ~const Destruct,
{
match self {
Some(t) => f(t),
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: ~const FnOnce() -> U,
- D: ~const Drop,
+ D: ~const Drop + ~const Destruct,
F: ~const FnOnce(T) -> U,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(t) => f(t),
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn ok_or<E>(self, err: E) -> Result<T, E>
where
- E: ~const Drop,
+ E: ~const Drop + ~const Destruct,
{
match self {
Some(v) => Ok(v),
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
where
F: ~const FnOnce() -> E,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(v) => Ok(v),
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn and<U>(self, optb: Option<U>) -> Option<U>
where
- T: ~const Drop,
- U: ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ U: ~const Drop + ~const Destruct,
{
match self {
Some(_) => optb,
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn and_then<U, F>(self, f: F) -> Option<U>
where
F: ~const FnOnce(T) -> Option<U>,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(x) => f(x),
#[inline]
#[stable(feature = "option_filter", since = "1.27.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn filter<P>(self, predicate: P) -> Self
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
P: ~const FnOnce(&T) -> bool,
- P: ~const Drop,
+ P: ~const Drop + ~const Destruct,
{
if let Some(x) = self {
if predicate(&x) {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn or(self, optb: Option<T>) -> Option<T>
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
match self {
Some(x) => Some(x),
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn or_else<F>(self, f: F) -> Option<T>
where
F: ~const FnOnce() -> Option<T>,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
match self {
Some(x) => Some(x),
#[inline]
#[stable(feature = "option_xor", since = "1.37.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn xor(self, optb: Option<T>) -> Option<T>
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
match (self, optb) {
(Some(a), None) => Some(a),
#[inline]
#[stable(feature = "option_insert", since = "1.53.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn insert(&mut self, value: T) -> &mut T
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
*self = Some(value);
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn get_or_insert(&mut self, value: T) -> &mut T
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
if let None = *self {
*self = Some(value);
#[inline]
#[stable(feature = "option_entry", since = "1.20.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn get_or_insert_with<F>(&mut self, f: F) -> &mut T
where
F: ~const FnOnce() -> T,
- F: ~const Drop,
+ F: ~const Drop + ~const Destruct,
{
if let None = *self {
// the compiler isn't smart enough to know that we are not dropping a `T`
/// ```
#[stable(feature = "option_zip_option", since = "1.46.0")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
where
- T: ~const Drop,
- U: ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ U: ~const Drop + ~const Destruct,
{
match (self, other) {
(Some(a), Some(b)) => Some((a, b)),
/// ```
#[unstable(feature = "option_zip", issue = "70086")]
#[rustc_const_unstable(feature = "const_option_ext", issue = "91930")]
+ #[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
pub const fn zip_with<U, F, R>(self, other: Option<U>, f: F) -> Option<R>
where
F: ~const FnOnce(T, U) -> R,
- F: ~const Drop,
- T: ~const Drop,
- U: ~const Drop,
+ F: ~const Drop + ~const Destruct,
+ T: ~const Drop + ~const Destruct,
+ U: ~const Drop + ~const Destruct,
{
match (self, other) {
(Some(a), Some(b)) => Some(f(a, b)),
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
impl<T> const Clone for Option<T>
where
- T: ~const Clone + ~const Drop,
+ T: ~const Clone + ~const Drop + ~const Destruct,
{
#[inline]
fn clone(&self) -> Self {
$crate::panicking::panic("internal error: entered unreachable code")
),
// Use of `unreachable_display` for non_fmt_panic lint.
- // NOTE: the message ("internal error ...") is embeded directly in unreachable_display
+ // NOTE: the message ("internal error ...") is embedded directly in unreachable_display
($msg:expr $(,)?) => (
$crate::panicking::unreachable_display(&$msg)
),
self as _
}
+ /// Use the pointer value in a new pointer of another type.
+ ///
+ /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// will ignore the pointer part, whereas for (thin) pointers to sized
+ /// types, this has the same effect as a simple cast.
+ ///
+ /// The resulting pointer will have provenance of `self`, i.e., for a fat
+ /// pointer, this operation is semantically the same as creating a new
+ /// fat pointer with the data pointer value of `self` but the metadata of
+ /// `val`.
+ ///
+ /// # Examples
+ ///
+ /// This function is primarily useful for allowing byte-wise pointer
+ /// arithmetic on potentially fat pointers:
+ ///
+ /// ```
+ /// #![feature(set_ptr_value)]
+ /// # use core::fmt::Debug;
+ /// let arr: [i32; 3] = [1, 2, 3];
+ /// let mut ptr = arr.as_ptr() as *const dyn Debug;
+ /// let thin = ptr as *const u8;
+ /// unsafe {
+ /// ptr = thin.add(8).with_metadata_of(ptr);
+ /// # assert_eq!(*(ptr as *const i32), 3);
+ /// println!("{:?}", &*ptr); // will print "3"
+ /// }
+ /// ```
+ #[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline]
+ pub fn with_metadata_of<U>(self, mut val: *const U) -> *const U
+ where
+ U: ?Sized,
+ {
+ let target = &mut val as *mut *const U as *mut *const u8;
+ // SAFETY: In case of a thin pointer, this operation is identical
+ // to a simple assignment. In case of a fat pointer, with the current
+ // fat pointer layout implementation, the first field of such a
+ // pointer is always the data pointer, which is likewise assigned.
+ unsafe { *target = self as *const u8 };
+ val
+ }
+
/// Changes constness without changing the type.
///
/// This is a bit safer than `as` because it wouldn't silently change the type if the code is
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const unsafe fn offset(self, count: isize) -> *const T
where
/// ```
#[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const fn wrapping_offset(self, count: isize) -> *const T
where
///
/// [`guaranteed_ne`]: #method.guaranteed_ne
///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change depending on the compiler version and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
/// for performance optimizations where spurious `false` return values by this function do not
/// affect the outcome, but just the performance.
///
/// [`guaranteed_eq`]: #method.guaranteed_eq
///
- /// The return value may change depending on the compiler version and unsafe code might not
+ /// The return value may change depending on the compiler version and unsafe code must not
/// rely on the result of this function for soundness. It is suggested to only use this function
/// for performance optimizations where spurious `false` return values by this function do not
/// affect the outcome, but just the performance.
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const unsafe fn add(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline]
pub const unsafe fn sub(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const fn wrapping_add(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline]
pub const fn wrapping_sub(self, count: usize) -> Self
where
self.wrapping_offset((count as isize).wrapping_neg())
}
- /// Sets the pointer value to `ptr`.
- ///
- /// In case `self` is a (fat) pointer to an unsized type, this operation
- /// will only affect the pointer part, whereas for (thin) pointers to
- /// sized types, this has the same effect as a simple assignment.
- ///
- /// The resulting pointer will have provenance of `val`, i.e., for a fat
- /// pointer, this operation is semantically the same as creating a new
- /// fat pointer with the data pointer value of `val` but the metadata of
- /// `self`.
- ///
- /// # Examples
- ///
- /// This function is primarily useful for allowing byte-wise pointer
- /// arithmetic on potentially fat pointers:
- ///
- /// ```
- /// #![feature(set_ptr_value)]
- /// # use core::fmt::Debug;
- /// let arr: [i32; 3] = [1, 2, 3];
- /// let mut ptr = arr.as_ptr() as *const dyn Debug;
- /// let thin = ptr as *const u8;
- /// unsafe {
- /// ptr = ptr.set_ptr_value(thin.add(8));
- /// # assert_eq!(*(ptr as *const i32), 3);
- /// println!("{:?}", &*ptr); // will print "3"
- /// }
- /// ```
- #[unstable(feature = "set_ptr_value", issue = "75091")]
- #[must_use = "returns a new pointer rather than modifying its argument"]
- #[inline]
- pub fn set_ptr_value(mut self, val: *const u8) -> Self {
- let thin = &mut self as *mut *const T as *mut *const u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *thin = val };
- self
- }
-
/// Reads the value from `self` without moving it. This leaves the
/// memory in `self` unchanged.
///
self as _
}
+ /// Use the pointer value in a new pointer of another type.
+ ///
+ /// In case `val` is a (fat) pointer to an unsized type, this operation
+ /// will ignore the pointer part, whereas for (thin) pointers to sized
+ /// types, this has the same effect as a simple cast.
+ ///
+ /// The resulting pointer will have provenance of `self`, i.e., for a fat
+ /// pointer, this operation is semantically the same as creating a new
+ /// fat pointer with the data pointer value of `self` but the metadata of
+ /// `val`.
+ ///
+ /// # Examples
+ ///
+ /// This function is primarily useful for allowing byte-wise pointer
+ /// arithmetic on potentially fat pointers:
+ ///
+ /// ```
+ /// #![feature(set_ptr_value)]
+ /// # use core::fmt::Debug;
+ /// let mut arr: [i32; 3] = [1, 2, 3];
+ /// let mut ptr = arr.as_mut_ptr() as *mut dyn Debug;
+ /// let thin = ptr as *mut u8;
+ /// unsafe {
+ /// ptr = thin.add(8).with_metadata_of(ptr);
+ /// # assert_eq!(*(ptr as *mut i32), 3);
+ /// println!("{:?}", &*ptr); // will print "3"
+ /// }
+ /// ```
+ #[unstable(feature = "set_ptr_value", issue = "75091")]
+ #[must_use = "returns a new pointer rather than modifying its argument"]
+ #[inline]
+ pub fn with_metadata_of<U>(self, mut val: *mut U) -> *mut U
+ where
+ U: ?Sized,
+ {
+ let target = &mut val as *mut *mut U as *mut *mut u8;
+ // SAFETY: In case of a thin pointer, this operation is identical
+ // to a simple assignment. In case of a fat pointer, with the current
+ // fat pointer layout implementation, the first field of such a
+ // pointer is always the data pointer, which is likewise assigned.
+ unsafe { *target = self as *mut u8 };
+ val
+ }
+
/// Changes constness without changing the type.
///
/// This is a bit safer than `as` because it wouldn't silently change the type if the code is
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const unsafe fn offset(self, count: isize) -> *mut T
where
/// ```
#[stable(feature = "ptr_wrapping_offset", since = "1.16.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const fn wrapping_offset(self, count: isize) -> *mut T
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const unsafe fn add(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline]
pub const unsafe fn sub(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline(always)]
pub const fn wrapping_add(self, count: usize) -> Self
where
/// ```
#[stable(feature = "pointer_methods", since = "1.26.0")]
#[must_use = "returns a new pointer rather than modifying its argument"]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline]
pub const fn wrapping_sub(self, count: usize) -> Self
where
self.wrapping_offset((count as isize).wrapping_neg())
}
- /// Sets the pointer value to `ptr`.
- ///
- /// In case `self` is a (fat) pointer to an unsized type, this operation
- /// will only affect the pointer part, whereas for (thin) pointers to
- /// sized types, this has the same effect as a simple assignment.
- ///
- /// The resulting pointer will have provenance of `val`, i.e., for a fat
- /// pointer, this operation is semantically the same as creating a new
- /// fat pointer with the data pointer value of `val` but the metadata of
- /// `self`.
- ///
- /// # Examples
- ///
- /// This function is primarily useful for allowing byte-wise pointer
- /// arithmetic on potentially fat pointers:
- ///
- /// ```
- /// #![feature(set_ptr_value)]
- /// # use core::fmt::Debug;
- /// let mut arr: [i32; 3] = [1, 2, 3];
- /// let mut ptr = arr.as_mut_ptr() as *mut dyn Debug;
- /// let thin = ptr as *mut u8;
- /// unsafe {
- /// ptr = ptr.set_ptr_value(thin.add(8));
- /// # assert_eq!(*(ptr as *mut i32), 3);
- /// println!("{:?}", &*ptr); // will print "3"
- /// }
- /// ```
- #[unstable(feature = "set_ptr_value", issue = "75091")]
- #[must_use = "returns a new pointer rather than modifying its argument"]
- #[inline]
- pub fn set_ptr_value(mut self, val: *mut u8) -> Self {
- let thin = &mut self as *mut *mut T as *mut *mut u8;
- // SAFETY: In case of a thin pointer, this operations is identical
- // to a simple assignment. In case of a fat pointer, with the current
- // fat pointer layout implementation, the first field of such a
- // pointer is always the data pointer, which is likewise assigned.
- unsafe { *thin = val };
- self
- }
-
/// Reads the value from `self` without moving it. This leaves the
/// memory in `self` unchanged.
///
#![stable(feature = "rust1", since = "1.0.0")]
use crate::iter::{self, FromIterator, FusedIterator, TrustedLen};
+use crate::marker::Destruct;
use crate::ops::{self, ControlFlow, Deref, DerefMut};
use crate::{convert, fmt, hint};
#[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
pub const fn ok(self) -> Option<T>
where
- E: ~const Drop,
+ E: ~const Drop + ~const Destruct,
{
match self {
Ok(x) => Some(x),
#[rustc_const_unstable(feature = "const_result_drop", issue = "92384")]
pub const fn err(self) -> Option<E>
where
- T: ~const Drop,
+ T: ~const Drop + ~const Destruct,
{
match self {
// FIXME: ~const Drop doesn't quite work right yet
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
where
- T: ~const Drop,
- U: ~const Drop,
- E: ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ U: ~const Drop + ~const Destruct,
+ E: ~const Drop + ~const Destruct,
{
match self {
// FIXME: ~const Drop doesn't quite work right yet
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
where
- T: ~const Drop,
- E: ~const Drop,
- F: ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ E: ~const Drop + ~const Destruct,
+ F: ~const Drop + ~const Destruct,
{
match self {
Ok(v) => Ok(v),
#[stable(feature = "rust1", since = "1.0.0")]
pub const fn unwrap_or(self, default: T) -> T
where
- T: ~const Drop,
- E: ~const Drop,
+ T: ~const Drop + ~const Destruct,
+ E: ~const Drop + ~const Destruct,
{
match self {
Ok(t) => t,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_clone", issue = "91805")]
+#[cfg_attr(not(bootstrap), allow(drop_bounds))] // FIXME remove `~const Drop` and this attr when bumping
impl<T, E> const Clone for Result<T, E>
where
- T: ~const Clone + ~const Drop,
- E: ~const Clone + ~const Drop,
+ T: ~const Clone + ~const Drop + ~const Destruct,
+ E: ~const Clone + ~const Drop + ~const Destruct,
{
#[inline]
fn clone(&self) -> Self {
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
///
/// use std::slice;
///
-/// slice::range(2..1, ..3);
+/// let _ = slice::range(2..1, ..3);
/// ```
///
/// ```should_panic
///
/// use std::slice;
///
-/// slice::range(1..4, ..3);
+/// let _ = slice::range(1..4, ..3);
/// ```
///
/// ```should_panic
///
/// use std::slice;
///
-/// slice::range(1..=usize::MAX, ..3);
+/// let _ = slice::range(1..=usize::MAX, ..3);
/// ```
///
/// [`Index::index`]: ops::Index::index
#[track_caller]
#[unstable(feature = "slice_range", issue = "76393")]
+#[must_use]
pub fn range<R>(range: R, bounds: ops::RangeTo<usize>) -> ops::Range<usize>
where
R: ops::RangeBounds<usize>,
/// [`iter`]: slice::iter
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Iter<'a, T: 'a> {
ptr: NonNull<T>,
end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
/// [`iter_mut`]: slice::iter_mut
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct IterMut<'a, T: 'a> {
ptr: NonNull<T>,
end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
/// [`split`]: slice::split
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Split<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`split_inclusive`]: slice::split_inclusive
/// [slices]: slice
#[stable(feature = "split_inclusive", since = "1.51.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct SplitInclusive<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`split_mut`]: slice::split_mut
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct SplitMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`split_inclusive_mut`]: slice::split_inclusive_mut
/// [slices]: slice
#[stable(feature = "split_inclusive", since = "1.51.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct SplitInclusiveMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`rsplit`]: slice::rsplit
/// [slices]: slice
#[stable(feature = "slice_rsplit", since = "1.27.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RSplit<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`rsplit_mut`]: slice::rsplit_mut
/// [slices]: slice
#[stable(feature = "slice_rsplit", since = "1.27.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RSplitMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`splitn`]: slice::splitn
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct SplitN<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`rsplitn`]: slice::rsplitn
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RSplitN<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`splitn_mut`]: slice::splitn_mut
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct SplitNMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [`rsplitn_mut`]: slice::rsplitn_mut
/// [slices]: slice
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RSplitNMut<'a, T: 'a, P>
where
P: FnMut(&T) -> bool,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Windows<'a, T: 'a> {
v: &'a [T],
size: NonZeroUsize,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct Chunks<'a, T: 'a> {
v: &'a [T],
chunk_size: usize,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ChunksMut<'a, T: 'a> {
v: &'a mut [T],
chunk_size: usize,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ChunksExact<'a, T: 'a> {
v: &'a [T],
rem: &'a [T],
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "chunks_exact", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ChunksExactMut<'a, T: 'a> {
v: &'a mut [T],
rem: &'a mut [T],
/// [slices]: slice
#[derive(Debug, Clone, Copy)]
#[unstable(feature = "array_windows", issue = "75027")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ArrayWindows<'a, T: 'a, const N: usize> {
slice_head: *const T,
num: usize,
/// [slices]: slice
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ArrayChunks<'a, T: 'a, const N: usize> {
iter: Iter<'a, [T; N]>,
rem: &'a [T],
/// [slices]: slice
#[derive(Debug)]
#[unstable(feature = "array_chunks", issue = "74985")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct ArrayChunksMut<'a, T: 'a, const N: usize> {
iter: IterMut<'a, [T; N]>,
rem: &'a mut [T],
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RChunks<'a, T: 'a> {
v: &'a [T],
chunk_size: usize,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RChunksMut<'a, T: 'a> {
v: &'a mut [T],
chunk_size: usize,
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RChunksExact<'a, T: 'a> {
v: &'a [T],
rem: &'a [T],
/// [slices]: slice
#[derive(Debug)]
#[stable(feature = "rchunks", since = "1.31.0")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct RChunksExactMut<'a, T: 'a> {
v: &'a mut [T],
rem: &'a mut [T],
/// [`group_by`]: slice::group_by
/// [slices]: slice
#[unstable(feature = "slice_group_by", issue = "80552")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct GroupBy<'a, T: 'a, P> {
slice: &'a [T],
predicate: P,
/// [`group_by_mut`]: slice::group_by_mut
/// [slices]: slice
#[unstable(feature = "slice_group_by", issue = "80552")]
+#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct GroupByMut<'a, T: 'a, P> {
slice: &'a mut [T],
predicate: P,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_len", since = "1.39.0")]
#[inline]
+ #[must_use]
// SAFETY: const sound because we transmute out the length field as a usize (which it must be)
pub const fn len(&self) -> usize {
// FIXME: Replace with `crate::ptr::metadata(self)` when that is const-stable.
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_is_empty", since = "1.39.0")]
#[inline]
+ #[must_use]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
+ #[must_use]
pub const fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
+ #[must_use]
pub const fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
+ #[must_use]
pub const fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
+ #[must_use]
pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
+ #[must_use]
pub const fn split_last(&self) -> Option<(&T, &[T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
#[stable(feature = "slice_splits", since = "1.5.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
+ #[must_use]
pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_first_last_not_mut", since = "1.56.0")]
#[inline]
+ #[must_use]
pub const fn last(&self) -> Option<&T> {
if let [.., last] = self { Some(last) } else { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_first_last", issue = "83570")]
#[inline]
+ #[must_use]
pub const fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
+ #[must_use]
pub const fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: ~const SliceIndex<Self>,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
+ #[must_use]
pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: ~const SliceIndex<Self>,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
+ #[must_use]
pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: ~const SliceIndex<Self>,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_index", issue = "none")]
#[inline]
+ #[must_use]
pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: ~const SliceIndex<Self>,
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_stable(feature = "const_slice_as_ptr", since = "1.32.0")]
#[inline]
+ #[must_use]
pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
}
/// assert_eq!(x, &[3, 4, 6]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_allow_const_fn_unstable(const_mut_refs)]
#[inline]
+ #[must_use]
pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
}
///
/// [`as_ptr`]: slice::as_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
#[inline]
+ #[must_use]
pub const fn as_ptr_range(&self) -> Range<*const T> {
let start = self.as_ptr();
// SAFETY: The `add` here is safe, because:
///
/// [`as_mut_ptr`]: slice::as_mut_ptr
#[stable(feature = "slice_ptr_range", since = "1.48.0")]
- #[rustc_const_unstable(feature = "const_ptr_offset", issue = "71499")]
+ #[rustc_const_stable(feature = "const_ptr_offset", since = "1.61.0")]
+ #[rustc_allow_const_fn_unstable(const_mut_refs)]
#[inline]
+ #[must_use]
pub const fn as_mut_ptr_range(&mut self) -> Range<*mut T> {
let start = self.as_mut_ptr();
// SAFETY: See as_ptr_range() above for why `add` here is safe.
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
debug_assert_ne!(N, 0);
debug_assert_eq!(self.len() % N, 0);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
assert_ne!(N, 0);
let len = self.len() / N;
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub fn as_rchunks<const N: usize>(&self) -> (&[T], &[[T; N]]) {
assert_ne!(N, 0);
let len = self.len() / N;
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub unsafe fn as_chunks_unchecked_mut<const N: usize>(&mut self) -> &mut [[T; N]] {
debug_assert_ne!(N, 0);
debug_assert_eq!(self.len() % N, 0);
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub fn as_chunks_mut<const N: usize>(&mut self) -> (&mut [[T; N]], &mut [T]) {
assert_ne!(N, 0);
let len = self.len() / N;
/// ```
#[unstable(feature = "slice_as_chunks", issue = "74985")]
#[inline]
+ #[must_use]
pub fn as_rchunks_mut<const N: usize>(&mut self) -> (&mut [T], &mut [[T; N]]) {
assert_ne!(N, 0);
let len = self.len() / N;
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
#[track_caller]
+ #[must_use]
pub fn split_at(&self, mid: usize) -> (&[T], &[T]) {
assert!(mid <= self.len());
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
#[track_caller]
+ #[must_use]
pub fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
assert!(mid <= self.len());
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
+ #[must_use]
pub unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
unsafe { (self.get_unchecked(..mid), self.get_unchecked(mid..)) }
/// ```
#[unstable(feature = "slice_split_at_unchecked", reason = "new API", issue = "76014")]
#[inline]
+ #[must_use]
pub unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
#[track_caller]
+ #[must_use]
pub fn split_array_ref<const N: usize>(&self) -> (&[T; N], &[T]) {
let (a, b) = self.split_at(N);
// SAFETY: a points to [T; N]? Yes it's [T] of length N (checked by split_at)
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
#[track_caller]
+ #[must_use]
pub fn split_array_mut<const N: usize>(&mut self) -> (&mut [T; N], &mut [T]) {
let (a, b) = self.split_at_mut(N);
// SAFETY: a points to [T; N]? Yes it's [T] of length N (checked by split_at_mut)
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
+ #[must_use]
pub fn rsplit_array_ref<const N: usize>(&self) -> (&[T], &[T; N]) {
assert!(N <= self.len());
let (a, b) = self.split_at(self.len() - N);
/// ```
#[unstable(feature = "split_array", reason = "new API", issue = "90091")]
#[inline]
+ #[must_use]
pub fn rsplit_array_mut<const N: usize>(&mut self) -> (&mut [T], &mut [T; N]) {
assert!(N <= self.len());
let (a, b) = self.split_at_mut(self.len() - N);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
+ #[must_use]
pub fn contains(&self, x: &T) -> bool
where
T: PartialEq,
/// assert!(v.starts_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
pub fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq,
/// assert!(v.ends_with(&[]));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
pub fn ends_with(&self, needle: &[T]) -> bool
where
T: PartialEq,
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
+ #[must_use]
pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
/// }
/// ```
#[stable(feature = "slice_align_to", since = "1.30.0")]
+ #[must_use]
pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated,
if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
/// suffix.iter().copied().sum(),
/// ]);
/// let sums = middle.iter().copied().fold(sums, f32x4::add);
- /// sums.horizontal_sum()
+ /// sums.reduce_sum()
/// }
///
/// let numbers: Vec<f32> = (1..101).map(|x| x as _).collect();
/// assert_eq!(basic_simd_sum(&numbers[1..99]), 4949.0);
/// ```
#[unstable(feature = "portable_simd", issue = "86656")]
+ #[must_use]
pub fn as_simd<const LANES: usize>(&self) -> (&[T], &[Simd<T, LANES>], &[T])
where
Simd<T, LANES>: AsRef<[T; LANES]>,
/// be lifted in a way that would make it possible to see panics from this
/// method for something like `LANES == 3`.
#[unstable(feature = "portable_simd", issue = "86656")]
+ #[must_use]
pub fn as_simd_mut<const LANES: usize>(&mut self) -> (&mut [T], &mut [Simd<T, LANES>], &mut [T])
where
Simd<T, LANES>: AsMut<[T; LANES]>,
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
pub fn is_sorted(&self) -> bool
where
T: PartialOrd,
///
/// [`is_sorted`]: slice::is_sorted
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
pub fn is_sorted_by<F>(&self, mut compare: F) -> bool
where
F: FnMut(&T, &T) -> Option<Ordering>,
/// ```
#[inline]
#[unstable(feature = "is_sorted", reason = "new API", issue = "53485")]
+ #[must_use]
pub fn is_sorted_by_key<F, K>(&self, f: F) -> bool
where
F: FnMut(&T) -> K,
/// assert!(v[i..].iter().all(|&x| !(x < 5)));
/// ```
#[stable(feature = "partition_point", since = "1.52.0")]
+ #[must_use]
pub fn partition_point<P>(&self, mut pred: P) -> usize
where
P: FnMut(&T) -> bool,
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+#[must_use]
pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
debug_check_data_len(data, len);
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_slice_from_raw_parts", issue = "67456")]
+#[must_use]
pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
debug_check_data_len(data as _, len);
/// Converts a reference to T into a slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_slice_from_ref", issue = "90206")]
+#[must_use]
pub const fn from_ref<T>(s: &T) -> &[T] {
array::from_ref(s)
}
/// Converts a reference to T into a slice of length 1 (without copying).
#[stable(feature = "from_ref", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_slice_from_ref", issue = "90206")]
+#[must_use]
pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
array::from_mut(s)
}
#[must_use = "this returns the split string as an iterator, \
without modifying the original"]
#[stable(feature = "split_whitespace", since = "1.1.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_split_whitespace")]
#[inline]
pub fn split_whitespace(&self) -> SplitWhitespace<'_> {
SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) }
#[must_use = "this returns the trimmed string as a slice, \
without modifying the original"]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim")]
pub fn trim(&self) -> &str {
self.trim_matches(|c: char| c.is_whitespace())
}
#[must_use = "this returns the trimmed string as a new slice, \
without modifying the original"]
#[stable(feature = "trim_direction", since = "1.30.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_start")]
pub fn trim_start(&self) -> &str {
self.trim_start_matches(|c: char| c.is_whitespace())
}
#[must_use = "this returns the trimmed string as a new slice, \
without modifying the original"]
#[stable(feature = "trim_direction", since = "1.30.0")]
+ #[cfg_attr(not(test), rustc_diagnostic_item = "str_trim_end")]
pub fn trim_end(&self) -> &str {
self.trim_end_matches(|c: char| c.is_whitespace())
}
unsafe { &mut *(v as *mut bool as *mut Self) }
}
+ /// Get non-atomic access to a `&mut [AtomicBool]` slice.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const, scoped_threads)]
+ /// use std::sync::atomic::{AtomicBool, Ordering};
+ ///
+ /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
+ ///
+ /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
+ /// assert_eq!(view, [false; 10]);
+ /// view[..5].copy_from_slice(&[true; 5]);
+ ///
+ /// std::thread::scope(|s| {
+ /// for t in &some_bools[..5] {
+ /// s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
+ /// }
+ ///
+ /// for f in &some_bools[5..] {
+ /// s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
+ /// }
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
+ }
+
/// Get atomic access to a `&mut [bool]` slice.
///
/// # Examples
unsafe { &mut *(v as *mut *mut T as *mut Self) }
}
+ /// Get non-atomic access to a `&mut [AtomicPtr]` slice.
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const, scoped_threads)]
+ /// use std::ptr::null_mut;
+ /// use std::sync::atomic::{AtomicPtr, Ordering};
+ ///
+ /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
+ ///
+ /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
+ /// assert_eq!(view, [null_mut::<String>(); 10]);
+ /// view
+ /// .iter_mut()
+ /// .enumerate()
+ /// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
+ ///
+ /// std::thread::scope(|s| {
+ /// for ptr in &some_ptrs {
+ /// s.spawn(move || {
+ /// let ptr = ptr.load(Ordering::Relaxed);
+ /// assert!(!ptr.is_null());
+ ///
+ /// let name = unsafe { Box::from_raw(ptr) };
+ /// println!("Hello, {name}!");
+ /// });
+ /// }
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
+ }
+
/// Get atomic access to a slice of pointers.
///
/// # Examples
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
+ #[doc = concat!("Get non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice")]
+ ///
+ /// This is safe because the mutable reference guarantees that no other threads are
+ /// concurrently accessing the atomic data.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(atomic_from_mut, inline_const, scoped_threads)]
+ #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
+ ///
+ #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
+ ///
+ #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
+ /// assert_eq!(view, [0; 10]);
+ /// view
+ /// .iter_mut()
+ /// .enumerate()
+ /// .for_each(|(idx, int)| *int = idx as _);
+ ///
+ /// std::thread::scope(|s| {
+ /// some_ints
+ /// .iter()
+ /// .enumerate()
+ /// .for_each(|(idx, int)| {
+ /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
+ /// })
+ /// });
+ /// ```
+ #[inline]
+ #[unstable(feature = "atomic_from_mut", issue = "76314")]
+ pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
+ // SAFETY: the mutable reference guarantees unique ownership.
+ unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
+ }
+
#[doc = concat!("Get atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
///
/// # Examples
assert_eq!(string('~'), "~");
assert_eq!(string('é'), "é");
assert_eq!(string('文'), "文");
- assert_eq!(string('\x00'), "\\u{0}");
+ assert_eq!(string('\x00'), "\\0");
assert_eq!(string('\x1f'), "\\u{1f}");
assert_eq!(string('\x7f'), "\\u{7f}");
assert_eq!(string('\u{80}'), "\\u{80}");
assert_eq!(THEN, Greater);
}
+#[test]
+fn ordering_structural_eq() {
+ // test that consts of type `Ordering` are usable in patterns
+
+ const ORDERING: Ordering = Greater;
+
+ const REVERSE: Ordering = ORDERING.reverse();
+ match Ordering::Less {
+ REVERSE => {}
+ _ => unreachable!(),
+ };
+}
+
#[test]
fn cmp_default() {
// Test default methods in PartialOrd and PartialEq
#[test]
fn test_iterator_step_by_nth_overflow() {
- #[cfg(target_pointer_width = "8")]
- type Bigger = u16;
#[cfg(target_pointer_width = "16")]
type Bigger = u32;
#[cfg(target_pointer_width = "32")]
assert!(a == b);
}
+#[test]
+fn iter_try_collect_uses_try_fold_not_next() {
+ // This makes sure it picks up optimizations, and doesn't use the `&mut I` impl.
+ struct PanicOnNext<I>(I);
+ impl<I: Iterator> Iterator for PanicOnNext<I> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<Self::Item> {
+ panic!("Iterator::next should not be called!")
+ }
+ fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: std::ops::Try<Output = B>,
+ {
+ self.0.try_fold(init, f)
+ }
+ }
+
+ let it = (0..10).map(Some);
+ let _ = PanicOnNext(it).try_collect::<Vec<_>>();
+ // validation is just that it didn't panic.
+}
+
// just tests by whether or not this compiles
fn _empty_impl_all_auto_traits<T>() {
use std::panic::{RefUnwindSafe, UnwindSafe};
#![feature(const_ptr_as_ref)]
#![feature(const_ptr_read)]
#![feature(const_ptr_write)]
-#![feature(const_ptr_offset)]
#![feature(const_trait_impl)]
#![feature(const_likely)]
+#![feature(core_ffi_c)]
#![feature(core_intrinsics)]
#![feature(core_private_bignum)]
#![feature(core_private_diy_float)]
}
#[test]
-#[allow(warnings)]
-// Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the
-// ABI, or even point to an actual executable code, because the function itself is never invoked.
-#[no_mangle]
+#[cfg(unix)] // printf may not be available on other platforms
+#[allow(deprecated)] // For SipHasher
pub fn test_variadic_fnptr() {
+ use core::ffi;
use core::hash::{Hash, SipHasher};
extern "C" {
- fn test_variadic_fnptr(_: u64, ...) -> f64;
+ // This needs to use the correct function signature even though it isn't called as some
+ // codegen backends make it UB to declare a function with multiple conflicting signatures
+ // (like LLVM) while others straight up return an error (like Cranelift).
+ fn printf(_: *const ffi::c_char, ...) -> ffi::c_int;
}
- let p: unsafe extern "C" fn(u64, ...) -> f64 = test_variadic_fnptr;
+ let p: unsafe extern "C" fn(*const ffi::c_char, ...) -> ffi::c_int = printf;
let q = p.clone();
assert_eq!(p, q);
assert!(!(p < q));
fn slice_split_array_ref_out_of_bounds() {
let v = &[1, 2, 3, 4, 5, 6][..];
- v.split_array_ref::<7>();
+ let _ = v.split_array_ref::<7>();
}
#[should_panic]
fn slice_split_array_mut_out_of_bounds() {
let v = &mut [1, 2, 3, 4, 5, 6][..];
- v.split_array_mut::<7>();
+ let _ = v.split_array_mut::<7>();
}
#[should_panic]
fn slice_rsplit_array_ref_out_of_bounds() {
let v = &[1, 2, 3, 4, 5, 6][..];
- v.rsplit_array_ref::<7>();
+ let _ = v.rsplit_array_ref::<7>();
}
#[should_panic]
fn slice_rsplit_array_mut_out_of_bounds() {
let v = &mut [1, 2, 3, 4, 5, 6][..];
- v.rsplit_array_mut::<7>();
+ let _ = v.rsplit_array_mut::<7>();
}
macro_rules! take_tests {
* **Vertical:** When an operation is "vertical", each lane processes individually without regard to the other lanes in the same vector. For example, a "vertical add" between two vectors would add lane 0 in `a` with lane 0 in `b`, with the total in lane 0 of `out`, and then the same thing for lanes 1, 2, etc. Most SIMD operations are vertical operations, so if your problem is a vertical problem then you can probably solve it with SIMD.
-* **Horizontal:** When an operation is "horizontal", the lanes within a single vector interact in some way. A "horizontal add" might add up lane 0 of `a` with lane 1 of `a`, with the total in lane 0 of `out`.
+* **Reducing/Reduce:** When an operation is "reducing" (functions named `reduce_*`), the lanes within a single vector are merged using some operation such as addition, returning the merged value as a scalar. For instance, a reducing add would return the sum of all the lanes' values.
* **Target Feature:** Rust calls a CPU architecture extension a `target_feature`. Proper SIMD requires various CPU extensions to be enabled (details below). Don't confuse this with `feature`, which is a Cargo crate concept.
However, this is not the same as alignment. Computer architectures generally prefer aligned accesses, especially when moving data between memory and vector registers, and while some support specialized operations that can bend the rules to help with this, unaligned access is still typically slow, or even undefined behavior. In addition, different architectures can require different alignments when interacting with their native SIMD types. For this reason, any `#[repr(simd)]` type has a non-portable alignment. If it is necessary to directly interact with the alignment of these types, it should be via [`mem::align_of`].
[`mem::transmute`]: https://doc.rust-lang.org/core/mem/fn.transmute.html
-[`mem::align_of`]: https://doc.rust-lang.org/core/mem/fn.align_of.html
\ No newline at end of file
+[`mem::align_of`]: https://doc.rust-lang.org/core/mem/fn.align_of.html
license = "MIT OR Apache-2.0"
[features]
-default = ["std", "generic_const_exprs"]
+default = []
std = []
generic_const_exprs = []
let det = det.rotate_lanes_right::<2>() + det;
let det = det.reverse().rotate_lanes_right::<2>() + det;
- if det.horizontal_sum() == 0. {
+ if det.reduce_sum() == 0. {
return None;
}
// calculate the reciprocal
let mut e = 0.;
for i in 0..N_BODIES {
let bi = &bodies[i];
- e += bi.mass * (bi.v * bi.v).horizontal_sum() * 0.5;
+ e += bi.mass * (bi.v * bi.v).reduce_sum() * 0.5;
for bj in bodies.iter().take(N_BODIES).skip(i + 1) {
let dx = bi.x - bj.x;
- e -= bi.mass * bj.mass / (dx * dx).horizontal_sum().sqrt()
+ e -= bi.mass * bj.mass / (dx * dx).reduce_sum().sqrt()
}
}
e
let mut mag = [0.0; N];
for i in (0..N).step_by(2) {
let d2s = f64x2::from_array([
- (r[i] * r[i]).horizontal_sum(),
- (r[i + 1] * r[i + 1]).horizontal_sum(),
+ (r[i] * r[i]).reduce_sum(),
+ (r[i + 1] * r[i + 1]).reduce_sum(),
]);
let dmags = f64x2::splat(dt) / (d2s * d2s.sqrt());
mag[i] = dmags[0];
sum += b / a;
j += 2
}
- *out = sum.horizontal_sum();
+ *out = sum.reduce_sum();
}
}
sum += b / a;
j += 2
}
- *out = sum.horizontal_sum();
+ *out = sum.reduce_sum();
}
}
unsafe { Mask::from_int_unchecked(intrinsics::simd_ge(self, other)) }
}
}
+
+macro_rules! impl_ord_methods_vector {
+ { $type:ty } => {
+ impl<const LANES: usize> Simd<$type, LANES>
+ where
+ LaneCount<LANES>: SupportedLaneCount,
+ {
+ /// Returns the lane-wise minimum with `other`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ #[inline]
+ pub fn min(self, other: Self) -> Self {
+ self.lanes_gt(other).select(other, self)
+ }
+
+ /// Returns the lane-wise maximum with `other`.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ #[inline]
+ pub fn max(self, other: Self) -> Self {
+ self.lanes_lt(other).select(other, self)
+ }
+
+ /// Restrict each lane to a certain interval.
+ ///
+ /// For each lane, returns `max` if `self` is greater than `max`, and `min` if `self` is
+ /// less than `min`. Otherwise returns `self`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `min > max` on any lane.
+ #[must_use = "method returns a new vector and does not mutate the original value"]
+ #[inline]
+ pub fn clamp(self, min: Self, max: Self) -> Self {
+ assert!(
+ min.lanes_le(max).all(),
+ "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+ );
+ self.max(min).min(max)
+ }
+ }
+ }
+}
+
+impl_ord_methods_vector!(i8);
+impl_ord_methods_vector!(i16);
+impl_ord_methods_vector!(i32);
+impl_ord_methods_vector!(i64);
+impl_ord_methods_vector!(isize);
+impl_ord_methods_vector!(u8);
+impl_ord_methods_vector!(u16);
+impl_ord_methods_vector!(u32);
+impl_ord_methods_vector!(u64);
+impl_ord_methods_vector!(usize);
//!
//! Unless stated otherwise, all intrinsics for binary operations require SIMD vectors of equal types and lengths.
-
// These intrinsics aren't linked directly from LLVM and are mostly undocumented, however they are
// mostly lowered to the matching LLVM instructions by the compiler in a fairly straightforward manner.
// The associated LLVM instruction or intrinsic is documented alongside each Rust intrinsic function.
pub(crate) fn simd_reduce_xor<T, U>(x: T) -> U;
// truncate integer vector to bitmask
+ // `fn simd_bitmask(vector) -> unsigned integer` takes a vector of integers and
+ // returns either an unsigned integer or array of `u8`.
+ // Every element in the vector becomes a single bit in the returned bitmask.
+ // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
+ // The bit order of the result depends on the byte endianness. LSB-first for little
+ // endian and MSB-first for big endian.
+ //
+ // UB if called on a vector with values other than 0 and -1.
#[allow(unused)]
pub(crate) fn simd_bitmask<T, U>(x: T) -> U;
-#![cfg_attr(not(feature = "std"), no_std)]
+#![no_std]
#![feature(
- const_fn_trait_bound,
convert_float_to_int,
decl_macro,
intra_doc_pointers,
}
impl_integer_intrinsic! {
+ unsafe impl ToBitMask<BitMask=u8> for Mask<_, 1>
+ unsafe impl ToBitMask<BitMask=u8> for Mask<_, 2>
+ unsafe impl ToBitMask<BitMask=u8> for Mask<_, 4>
unsafe impl ToBitMask<BitMask=u8> for Mask<_, 8>
unsafe impl ToBitMask<BitMask=u16> for Mask<_, 16>
unsafe impl ToBitMask<BitMask=u32> for Mask<_, 32>
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::MAX;")]
/// let x = Simd::from_array([2, 1, 0, MAX]);
/// let max = Simd::splat(MAX);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, 0, 1, MAX]);
/// let max = Simd::splat(MAX);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, -2, -1, MAX]);
/// let max = Simd::splat(MAX);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let xs = Simd::from_array([MIN, MIN +1, -5, 0]);
/// assert_eq!(xs.abs(), Simd::from_array([MIN, MAX, 5, 0]));
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let xs = Simd::from_array([MIN, -2, 0, 3]);
/// let unsat = xs.abs();
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
#[doc = concat!("# use core::", stringify!($ty), "::{MIN, MAX};")]
/// let x = Simd::from_array([MIN, -2, 3, MAX]);
/// let unsat = -x;
where
LaneCount<LANES>: SupportedLaneCount,
{
- /// Horizontal wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
+ /// Reducing wrapping add. Returns the sum of the lanes of the vector, with wrapping addition.
#[inline]
- pub fn horizontal_sum(self) -> $scalar {
+ pub fn reduce_sum(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_add_ordered(self, 0) }
}
- /// Horizontal wrapping multiply. Returns the product of the lanes of the vector, with wrapping multiplication.
+ /// Reducing wrapping multiply. Returns the product of the lanes of the vector, with wrapping multiplication.
#[inline]
- pub fn horizontal_product(self) -> $scalar {
+ pub fn reduce_product(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_mul_ordered(self, 1) }
}
- /// Horizontal maximum. Returns the maximum lane in the vector.
+ /// Reducing maximum. Returns the maximum lane in the vector.
#[inline]
- pub fn horizontal_max(self) -> $scalar {
+ pub fn reduce_max(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_max(self) }
}
- /// Horizontal minimum. Returns the minimum lane in the vector.
+ /// Reducing minimum. Returns the minimum lane in the vector.
#[inline]
- pub fn horizontal_min(self) -> $scalar {
+ pub fn reduce_min(self) -> $scalar {
// Safety: `self` is an integer vector
unsafe { simd_reduce_min(self) }
}
LaneCount<LANES>: SupportedLaneCount,
{
- /// Horizontal add. Returns the sum of the lanes of the vector.
+ /// Reducing add. Returns the sum of the lanes of the vector.
#[inline]
- pub fn horizontal_sum(self) -> $scalar {
+ pub fn reduce_sum(self) -> $scalar {
// LLVM sum is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().sum()
}
}
- /// Horizontal multiply. Returns the product of the lanes of the vector.
+ /// Reducing multiply. Returns the product of the lanes of the vector.
#[inline]
- pub fn horizontal_product(self) -> $scalar {
+ pub fn reduce_product(self) -> $scalar {
// LLVM product is inaccurate on i586
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) {
self.as_array().iter().product()
}
}
- /// Horizontal maximum. Returns the maximum lane in the vector.
+ /// Reducing maximum. Returns the maximum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either. This function will not return `NaN` unless all lanes are `NaN`.
#[inline]
- pub fn horizontal_max(self) -> $scalar {
+ pub fn reduce_max(self) -> $scalar {
// Safety: `self` is a float vector
unsafe { simd_reduce_max(self) }
}
- /// Horizontal minimum. Returns the minimum lane in the vector.
+ /// Reducing minimum. Returns the minimum lane in the vector.
///
/// Returns values based on equality, so a vector containing both `0.` and `-0.` may
/// return either. This function will not return `NaN` unless all lanes are `NaN`.
#[inline]
- pub fn horizontal_min(self) -> $scalar {
+ pub fn reduce_min(self) -> $scalar {
// Safety: `self` is a float vector
unsafe { simd_reduce_min(self) }
}
T: SimdElement + BitAnd<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
- /// Horizontal bitwise "and". Returns the cumulative bitwise "and" across the lanes of
+ /// Reducing bitwise "and". Returns the cumulative bitwise "and" across the lanes of
/// the vector.
#[inline]
- pub fn horizontal_and(self) -> T {
+ pub fn reduce_and(self) -> T {
unsafe { simd_reduce_and(self) }
}
}
T: SimdElement + BitOr<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
- /// Horizontal bitwise "or". Returns the cumulative bitwise "or" across the lanes of
+ /// Reducing bitwise "or". Returns the cumulative bitwise "or" across the lanes of
/// the vector.
#[inline]
- pub fn horizontal_or(self) -> T {
+ pub fn reduce_or(self) -> T {
unsafe { simd_reduce_or(self) }
}
}
T: SimdElement + BitXor<T, Output = T>,
LaneCount<LANES>: SupportedLaneCount,
{
- /// Horizontal bitwise "xor". Returns the cumulative bitwise "xor" across the lanes of
+ /// Reducing bitwise "xor". Returns the cumulative bitwise "xor" across the lanes of
/// the vector.
#[inline]
- pub fn horizontal_xor(self) -> T {
+ pub fn reduce_xor(self) -> T {
unsafe { simd_reduce_xor(self) }
}
}
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
- /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
+ /// # use core::simd::{Simd, Mask};
/// let a = Simd::from_array([0, 1, 2, 3]);
/// let b = Simd::from_array([4, 5, 6, 7]);
/// let mask = Mask::from_array([true, false, false, true]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Mask;
- /// # #[cfg(not(feature = "std"))] use core::simd::Mask;
+ /// # use core::simd::Mask;
/// let a = Mask::<i32, 4>::from_array([true, true, false, false]);
/// let b = Mask::<i32, 4>::from_array([false, false, true, true]);
/// let mask = Mask::<i32, 4>::from_array([true, false, false, true]);
/// ## One source vector
/// ```
/// # #![feature(portable_simd)]
-/// # #[cfg(feature = "std")] use core_simd::{Simd, simd_swizzle};
-/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, simd_swizzle};
+/// # use core::simd::{Simd, simd_swizzle};
/// let v = Simd::<f32, 4>::from_array([0., 1., 2., 3.]);
///
/// // Keeping the same size
/// ## Two source vectors
/// ```
/// # #![feature(portable_simd)]
-/// # #[cfg(feature = "std")] use core_simd::{Simd, simd_swizzle, Which};
-/// # #[cfg(not(feature = "std"))] use core::simd::{Simd, simd_swizzle, Which};
+/// # use core::simd::{Simd, simd_swizzle, Which};
/// use Which::*;
/// let a = Simd::<f32, 4>::from_array([0., 1., 2., 3.]);
/// let b = Simd::<f32, 4>::from_array([4., 5., 6., 7.]);
///
/// ```
/// #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let a = Simd::from_array([0, 1, 2, 3]);
/// let b = Simd::from_array([4, 5, 6, 7]);
/// let (x, y) = a.interleave(b);
///
/// ```
/// #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let a = Simd::from_array([0, 4, 1, 5]);
/// let b = Simd::from_array([2, 6, 3, 7]);
/// let (x, y) = a.deinterleave(b);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let floats: Simd<f32, 4> = Simd::from_array([1.9, -4.5, f32::INFINITY, f32::NAN]);
/// let ints = floats.cast::<i32>();
/// assert_eq!(ints, Simd::from_array([1, -4, i32::MAX, 0]));
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 5]);
/// let alt = Simd::from_array([-5, -4, -3, -2]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 5]);
///
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
- /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
+ /// # use core::simd::{Simd, Mask};
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 5]);
/// let alt = Simd::from_array([-5, -4, -3, -2]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
- /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
+ /// # use core::simd::{Simd, Mask};
/// let vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 5]);
/// let alt = Simd::from_array([-5, -4, -3, -2]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::Simd;
- /// # #[cfg(not(feature = "std"))] use core::simd::Simd;
+ /// # use core::simd::Simd;
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 0]);
/// let vals = Simd::from_array([-27, 82, -41, 124]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
- /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
+ /// # use core::simd::{Simd, Mask};
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 0]);
/// let vals = Simd::from_array([-27, 82, -41, 124]);
/// # Examples
/// ```
/// # #![feature(portable_simd)]
- /// # #[cfg(feature = "std")] use core_simd::{Simd, Mask};
- /// # #[cfg(not(feature = "std"))] use core::simd::{Simd, Mask};
+ /// # use core::simd::{Simd, Mask};
/// let mut vec: Vec<i32> = vec![10, 11, 12, 13, 14, 15, 16, 17, 18];
/// let idxs = Simd::from_array([9, 3, 0, 0]);
/// let vals = Simd::from_array([-27, 82, -41, 124]);
#![feature(portable_simd)]
+use core_simd::i16x2;
#[macro_use]
mod ops_macros;
impl_signed_tests! { i16 }
+
+#[test]
+fn max_is_not_lexicographic() {
+ let a = i16x2::splat(10);
+ let b = i16x2::from_array([-4, 12]);
+ assert_eq!(a.max(b), i16x2::from_array([10, 12]));
+}
+
+#[test]
+fn min_is_not_lexicographic() {
+ let a = i16x2::splat(10);
+ let b = i16x2::from_array([12, -4]);
+ assert_eq!(a.min(b), i16x2::from_array([10, -4]));
+}
+
+#[test]
+fn clamp_is_not_lexicographic() {
+ let a = i16x2::splat(10);
+ let lo = i16x2::from_array([-12, -4]);
+ let up = i16x2::from_array([-4, 12]);
+ assert_eq!(a.clamp(lo, up), i16x2::from_array([-4, 10]));
+
+ let x = i16x2::from_array([1, 10]);
+ let y = x.clamp(i16x2::splat(0), i16x2::splat(9));
+ assert_eq!(y, i16x2::from_array([1, 9]));
+}
macro_rules! impl_common_integer_tests {
{ $vector:ident, $scalar:ident } => {
test_helpers::test_lanes! {
- fn horizontal_sum<const LANES: usize>() {
+ fn reduce_sum<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_sum(),
+ $vector::<LANES>::from_array(x).reduce_sum(),
x.iter().copied().fold(0 as $scalar, $scalar::wrapping_add),
);
Ok(())
});
}
- fn horizontal_product<const LANES: usize>() {
+ fn reduce_product<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_product(),
+ $vector::<LANES>::from_array(x).reduce_product(),
x.iter().copied().fold(1 as $scalar, $scalar::wrapping_mul),
);
Ok(())
});
}
- fn horizontal_and<const LANES: usize>() {
+ fn reduce_and<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_and(),
+ $vector::<LANES>::from_array(x).reduce_and(),
x.iter().copied().fold(-1i8 as $scalar, <$scalar as core::ops::BitAnd>::bitand),
);
Ok(())
});
}
- fn horizontal_or<const LANES: usize>() {
+ fn reduce_or<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_or(),
+ $vector::<LANES>::from_array(x).reduce_or(),
x.iter().copied().fold(0 as $scalar, <$scalar as core::ops::BitOr>::bitor),
);
Ok(())
});
}
- fn horizontal_xor<const LANES: usize>() {
+ fn reduce_xor<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_xor(),
+ $vector::<LANES>::from_array(x).reduce_xor(),
x.iter().copied().fold(0 as $scalar, <$scalar as core::ops::BitXor>::bitxor),
);
Ok(())
});
}
- fn horizontal_max<const LANES: usize>() {
+ fn reduce_max<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_max(),
+ $vector::<LANES>::from_array(x).reduce_max(),
x.iter().copied().max().unwrap(),
);
Ok(())
});
}
- fn horizontal_min<const LANES: usize>() {
+ fn reduce_min<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- $vector::<LANES>::from_array(x).horizontal_min(),
+ $vector::<LANES>::from_array(x).reduce_min(),
x.iter().copied().min().unwrap(),
);
Ok(())
assert_eq!(a % b, Vector::<LANES>::splat(0));
}
+ fn min<const LANES: usize>() {
+ let a = Vector::<LANES>::splat(Scalar::MIN);
+ let b = Vector::<LANES>::splat(0);
+ assert_eq!(a.min(b), a);
+ let a = Vector::<LANES>::splat(Scalar::MAX);
+ let b = Vector::<LANES>::splat(0);
+ assert_eq!(a.min(b), b);
+ }
+
+ fn max<const LANES: usize>() {
+ let a = Vector::<LANES>::splat(Scalar::MIN);
+ let b = Vector::<LANES>::splat(0);
+ assert_eq!(a.max(b), b);
+ let a = Vector::<LANES>::splat(Scalar::MAX);
+ let b = Vector::<LANES>::splat(0);
+ assert_eq!(a.max(b), a);
+ }
+
+ fn clamp<const LANES: usize>() {
+ let min = Vector::<LANES>::splat(Scalar::MIN);
+ let max = Vector::<LANES>::splat(Scalar::MAX);
+ let zero = Vector::<LANES>::splat(0);
+ let one = Vector::<LANES>::splat(1);
+ let negone = Vector::<LANES>::splat(-1);
+ assert_eq!(zero.clamp(min, max), zero);
+ assert_eq!(zero.clamp(min, one), zero);
+ assert_eq!(zero.clamp(one, max), one);
+ assert_eq!(zero.clamp(min, negone), negone);
+ }
}
test_helpers::test_lanes_panic! {
})
}
- fn horizontal_sum<const LANES: usize>() {
+ fn reduce_sum<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- Vector::<LANES>::from_array(x).horizontal_sum(),
+ Vector::<LANES>::from_array(x).reduce_sum(),
x.iter().sum(),
);
Ok(())
});
}
- fn horizontal_product<const LANES: usize>() {
+ fn reduce_product<const LANES: usize>() {
test_helpers::test_1(&|x| {
test_helpers::prop_assert_biteq! (
- Vector::<LANES>::from_array(x).horizontal_product(),
+ Vector::<LANES>::from_array(x).reduce_product(),
x.iter().product(),
);
Ok(())
});
}
- fn horizontal_max<const LANES: usize>() {
+ fn reduce_max<const LANES: usize>() {
test_helpers::test_1(&|x| {
- let vmax = Vector::<LANES>::from_array(x).horizontal_max();
+ let vmax = Vector::<LANES>::from_array(x).reduce_max();
let smax = x.iter().copied().fold(Scalar::NAN, Scalar::max);
// 0 and -0 are treated the same
if !(x.contains(&0.) && x.contains(&-0.) && vmax.abs() == 0. && smax.abs() == 0.) {
});
}
- fn horizontal_min<const LANES: usize>() {
+ fn reduce_min<const LANES: usize>() {
test_helpers::test_1(&|x| {
- let vmax = Vector::<LANES>::from_array(x).horizontal_min();
+ let vmax = Vector::<LANES>::from_array(x).reduce_min();
let smax = x.iter().copied().fold(Scalar::NAN, Scalar::min);
// 0 and -0 are treated the same
if !(x.contains(&0.) && x.contains(&-0.) && vmax.abs() == 0. && smax.abs() == 0.) {
type Scalar = $scalar;
type IntScalar = $int_scalar;
- #[cfg(feature = "std")]
test_helpers::test_lanes! {
fn ceil<const LANES: usize>() {
test_helpers::test_unary_elementwise(
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
-core_simd = { path = "../core_simd" }
+core_simd = { path = "../core_simd", default-features = false }
[features]
default = ["as_crate"]
}
}
+#[cfg(not(miri))]
+fn make_runner() -> proptest::test_runner::TestRunner {
+ Default::default()
+}
+#[cfg(miri)]
+fn make_runner() -> proptest::test_runner::TestRunner {
+ // Only run a few tests on Miri
+ proptest::test_runner::TestRunner::new(proptest::test_runner::Config::with_cases(4))
+}
+
/// Test a function that takes a single value.
pub fn test_1<A: core::fmt::Debug + DefaultStrategy>(
f: &dyn Fn(A) -> proptest::test_runner::TestCaseResult,
) {
- let mut runner = proptest::test_runner::TestRunner::default();
+ let mut runner = make_runner();
runner.run(&A::default_strategy(), f).unwrap();
}
pub fn test_2<A: core::fmt::Debug + DefaultStrategy, B: core::fmt::Debug + DefaultStrategy>(
f: &dyn Fn(A, B) -> proptest::test_runner::TestCaseResult,
) {
- let mut runner = proptest::test_runner::TestRunner::default();
+ let mut runner = make_runner();
runner
.run(&(A::default_strategy(), B::default_strategy()), |(a, b)| {
f(a, b)
>(
f: &dyn Fn(A, B, C) -> proptest::test_runner::TestCaseResult,
) {
- let mut runner = proptest::test_runner::TestRunner::default();
+ let mut runner = make_runner();
runner
.run(
&(
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+ #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
fn lanes_8() {
implementation::<8>();
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+ #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
fn lanes_16() {
implementation::<16>();
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+ #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
fn lanes_32() {
implementation::<32>();
}
#[test]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test::wasm_bindgen_test)]
+ #[cfg(not(miri))] // Miri intrinsic implementations are uniform and larger tests are sloooow
fn lanes_64() {
implementation::<64>();
}
}
fn default_alloc_error_hook(layout: Layout) {
- rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
+ #[cfg(not(bootstrap))]
+ extern "Rust" {
+ // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+ // Its value depends on the -Zoom={panic,abort} compiler option.
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+ #[cfg(bootstrap)]
+ let __rust_alloc_error_handler_should_panic = 0;
+
+ #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {} bytes failed\n", layout.size());
+ } else {
+ rtprintpanic!("memory allocation of {} bytes failed\n", layout.size());
+ }
}
#[cfg(not(test))]
///
/// # Platform-specific behavior
///
-/// This function currently corresponds to the `getcwd` function on Unix
+/// This function [currently] corresponds to the `getcwd` function on Unix
/// and the `GetCurrentDirectoryW` function on Windows.
///
+/// [currently]: crate::io#platform-specific-behavior
+///
/// # Errors
///
/// Returns an [`Err`] if the current working directory value is invalid.
///
/// # Platform-specific behavior
///
-/// This function currently corresponds to the `chdir` function on Unix
+/// This function [currently] corresponds to the `chdir` function on Unix
/// and the `SetCurrentDirectoryW` function on Windows.
///
/// Returns an [`Err`] if the operation fails.
///
+/// [currently]: crate::io#platform-specific-behavior
+///
/// # Examples
///
/// ```
}
}
+/// An error indicating that no nul byte was present.
+///
+/// A slice used to create a [`CStr`] must contain a nul byte somewhere
+/// within the slice.
+///
+/// This error is created by the [`CStr::from_bytes_until_nul`] method.
+///
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+pub struct FromBytesUntilNulError(());
+
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+impl Error for FromBytesUntilNulError {}
+
+#[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+impl fmt::Display for FromBytesUntilNulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "data provided does not contain a nul")
+ }
+}
+
/// An error indicating invalid UTF-8 when converting a [`CString`] into a [`String`].
///
/// `CString` is just a wrapper over a buffer of bytes with a nul terminator;
}
}
+ /// Creates a C string wrapper from a byte slice.
+ ///
+ /// This method will create a `CStr` from any byte slice that contains at
+ /// least one nul byte. The caller does not need to know or specify where
+ /// the nul byte is located.
+ ///
+ /// If the first byte is a nul character, this method will return an
+ /// empty `CStr`. If multiple nul characters are present, the `CStr` will
+ /// end at the first one.
+ ///
+ /// If the slice only has a single nul byte at the end, this method is
+ /// equivalent to [`CStr::from_bytes_with_nul`].
+ ///
+ /// # Examples
+ /// ```
+ /// #![feature(cstr_from_bytes_until_nul)]
+ ///
+ /// use std::ffi::CStr;
+ ///
+ /// let mut buffer = [0u8; 16];
+ /// unsafe {
+ /// // Here we might call an unsafe C function that writes a string
+ /// // into the buffer.
+ /// let buf_ptr = buffer.as_mut_ptr();
+ /// buf_ptr.write_bytes(b'A', 8);
+ /// }
+ /// // Attempt to extract a C nul-terminated string from the buffer.
+ /// let c_str = CStr::from_bytes_until_nul(&buffer[..]).unwrap();
+ /// assert_eq!(c_str.to_str().unwrap(), "AAAAAAAA");
+ /// ```
+ ///
+ #[unstable(feature = "cstr_from_bytes_until_nul", issue = "95027")]
+ pub fn from_bytes_until_nul(bytes: &[u8]) -> Result<&CStr, FromBytesUntilNulError> {
+ let nul_pos = memchr::memchr(0, bytes);
+ match nul_pos {
+ Some(nul_pos) => {
+ // SAFETY: We know there is a nul byte at nul_pos, so this slice
+ // (ending at the nul byte) is a well-formed C string.
+ let subslice = &bytes[..nul_pos + 1];
+ Ok(unsafe { CStr::from_bytes_with_nul_unchecked(subslice) })
+ }
+ None => Err(FromBytesUntilNulError(())),
+ }
+ }
+
/// Creates a C string wrapper from a byte slice.
///
/// This function will cast the provided `bytes` to a `CStr`
/// wrapper after ensuring that the byte slice is nul-terminated
/// and does not contain any interior nul bytes.
///
+ /// If the nul byte may not be at the end,
+ /// [`CStr::from_bytes_until_nul`] can be used instead.
+ ///
/// # Examples
///
/// ```
assert!(cstr.is_err());
}
+#[test]
+fn cstr_from_bytes_until_nul() {
+ // Test an empty slice. This should fail because it
+ // does not contain a nul byte.
+ let b = b"";
+ assert_eq!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError(())));
+
+ // Test a non-empty slice, that does not contain a nul byte.
+ let b = b"hello";
+ assert_eq!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError(())));
+
+ // Test an empty nul-terminated string
+ let b = b"\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"");
+
+ // Test a slice with the nul byte in the middle
+ let b = b"hello\0world!";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice with the nul byte at the end
+ let b = b"hello\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice with two nul bytes at the end
+ let b = b"hello\0\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice containing lots of nul bytes
+ let b = b"\0\0\0\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"");
+}
+
#[test]
fn into_boxed() {
let orig: &[u8] = b"Hello, world!\0";
///
/// [changes]: io#platform-specific-behavior
///
-/// On macOS before version 10.10 and REDOX this function is not protected against time-of-check to
-/// time-of-use (TOCTOU) race conditions, and should not be used in security-sensitive code on
-/// those platforms. All other platforms are protected.
+/// On macOS before version 10.10 and REDOX, as well as when running in Miri for any target, this
+/// function is not protected against time-of-check to time-of-use (TOCTOU) race conditions, and
+/// should not be used in security-sensitive code on those platforms. All other platforms are
+/// protected.
///
/// # Errors
///
use crate::io::prelude::*;
+use crate::env;
use crate::fs::{self, File, OpenOptions};
use crate::io::{ErrorKind, SeekFrom};
use crate::path::Path;
// junction
assert_eq!(check!(fs::read_link(r"C:\Users\Default User")), Path::new(r"C:\Users\Default"));
// junction with special permissions
- assert_eq!(check!(fs::read_link(r"C:\Documents and Settings\")), Path::new(r"C:\Users"));
+ // Since not all localized Windows versions contain the folder "Documents and Settings" in English,
+ // we briefly check whether it exists and otherwise skip the test. In CI, however, we always execute the test.
+ if Path::new(r"C:\Documents and Settings\").exists() || env::var_os("CI").is_some() {
+ assert_eq!(
+ check!(fs::read_link(r"C:\Documents and Settings\")),
+ Path::new(r"C:\Users")
+ );
+ }
}
let tmpdir = tmpdir();
let link = tmpdir.join("link");
assert_eq!(res.err().unwrap().kind(), ErrorKind::NotFound);
}
+#[test]
+fn file_open_not_found() {
+ let res = File::open("/path/that/does/not/exist");
+ assert_eq!(res.err().unwrap().kind(), ErrorKind::NotFound);
+}
+
#[test]
fn create_dir_all_with_junctions() {
let tmpdir = tmpdir();
use crate::io::prelude::*;
+use crate::alloc::Allocator;
use crate::cmp;
use crate::io::{self, ErrorKind, IoSlice, IoSliceMut, ReadBuf, SeekFrom};
}
// Resizing write implementation
-fn vec_write(pos_mut: &mut u64, vec: &mut Vec<u8>, buf: &[u8]) -> io::Result<usize> {
+fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
+where
+ A: Allocator,
+{
let pos: usize = (*pos_mut).try_into().map_err(|_| {
io::const_io_error!(
ErrorKind::InvalidInput,
Ok(buf.len())
}
-fn vec_write_vectored(
+fn vec_write_vectored<A>(
pos_mut: &mut u64,
- vec: &mut Vec<u8>,
+ vec: &mut Vec<u8, A>,
bufs: &[IoSlice<'_>],
-) -> io::Result<usize> {
+) -> io::Result<usize>
+where
+ A: Allocator,
+{
let mut nwritten = 0;
for buf in bufs {
nwritten += vec_write(pos_mut, vec, buf)?;
}
#[stable(feature = "cursor_mut_vec", since = "1.25.0")]
-impl Write for Cursor<&mut Vec<u8>> {
+impl<A> Write for Cursor<&mut Vec<u8, A>>
+where
+ A: Allocator,
+{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, self.inner, buf)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl Write for Cursor<Vec<u8>> {
+impl<A> Write for Cursor<Vec<u8, A>>
+where
+ A: Allocator,
+{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
vec_write(&mut self.pos, &mut self.inner, buf)
}
}
#[stable(feature = "cursor_box_slice", since = "1.5.0")]
-impl Write for Cursor<Box<[u8]>> {
+impl<A> Write for Cursor<Box<[u8], A>>
+where
+ A: Allocator,
+{
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ slice_write(&mut self.pos, &mut self.inner, buf)
+ }
+
+ #[inline]
+ fn write_vectored(&mut self, bufs: &[IoSlice<'_>]) -> io::Result<usize> {
+ slice_write_vectored(&mut self.pos, &mut self.inner, bufs)
+ }
+
+ #[inline]
+ fn is_write_vectored(&self) -> bool {
+ true
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
+
+#[stable(feature = "cursor_array", since = "1.61.0")]
+impl<const N: usize> Write for Cursor<[u8; N]> {
#[inline]
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
slice_write(&mut self.pos, &mut self.inner, buf)
assert_eq!(&writer.get_ref()[..], b);
}
-#[test]
-fn test_box_slice_writer() {
- let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+fn test_slice_writer<T>(writer: &mut Cursor<T>)
+where
+ T: AsRef<[u8]>,
+ Cursor<T>: Write,
+{
assert_eq!(writer.position(), 0);
assert_eq!(writer.write(&[0]).unwrap(), 1);
assert_eq!(writer.position(), 1);
assert_eq!(writer.write(&[8, 9]).unwrap(), 1);
assert_eq!(writer.write(&[10]).unwrap(), 0);
let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
- assert_eq!(&**writer.get_ref(), b);
+ assert_eq!(writer.get_ref().as_ref(), b);
}
-#[test]
-fn test_box_slice_writer_vectored() {
- let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+fn test_slice_writer_vectored<T>(writer: &mut Cursor<T>)
+where
+ T: AsRef<[u8]>,
+ Cursor<T>: Write,
+{
assert_eq!(writer.position(), 0);
assert_eq!(writer.write_vectored(&[IoSlice::new(&[0])]).unwrap(), 1);
assert_eq!(writer.position(), 1);
assert_eq!(writer.write_vectored(&[IoSlice::new(&[8, 9])]).unwrap(), 1);
assert_eq!(writer.write_vectored(&[IoSlice::new(&[10])]).unwrap(), 0);
let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
- assert_eq!(&**writer.get_ref(), b);
+ assert_eq!(writer.get_ref().as_ref(), b);
+}
+
+#[test]
+fn test_box_slice_writer() {
+ let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+ test_slice_writer(&mut writer);
+}
+
+#[test]
+fn test_box_slice_writer_vectored() {
+ let mut writer = Cursor::new(vec![0u8; 9].into_boxed_slice());
+ test_slice_writer_vectored(&mut writer);
+}
+
+#[test]
+fn test_array_writer() {
+ let mut writer = Cursor::new([0u8; 9]);
+ test_slice_writer(&mut writer);
+}
+
+#[test]
+fn test_array_writer_vectored() {
+ let mut writer = Cursor::new([0u8; 9]);
+ test_slice_writer_vectored(&mut writer);
}
#[test]
fn test_buf_writer() {
let mut buf = [0 as u8; 9];
- {
- let mut writer = Cursor::new(&mut buf[..]);
- assert_eq!(writer.position(), 0);
- assert_eq!(writer.write(&[0]).unwrap(), 1);
- assert_eq!(writer.position(), 1);
- assert_eq!(writer.write(&[1, 2, 3]).unwrap(), 3);
- assert_eq!(writer.write(&[4, 5, 6, 7]).unwrap(), 4);
- assert_eq!(writer.position(), 8);
- assert_eq!(writer.write(&[]).unwrap(), 0);
- assert_eq!(writer.position(), 8);
-
- assert_eq!(writer.write(&[8, 9]).unwrap(), 1);
- assert_eq!(writer.write(&[10]).unwrap(), 0);
- }
- let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
- assert_eq!(buf, b);
+ let mut writer = Cursor::new(&mut buf[..]);
+ test_slice_writer(&mut writer);
}
#[test]
fn test_buf_writer_vectored() {
let mut buf = [0 as u8; 9];
- {
- let mut writer = Cursor::new(&mut buf[..]);
- assert_eq!(writer.position(), 0);
- assert_eq!(writer.write_vectored(&[IoSlice::new(&[0])]).unwrap(), 1);
- assert_eq!(writer.position(), 1);
- assert_eq!(
- writer
- .write_vectored(&[IoSlice::new(&[1, 2, 3]), IoSlice::new(&[4, 5, 6, 7])],)
- .unwrap(),
- 7,
- );
- assert_eq!(writer.position(), 8);
- assert_eq!(writer.write_vectored(&[]).unwrap(), 0);
- assert_eq!(writer.position(), 8);
-
- assert_eq!(writer.write_vectored(&[IoSlice::new(&[8, 9])]).unwrap(), 1);
- assert_eq!(writer.write_vectored(&[IoSlice::new(&[10])]).unwrap(), 0);
- }
- let b: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8];
- assert_eq!(buf, b);
+ let mut writer = Cursor::new(&mut buf[..]);
+ test_slice_writer_vectored(&mut writer);
}
#[test]
}
/// Read all bytes until a newline (the `0xA` byte) is reached, and append
- /// them to the provided buffer.
+ /// them to the provided buffer. You do not need to clear the buffer before
+ /// appending.
///
/// This function will read bytes from the underlying stream until the
/// newline delimiter (the `0xA` byte) or EOF is found. Once found, all bytes
///
/// [`io::stdin`]: stdin
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// # Examples
///
/// ```no_run
/// This handle implements both the [`Read`] and [`BufRead`] traits, and
/// is constructed via the [`Stdin::lock`] method.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// # Examples
///
/// ```no_run
/// is synchronized via a mutex. If you need more explicit control over
/// locking, see the [`Stdin::lock`] method.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to read bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// # Examples
///
/// Using implicit synchronization:
///
/// Created by the [`io::stdout`] method.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// [`lock`]: Stdout::lock
/// [`io::stdout`]: stdout
#[stable(feature = "rust1", since = "1.0.0")]
/// This handle implements the [`Write`] trait, and is constructed via
/// the [`Stdout::lock`] method. See its documentation for more.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
#[must_use = "if unused stdout will immediately unlock"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StdoutLock<'a> {
/// is synchronized via a mutex. If you need more explicit control over
/// locking, see the [`Stdout::lock`] method.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// # Examples
///
/// Using implicit synchronization:
///
/// [`io::stderr`]: stderr
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Stderr {
inner: Pin<&'static ReentrantMutex<RefCell<StderrRaw>>>,
/// This handle implements the [`Write`] trait and is constructed via
/// the [`Stderr::lock`] method. See its documentation for more.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
+///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
#[must_use = "if unused stderr will immediately unlock"]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct StderrLock<'a> {
///
/// This handle is not buffered.
///
-/// ### Note: Windows Portability Consideration
+/// ### Note: Windows Portability Considerations
+///
/// When operating in a console, the Windows implementation of this stream does not support
/// non-UTF-8 byte sequences. Attempting to write bytes that are not valid UTF-8 will return
/// an error.
///
+/// In a process with a detached console, such as one using
+/// `#![windows_subsystem = "windows"]`, or in a child process spawned from such a process,
+/// the contained handle will be null. In such cases, the standard library's `Read` and
+/// `Write` will do nothing and silently succeed. All other I/O operations, via the
+/// standard library or via raw Windows API calls, will fail.
+///
/// # Examples
///
/// Using implicit synchronization:
/// [`IntoIterator`]: ../book/ch13-04-performance.html
/// [range patterns]: ../reference/patterns.html?highlight=range#range-patterns
/// [`for`]: keyword.for.html
+///
+/// The other use of `in` is with the keyword `pub`. It allows users to declare an item as visible
+/// only within a given scope.
+///
+/// ## Literal Example:
+///
+/// * `pub(in crate::outer_mod) fn outer_mod_visible_fn() {}` - fn is visible in `outer_mod`
+///
+/// Starting with the 2018 edition, paths for `pub(in path)` must start with `crate`, `self` or
+/// `super`. The 2015 edition may also use paths starting with `::` or modules from the crate root.
+///
+/// For more information, see the [Reference].
+///
+/// [Reference]: ../reference/visibility-and-privacy.html#pubin-path-pubcrate-pubsuper-and-pubself
mod in_keyword {}
#[doc(keyword = "let")]
#[cfg_attr(not(test), rustc_diagnostic_item = "print_macro")]
#[allow_internal_unstable(print_internals)]
macro_rules! print {
- ($($arg:tt)*) => ($crate::io::_print($crate::format_args!($($arg)*)));
+ ($($arg:tt)*) => {
+ $crate::io::_print($crate::format_args!($($arg)*))
+ };
}
/// Prints to the standard output, with a newline.
#[cfg_attr(not(test), rustc_diagnostic_item = "println_macro")]
#[allow_internal_unstable(print_internals, format_args_nl)]
macro_rules! println {
- () => ($crate::print!("\n"));
- ($($arg:tt)*) => ({
- $crate::io::_print($crate::format_args_nl!($($arg)*));
- })
+ () => {
+ $crate::print!("\n")
+ };
+ ($($arg:tt)*) => {
+ $crate::io::_print($crate::format_args_nl!($($arg)*))
+ };
}
/// Prints to the standard error.
#[cfg_attr(not(test), rustc_diagnostic_item = "eprint_macro")]
#[allow_internal_unstable(print_internals)]
macro_rules! eprint {
- ($($arg:tt)*) => ($crate::io::_eprint($crate::format_args!($($arg)*)));
+ ($($arg:tt)*) => {
+ $crate::io::_eprint($crate::format_args!($($arg)*))
+ };
}
/// Prints to the standard error, with a newline.
#[cfg_attr(not(test), rustc_diagnostic_item = "eprintln_macro")]
#[allow_internal_unstable(print_internals, format_args_nl)]
macro_rules! eprintln {
- () => ($crate::eprint!("\n"));
- ($($arg:tt)*) => ({
- $crate::io::_eprint($crate::format_args_nl!($($arg)*));
- })
+ () => {
+ $crate::eprint!("\n")
+ };
+ ($($arg:tt)*) => {
+ $crate::io::_eprint($crate::format_args_nl!($($arg)*))
+ };
}
/// Prints and returns the value of a given expression for quick and dirty
--- /dev/null
+//! L4Re-specific extensions to primitives in the [`std::fs`] module.
+//!
+//! [`std::fs`]: crate::fs
+
+#![stable(feature = "metadata_ext", since = "1.1.0")]
+
+use crate::fs::Metadata;
+use crate::sys_common::AsInner;
+
+#[allow(deprecated)]
+use crate::os::l4re::raw;
+
+/// OS-specific extensions to [`fs::Metadata`].
+///
+/// [`fs::Metadata`]: crate::fs::Metadata
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+pub trait MetadataExt {
+ /// Gain a reference to the underlying `stat` structure which contains
+ /// the raw information returned by the OS.
+ ///
+ /// The contents of the returned [`stat`] are **not** consistent across
+ /// Unix platforms. The `os::unix::fs::MetadataExt` trait contains the
+ /// cross-Unix abstractions contained within the raw stat.
+ ///
+ /// [`stat`]: struct@crate::os::linux::raw::stat
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// let stat = meta.as_raw_stat();
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext", since = "1.1.0")]
+ #[rustc_deprecated(since = "1.8.0", reason = "other methods of this trait are now preferred")]
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat;
+
+ /// Returns the device ID on which this file resides.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_dev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_dev(&self) -> u64;
+ /// Returns the inode number.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ino());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ino(&self) -> u64;
+ /// Returns the file type and mode.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mode());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mode(&self) -> u32;
+ /// Returns the number of hard links to file.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_nlink());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_nlink(&self) -> u64;
+ /// Returns the user ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_uid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_uid(&self) -> u32;
+ /// Returns the group ID of the file owner.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_gid());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_gid(&self) -> u32;
+ /// Returns the device ID that this file represents. Only relevant for special files.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_rdev());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_rdev(&self) -> u64;
+ /// Returns the size of the file (if it is a regular file or a symbolic link) in bytes.
+ ///
+ /// The size of a symbolic link is the length of the pathname it contains,
+ /// without a terminating null byte.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_size());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_size(&self) -> u64;
+ /// Returns the last access time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime(&self) -> i64;
+ /// Returns the last access time of the file, in nanoseconds since [`st_atime`].
+ ///
+ /// [`st_atime`]: Self::st_atime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_atime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_atime_nsec(&self) -> i64;
+ /// Returns the last modification time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime(&self) -> i64;
+ /// Returns the last modification time of the file, in nanoseconds since [`st_mtime`].
+ ///
+ /// [`st_mtime`]: Self::st_mtime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_mtime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_mtime_nsec(&self) -> i64;
+ /// Returns the last status change time of the file, in seconds since Unix Epoch.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime(&self) -> i64;
+ /// Returns the last status change time of the file, in nanoseconds since [`st_ctime`].
+ ///
+ /// [`st_ctime`]: Self::st_ctime
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_ctime_nsec());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_ctime_nsec(&self) -> i64;
+ /// Returns the "preferred" block size for efficient filesystem I/O.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blksize());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blksize(&self) -> u64;
+ /// Returns the number of blocks allocated to the file, 512-byte units.
+ ///
+ /// # Examples
+ ///
+ /// ```no_run
+ /// use std::fs;
+ /// use std::io;
+ /// use std::os::linux::fs::MetadataExt;
+ ///
+ /// fn main() -> io::Result<()> {
+ /// let meta = fs::metadata("some_file")?;
+ /// println!("{}", meta.st_blocks());
+ /// Ok(())
+ /// }
+ /// ```
+ #[stable(feature = "metadata_ext2", since = "1.8.0")]
+ fn st_blocks(&self) -> u64;
+}
+
+#[stable(feature = "metadata_ext", since = "1.1.0")]
+impl MetadataExt for Metadata {
+ #[allow(deprecated)]
+ fn as_raw_stat(&self) -> &raw::stat {
+ unsafe { &*(self.as_inner().as_inner() as *const libc::stat64 as *const raw::stat) }
+ }
+ fn st_dev(&self) -> u64 {
+ self.as_inner().as_inner().st_dev as u64
+ }
+ fn st_ino(&self) -> u64 {
+ self.as_inner().as_inner().st_ino as u64
+ }
+ fn st_mode(&self) -> u32 {
+ self.as_inner().as_inner().st_mode as u32
+ }
+ fn st_nlink(&self) -> u64 {
+ self.as_inner().as_inner().st_nlink as u64
+ }
+ fn st_uid(&self) -> u32 {
+ self.as_inner().as_inner().st_uid as u32
+ }
+ fn st_gid(&self) -> u32 {
+ self.as_inner().as_inner().st_gid as u32
+ }
+ fn st_rdev(&self) -> u64 {
+ self.as_inner().as_inner().st_rdev as u64
+ }
+ fn st_size(&self) -> u64 {
+ self.as_inner().as_inner().st_size as u64
+ }
+ fn st_atime(&self) -> i64 {
+ self.as_inner().as_inner().st_atime as i64
+ }
+ fn st_atime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_atime_nsec as i64
+ }
+ fn st_mtime(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime as i64
+ }
+ fn st_mtime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_mtime_nsec as i64
+ }
+ fn st_ctime(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime as i64
+ }
+ fn st_ctime_nsec(&self) -> i64 {
+ self.as_inner().as_inner().st_ctime_nsec as i64
+ }
+ fn st_blksize(&self) -> u64 {
+ self.as_inner().as_inner().st_blksize as u64
+ }
+ fn st_blocks(&self) -> u64 {
+ self.as_inner().as_inner().st_blocks as u64
+ }
+}
--- /dev/null
+//! L4Re-specific definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![doc(cfg(target_os = "l4re"))]
+
+pub mod fs;
+pub mod raw;
--- /dev/null
+//! L4Re-specific raw type definitions.
+
+#![stable(feature = "raw_ext", since = "1.1.0")]
+#![rustc_deprecated(
+ since = "1.8.0",
+ reason = "these type aliases are no longer supported by \
+ the standard library, the `libc` crate on \
+ crates.io should be used instead for the correct \
+ definitions"
+)]
+#![allow(deprecated)]
+
+use crate::os::raw::c_ulong;
+
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type dev_t = u64;
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub type mode_t = u32;
+
+#[stable(feature = "pthread_t", since = "1.8.0")]
+pub type pthread_t = c_ulong;
+
+#[doc(inline)]
+#[stable(feature = "raw_ext", since = "1.1.0")]
+pub use self::arch::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+
+#[cfg(any(
+ target_arch = "x86",
+ target_arch = "le32",
+ target_arch = "m68k",
+ target_arch = "powerpc",
+ target_arch = "sparc",
+ target_arch = "arm",
+ target_arch = "asmjs",
+ target_arch = "wasm32"
+))]
+mod arch {
+ use crate::os::raw::{c_long, c_short, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: c_short,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __st_ino: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_uint,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ }
+}
+
+#[cfg(target_arch = "mips")]
+mod arch {
+ use crate::os::raw::{c_long, c_ulong};
+
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[cfg(target_env = "musl")]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[cfg(not(target_env = "musl"))]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad1: [c_long; 3],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: c_ulong,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad2: [c_long; 2],
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_pad5: [c_long; 14],
+ }
+}
+
+#[cfg(target_arch = "hexagon")]
+mod arch {
+ use crate::os::raw::{c_int, c_long, c_uint};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = c_long;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = c_uint;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad3: [c_int; 2],
+ }
+}
+
+#[cfg(any(
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64",
+ target_arch = "riscv64",
+ target_arch = "riscv32"
+))]
+mod arch {
+ pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
+}
+
+#[cfg(target_arch = "aarch64")]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = i32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u32;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = i64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = c_long;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad1: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad2: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: time_t,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_int; 2],
+ }
+}
+
+#[cfg(any(target_arch = "x86_64", target_arch = "powerpc64"))]
+mod arch {
+ use crate::os::raw::{c_int, c_long};
+
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blkcnt_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type blksize_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type ino_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type nlink_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type off_t = u64;
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub type time_t = i64;
+
+ #[repr(C)]
+ #[derive(Clone)]
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub struct stat {
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_dev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ino: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_nlink: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mode: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_uid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_gid: u32,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __pad0: c_int,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_rdev: u64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_size: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blksize: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_blocks: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_atime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_mtime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime: i64,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub st_ctime_nsec: c_long,
+ #[stable(feature = "raw_ext", since = "1.1.0")]
+ pub __unused: [c_long; 3],
+ }
+}
all(target_vendor = "fortanix", target_env = "sgx")
)
)))]
-#[cfg(any(target_os = "linux", target_os = "l4re", doc))]
+#[cfg(any(target_os = "linux", doc))]
pub mod linux;
// wasi
pub mod illumos;
#[cfg(target_os = "ios")]
pub mod ios;
+#[cfg(target_os = "l4re")]
+pub mod l4re;
#[cfg(target_os = "macos")]
pub mod macos;
#[cfg(target_os = "netbsd")]
pub use crate::os::illumos::*;
#[cfg(target_os = "ios")]
pub use crate::os::ios::*;
- #[cfg(any(target_os = "linux", target_os = "l4re"))]
+ #[cfg(target_os = "l4re")]
+ pub use crate::os::l4re::*;
+ #[cfg(target_os = "linux")]
pub use crate::os::linux::*;
#[cfg(target_os = "macos")]
pub use crate::os::macos::*;
fn arg0<S>(&mut self, arg: S) -> &mut process::Command
where
S: AsRef<OsStr>;
+
+ /// Sets the process group ID of the child process. Translates to a `setpgid` call in the child
+ /// process.
+ #[unstable(feature = "process_set_process_group", issue = "93857")]
+ fn process_group(&mut self, pgroup: i32) -> &mut process::Command;
}
#[stable(feature = "rust1", since = "1.0.0")]
self.as_inner_mut().set_arg_0(arg.as_ref());
self
}
+
+ fn process_group(&mut self, pgroup: i32) -> &mut process::Command {
+ self.as_inner_mut().pgroup(pgroup);
+ self
+ }
}
/// Unix-specific extensions to [`process::ExitStatus`] and
#[inline]
fn try_from(handle_or_null: HandleOrNull) -> Result<Self, ()> {
let owned_handle = handle_or_null.0;
- if owned_handle.handle.is_null() { Err(()) } else { Ok(owned_handle) }
+ if owned_handle.handle.is_null() {
+ // Don't call `CloseHandle`; it'd be harmless, except that it could
+ // overwrite the `GetLastError` error.
+ forget(owned_handle);
+
+ Err(())
+ } else {
+ Ok(owned_handle)
+ }
}
}
inherit: bool,
options: c::DWORD,
) -> io::Result<Self> {
+ let handle = self.as_raw_handle();
+
+ // `Stdin`, `Stdout`, and `Stderr` can all hold null handles, such as
+ // in a process with a detached console. `DuplicateHandle` would fail
+ // if we passed it a null handle, but we can treat null as a valid
+ // handle which doesn't do any I/O, and allow it to be duplicated.
+ if handle.is_null() {
+ return unsafe { Ok(Self::from_raw_handle(handle)) };
+ }
+
let mut ret = 0 as c::HANDLE;
cvt(unsafe {
let cur_proc = c::GetCurrentProcess();
c::DuplicateHandle(
cur_proc,
- self.as_raw_handle(),
+ handle,
cur_proc,
&mut ret,
access,
#[inline]
fn try_from(handle_or_invalid: HandleOrInvalid) -> Result<Self, ()> {
let owned_handle = handle_or_invalid.0;
- if owned_handle.handle == c::INVALID_HANDLE_VALUE { Err(()) } else { Ok(owned_handle) }
+ if owned_handle.handle == c::INVALID_HANDLE_VALUE {
+ // Don't call `CloseHandle`; it'd be harmless, except that it could
+ // overwrite the `GetLastError` error.
+ forget(owned_handle);
+
+ Err(())
+ } else {
+ Ok(owned_handle)
+ }
}
}
use crate::os::windows::io::{AsHandle, AsSocket};
use crate::os::windows::io::{OwnedHandle, OwnedSocket};
use crate::os::windows::raw;
+use crate::ptr;
use crate::sys;
use crate::sys::c;
use crate::sys_common::{self, AsInner, FromInner, IntoInner};
#[stable(feature = "asraw_stdio", since = "1.21.0")]
impl AsRawHandle for io::Stdin {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle })
}
}
#[stable(feature = "asraw_stdio", since = "1.21.0")]
impl AsRawHandle for io::Stdout {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle })
}
}
#[stable(feature = "asraw_stdio", since = "1.21.0")]
impl AsRawHandle for io::Stderr {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle })
}
}
#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
impl<'a> AsRawHandle for io::StdinLock<'a> {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_INPUT_HANDLE) as RawHandle })
}
}
#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
impl<'a> AsRawHandle for io::StdoutLock<'a> {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_OUTPUT_HANDLE) as RawHandle })
}
}
#[stable(feature = "asraw_stdio_locks", since = "1.35.0")]
impl<'a> AsRawHandle for io::StderrLock<'a> {
fn as_raw_handle(&self) -> RawHandle {
- unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle }
+ stdio_handle(unsafe { c::GetStdHandle(c::STD_ERROR_HANDLE) as RawHandle })
}
}
+// Translate a handle returned from `GetStdHandle` into a handle to return to
+// the user.
+fn stdio_handle(raw: RawHandle) -> RawHandle {
+ // `GetStdHandle` isn't expected to actually fail, so when it returns
+ // `INVALID_HANDLE_VALUE`, it means we were launched from a parent which
+ // didn't provide us with stdio handles, such as a parent with a detached
+ // console. In that case, return null to the user, which is consistent
+ // with what they'd get in the parent, and which avoids the problem that
+ // `INVALID_HANDLE_VALUE` aliases the current process handle.
+ if raw == c::INVALID_HANDLE_VALUE { ptr::null_mut() } else { raw }
+}
+
#[stable(feature = "from_raw_os", since = "1.1.0")]
impl FromRawHandle for fs::File {
#[inline]
let relative = r"a\b";
let mut expected = crate::env::current_dir().unwrap();
expected.push(relative);
- assert_eq!(absolute(relative).unwrap(), expected);
+ assert_eq!(absolute(relative).unwrap().as_os_str(), expected.as_os_str());
macro_rules! unchanged(
($path:expr) => {
- assert_eq!(absolute($path).unwrap(), Path::new($path));
+ assert_eq!(absolute($path).unwrap().as_os_str(), Path::new($path).as_os_str());
}
);
// Verbatim paths are always unchanged, no matter what.
unchanged!(r"\\?\path.\to/file..");
- assert_eq!(absolute(r"C:\path..\to.\file.").unwrap(), Path::new(r"C:\path..\to\file"));
- assert_eq!(absolute(r"C:\path\to\COM1").unwrap(), Path::new(r"\\.\COM1"));
- assert_eq!(absolute(r"C:\path\to\COM1.txt").unwrap(), Path::new(r"\\.\COM1"));
- assert_eq!(absolute(r"C:\path\to\COM1 .txt").unwrap(), Path::new(r"\\.\COM1"));
- assert_eq!(absolute(r"C:\path\to\cOnOuT$").unwrap(), Path::new(r"\\.\cOnOuT$"));
+ assert_eq!(
+ absolute(r"C:\path..\to.\file.").unwrap().as_os_str(),
+ Path::new(r"C:\path..\to\file").as_os_str()
+ );
+ assert_eq!(absolute(r"COM1").unwrap().as_os_str(), Path::new(r"\\.\COM1").as_os_str());
}
#[bench]
/// * No arguments to the program
/// * Inherit the current process's environment
/// * Inherit the current process's working directory
- /// * Inherit stdin/stdout/stderr for `spawn` or `status`, but create pipes for `output`
+ /// * Inherit stdin/stdout/stderr for [`spawn`] or [`status`], but create pipes for [`output`]
+ ///
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
///
/// Builder methods are provided to change these defaults and
/// otherwise configure the process.
/// Configuration for the child process's standard input (stdin) handle.
///
- /// Defaults to [`inherit`] when used with `spawn` or `status`, and
- /// defaults to [`piped`] when used with `output`.
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
///
/// [`inherit`]: Stdio::inherit
/// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
///
/// # Examples
///
/// Configuration for the child process's standard output (stdout) handle.
///
- /// Defaults to [`inherit`] when used with `spawn` or `status`, and
- /// defaults to [`piped`] when used with `output`.
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
///
/// [`inherit`]: Stdio::inherit
/// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
///
/// # Examples
///
/// Configuration for the child process's standard error (stderr) handle.
///
- /// Defaults to [`inherit`] when used with `spawn` or `status`, and
- /// defaults to [`piped`] when used with `output`.
+ /// Defaults to [`inherit`] when used with [`spawn`] or [`status`], and
+ /// defaults to [`piped`] when used with [`output`].
///
/// [`inherit`]: Stdio::inherit
/// [`piped`]: Stdio::piped
+ /// [`spawn`]: Self::spawn
+ /// [`status`]: Self::status
+ /// [`output`]: Self::output
///
/// # Examples
///
///
/// The default implementations are returning `libc::EXIT_SUCCESS` to indicate
/// a successful execution. In case of a failure, `libc::EXIT_FAILURE` is returned.
+///
+/// Because different runtimes have different specifications on the return value
+/// of the `main` function, this trait is likely to be available only on
+/// standard library's runtime for convenience. Other runtimes are not required
+/// to provide similar functionality.
#[cfg_attr(not(test), lang = "termination")]
#[unstable(feature = "termination_trait_lib", issue = "43301")]
#[rustc_on_unimplemented(
target_arch = "wasm64",
)))]
pub const MIN_ALIGN: usize = 16;
-// The allocator on the esp-idf platform guarentees 4 byte alignment.
+// The allocator on the esp-idf platform guarantees 4 byte alignment.
#[cfg(all(any(
all(target_arch = "riscv32", target_os = "espidf"),
all(target_arch = "xtensa", target_os = "espidf"),
use crate::ptr;
use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
use crate::sys::hermit::abi;
-use crate::sys::mutex::Mutex;
+use crate::sys::locks::Mutex;
use crate::time::Duration;
// The implementation is inspired by Andrew D. Birrell's paper
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
-pub mod condvar;
pub mod env;
pub mod fd;
pub mod fs;
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod memchr;
-pub mod mutex;
pub mod net;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
-pub mod rwlock;
pub mod stdio;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod time;
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub mod locks {
+ pub use super::condvar::*;
+ pub use super::mutex::*;
+ pub use super::rwlock::*;
+}
+
use crate::io::ErrorKind;
#[allow(unused_extern_crates)]
use crate::cell::UnsafeCell;
-use crate::sys::condvar::Condvar;
-use crate::sys::mutex::Mutex;
+use crate::sys::locks::{Condvar, Mutex};
pub struct RWLock {
lock: Mutex,
//! POSIX conditional variable implementation based on user-space wait queues.
use super::{abi, error::expect_success_aborting, spin::SpinMutex, task, time::with_tmos_strong};
-use crate::{mem::replace, ptr::NonNull, sys::mutex::Mutex, time::Duration};
+use crate::{mem::replace, ptr::NonNull, sys::locks::Mutex, time::Duration};
// The implementation is inspired by the queue-based implementation shown in
// Andrew D. Birrell's paper "Implementing Condition Variables with Semaphores"
-use crate::sys::mutex::Mutex;
+use crate::sys::locks::Mutex;
use crate::time::Duration;
use super::waitqueue::{SpinMutex, WaitQueue, WaitVariable};
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
-pub mod condvar;
pub mod env;
pub mod fd;
#[path = "../unsupported/fs.rs"]
#[path = "../unsupported/io.rs"]
pub mod io;
pub mod memchr;
-pub mod mutex;
pub mod net;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
-pub mod rwlock;
pub mod stdio;
pub mod thread;
pub mod thread_local_key;
pub mod time;
+mod condvar;
+mod mutex;
+mod rwlock;
+
+pub mod locks {
+ pub use super::condvar::*;
+ pub use super::mutex::*;
+ pub use super::rwlock::*;
+}
+
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
pub unsafe fn init(argc: isize, argv: *const *const u8) {
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
-pub mod rwlock;
pub mod stdio;
-pub use self::itron::{condvar, mutex, thread};
+pub use self::itron::thread;
pub mod memchr;
pub mod thread_local_dtor;
pub mod thread_local_key;
pub mod time;
+mod rwlock;
+
+pub mod locks {
+ pub use super::itron::condvar::*;
+ pub use super::itron::mutex::*;
+ pub use super::rwlock::*;
+}
+
// SAFETY: must be called only once during runtime initialization.
// NOTE: this is not guaranteed to run, for example when Rust code is called externally.
pub unsafe fn init(_argc: isize, _argv: *const *const u8) {}
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::sys::mutex::{self, Mutex};
-use crate::time::Duration;
-
-pub struct Condvar {
- inner: UnsafeCell<libc::pthread_cond_t>,
-}
-
-pub type MovableCondvar = Box<Condvar>;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
-const TIMESPEC_MAX: libc::timespec =
- libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
-
-fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
- if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
-}
-
-impl Condvar {
- pub const fn new() -> Condvar {
- // Might be moved and address is changing it is better to avoid
- // initialization of potentially opaque OS data before it landed
- Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
- }
-
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox"
- ))]
- pub unsafe fn init(&mut self) {}
-
- // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
- // So on that platform, init() should always be called
- // Moreover, that platform does not have pthread_condattr_setclock support,
- // hence that initialization should be skipped as well
- #[cfg(target_os = "espidf")]
- pub unsafe fn init(&mut self) {
- let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
- assert_eq!(r, 0);
- }
-
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "l4re",
- target_os = "android",
- target_os = "redox",
- target_os = "espidf"
- )))]
- pub unsafe fn init(&mut self) {
- use crate::mem::MaybeUninit;
- let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
- let r = libc::pthread_condattr_init(attr.as_mut_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
- assert_eq!(r, 0);
- let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
- assert_eq!(r, 0);
- let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
- assert_eq!(r, 0);
- }
-
- #[inline]
- pub unsafe fn notify_one(&self) {
- let r = libc::pthread_cond_signal(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- pub unsafe fn notify_all(&self) {
- let r = libc::pthread_cond_broadcast(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = libc::pthread_cond_wait(self.inner.get(), mutex::raw(mutex));
- debug_assert_eq!(r, 0);
- }
-
- // This implementation is used on systems that support pthread_condattr_setclock
- // where we configure condition variable to use monotonic clock (instead of
- // default system clock). This approach avoids all problems that result
- // from changes made to the system time.
- #[cfg(not(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "android",
- target_os = "espidf"
- )))]
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- use crate::mem;
-
- let mut now: libc::timespec = mem::zeroed();
- let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
- assert_eq!(r, 0);
-
- // Nanosecond calculations can't overflow because both values are below 1e9.
- let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
-
- let sec = saturating_cast_to_time_t(dur.as_secs())
- .checked_add((nsec / 1_000_000_000) as libc::time_t)
- .and_then(|s| s.checked_add(now.tv_sec));
- let nsec = nsec % 1_000_000_000;
-
- let timeout =
- sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
-
- let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), &timeout);
- assert!(r == libc::ETIMEDOUT || r == 0);
- r == 0
- }
-
- // This implementation is modeled after libcxx's condition_variable
- // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
- // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
- #[cfg(any(
- target_os = "macos",
- target_os = "ios",
- target_os = "android",
- target_os = "espidf"
- ))]
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
- use crate::ptr;
- use crate::time::Instant;
-
- // 1000 years
- let max_dur = Duration::from_secs(1000 * 365 * 86400);
-
- if dur > max_dur {
- // OSX implementation of `pthread_cond_timedwait` is buggy
- // with super long durations. When duration is greater than
- // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
- // in macOS Sierra return error 316.
- //
- // This program demonstrates the issue:
- // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
- //
- // To work around this issue, and possible bugs of other OSes, timeout
- // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
- // because of spurious wakeups.
-
- dur = max_dur;
- }
-
- // First, figure out what time it currently is, in both system and
- // stable time. pthread_cond_timedwait uses system time, but we want to
- // report timeout based on stable time.
- let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
- let stable_now = Instant::now();
- let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
- debug_assert_eq!(r, 0);
-
- let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
- let extra = (nsec / 1_000_000_000) as libc::time_t;
- let nsec = nsec % 1_000_000_000;
- let seconds = saturating_cast_to_time_t(dur.as_secs());
-
- let timeout = sys_now
- .tv_sec
- .checked_add(extra)
- .and_then(|s| s.checked_add(seconds))
- .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
- .unwrap_or(TIMESPEC_MAX);
-
- // And wait!
- let r = libc::pthread_cond_timedwait(self.inner.get(), mutex::raw(mutex), &timeout);
- debug_assert!(r == libc::ETIMEDOUT || r == 0);
-
- // ETIMEDOUT is not a totally reliable method of determining timeout due
- // to clock shifts, so do the check ourselves
- stable_now.elapsed() < dur
- }
-
- #[inline]
- #[cfg(not(target_os = "dragonfly"))]
- pub unsafe fn destroy(&self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- debug_assert_eq!(r, 0);
- }
-
- #[inline]
- #[cfg(target_os = "dragonfly")]
- pub unsafe fn destroy(&self) {
- let r = libc::pthread_cond_destroy(self.inner.get());
- // On DragonFly pthread_cond_destroy() returns EINVAL if called on
- // a condvar that was just initialized with
- // libc::PTHREAD_COND_INITIALIZER. Once it is used or
- // pthread_cond_init() is called, this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
- }
-}
pub use remove_dir_impl::remove_dir_all;
-// Fallback for REDOX and ESP-IDF
-#[cfg(any(target_os = "redox", target_os = "espidf"))]
+// Fallback for REDOX and ESP-IDF (and Miri)
+#[cfg(any(target_os = "redox", target_os = "espidf", miri))]
mod remove_dir_impl {
pub use crate::sys_common::fs::remove_dir_all;
}
// Modern implementation using openat(), unlinkat() and fdopendir()
-#[cfg(not(any(target_os = "redox", target_os = "espidf")))]
+#[cfg(not(any(target_os = "redox", target_os = "espidf", miri)))]
mod remove_dir_impl {
use super::{cstr, lstat, Dir, DirEntry, InnerReadDir, ReadDir};
use crate::ffi::CStr;
use crate::sync::Arc;
use crate::sys::{cvt, cvt_r};
- #[cfg(not(all(target_os = "macos", target_arch = "x86_64"),))]
+ #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64")),))]
use libc::{fdopendir, openat, unlinkat};
- #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
use macos_weak::{fdopendir, openat, unlinkat};
- #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
mod macos_weak {
use crate::sys::weak::weak;
use libc::{c_char, c_int, DIR};
}
pub unsafe fn fdopendir(fd: c_int) -> *mut DIR {
+ #[cfg(all(target_os = "macos", target_arch = "x86"))]
+ weak!(fn fdopendir(c_int) -> *mut DIR, "fdopendir$INODE64$UNIX2003");
+ #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
weak!(fn fdopendir(c_int) -> *mut DIR, "fdopendir$INODE64");
fdopendir.get().map(|fdopendir| fdopendir(fd)).unwrap_or_else(|| {
crate::sys::unix::os::set_errno(libc::ENOSYS);
}
}
- #[cfg(not(all(target_os = "macos", target_arch = "x86_64")))]
+ #[cfg(not(all(target_os = "macos", not(target_arch = "aarch64"))))]
pub fn remove_dir_all(p: &Path) -> io::Result<()> {
remove_dir_all_modern(p)
}
- #[cfg(all(target_os = "macos", target_arch = "x86_64"))]
+ #[cfg(all(target_os = "macos", not(target_arch = "aarch64")))]
pub fn remove_dir_all(p: &Path) -> io::Result<()> {
if macos_weak::has_openat() {
// openat() is available with macOS 10.10+, just like unlinkat() and fdopendir()
use crate::fmt;
use crate::io::{self, IoSlice, IoSliceMut};
use crate::net::{Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr};
+ use crate::os::unix::io::{AsFd, AsRawFd, BorrowedFd, FromRawFd, IntoRawFd, RawFd};
use crate::sys::fd::FileDesc;
use crate::sys_common::{AsInner, FromInner, IntoInner};
use crate::time::Duration;
}
pub fn is_read_vectored(&self) -> bool {
- unimpl!();
+ false
}
pub fn peek(&self, _: &mut [u8]) -> io::Result<usize> {
}
pub fn is_write_vectored(&self) -> bool {
- unimpl!();
+ false
}
pub fn set_timeout(&self, _: Option<Duration>, _: libc::c_int) -> io::Result<()> {
pub fn take_error(&self) -> io::Result<Option<io::Error>> {
unimpl!();
}
+
+ // This is used by sys_common code to abstract over Windows and Unix.
+ pub fn as_raw(&self) -> RawFd {
+ self.as_raw_fd()
+ }
+ }
+
+ impl AsInner<FileDesc> for Socket {
+ fn as_inner(&self) -> &FileDesc {
+ &self.0
+ }
+ }
+
+ impl FromInner<FileDesc> for Socket {
+ fn from_inner(file_desc: FileDesc) -> Socket {
+ Socket(file_desc)
+ }
}
- impl AsInner<libc::c_int> for Socket {
- fn as_inner(&self) -> &libc::c_int {
- self.0.as_inner()
+ impl IntoInner<FileDesc> for Socket {
+ fn into_inner(self) -> FileDesc {
+ self.0
}
}
- impl FromInner<libc::c_int> for Socket {
- fn from_inner(fd: libc::c_int) -> Socket {
- Socket(FileDesc::new(fd))
+ impl AsFd for Socket {
+ fn as_fd(&self) -> BorrowedFd<'_> {
+ self.0.as_fd()
}
}
- impl IntoInner<libc::c_int> for Socket {
- fn into_inner(self) -> libc::c_int {
- self.0.into_raw()
+ impl AsRawFd for Socket {
+ fn as_raw_fd(&self) -> RawFd {
+ self.0.as_raw_fd()
+ }
+ }
+
+ impl IntoRawFd for Socket {
+ fn into_raw_fd(self) -> RawFd {
+ self.0.into_raw_fd()
+ }
+ }
+
+ impl FromRawFd for Socket {
+ unsafe fn from_raw_fd(raw_fd: RawFd) -> Self {
+ Self(FromRawFd::from_raw_fd(raw_fd))
}
}
}
pub fn is_read_vectored(&self) -> bool {
- unimpl!();
+ false
}
pub fn write(&self, _: &[u8]) -> io::Result<usize> {
}
pub fn is_write_vectored(&self) -> bool {
- unimpl!();
+ false
}
pub fn peer_addr(&self) -> io::Result<SocketAddr> {
impl LookupHost {
pub fn port(&self) -> u16 {
- unimpl!();
+ 0 // unimplemented
}
}
--- /dev/null
+mod pthread_condvar;
+mod pthread_mutex;
+mod pthread_remutex;
+mod pthread_rwlock;
+pub use pthread_condvar::{Condvar, MovableCondvar};
+pub use pthread_mutex::{MovableMutex, Mutex};
+pub use pthread_remutex::ReentrantMutex;
+pub use pthread_rwlock::{MovableRWLock, RWLock};
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::sys::locks::{pthread_mutex, Mutex};
+use crate::time::Duration;
+
+pub struct Condvar {
+ inner: UnsafeCell<libc::pthread_cond_t>,
+}
+
+pub type MovableCondvar = Box<Condvar>;
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+const TIMESPEC_MAX: libc::timespec =
+ libc::timespec { tv_sec: <libc::time_t>::MAX, tv_nsec: 1_000_000_000 - 1 };
+
+fn saturating_cast_to_time_t(value: u64) -> libc::time_t {
+ if value > <libc::time_t>::MAX as u64 { <libc::time_t>::MAX } else { value as libc::time_t }
+}
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ // Might be moved and address is changing it is better to avoid
+ // initialization of potentially opaque OS data before it landed
+ Condvar { inner: UnsafeCell::new(libc::PTHREAD_COND_INITIALIZER) }
+ }
+
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox"
+ ))]
+ pub unsafe fn init(&mut self) {}
+
+ // NOTE: ESP-IDF's PTHREAD_COND_INITIALIZER support is not released yet
+ // So on that platform, init() should always be called
+ // Moreover, that platform does not have pthread_condattr_setclock support,
+ // hence that initialization should be skipped as well
+ #[cfg(target_os = "espidf")]
+ pub unsafe fn init(&mut self) {
+ let r = libc::pthread_cond_init(self.inner.get(), crate::ptr::null());
+ assert_eq!(r, 0);
+ }
+
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "l4re",
+ target_os = "android",
+ target_os = "redox",
+ target_os = "espidf"
+ )))]
+ pub unsafe fn init(&mut self) {
+ use crate::mem::MaybeUninit;
+ let mut attr = MaybeUninit::<libc::pthread_condattr_t>::uninit();
+ let r = libc::pthread_condattr_init(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_setclock(attr.as_mut_ptr(), libc::CLOCK_MONOTONIC);
+ assert_eq!(r, 0);
+ let r = libc::pthread_cond_init(self.inner.get(), attr.as_ptr());
+ assert_eq!(r, 0);
+ let r = libc::pthread_condattr_destroy(attr.as_mut_ptr());
+ assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {
+ let r = libc::pthread_cond_signal(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ let r = libc::pthread_cond_broadcast(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let r = libc::pthread_cond_wait(self.inner.get(), pthread_mutex::raw(mutex));
+ debug_assert_eq!(r, 0);
+ }
+
+ // This implementation is used on systems that support pthread_condattr_setclock
+ // where we configure condition variable to use monotonic clock (instead of
+ // default system clock). This approach avoids all problems that result
+ // from changes made to the system time.
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "android",
+ target_os = "espidf"
+ )))]
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ use crate::mem;
+
+ let mut now: libc::timespec = mem::zeroed();
+ let r = libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut now);
+ assert_eq!(r, 0);
+
+ // Nanosecond calculations can't overflow because both values are below 1e9.
+ let nsec = dur.subsec_nanos() + now.tv_nsec as u32;
+
+ let sec = saturating_cast_to_time_t(dur.as_secs())
+ .checked_add((nsec / 1_000_000_000) as libc::time_t)
+ .and_then(|s| s.checked_add(now.tv_sec));
+ let nsec = nsec % 1_000_000_000;
+
+ let timeout =
+ sec.map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec as _ }).unwrap_or(TIMESPEC_MAX);
+
+ let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ assert!(r == libc::ETIMEDOUT || r == 0);
+ r == 0
+ }
+
+ // This implementation is modeled after libcxx's condition_variable
+ // https://github.com/llvm-mirror/libcxx/blob/release_35/src/condition_variable.cpp#L46
+ // https://github.com/llvm-mirror/libcxx/blob/release_35/include/__mutex_base#L367
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "ios",
+ target_os = "android",
+ target_os = "espidf"
+ ))]
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, mut dur: Duration) -> bool {
+ use crate::ptr;
+ use crate::time::Instant;
+
+ // 1000 years
+ let max_dur = Duration::from_secs(1000 * 365 * 86400);
+
+ if dur > max_dur {
+ // OSX implementation of `pthread_cond_timedwait` is buggy
+ // with super long durations. When duration is greater than
+ // 0x100_0000_0000_0000 seconds, `pthread_cond_timedwait`
+ // in macOS Sierra return error 316.
+ //
+ // This program demonstrates the issue:
+ // https://gist.github.com/stepancheg/198db4623a20aad2ad7cddb8fda4a63c
+ //
+ // To work around this issue, and possible bugs of other OSes, timeout
+ // is clamped to 1000 years, which is allowable per the API of `wait_timeout`
+ // because of spurious wakeups.
+
+ dur = max_dur;
+ }
+
+ // First, figure out what time it currently is, in both system and
+ // stable time. pthread_cond_timedwait uses system time, but we want to
+ // report timeout based on stable time.
+ let mut sys_now = libc::timeval { tv_sec: 0, tv_usec: 0 };
+ let stable_now = Instant::now();
+ let r = libc::gettimeofday(&mut sys_now, ptr::null_mut());
+ debug_assert_eq!(r, 0);
+
+ let nsec = dur.subsec_nanos() as libc::c_long + (sys_now.tv_usec * 1000) as libc::c_long;
+ let extra = (nsec / 1_000_000_000) as libc::time_t;
+ let nsec = nsec % 1_000_000_000;
+ let seconds = saturating_cast_to_time_t(dur.as_secs());
+
+ let timeout = sys_now
+ .tv_sec
+ .checked_add(extra)
+ .and_then(|s| s.checked_add(seconds))
+ .map(|s| libc::timespec { tv_sec: s, tv_nsec: nsec })
+ .unwrap_or(TIMESPEC_MAX);
+
+ // And wait!
+ let r = libc::pthread_cond_timedwait(self.inner.get(), pthread_mutex::raw(mutex), &timeout);
+ debug_assert!(r == libc::ETIMEDOUT || r == 0);
+
+ // ETIMEDOUT is not a totally reliable method of determining timeout due
+ // to clock shifts, so do the check ourselves
+ stable_now.elapsed() < dur
+ }
+
+ #[inline]
+ #[cfg(not(target_os = "dragonfly"))]
+ pub unsafe fn destroy(&self) {
+ let r = libc::pthread_cond_destroy(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+
+ #[inline]
+ #[cfg(target_os = "dragonfly")]
+ pub unsafe fn destroy(&self) {
+ let r = libc::pthread_cond_destroy(self.inner.get());
+ // On DragonFly pthread_cond_destroy() returns EINVAL if called on
+ // a condvar that was just initialized with
+ // libc::PTHREAD_COND_INITIALIZER. Once it is used or
+ // pthread_cond_init() is called, this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ }
+}
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::mem::MaybeUninit;
+use crate::sys::cvt_nz;
+
+pub struct Mutex {
+ inner: UnsafeCell<libc::pthread_mutex_t>,
+}
+
+pub type MovableMutex = Box<Mutex>;
+
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
+ m.inner.get()
+}
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+#[allow(dead_code)] // sys isn't exported yet
+impl Mutex {
+ pub const fn new() -> Mutex {
+ // Might be moved to a different address, so it is better to avoid
+ // initialization of potentially opaque OS data before it landed.
+ // Be very careful using this newly constructed `Mutex`, reentrant
+ // locking is undefined behavior until `init` is called!
+ Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
+ }
+ #[inline]
+ pub unsafe fn init(&mut self) {
+ // Issue #33770
+ //
+ // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
+ // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
+ // try to re-lock it from the same thread when you already hold a lock
+ // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
+ // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+ // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
+ // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
+ // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
+ // a Mutex where re-locking is UB.
+ //
+ // In practice, glibc takes advantage of this undefined behavior to
+ // implement hardware lock elision, which uses hardware transactional
+ // memory to avoid acquiring the lock. While a transaction is in
+ // progress, the lock appears to be unlocked. This isn't a problem for
+ // other threads since the transactional memory will abort if a conflict
+ // is detected, however no abort is generated when re-locking from the
+ // same thread.
+ //
+ // Since locking the same mutex twice will result in two aliasing &mut
+ // references, we instead create the mutex with type
+ // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
+ // re-lock it from the same thread, thus avoiding undefined behavior.
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
+ .unwrap();
+ cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ }
+ #[inline]
+ pub unsafe fn lock(&self) {
+ let r = libc::pthread_mutex_lock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ let r = libc::pthread_mutex_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(self.inner.get()) == 0
+ }
+ #[inline]
+ #[cfg(not(target_os = "dragonfly"))]
+ pub unsafe fn destroy(&self) {
+ let r = libc::pthread_mutex_destroy(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ #[cfg(target_os = "dragonfly")]
+ pub unsafe fn destroy(&self) {
+ let r = libc::pthread_mutex_destroy(self.inner.get());
+ // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
+ // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
+ // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
+ // this behaviour no longer occurs.
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ }
+}
+
+pub(super) struct PthreadMutexAttr<'a>(pub &'a mut MaybeUninit<libc::pthread_mutexattr_t>);
+
+impl Drop for PthreadMutexAttr<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr());
+ debug_assert_eq!(result, 0);
+ }
+ }
+}
--- /dev/null
+use super::pthread_mutex::PthreadMutexAttr;
+use crate::cell::UnsafeCell;
+use crate::mem::MaybeUninit;
+use crate::sys::cvt_nz;
+
+pub struct ReentrantMutex {
+ inner: UnsafeCell<libc::pthread_mutex_t>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
+ ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
+ }
+
+ pub unsafe fn init(&self) {
+ let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
+ cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
+ let attr = PthreadMutexAttr(&mut attr);
+ cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE))
+ .unwrap();
+ cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
+ }
+
+ pub unsafe fn lock(&self) {
+ let result = libc::pthread_mutex_lock(self.inner.get());
+ debug_assert_eq!(result, 0);
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ libc::pthread_mutex_trylock(self.inner.get()) == 0
+ }
+
+ pub unsafe fn unlock(&self) {
+ let result = libc::pthread_mutex_unlock(self.inner.get());
+ debug_assert_eq!(result, 0);
+ }
+
+ pub unsafe fn destroy(&self) {
+ let result = libc::pthread_mutex_destroy(self.inner.get());
+ debug_assert_eq!(result, 0);
+ }
+}
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::sync::atomic::{AtomicUsize, Ordering};
+
+pub struct RWLock {
+ inner: UnsafeCell<libc::pthread_rwlock_t>,
+ write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
+ num_readers: AtomicUsize,
+}
+
+pub type MovableRWLock = Box<RWLock>;
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock {
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
+ }
+ }
+ #[inline]
+ pub unsafe fn read(&self) {
+ let r = libc::pthread_rwlock_rdlock(self.inner.get());
+
+ // According to POSIX, when a thread tries to acquire this read lock
+ // while it already holds the write lock
+ // (or vice versa, or tries to acquire the write lock twice),
+ // "the call shall either deadlock or return [EDEADLK]"
+ // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
+ // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
+ // So, in principle, all we have to do here is check `r == 0` to be sure we properly
+ // got the lock.
+ //
+ // However, (at least) glibc before version 2.25 does not conform to this spec,
+ // and can return `r == 0` even when this thread already holds the write lock.
+ // We thus check for this situation ourselves and panic when detecting that a thread
+ // got the write lock more than once, or got a read and a write lock.
+ if r == libc::EAGAIN {
+ panic!("rwlock maximum reader count exceeded");
+ } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
+ // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+ // data races.
+ if r == 0 {
+ // `pthread_rwlock_rdlock` succeeded when it should not have.
+ self.raw_unlock();
+ }
+ panic!("rwlock read lock would result in deadlock");
+ } else {
+ // POSIX does not make guarantees about all the errors that may be returned.
+ // See issue #94705 for more details.
+ assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ }
+ }
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() {
+ // `pthread_rwlock_tryrdlock` succeeded when it should not have.
+ self.raw_unlock();
+ false
+ } else {
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ true
+ }
+ } else {
+ false
+ }
+ }
+ #[inline]
+ pub unsafe fn write(&self) {
+ let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ // See comments above for why we check for EDEADLK and write_locked. For the same reason,
+ // we also need to check that there are no readers (tracked in `num_readers`).
+ if r == libc::EDEADLK
+ || (r == 0 && *self.write_locked.get())
+ || self.num_readers.load(Ordering::Relaxed) != 0
+ {
+ // Above, we make sure to only access `write_locked` when `r == 0` to avoid
+ // data races.
+ if r == 0 {
+ // `pthread_rwlock_wrlock` succeeded when it should not have.
+ self.raw_unlock();
+ }
+ panic!("rwlock write lock would result in deadlock");
+ } else {
+ // According to POSIX, for a properly initialized rwlock this can only
+ // return EDEADLK or 0. We rely on that.
+ debug_assert_eq!(r, 0);
+ }
+ *self.write_locked.get() = true;
+ }
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ // `pthread_rwlock_trywrlock` succeeded when it should not have.
+ self.raw_unlock();
+ false
+ } else {
+ *self.write_locked.get() = true;
+ true
+ }
+ } else {
+ false
+ }
+ }
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ debug_assert!(!*self.write_locked.get());
+ self.num_readers.fetch_sub(1, Ordering::Relaxed);
+ self.raw_unlock();
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*self.write_locked.get());
+ *self.write_locked.get() = false;
+ self.raw_unlock();
+ }
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ let r = libc::pthread_rwlock_destroy(self.inner.get());
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
+ }
+}
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
-pub mod condvar;
pub mod env;
pub mod fd;
pub mod fs;
pub mod kernel_copy;
#[cfg(target_os = "l4re")]
mod l4re;
+pub mod locks;
pub mod memchr;
-pub mod mutex;
#[cfg(not(target_os = "l4re"))]
pub mod net;
#[cfg(target_os = "l4re")]
pub mod pipe;
pub mod process;
pub mod rand;
-pub mod rwlock;
pub mod stack_overflow;
pub mod stdio;
pub mod thread;
target_os = "macos",
target_os = "ios",
target_os = "redox",
+ target_os = "l4re",
)))] {
use crate::sys::os::errno;
let pfds: &mut [_] = &mut [
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::mem::MaybeUninit;
-use crate::sys::cvt_nz;
-
-pub struct Mutex {
- inner: UnsafeCell<libc::pthread_mutex_t>,
-}
-
-pub type MovableMutex = Box<Mutex>;
-
-#[inline]
-pub unsafe fn raw(m: &Mutex) -> *mut libc::pthread_mutex_t {
- m.inner.get()
-}
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
-
-#[allow(dead_code)] // sys isn't exported yet
-impl Mutex {
- pub const fn new() -> Mutex {
- // Might be moved to a different address, so it is better to avoid
- // initialization of potentially opaque OS data before it landed.
- // Be very careful using this newly constructed `Mutex`, reentrant
- // locking is undefined behavior until `init` is called!
- Mutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
- }
- #[inline]
- pub unsafe fn init(&mut self) {
- // Issue #33770
- //
- // A pthread mutex initialized with PTHREAD_MUTEX_INITIALIZER will have
- // a type of PTHREAD_MUTEX_DEFAULT, which has undefined behavior if you
- // try to re-lock it from the same thread when you already hold a lock
- // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_init.html).
- // This is the case even if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
- // (https://github.com/rust-lang/rust/issues/33770#issuecomment-220847521) -- in that
- // case, `pthread_mutexattr_settype(PTHREAD_MUTEX_DEFAULT)` will of course be the same
- // as setting it to `PTHREAD_MUTEX_NORMAL`, but not setting any mode will result in
- // a Mutex where re-locking is UB.
- //
- // In practice, glibc takes advantage of this undefined behavior to
- // implement hardware lock elision, which uses hardware transactional
- // memory to avoid acquiring the lock. While a transaction is in
- // progress, the lock appears to be unlocked. This isn't a problem for
- // other threads since the transactional memory will abort if a conflict
- // is detected, however no abort is generated when re-locking from the
- // same thread.
- //
- // Since locking the same mutex twice will result in two aliasing &mut
- // references, we instead create the mutex with type
- // PTHREAD_MUTEX_NORMAL which is guaranteed to deadlock if we try to
- // re-lock it from the same thread, thus avoiding undefined behavior.
- let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
- cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
- let attr = PthreadMutexAttr(&mut attr);
- cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_NORMAL))
- .unwrap();
- cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
- }
- #[inline]
- pub unsafe fn lock(&self) {
- let r = libc::pthread_mutex_lock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
- #[inline]
- pub unsafe fn unlock(&self) {
- let r = libc::pthread_mutex_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- libc::pthread_mutex_trylock(self.inner.get()) == 0
- }
- #[inline]
- #[cfg(not(target_os = "dragonfly"))]
- pub unsafe fn destroy(&self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
- debug_assert_eq!(r, 0);
- }
- #[inline]
- #[cfg(target_os = "dragonfly")]
- pub unsafe fn destroy(&self) {
- let r = libc::pthread_mutex_destroy(self.inner.get());
- // On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
- // mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
- // Once it is used (locked/unlocked) or pthread_mutex_init() is called,
- // this behaviour no longer occurs.
- debug_assert!(r == 0 || r == libc::EINVAL);
- }
-}
-
-pub struct ReentrantMutex {
- inner: UnsafeCell<libc::pthread_mutex_t>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-impl ReentrantMutex {
- pub const unsafe fn uninitialized() -> ReentrantMutex {
- ReentrantMutex { inner: UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER) }
- }
-
- pub unsafe fn init(&self) {
- let mut attr = MaybeUninit::<libc::pthread_mutexattr_t>::uninit();
- cvt_nz(libc::pthread_mutexattr_init(attr.as_mut_ptr())).unwrap();
- let attr = PthreadMutexAttr(&mut attr);
- cvt_nz(libc::pthread_mutexattr_settype(attr.0.as_mut_ptr(), libc::PTHREAD_MUTEX_RECURSIVE))
- .unwrap();
- cvt_nz(libc::pthread_mutex_init(self.inner.get(), attr.0.as_ptr())).unwrap();
- }
-
- pub unsafe fn lock(&self) {
- let result = libc::pthread_mutex_lock(self.inner.get());
- debug_assert_eq!(result, 0);
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- libc::pthread_mutex_trylock(self.inner.get()) == 0
- }
-
- pub unsafe fn unlock(&self) {
- let result = libc::pthread_mutex_unlock(self.inner.get());
- debug_assert_eq!(result, 0);
- }
-
- pub unsafe fn destroy(&self) {
- let result = libc::pthread_mutex_destroy(self.inner.get());
- debug_assert_eq!(result, 0);
- }
-}
-
-struct PthreadMutexAttr<'a>(&'a mut MaybeUninit<libc::pthread_mutexattr_t>);
-
-impl Drop for PthreadMutexAttr<'_> {
- fn drop(&mut self) {
- unsafe {
- let result = libc::pthread_mutexattr_destroy(self.0.as_mut_ptr());
- debug_assert_eq!(result, 0);
- }
- }
-}
#[cfg(not(target_os = "fuchsia"))]
use crate::sys::fs::OpenOptions;
-use libc::{c_char, c_int, gid_t, uid_t, EXIT_FAILURE, EXIT_SUCCESS};
+use libc::{c_char, c_int, gid_t, pid_t, uid_t, EXIT_FAILURE, EXIT_SUCCESS};
cfg_if::cfg_if! {
if #[cfg(target_os = "fuchsia")] {
stderr: Option<Stdio>,
#[cfg(target_os = "linux")]
create_pidfd: bool,
+ pgroup: Option<pid_t>,
}
// Create a new type for argv, so that we can make it `Send` and `Sync`
stdin: None,
stdout: None,
stderr: None,
+ pgroup: None,
}
}
stdout: None,
stderr: None,
create_pidfd: false,
+ pgroup: None,
}
}
pub fn groups(&mut self, groups: &[gid_t]) {
self.groups = Some(Box::from(groups));
}
+ pub fn pgroup(&mut self, pgroup: pid_t) {
+ self.pgroup = Some(pgroup);
+ }
#[cfg(target_os = "linux")]
pub fn create_pidfd(&mut self, val: bool) {
pub fn get_groups(&self) -> Option<&[gid_t]> {
self.groups.as_deref()
}
+ #[allow(dead_code)]
+ pub fn get_pgroup(&self) -> Option<pid_t> {
+ self.pgroup
+ }
pub fn get_closures(&mut self) -> &mut Vec<Box<dyn FnMut() -> io::Result<()> + Send + Sync>> {
&mut self.closures
t!(cat.wait());
}
}
+
+#[test]
+#[cfg_attr(
+ any(
+ // See test_process_mask
+ target_os = "macos",
+ target_arch = "arm",
+ target_arch = "aarch64",
+ target_arch = "riscv64",
+ ),
+ ignore
+)]
+fn test_process_group_posix_spawn() {
+ unsafe {
+ // Spawn a cat subprocess that's just going to hang since there is no I/O.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ cmd.pgroup(0);
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+ let (mut cat, _pipes) = t!(cmd.spawn(Stdio::Null, true));
+
+ // Check that we can kill its process group, which means there *is* one.
+ t!(cvt(libc::kill(-(cat.id() as libc::pid_t), libc::SIGINT)));
+
+ t!(cat.wait());
+ }
+}
+
+#[test]
+#[cfg_attr(
+ any(
+ // See test_process_mask
+ target_os = "macos",
+ target_arch = "arm",
+ target_arch = "aarch64",
+ target_arch = "riscv64",
+ ),
+ ignore
+)]
+fn test_process_group_no_posix_spawn() {
+ unsafe {
+ // Same as above, create hang-y cat. This time, force using the non-posix_spawnp path.
+ let mut cmd = Command::new(OsStr::new("cat"));
+ cmd.pgroup(0);
+ cmd.pre_exec(Box::new(|| Ok(()))); // pre_exec forces fork + exec
+ cmd.stdin(Stdio::MakePipe);
+ cmd.stdout(Stdio::MakePipe);
+ let (mut cat, _pipes) = t!(cmd.spawn(Stdio::Null, true));
+
+ // Check that we can kill its process group, which means there *is* one.
+ t!(cvt(libc::kill(-(cat.id() as libc::pid_t), libc::SIGINT)));
+
+ t!(cat.wait());
+ }
+}
use libc::RTP_ID as pid_t;
#[cfg(not(target_os = "vxworks"))]
-use libc::{c_int, gid_t, pid_t, uid_t};
+use libc::{c_int, pid_t};
+
+#[cfg(not(any(target_os = "vxworks", target_os = "l4re")))]
+use libc::{gid_t, uid_t};
////////////////////////////////////////////////////////////////////////////////
// Command
cvt(libc::chdir(cwd.as_ptr()))?;
}
+ if let Some(pgroup) = self.get_pgroup() {
+ cvt(libc::setpgid(0, pgroup))?;
+ }
+
// emscripten has no signal support.
#[cfg(not(target_os = "emscripten"))]
{
None => None,
};
+ let pgroup = self.get_pgroup();
+
// Safety: -1 indicates we don't have a pidfd.
let mut p = unsafe { Process::new(0, -1) };
cvt_nz(libc::posix_spawnattr_init(attrs.as_mut_ptr()))?;
let attrs = PosixSpawnattr(&mut attrs);
+ let mut flags = 0;
+
let mut file_actions = MaybeUninit::uninit();
cvt_nz(libc::posix_spawn_file_actions_init(file_actions.as_mut_ptr()))?;
let file_actions = PosixSpawnFileActions(&mut file_actions);
cvt_nz(f(file_actions.0.as_mut_ptr(), cwd.as_ptr()))?;
}
+ if let Some(pgroup) = pgroup {
+ flags |= libc::POSIX_SPAWN_SETPGROUP;
+ cvt_nz(libc::posix_spawnattr_setpgroup(attrs.0.as_mut_ptr(), pgroup))?;
+ }
+
let mut set = MaybeUninit::<libc::sigset_t>::uninit();
cvt(sigemptyset(set.as_mut_ptr()))?;
cvt_nz(libc::posix_spawnattr_setsigmask(attrs.0.as_mut_ptr(), set.as_ptr()))?;
cvt(sigaddset(set.as_mut_ptr(), libc::SIGPIPE))?;
cvt_nz(libc::posix_spawnattr_setsigdefault(attrs.0.as_mut_ptr(), set.as_ptr()))?;
- let flags = libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK;
+ flags |= libc::POSIX_SPAWN_SETSIGDEF | libc::POSIX_SPAWN_SETSIGMASK;
cvt_nz(libc::posix_spawnattr_setflags(attrs.0.as_mut_ptr(), flags as _))?;
// Make sure we synchronize access to the global `environ` resource
}
pub fn code(&self) -> Option<i32> {
- if self.exited() { Some(libc::WEXITSTATUS(self.0)) } else { None }
+ self.exited().then(|| libc::WEXITSTATUS(self.0))
}
pub fn signal(&self) -> Option<i32> {
- if libc::WIFSIGNALED(self.0) { Some(libc::WTERMSIG(self.0)) } else { None }
+ libc::WIFSIGNALED(self.0).then(|| libc::WTERMSIG(self.0))
}
pub fn core_dumped(&self) -> bool {
}
pub fn stopped_signal(&self) -> Option<i32> {
- if libc::WIFSTOPPED(self.0) { Some(libc::WSTOPSIG(self.0)) } else { None }
+ libc::WIFSTOPPED(self.0).then(|| libc::WSTOPSIG(self.0))
}
pub fn continued(&self) -> bool {
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::sync::atomic::{AtomicUsize, Ordering};
-
-pub struct RWLock {
- inner: UnsafeCell<libc::pthread_rwlock_t>,
- write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
- num_readers: AtomicUsize,
-}
-
-pub type MovableRWLock = Box<RWLock>;
-
-unsafe impl Send for RWLock {}
-unsafe impl Sync for RWLock {}
-
-impl RWLock {
- pub const fn new() -> RWLock {
- RWLock {
- inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
- write_locked: UnsafeCell::new(false),
- num_readers: AtomicUsize::new(0),
- }
- }
- #[inline]
- pub unsafe fn read(&self) {
- let r = libc::pthread_rwlock_rdlock(self.inner.get());
-
- // According to POSIX, when a thread tries to acquire this read lock
- // while it already holds the write lock
- // (or vice versa, or tries to acquire the write lock twice),
- // "the call shall either deadlock or return [EDEADLK]"
- // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
- // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
- // So, in principle, all we have to do here is check `r == 0` to be sure we properly
- // got the lock.
- //
- // However, (at least) glibc before version 2.25 does not conform to this spec,
- // and can return `r == 0` even when this thread already holds the write lock.
- // We thus check for this situation ourselves and panic when detecting that a thread
- // got the write lock more than once, or got a read and a write lock.
- if r == libc::EAGAIN {
- panic!("rwlock maximum reader count exceeded");
- } else if r == libc::EDEADLK || (r == 0 && *self.write_locked.get()) {
- // Above, we make sure to only access `write_locked` when `r == 0` to avoid
- // data races.
- if r == 0 {
- // `pthread_rwlock_rdlock` succeeded when it should not have.
- self.raw_unlock();
- }
- panic!("rwlock read lock would result in deadlock");
- } else {
- // POSIX does not make guarantees about all the errors that may be returned.
- // See issue #94705 for more details.
- assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
- self.num_readers.fetch_add(1, Ordering::Relaxed);
- }
- }
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
- if r == 0 {
- if *self.write_locked.get() {
- // `pthread_rwlock_tryrdlock` succeeded when it should not have.
- self.raw_unlock();
- false
- } else {
- self.num_readers.fetch_add(1, Ordering::Relaxed);
- true
- }
- } else {
- false
- }
- }
- #[inline]
- pub unsafe fn write(&self) {
- let r = libc::pthread_rwlock_wrlock(self.inner.get());
- // See comments above for why we check for EDEADLK and write_locked. For the same reason,
- // we also need to check that there are no readers (tracked in `num_readers`).
- if r == libc::EDEADLK
- || (r == 0 && *self.write_locked.get())
- || self.num_readers.load(Ordering::Relaxed) != 0
- {
- // Above, we make sure to only access `write_locked` when `r == 0` to avoid
- // data races.
- if r == 0 {
- // `pthread_rwlock_wrlock` succeeded when it should not have.
- self.raw_unlock();
- }
- panic!("rwlock write lock would result in deadlock");
- } else {
- // According to POSIX, for a properly initialized rwlock this can only
- // return EDEADLK or 0. We rely on that.
- debug_assert_eq!(r, 0);
- }
- *self.write_locked.get() = true;
- }
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- let r = libc::pthread_rwlock_trywrlock(self.inner.get());
- if r == 0 {
- if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
- // `pthread_rwlock_trywrlock` succeeded when it should not have.
- self.raw_unlock();
- false
- } else {
- *self.write_locked.get() = true;
- true
- }
- } else {
- false
- }
- }
- #[inline]
- unsafe fn raw_unlock(&self) {
- let r = libc::pthread_rwlock_unlock(self.inner.get());
- debug_assert_eq!(r, 0);
- }
- #[inline]
- pub unsafe fn read_unlock(&self) {
- debug_assert!(!*self.write_locked.get());
- self.num_readers.fetch_sub(1, Ordering::Relaxed);
- self.raw_unlock();
- }
- #[inline]
- pub unsafe fn write_unlock(&self) {
- debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
- debug_assert!(*self.write_locked.get());
- *self.write_locked.get() = false;
- self.raw_unlock();
- }
- #[inline]
- pub unsafe fn destroy(&self) {
- let r = libc::pthread_rwlock_destroy(self.inner.get());
- // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
- // rwlock that was just initialized with
- // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
- // or pthread_rwlock_init() is called, this behaviour no longer occurs.
- if cfg!(target_os = "dragonfly") {
- debug_assert!(r == 0 || r == libc::EINVAL);
- } else {
- debug_assert_eq!(r, 0);
- }
- }
-}
+++ /dev/null
-use crate::sys::mutex::Mutex;
-use crate::time::Duration;
-
-pub struct Condvar {}
-
-pub type MovableCondvar = Condvar;
-
-impl Condvar {
- pub const fn new() -> Condvar {
- Condvar {}
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn notify_one(&self) {}
-
- #[inline]
- pub unsafe fn notify_all(&self) {}
-
- pub unsafe fn wait(&self, _mutex: &Mutex) {
- panic!("condvar wait not supported")
- }
-
- pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
- panic!("condvar wait not supported");
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {}
-}
--- /dev/null
+use crate::sys::locks::Mutex;
+use crate::time::Duration;
+
+pub struct Condvar {}
+
+pub type MovableCondvar = Condvar;
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar {}
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {}
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {}
+
+ pub unsafe fn wait(&self, _mutex: &Mutex) {
+ panic!("condvar wait not supported")
+ }
+
+ pub unsafe fn wait_timeout(&self, _mutex: &Mutex, _dur: Duration) -> bool {
+ panic!("condvar wait not supported");
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {}
+}
--- /dev/null
+mod condvar;
+mod mutex;
+mod rwlock;
+pub use condvar::{Condvar, MovableCondvar};
+pub use mutex::{MovableMutex, Mutex, ReentrantMutex};
+pub use rwlock::{MovableRWLock, RWLock};
--- /dev/null
+use crate::cell::Cell;
+
+pub struct Mutex {
+ // This platform has no threads, so we can use a Cell here.
+ locked: Cell<bool>,
+}
+
+pub type MovableMutex = Mutex;
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {} // no threads on this platform
+
+impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { locked: Cell::new(false) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ self.locked.set(false);
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ self.locked.replace(true) == false
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {}
+}
+
+// All empty stubs because this platform does not yet support threads, so lock
+// acquisition always succeeds.
+pub struct ReentrantMutex {}
+
+impl ReentrantMutex {
+ pub const unsafe fn uninitialized() -> ReentrantMutex {
+ ReentrantMutex {}
+ }
+
+ pub unsafe fn init(&self) {}
+
+ pub unsafe fn lock(&self) {}
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ true
+ }
+
+ pub unsafe fn unlock(&self) {}
+
+ pub unsafe fn destroy(&self) {}
+}
--- /dev/null
+use crate::cell::Cell;
+
+pub struct RWLock {
+ // This platform has no threads, so we can use a Cell here.
+ mode: Cell<isize>,
+}
+
+pub type MovableRWLock = RWLock;
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {} // no threads on this platform
+
+impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock { mode: Cell::new(0) }
+ }
+
+ #[inline]
+ pub unsafe fn read(&self) {
+ let m = self.mode.get();
+ if m >= 0 {
+ self.mode.set(m + 1);
+ } else {
+ rtabort!("rwlock locked for writing");
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ let m = self.mode.get();
+ if m >= 0 {
+ self.mode.set(m + 1);
+ true
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub unsafe fn write(&self) {
+ if self.mode.replace(-1) != 0 {
+ rtabort!("rwlock locked for reading")
+ }
+ }
+
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ if self.mode.get() == 0 {
+ self.mode.set(-1);
+ true
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ self.mode.set(self.mode.get() - 1);
+ }
+
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ assert_eq!(self.mode.replace(0), -1);
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {}
+}
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
-pub mod condvar;
pub mod env;
pub mod fs;
pub mod io;
-pub mod mutex;
+pub mod locks;
pub mod net;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod path;
pub mod pipe;
pub mod process;
-pub mod rwlock;
pub mod stdio;
pub mod thread;
#[cfg(target_thread_local)]
+++ /dev/null
-use crate::cell::Cell;
-
-pub struct Mutex {
- // This platform has no threads, so we can use a Cell here.
- locked: Cell<bool>,
-}
-
-pub type MovableMutex = Mutex;
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {} // no threads on this platform
-
-impl Mutex {
- pub const fn new() -> Mutex {
- Mutex { locked: Cell::new(false) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn lock(&self) {
- assert_eq!(self.locked.replace(true), false, "cannot recursively acquire mutex");
- }
-
- #[inline]
- pub unsafe fn unlock(&self) {
- self.locked.set(false);
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- self.locked.replace(true) == false
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {}
-}
-
-// All empty stubs because this platform does not yet support threads, so lock
-// acquisition always succeeds.
-pub struct ReentrantMutex {}
-
-impl ReentrantMutex {
- pub const unsafe fn uninitialized() -> ReentrantMutex {
- ReentrantMutex {}
- }
-
- pub unsafe fn init(&self) {}
-
- pub unsafe fn lock(&self) {}
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- true
- }
-
- pub unsafe fn unlock(&self) {}
-
- pub unsafe fn destroy(&self) {}
-}
+++ /dev/null
-use crate::cell::Cell;
-
-pub struct RWLock {
- // This platform has no threads, so we can use a Cell here.
- mode: Cell<isize>,
-}
-
-pub type MovableRWLock = RWLock;
-
-unsafe impl Send for RWLock {}
-unsafe impl Sync for RWLock {} // no threads on this platform
-
-impl RWLock {
- pub const fn new() -> RWLock {
- RWLock { mode: Cell::new(0) }
- }
-
- #[inline]
- pub unsafe fn read(&self) {
- let m = self.mode.get();
- if m >= 0 {
- self.mode.set(m + 1);
- } else {
- rtabort!("rwlock locked for writing");
- }
- }
-
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- let m = self.mode.get();
- if m >= 0 {
- self.mode.set(m + 1);
- true
- } else {
- false
- }
- }
-
- #[inline]
- pub unsafe fn write(&self) {
- if self.mode.replace(-1) != 0 {
- rtabort!("rwlock locked for reading")
- }
- }
-
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- if self.mode.get() == 0 {
- self.mode.set(-1);
- true
- } else {
- false
- }
- }
-
- #[inline]
- pub unsafe fn read_unlock(&self) {
- self.mode.set(self.mode.get() - 1);
- }
-
- #[inline]
- pub unsafe fn write_unlock(&self) {
- assert_eq!(self.mode.replace(0), -1);
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {}
-}
pub mod args;
#[path = "../unix/cmath.rs"]
pub mod cmath;
-#[path = "../unsupported/condvar.rs"]
-pub mod condvar;
pub mod env;
pub mod fd;
pub mod fs;
pub mod io;
-#[path = "../unsupported/mutex.rs"]
-pub mod mutex;
+#[path = "../unsupported/locks/mod.rs"]
+pub mod locks;
pub mod net;
pub mod os;
#[path = "../unix/os_str.rs"]
pub mod pipe;
#[path = "../unsupported/process.rs"]
pub mod process;
-#[path = "../unsupported/rwlock.rs"]
-pub mod rwlock;
pub mod stdio;
pub mod thread;
#[path = "../unsupported/thread_local_dtor.rs"]
use crate::cmp;
use crate::mem;
use crate::sync::atomic::{AtomicUsize, Ordering::SeqCst};
-use crate::sys::mutex::Mutex;
+use crate::sys::locks::Mutex;
use crate::time::Duration;
pub struct Condvar {
use crate::cell::UnsafeCell;
-use crate::sys::condvar::Condvar;
-use crate::sys::mutex::Mutex;
+use crate::sys::locks::{Condvar, Mutex};
pub struct RWLock {
lock: Mutex,
cfg_if::cfg_if! {
if #[cfg(target_feature = "atomics")] {
#[path = "atomics/condvar.rs"]
- pub mod condvar;
+ mod condvar;
#[path = "atomics/mutex.rs"]
- pub mod mutex;
+ mod mutex;
#[path = "atomics/rwlock.rs"]
- pub mod rwlock;
+ mod rwlock;
+ pub mod locks {
+ pub use super::condvar::*;
+ pub use super::mutex::*;
+ pub use super::rwlock::*;
+ }
#[path = "atomics/futex.rs"]
pub mod futex;
#[path = "atomics/thread.rs"]
pub mod thread;
} else {
- #[path = "../unsupported/condvar.rs"]
- pub mod condvar;
- #[path = "../unsupported/mutex.rs"]
- pub mod mutex;
- #[path = "../unsupported/rwlock.rs"]
- pub mod rwlock;
+ #[path = "../unsupported/locks/mod.rs"]
+ pub mod locks;
#[path = "../unsupported/thread.rs"]
pub mod thread;
}
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::sys::c;
-use crate::sys::mutex::{self, Mutex};
-use crate::sys::os;
-use crate::time::Duration;
-
-pub struct Condvar {
- inner: UnsafeCell<c::CONDITION_VARIABLE>,
-}
-
-pub type MovableCondvar = Condvar;
-
-unsafe impl Send for Condvar {}
-unsafe impl Sync for Condvar {}
-
-impl Condvar {
- pub const fn new() -> Condvar {
- Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
- }
-
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn wait(&self, mutex: &Mutex) {
- let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0);
- debug_assert!(r != 0);
- }
-
- pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
- let r = c::SleepConditionVariableSRW(
- self.inner.get(),
- mutex::raw(mutex),
- super::dur2timeout(dur),
- 0,
- );
- if r == 0 {
- debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize);
- false
- } else {
- true
- }
- }
-
- #[inline]
- pub unsafe fn notify_one(&self) {
- c::WakeConditionVariable(self.inner.get())
- }
-
- #[inline]
- pub unsafe fn notify_all(&self) {
- c::WakeAllConditionVariable(self.inner.get())
- }
-
- pub unsafe fn destroy(&self) {
- // ...
- }
-}
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+use crate::sys::locks::{mutex, Mutex};
+use crate::sys::os;
+use crate::time::Duration;
+
+pub struct Condvar {
+ inner: UnsafeCell<c::CONDITION_VARIABLE>,
+}
+
+pub type MovableCondvar = Condvar;
+
+unsafe impl Send for Condvar {}
+unsafe impl Sync for Condvar {}
+
+impl Condvar {
+ pub const fn new() -> Condvar {
+ Condvar { inner: UnsafeCell::new(c::CONDITION_VARIABLE_INIT) }
+ }
+
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn wait(&self, mutex: &Mutex) {
+ let r = c::SleepConditionVariableSRW(self.inner.get(), mutex::raw(mutex), c::INFINITE, 0);
+ debug_assert!(r != 0);
+ }
+
+ pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+ let r = c::SleepConditionVariableSRW(
+ self.inner.get(),
+ mutex::raw(mutex),
+ crate::sys::windows::dur2timeout(dur),
+ 0,
+ );
+ if r == 0 {
+ debug_assert_eq!(os::errno() as usize, c::ERROR_TIMEOUT as usize);
+ false
+ } else {
+ true
+ }
+ }
+
+ #[inline]
+ pub unsafe fn notify_one(&self) {
+ c::WakeConditionVariable(self.inner.get())
+ }
+
+ #[inline]
+ pub unsafe fn notify_all(&self) {
+ c::WakeAllConditionVariable(self.inner.get())
+ }
+
+ pub unsafe fn destroy(&self) {
+ // ...
+ }
+}
--- /dev/null
+mod condvar;
+mod mutex;
+mod rwlock;
+pub use condvar::{Condvar, MovableCondvar};
+pub use mutex::{MovableMutex, Mutex, ReentrantMutex};
+pub use rwlock::{MovableRWLock, RWLock};
--- /dev/null
+//! System Mutexes
+//!
+//! The Windows implementation of mutexes is a little odd and it might not be
+//! immediately obvious what's going on. The primary oddness is that SRWLock is
+//! used instead of CriticalSection, and this is done because:
+//!
+//! 1. SRWLock is several times faster than CriticalSection according to
+//! benchmarks performed on both Windows 8 and Windows 7.
+//!
+//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
+//! Unix implementation deadlocks so consistency is preferred. See #19962 for
+//! more details.
+//!
+//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
+//! is that there are no guarantees of fairness.
+
+use crate::cell::UnsafeCell;
+use crate::mem::MaybeUninit;
+use crate::sys::c;
+
+pub struct Mutex {
+ srwlock: UnsafeCell<c::SRWLOCK>,
+}
+
+// Windows SRW Locks are movable (while not borrowed).
+pub type MovableMutex = Mutex;
+
+unsafe impl Send for Mutex {}
+unsafe impl Sync for Mutex {}
+
+#[inline]
+pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
+ m.srwlock.get()
+}
+
+impl Mutex {
+ pub const fn new() -> Mutex {
+ Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
+ }
+ #[inline]
+ pub unsafe fn init(&mut self) {}
+
+ #[inline]
+ pub unsafe fn lock(&self) {
+ c::AcquireSRWLockExclusive(raw(self));
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ c::TryAcquireSRWLockExclusive(raw(self)) != 0
+ }
+
+ #[inline]
+ pub unsafe fn unlock(&self) {
+ c::ReleaseSRWLockExclusive(raw(self));
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ // SRWLock does not need to be destroyed.
+ }
+}
+
+pub struct ReentrantMutex {
+ inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
+}
+
+unsafe impl Send for ReentrantMutex {}
+unsafe impl Sync for ReentrantMutex {}
+
+impl ReentrantMutex {
+ pub const fn uninitialized() -> ReentrantMutex {
+ ReentrantMutex { inner: MaybeUninit::uninit() }
+ }
+
+ pub unsafe fn init(&self) {
+ c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+ }
+
+ pub unsafe fn lock(&self) {
+ c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+ }
+
+ #[inline]
+ pub unsafe fn try_lock(&self) -> bool {
+ c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
+ }
+
+ pub unsafe fn unlock(&self) {
+ c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+ }
+
+ pub unsafe fn destroy(&self) {
+ c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
+ }
+}
--- /dev/null
+use crate::cell::UnsafeCell;
+use crate::sys::c;
+
+pub struct RWLock {
+ inner: UnsafeCell<c::SRWLOCK>,
+}
+
+pub type MovableRWLock = RWLock;
+
+unsafe impl Send for RWLock {}
+unsafe impl Sync for RWLock {}
+
+impl RWLock {
+ pub const fn new() -> RWLock {
+ RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
+ }
+ #[inline]
+ pub unsafe fn read(&self) {
+ c::AcquireSRWLockShared(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn try_read(&self) -> bool {
+ c::TryAcquireSRWLockShared(self.inner.get()) != 0
+ }
+ #[inline]
+ pub unsafe fn write(&self) {
+ c::AcquireSRWLockExclusive(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn try_write(&self) -> bool {
+ c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
+ }
+ #[inline]
+ pub unsafe fn read_unlock(&self) {
+ c::ReleaseSRWLockShared(self.inner.get())
+ }
+ #[inline]
+ pub unsafe fn write_unlock(&self) {
+ c::ReleaseSRWLockExclusive(self.inner.get())
+ }
+
+ #[inline]
+ pub unsafe fn destroy(&self) {
+ // ...
+ }
+}
pub mod args;
pub mod c;
pub mod cmath;
-pub mod condvar;
pub mod env;
pub mod fs;
pub mod handle;
pub mod io;
+pub mod locks;
pub mod memchr;
-pub mod mutex;
pub mod net;
pub mod os;
pub mod os_str;
pub mod pipe;
pub mod process;
pub mod rand;
-pub mod rwlock;
pub mod thread;
pub mod thread_local_dtor;
pub mod thread_local_key;
+++ /dev/null
-//! System Mutexes
-//!
-//! The Windows implementation of mutexes is a little odd and it might not be
-//! immediately obvious what's going on. The primary oddness is that SRWLock is
-//! used instead of CriticalSection, and this is done because:
-//!
-//! 1. SRWLock is several times faster than CriticalSection according to
-//! benchmarks performed on both Windows 8 and Windows 7.
-//!
-//! 2. CriticalSection allows recursive locking while SRWLock deadlocks. The
-//! Unix implementation deadlocks so consistency is preferred. See #19962 for
-//! more details.
-//!
-//! 3. While CriticalSection is fair and SRWLock is not, the current Rust policy
-//! is that there are no guarantees of fairness.
-
-use crate::cell::UnsafeCell;
-use crate::mem::MaybeUninit;
-use crate::sys::c;
-
-pub struct Mutex {
- srwlock: UnsafeCell<c::SRWLOCK>,
-}
-
-// Windows SRW Locks are movable (while not borrowed).
-pub type MovableMutex = Mutex;
-
-unsafe impl Send for Mutex {}
-unsafe impl Sync for Mutex {}
-
-#[inline]
-pub unsafe fn raw(m: &Mutex) -> c::PSRWLOCK {
- m.srwlock.get()
-}
-
-impl Mutex {
- pub const fn new() -> Mutex {
- Mutex { srwlock: UnsafeCell::new(c::SRWLOCK_INIT) }
- }
- #[inline]
- pub unsafe fn init(&mut self) {}
-
- #[inline]
- pub unsafe fn lock(&self) {
- c::AcquireSRWLockExclusive(raw(self));
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- c::TryAcquireSRWLockExclusive(raw(self)) != 0
- }
-
- #[inline]
- pub unsafe fn unlock(&self) {
- c::ReleaseSRWLockExclusive(raw(self));
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {
- // SRWLock does not need to be destroyed.
- }
-}
-
-pub struct ReentrantMutex {
- inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
-}
-
-unsafe impl Send for ReentrantMutex {}
-unsafe impl Sync for ReentrantMutex {}
-
-impl ReentrantMutex {
- pub const fn uninitialized() -> ReentrantMutex {
- ReentrantMutex { inner: MaybeUninit::uninit() }
- }
-
- pub unsafe fn init(&self) {
- c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
- }
-
- pub unsafe fn lock(&self) {
- c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
- }
-
- #[inline]
- pub unsafe fn try_lock(&self) -> bool {
- c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
- }
-
- pub unsafe fn unlock(&self) {
- c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
- }
-
- pub unsafe fn destroy(&self) {
- c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
- }
-}
use crate::ptr;
use crate::sys::c;
use crate::sys::c::NonZeroDWORD;
+use crate::sys::cvt;
use crate::sys::fs::{File, OpenOptions};
use crate::sys::handle::Handle;
use crate::sys::path;
use crate::sys::pipe::{self, AnonPipe};
use crate::sys::stdio;
-use crate::sys::{cvt, to_u16s};
use crate::sys_common::mutex::StaticMutex;
use crate::sys_common::process::{CommandEnv, CommandEnvs};
use crate::sys_common::{AsInner, IntoInner};
None
};
let program = resolve_exe(&self.program, || env::var_os("PATH"), child_paths)?;
+ // Case insensitive "ends_with" of UTF-16 encoded ".bat" or ".cmd"
+ let is_batch_file = matches!(
+ program.len().checked_sub(5).and_then(|i| program.get(i..)),
+ Some([46, 98 | 66, 97 | 65, 116 | 84, 0] | [46, 99 | 67, 109 | 77, 100 | 68, 0])
+ );
let mut cmd_str =
- make_command_line(program.as_os_str(), &self.args, self.force_quotes_enabled)?;
+ make_command_line(&program, &self.args, self.force_quotes_enabled, is_batch_file)?;
cmd_str.push(0); // add null terminator
// stolen from the libuv code.
si.hStdOutput = stdout.as_raw_handle();
si.hStdError = stderr.as_raw_handle();
- let program = to_u16s(&program)?;
unsafe {
cvt(c::CreateProcessW(
program.as_ptr(),
exe_path: &'a OsStr,
parent_paths: impl FnOnce() -> Option<OsString>,
child_paths: Option<&OsStr>,
-) -> io::Result<PathBuf> {
+) -> io::Result<Vec<u16>> {
// Early return if there is no filename.
if exe_path.is_empty() || path::has_trailing_slash(exe_path) {
return Err(io::const_io_error!(
if has_exe_suffix {
// The application name is a path to a `.exe` file.
// Let `CreateProcessW` figure out if it exists or not.
- return Ok(exe_path.into());
+ return path::maybe_verbatim(Path::new(exe_path));
}
let mut path = PathBuf::from(exe_path);
// Append `.exe` if not already there.
path = path::append_suffix(path, EXE_SUFFIX.as_ref());
- if program_exists(&path) {
+ if let Some(path) = program_exists(&path) {
return Ok(path);
} else {
// It's ok to use `set_extension` here because the intent is to
// remove the extension that was just added.
path.set_extension("");
- return Ok(path);
+ return path::maybe_verbatim(&path);
}
} else {
ensure_no_nuls(exe_path)?;
if !has_extension {
path.set_extension(EXE_EXTENSION);
}
- if program_exists(&path) { Some(path) } else { None }
+ program_exists(&path)
});
if let Some(path) = result {
return Ok(path);
parent_paths: Paths,
child_paths: Option<&OsStr>,
mut exists: Exists,
-) -> Option<PathBuf>
+) -> Option<Vec<u16>>
where
Paths: FnOnce() -> Option<OsString>,
- Exists: FnMut(PathBuf) -> Option<PathBuf>,
+ Exists: FnMut(PathBuf) -> Option<Vec<u16>>,
{
// 1. Child paths
// This is for consistency with Rust's historic behaviour.
}
/// Check if a file exists without following symlinks.
-fn program_exists(path: &Path) -> bool {
+fn program_exists(path: &Path) -> Option<Vec<u16>> {
unsafe {
- to_u16s(path)
- .map(|path| {
- // Getting attributes using `GetFileAttributesW` does not follow symlinks
- // and it will almost always be successful if the link exists.
- // There are some exceptions for special system files (e.g. the pagefile)
- // but these are not executable.
- c::GetFileAttributesW(path.as_ptr()) != c::INVALID_FILE_ATTRIBUTES
- })
- .unwrap_or(false)
+ let path = path::maybe_verbatim(path).ok()?;
+ // Getting attributes using `GetFileAttributesW` does not follow symlinks
+ // and it will almost always be successful if the link exists.
+ // There are some exceptions for special system files (e.g. the pagefile)
+ // but these are not executable.
+ if c::GetFileAttributesW(path.as_ptr()) == c::INVALID_FILE_ATTRIBUTES {
+ None
+ } else {
+ Some(path)
+ }
}
}
// Produces a wide string *without terminating null*; returns an error if
// `prog` or any of the `args` contain a nul.
-fn make_command_line(prog: &OsStr, args: &[Arg], force_quotes: bool) -> io::Result<Vec<u16>> {
+fn make_command_line(
+ prog: &[u16],
+ args: &[Arg],
+ force_quotes: bool,
+ is_batch_file: bool,
+) -> io::Result<Vec<u16>> {
// Encode the command and arguments in a command line string such
// that the spawned process may recover them using CommandLineToArgvW.
let mut cmd: Vec<u16> = Vec::new();
// need to add an extra pair of quotes surrounding the whole command line
// so they are properly passed on to the script.
// See issue #91991.
- let is_batch_file = Path::new(prog)
- .extension()
- .map(|ext| ext.eq_ignore_ascii_case("cmd") || ext.eq_ignore_ascii_case("bat"))
- .unwrap_or(false);
if is_batch_file {
cmd.push(b'"' as u16);
}
- // Always quote the program name so CreateProcess doesn't interpret args as
- // part of the name if the binary wasn't found first time.
- append_arg(&mut cmd, prog, Quote::Always)?;
+ // Always quote the program name so CreateProcess to avoid ambiguity when
+ // the child process parses its arguments.
+ // Note that quotes aren't escaped here because they can't be used in arg0.
+ // But that's ok because file paths can't contain quotes.
+ cmd.push(b'"' as u16);
+ cmd.extend_from_slice(prog.strip_suffix(&[0]).unwrap_or(prog));
+ cmd.push(b'"' as u16);
+
for arg in args {
cmd.push(' ' as u16);
let (arg, quote) = match arg {
use crate::env;
use crate::ffi::{OsStr, OsString};
use crate::process::Command;
+use crate::sys::to_u16s;
#[test]
fn test_raw_args() {
let command_line = &make_command_line(
- OsStr::new("quoted exe"),
+ &to_u16s("quoted exe").unwrap(),
&[
Arg::Regular(OsString::from("quote me")),
Arg::Raw(OsString::from("quote me *not*")),
Arg::Regular(OsString::from("optional-quotes")),
],
false,
+ false,
)
.unwrap();
assert_eq!(
fn test_make_command_line() {
fn test_wrapper(prog: &str, args: &[&str], force_quotes: bool) -> String {
let command_line = &make_command_line(
- OsStr::new(prog),
+ &to_u16s(prog).unwrap(),
&args.iter().map(|a| Arg::Regular(OsString::from(a))).collect::<Vec<_>>(),
force_quotes,
+ false,
)
.unwrap();
String::from_utf16(command_line).unwrap()
let temp = tmpdir();
let mut exe_path = temp.path().to_owned();
exe_path.push("exists.exe");
- symlink("<DOES NOT EXIST>".as_ref(), &exe_path).unwrap();
// A broken symlink should still be resolved.
- assert!(resolve_exe(OsStr::new("exists.exe"), empty_paths, Some(temp.path().as_ref())).is_ok());
+ // Skip this check if not in CI and creating symlinks isn't possible.
+ let is_ci = env::var("CI").is_ok();
+ let result = symlink("<DOES NOT EXIST>".as_ref(), &exe_path);
+ if is_ci || result.is_ok() {
+ result.unwrap();
+ assert!(
+ resolve_exe(OsStr::new("exists.exe"), empty_paths, Some(temp.path().as_ref())).is_ok()
+ );
+ }
}
+++ /dev/null
-use crate::cell::UnsafeCell;
-use crate::sys::c;
-
-pub struct RWLock {
- inner: UnsafeCell<c::SRWLOCK>,
-}
-
-pub type MovableRWLock = RWLock;
-
-unsafe impl Send for RWLock {}
-unsafe impl Sync for RWLock {}
-
-impl RWLock {
- pub const fn new() -> RWLock {
- RWLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
- }
- #[inline]
- pub unsafe fn read(&self) {
- c::AcquireSRWLockShared(self.inner.get())
- }
- #[inline]
- pub unsafe fn try_read(&self) -> bool {
- c::TryAcquireSRWLockShared(self.inner.get()) != 0
- }
- #[inline]
- pub unsafe fn write(&self) {
- c::AcquireSRWLockExclusive(self.inner.get())
- }
- #[inline]
- pub unsafe fn try_write(&self) -> bool {
- c::TryAcquireSRWLockExclusive(self.inner.get()) != 0
- }
- #[inline]
- pub unsafe fn read_unlock(&self) {
- c::ReleaseSRWLockShared(self.inner.get())
- }
- #[inline]
- pub unsafe fn write_unlock(&self) {
- c::ReleaseSRWLockExclusive(self.inner.get())
- }
-
- #[inline]
- pub unsafe fn destroy(&self) {
- // ...
- }
-}
-use crate::sys::condvar as imp;
-use crate::sys::mutex as mutex_imp;
+use crate::sys::locks as imp;
use crate::sys_common::mutex::MovableMutex;
use crate::time::Duration;
mod check;
-type CondvarCheck = <mutex_imp::MovableMutex as check::CondvarCheck>::Check;
+type CondvarCheck = <imp::MovableMutex as check::CondvarCheck>::Check;
/// An OS-based condition variable.
pub struct Condvar {
use crate::sync::atomic::{AtomicUsize, Ordering};
-use crate::sys::mutex as mutex_imp;
+use crate::sys::locks as imp;
use crate::sys_common::mutex::MovableMutex;
pub trait CondvarCheck {
/// For boxed mutexes, a `Condvar` will check it's only ever used with the same
/// mutex, based on its (stable) address.
-impl CondvarCheck for Box<mutex_imp::Mutex> {
+impl CondvarCheck for Box<imp::Mutex> {
type Check = SameMutexCheck;
}
Self { addr: AtomicUsize::new(0) }
}
pub fn verify(&self, mutex: &MovableMutex) {
- let addr = mutex.raw() as *const mutex_imp::Mutex as usize;
+ let addr = mutex.raw() as *const imp::Mutex as usize;
match self.addr.compare_exchange(0, addr, Ordering::SeqCst, Ordering::SeqCst) {
Ok(_) => {} // Stored the address
Err(n) if n == addr => {} // Lost a race to store the same address
/// Unboxed mutexes may move, so `Condvar` can not require its address to stay
/// constant.
-impl CondvarCheck for mutex_imp::Mutex {
+impl CondvarCheck for imp::Mutex {
type Check = NoCheck;
}
-use crate::sys::mutex as imp;
+use crate::sys::locks as imp;
/// An OS-based mutual exclusion lock, meant for use in static variables.
///
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::pin::Pin;
-use crate::sys::mutex as sys;
+use crate::sys::locks as sys;
/// A re-entrant mutual exclusion
///
-use crate::sys::rwlock as imp;
+use crate::sys::locks as imp;
/// An OS-based reader-writer lock, meant for use in static variables.
///
/// ```
///
/// # Underlying System calls
-/// Currently, the following system calls are being used to get the current time using `now()`:
+///
+/// The following system calls are [currently] being used by `now()` to find out
+/// the current time:
///
/// | Platform | System call |
/// |-----------|----------------------------------------------------------------------|
/// | WASI | [__wasi_clock_time_get (Monotonic Clock)] |
/// | Windows | [QueryPerformanceCounter] |
///
+/// [currently]: crate::io#platform-specific-behavior
/// [QueryPerformanceCounter]: https://docs.microsoft.com/en-us/windows/win32/api/profileapi/nf-profileapi-queryperformancecounter
/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
/// For example, on Windows the time is represented in 100 nanosecond intervals whereas Linux
/// can represent nanosecond intervals.
///
-/// Currently, the following system calls are being used to get the current time using `now()`:
+/// The following system calls are [currently] being used by `now()` to find out
+/// the current time:
///
/// | Platform | System call |
/// |-----------|----------------------------------------------------------------------|
/// | WASI | [__wasi_clock_time_get (Realtime Clock)] |
/// | Windows | [GetSystemTimePreciseAsFileTime] / [GetSystemTimeAsFileTime] |
///
+/// [currently]: crate::io#platform-specific-behavior
/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time
/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode
/// [gettimeofday]: https://man7.org/linux/man-pages/man2/gettimeofday.2.html
println!("neon: {}", is_aarch64_feature_detected!("neon"));
println!("asimd: {}", is_aarch64_feature_detected!("asimd"));
println!("pmull: {}", is_aarch64_feature_detected!("pmull"));
- println!("fp: {}", is_aarch64_feature_detected!("fp"));
println!("fp16: {}", is_aarch64_feature_detected!("fp16"));
println!("sve: {}", is_aarch64_feature_detected!("sve"));
println!("crc: {}", is_aarch64_feature_detected!("crc"));
build.verbose = args.verbose
build.clean = args.clean
- # Read from `--config`, then `RUST_BOOTSTRAP_CONFIG`, then fallback to `config.toml` (if it
- # exists).
+ # Read from `--config`, then `RUST_BOOTSTRAP_CONFIG`, then `./config.toml`,
+ # then `config.toml` in the root directory.
toml_path = args.config or os.getenv('RUST_BOOTSTRAP_CONFIG')
- if not toml_path and os.path.exists('config.toml'):
+ using_default_path = toml_path is None
+ if using_default_path:
toml_path = 'config.toml'
-
- if toml_path:
if not os.path.exists(toml_path):
toml_path = os.path.join(build.rust_root, toml_path)
+ # Give a hard error if `--config` or `RUST_BOOTSTRAP_CONFIG` are set to a missing path,
+ # but not if `config.toml` hasn't been created.
+ if not using_default_path or os.path.exists(toml_path):
with open(toml_path) as config:
build.config_toml = config.read()
fn one<P: Into<PathBuf>>(path: P, kind: Kind) -> PathSet {
let mut set = BTreeSet::new();
- set.insert(TaskPath { path: path.into(), kind: Some(kind.into()) });
+ set.insert(TaskPath { path: path.into(), kind: Some(kind) });
PathSet::Set(set)
}
}
if !builder.config.exclude.is_empty() {
- eprintln!(
+ builder.verbose(&format!(
"{:?} not skipped for {:?} -- not in {:?}",
pathset, self.name, builder.config.exclude
- );
+ ));
}
false
}
// multiple aliases for the same job
pub fn paths(mut self, paths: &[&str]) -> Self {
self.paths.insert(PathSet::Set(
- paths
- .iter()
- .map(|p| TaskPath { path: p.into(), kind: Some(self.kind.into()) })
- .collect(),
+ paths.iter().map(|p| TaskPath { path: p.into(), kind: Some(self.kind) }).collect(),
));
self
}
}
pub fn suite_path(mut self, suite: &str) -> Self {
- self.paths
- .insert(PathSet::Suite(TaskPath { path: suite.into(), kind: Some(self.kind.into()) }));
+ self.paths.insert(PathSet::Suite(TaskPath { path: suite.into(), kind: Some(self.kind) }));
self
}
// the rustc_llvm cache. That will always work, even though it
// may mean that on the next non-check build we'll need to rebuild
// rustc_llvm. But if LLVM is stale, that'll be a tiny amount
- // of work comparitively, and we'd likely need to rebuild it anyway,
+ // of work comparatively, and we'd likely need to rebuild it anyway,
// so that's okay.
if crate::native::prebuilt_llvm_config(self, target).is_err() {
cargo.env("RUST_CHECK", "1");
// cargo.arg("-Zcheck-cfg-features");
// Enable cfg checking of rustc well-known names
- rustflags.arg("-Zunstable-options").arg("--check-cfg=names()");
+ rustflags
+ .arg("-Zunstable-options")
+ // Enable checking of well known names
+ .arg("--check-cfg=names()")
+ // Enable checking of well known values
+ .arg("--check-cfg=values()");
// Add extra cfg not defined in rustc
for (restricted_mode, name, values) in EXTRA_CHECK_CFGS {
if should_run.paths.iter().any(|s| s.has(path, Some(desc.kind)))
&& !desc.is_excluded(
self,
- &PathSet::Suite(TaskPath { path: path.clone(), kind: Some(desc.kind.into()) }),
+ &PathSet::Suite(TaskPath { path: path.clone(), kind: Some(desc.kind) }),
)
{
return true;
}
/// By default, cargo will pick up on various variables in the environment. However, bootstrap
- /// reuses those variables to pass additional flags to rustdoc, so by default they get overriden.
+ /// reuses those variables to pass additional flags to rustdoc, so by default they get overridden.
/// Explicitly add back any previous value in the environment.
///
/// `prefix` is usually `RUSTFLAGS` or `RUSTDOCFLAGS`.
}
if builder.config.rustc_parallel {
+ // keep in sync with `bootstrap/lib.rs:Build::rustc_features`
+ // `cfg` option for rustc, `features` option for cargo, for conditional compilation
cargo.rustflag("--cfg=parallel_compiler");
cargo.rustdocflag("--cfg=parallel_compiler");
}
for tool in LLVM_TOOLS {
let tool_exe = exe(tool, target_compiler.host);
let src_path = llvm_bin_dir.join(&tool_exe);
- // When using `donwload-ci-llvm`, some of the tools
+ // When using `download-ci-llvm`, some of the tools
// may not exist, so skip trying to copy them.
if src_path.exists() {
builder.copy(&src_path, &libdir_bin.join(&tool_exe));
pub use crate::flags::Subcommand;
use crate::flags::{Color, Flags};
use crate::util::{exe, t};
-use serde::Deserialize;
+use serde::{Deserialize, Deserializer};
macro_rules! check_ci_llvm {
($name:expr) => {
// We are using a decl macro instead of a derive proc macro here to reduce the compile time of
// rustbuild.
-macro_rules! derive_merge {
+macro_rules! define_config {
($(#[$attr:meta])* struct $name:ident {
- $($field:ident: $field_ty:ty,)*
+ $($field:ident: Option<$field_ty:ty> = $field_key:literal,)*
}) => {
$(#[$attr])*
struct $name {
- $($field: $field_ty,)*
+ $($field: Option<$field_ty>,)*
}
impl Merge for $name {
)*
}
}
+
+ // The following is a trimmed version of what serde_derive generates. All parts not relevant
+ // for toml deserialization have been removed. This reduces the binary size and improves
+ // compile time of rustbuild.
+ impl<'de> Deserialize<'de> for $name {
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ struct Field;
+ impl<'de> serde::de::Visitor<'de> for Field {
+ type Value = $name;
+ fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.write_str(concat!("struct ", stringify!($name)))
+ }
+
+ #[inline]
+ fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+ where
+ A: serde::de::MapAccess<'de>,
+ {
+ $(let mut $field: Option<$field_ty> = None;)*
+ while let Some(key) =
+ match serde::de::MapAccess::next_key::<String>(&mut map) {
+ Ok(val) => val,
+ Err(err) => {
+ return Err(err);
+ }
+ }
+ {
+ match &*key {
+ $($field_key => {
+ if $field.is_some() {
+ return Err(<A::Error as serde::de::Error>::duplicate_field(
+ $field_key,
+ ));
+ }
+ $field = match serde::de::MapAccess::next_value::<$field_ty>(
+ &mut map,
+ ) {
+ Ok(val) => Some(val),
+ Err(err) => {
+ return Err(err);
+ }
+ };
+ })*
+ key => {
+ return Err(serde::de::Error::unknown_field(key, FIELDS));
+ }
+ }
+ }
+ Ok($name { $($field),* })
+ }
+ }
+ const FIELDS: &'static [&'static str] = &[
+ $($field_key,)*
+ ];
+ Deserializer::deserialize_struct(
+ deserializer,
+ stringify!($name),
+ FIELDS,
+ Field,
+ )
+ }
+ }
}
}
-derive_merge! {
+define_config! {
/// TOML representation of various global build decisions.
- #[derive(Deserialize, Default)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
+ #[derive(Default)]
struct Build {
- build: Option<String>,
- host: Option<Vec<String>>,
- target: Option<Vec<String>>,
- build_dir: Option<String>,
- cargo: Option<String>,
- rustc: Option<String>,
- rustfmt: Option<PathBuf>,
- docs: Option<bool>,
- compiler_docs: Option<bool>,
- docs_minification: Option<bool>,
- submodules: Option<bool>,
- fast_submodules: Option<bool>,
- gdb: Option<String>,
- nodejs: Option<String>,
- npm: Option<String>,
- python: Option<String>,
- locked_deps: Option<bool>,
- vendor: Option<bool>,
- full_bootstrap: Option<bool>,
- extended: Option<bool>,
- tools: Option<HashSet<String>>,
- verbose: Option<usize>,
- sanitizers: Option<bool>,
- profiler: Option<bool>,
- cargo_native_static: Option<bool>,
- low_priority: Option<bool>,
- configure_args: Option<Vec<String>>,
- local_rebuild: Option<bool>,
- print_step_timings: Option<bool>,
- print_step_rusage: Option<bool>,
- check_stage: Option<u32>,
- doc_stage: Option<u32>,
- build_stage: Option<u32>,
- test_stage: Option<u32>,
- install_stage: Option<u32>,
- dist_stage: Option<u32>,
- bench_stage: Option<u32>,
- patch_binaries_for_nix: Option<bool>,
+ build: Option<String> = "build",
+ host: Option<Vec<String>> = "host",
+ target: Option<Vec<String>> = "target",
+ build_dir: Option<String> = "build-dir",
+ cargo: Option<String> = "cargo",
+ rustc: Option<String> = "rustc",
+ rustfmt: Option<PathBuf> = "rustfmt",
+ docs: Option<bool> = "docs",
+ compiler_docs: Option<bool> = "compiler-docs",
+ docs_minification: Option<bool> = "docs-minification",
+ submodules: Option<bool> = "submodules",
+ fast_submodules: Option<bool> = "fast-submodules",
+ gdb: Option<String> = "gdb",
+ nodejs: Option<String> = "nodejs",
+ npm: Option<String> = "npm",
+ python: Option<String> = "python",
+ locked_deps: Option<bool> = "locked-deps",
+ vendor: Option<bool> = "vendor",
+ full_bootstrap: Option<bool> = "full-bootstrap",
+ extended: Option<bool> = "extended",
+ tools: Option<HashSet<String>> = "tools",
+ verbose: Option<usize> = "verbose",
+ sanitizers: Option<bool> = "sanitizers",
+ profiler: Option<bool> = "profiler",
+ cargo_native_static: Option<bool> = "cargo-native-static",
+ low_priority: Option<bool> = "low-priority",
+ configure_args: Option<Vec<String>> = "configure-args",
+ local_rebuild: Option<bool> = "local-rebuild",
+ print_step_timings: Option<bool> = "print-step-timings",
+ print_step_rusage: Option<bool> = "print-step-rusage",
+ check_stage: Option<u32> = "check-stage",
+ doc_stage: Option<u32> = "doc-stage",
+ build_stage: Option<u32> = "build-stage",
+ test_stage: Option<u32> = "test-stage",
+ install_stage: Option<u32> = "install-stage",
+ dist_stage: Option<u32> = "dist-stage",
+ bench_stage: Option<u32> = "bench-stage",
+ patch_binaries_for_nix: Option<bool> = "patch-binaries-for-nix",
}
}
-derive_merge! {
+define_config! {
/// TOML representation of various global install decisions.
- #[derive(Deserialize)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
struct Install {
- prefix: Option<String>,
- sysconfdir: Option<String>,
- docdir: Option<String>,
- bindir: Option<String>,
- libdir: Option<String>,
- mandir: Option<String>,
- datadir: Option<String>,
+ prefix: Option<String> = "prefix",
+ sysconfdir: Option<String> = "sysconfdir",
+ docdir: Option<String> = "docdir",
+ bindir: Option<String> = "bindir",
+ libdir: Option<String> = "libdir",
+ mandir: Option<String> = "mandir",
+ datadir: Option<String> = "datadir",
}
}
-derive_merge! {
+define_config! {
/// TOML representation of how the LLVM build is configured.
- #[derive(Deserialize)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
struct Llvm {
- skip_rebuild: Option<bool>,
- optimize: Option<bool>,
- thin_lto: Option<bool>,
- release_debuginfo: Option<bool>,
- assertions: Option<bool>,
- tests: Option<bool>,
- plugins: Option<bool>,
- ccache: Option<StringOrBool>,
- version_check: Option<bool>,
- static_libstdcpp: Option<bool>,
- ninja: Option<bool>,
- targets: Option<String>,
- experimental_targets: Option<String>,
- link_jobs: Option<u32>,
- link_shared: Option<bool>,
- version_suffix: Option<String>,
- clang_cl: Option<String>,
- cflags: Option<String>,
- cxxflags: Option<String>,
- ldflags: Option<String>,
- use_libcxx: Option<bool>,
- use_linker: Option<String>,
- allow_old_toolchain: Option<bool>,
- polly: Option<bool>,
- clang: Option<bool>,
- download_ci_llvm: Option<StringOrBool>,
- build_config: Option<HashMap<String, String>>,
+ skip_rebuild: Option<bool> = "skip-rebuild",
+ optimize: Option<bool> = "optimize",
+ thin_lto: Option<bool> = "thin-lto",
+ release_debuginfo: Option<bool> = "release-debuginfo",
+ assertions: Option<bool> = "assertions",
+ tests: Option<bool> = "tests",
+ plugins: Option<bool> = "plugins",
+ ccache: Option<StringOrBool> = "ccache",
+ version_check: Option<bool> = "version-check",
+ static_libstdcpp: Option<bool> = "static-libstdcpp",
+ ninja: Option<bool> = "ninja",
+ targets: Option<String> = "targets",
+ experimental_targets: Option<String> = "experimental-targets",
+ link_jobs: Option<u32> = "link-jobs",
+ link_shared: Option<bool> = "link-shared",
+ version_suffix: Option<String> = "version-suffix",
+ clang_cl: Option<String> = "clang-cl",
+ cflags: Option<String> = "cflags",
+ cxxflags: Option<String> = "cxxflags",
+ ldflags: Option<String> = "ldflags",
+ use_libcxx: Option<bool> = "use-libcxx",
+ use_linker: Option<String> = "use-linker",
+ allow_old_toolchain: Option<bool> = "allow-old-toolchain",
+ polly: Option<bool> = "polly",
+ clang: Option<bool> = "clang",
+ download_ci_llvm: Option<StringOrBool> = "download-ci-llvm",
+ build_config: Option<HashMap<String, String>> = "build-config",
}
}
-derive_merge! {
- #[derive(Deserialize)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
+define_config! {
struct Dist {
- sign_folder: Option<String>,
- gpg_password_file: Option<String>,
- upload_addr: Option<String>,
- src_tarball: Option<bool>,
- missing_tools: Option<bool>,
- compression_formats: Option<Vec<String>>,
+ sign_folder: Option<String> = "sign-folder",
+ gpg_password_file: Option<String> = "gpg-password-file",
+ upload_addr: Option<String> = "upload-addr",
+ src_tarball: Option<bool> = "src-tarball",
+ missing_tools: Option<bool> = "missing-tools",
+ compression_formats: Option<Vec<String>> = "compression-formats",
}
}
}
}
-derive_merge! {
+define_config! {
/// TOML representation of how the Rust build is configured.
- #[derive(Deserialize)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
struct Rust {
- optimize: Option<bool>,
- debug: Option<bool>,
- codegen_units: Option<u32>,
- codegen_units_std: Option<u32>,
- debug_assertions: Option<bool>,
- debug_assertions_std: Option<bool>,
- overflow_checks: Option<bool>,
- overflow_checks_std: Option<bool>,
- debug_logging: Option<bool>,
- debuginfo_level: Option<u32>,
- debuginfo_level_rustc: Option<u32>,
- debuginfo_level_std: Option<u32>,
- debuginfo_level_tools: Option<u32>,
- debuginfo_level_tests: Option<u32>,
- run_dsymutil: Option<bool>,
- backtrace: Option<bool>,
- incremental: Option<bool>,
- parallel_compiler: Option<bool>,
- default_linker: Option<String>,
- channel: Option<String>,
- description: Option<String>,
- musl_root: Option<String>,
- rpath: Option<bool>,
- verbose_tests: Option<bool>,
- optimize_tests: Option<bool>,
- codegen_tests: Option<bool>,
- ignore_git: Option<bool>,
- dist_src: Option<bool>,
- save_toolstates: Option<String>,
- codegen_backends: Option<Vec<String>>,
- lld: Option<bool>,
- use_lld: Option<bool>,
- llvm_tools: Option<bool>,
- deny_warnings: Option<bool>,
- backtrace_on_ice: Option<bool>,
- verify_llvm_ir: Option<bool>,
- thin_lto_import_instr_limit: Option<u32>,
- remap_debuginfo: Option<bool>,
- jemalloc: Option<bool>,
- test_compare_mode: Option<bool>,
- llvm_libunwind: Option<String>,
- control_flow_guard: Option<bool>,
- new_symbol_mangling: Option<bool>,
- profile_generate: Option<String>,
- profile_use: Option<String>,
+ optimize: Option<bool> = "optimize",
+ debug: Option<bool> = "debug",
+ codegen_units: Option<u32> = "codegen-units",
+ codegen_units_std: Option<u32> = "codegen-units-std",
+ debug_assertions: Option<bool> = "debug-assertions",
+ debug_assertions_std: Option<bool> = "debug-assertions-std",
+ overflow_checks: Option<bool> = "overflow-checks",
+ overflow_checks_std: Option<bool> = "overflow-checks-std",
+ debug_logging: Option<bool> = "debug-logging",
+ debuginfo_level: Option<u32> = "debuginfo-level",
+ debuginfo_level_rustc: Option<u32> = "debuginfo-level-rustc",
+ debuginfo_level_std: Option<u32> = "debuginfo-level-std",
+ debuginfo_level_tools: Option<u32> = "debuginfo-level-tools",
+ debuginfo_level_tests: Option<u32> = "debuginfo-level-tests",
+ run_dsymutil: Option<bool> = "run-dsymutil",
+ backtrace: Option<bool> = "backtrace",
+ incremental: Option<bool> = "incremental",
+ parallel_compiler: Option<bool> = "parallel-compiler",
+ default_linker: Option<String> = "default-linker",
+ channel: Option<String> = "channel",
+ description: Option<String> = "description",
+ musl_root: Option<String> = "musl-root",
+ rpath: Option<bool> = "rpath",
+ verbose_tests: Option<bool> = "verbose-tests",
+ optimize_tests: Option<bool> = "optimize-tests",
+ codegen_tests: Option<bool> = "codegen-tests",
+ ignore_git: Option<bool> = "ignore-git",
+ dist_src: Option<bool> = "dist-src",
+ save_toolstates: Option<String> = "save-toolstates",
+ codegen_backends: Option<Vec<String>> = "codegen-backends",
+ lld: Option<bool> = "lld",
+ use_lld: Option<bool> = "use-lld",
+ llvm_tools: Option<bool> = "llvm-tools",
+ deny_warnings: Option<bool> = "deny-warnings",
+ backtrace_on_ice: Option<bool> = "backtrace-on-ice",
+ verify_llvm_ir: Option<bool> = "verify-llvm-ir",
+ thin_lto_import_instr_limit: Option<u32> = "thin-lto-import-instr-limit",
+ remap_debuginfo: Option<bool> = "remap-debuginfo",
+ jemalloc: Option<bool> = "jemalloc",
+ test_compare_mode: Option<bool> = "test-compare-mode",
+ llvm_libunwind: Option<String> = "llvm-libunwind",
+ control_flow_guard: Option<bool> = "control-flow-guard",
+ new_symbol_mangling: Option<bool> = "new-symbol-mangling",
+ profile_generate: Option<String> = "profile-generate",
+ profile_use: Option<String> = "profile-use",
// ignored; this is set from an env var set by bootstrap.py
- download_rustc: Option<StringOrBool>,
+ download_rustc: Option<StringOrBool> = "download-rustc",
}
}
-derive_merge! {
+define_config! {
/// TOML representation of how each build target is configured.
- #[derive(Deserialize)]
- #[serde(deny_unknown_fields, rename_all = "kebab-case")]
struct TomlTarget {
- cc: Option<String>,
- cxx: Option<String>,
- ar: Option<String>,
- ranlib: Option<String>,
- default_linker: Option<PathBuf>,
- linker: Option<String>,
- llvm_config: Option<String>,
- llvm_filecheck: Option<String>,
- android_ndk: Option<String>,
- sanitizers: Option<bool>,
- profiler: Option<bool>,
- crt_static: Option<bool>,
- musl_root: Option<String>,
- musl_libdir: Option<String>,
- wasi_root: Option<String>,
- qemu_rootfs: Option<String>,
- no_std: Option<bool>,
+ cc: Option<String> = "cc",
+ cxx: Option<String> = "cxx",
+ ar: Option<String> = "ar",
+ ranlib: Option<String> = "ranlib",
+ default_linker: Option<PathBuf> = "default-linker",
+ linker: Option<String> = "linker",
+ llvm_config: Option<String> = "llvm-config",
+ llvm_filecheck: Option<String> = "llvm-filecheck",
+ android_ndk: Option<String> = "android-ndk",
+ sanitizers: Option<bool> = "sanitizers",
+ profiler: Option<bool> = "profiler",
+ crt_static: Option<bool> = "crt-static",
+ musl_root: Option<String> = "musl-root",
+ musl_libdir: Option<String> = "musl-libdir",
+ wasi_root: Option<String> = "wasi-root",
+ qemu_rootfs: Option<String> = "qemu-rootfs",
+ no_std: Option<bool> = "no-std",
}
}
config.llvm_optimize = true;
config.ninja_in_file = true;
config.llvm_version_check = true;
+ config.llvm_static_stdcpp = true;
config.backtrace = true;
config.rust_optimize = true;
config.rust_optimize_tests = true;
let get_toml = |file: &Path| {
use std::process;
- let contents = t!(fs::read_to_string(file), "`include` config not found");
- match toml::from_str(&contents) {
+ let contents =
+ t!(fs::read_to_string(file), format!("config file {} not found", file.display()));
+ // Deserialize to Value and then TomlConfig to prevent the Deserialize impl of
+ // TomlConfig and sub types from being monomorphized 5x by toml.
+ match toml::from_str(&contents)
+ .and_then(|table: toml::Value| TomlConfig::deserialize(table))
+ {
Ok(table) => table,
Err(err) => {
println!("failed to parse TOML configuration '{}': {}", file.display(), err);
}
};
- // check --config first, then `$RUST_BOOTSTRAP_CONFIG` first, then `config.toml`
+ // Read from `--config`, then `RUST_BOOTSTRAP_CONFIG`, then `./config.toml`, then `config.toml` in the root directory.
let toml_path = flags
.config
.clone()
- .or_else(|| env::var_os("RUST_BOOTSTRAP_CONFIG").map(PathBuf::from))
- .unwrap_or_else(|| PathBuf::from("config.toml"));
- let mut toml =
- if toml_path.exists() { get_toml(&toml_path) } else { TomlConfig::default() };
+ .or_else(|| env::var_os("RUST_BOOTSTRAP_CONFIG").map(PathBuf::from));
+ let using_default_path = toml_path.is_none();
+ let mut toml_path = toml_path.unwrap_or_else(|| PathBuf::from("config.toml"));
+ if using_default_path && !toml_path.exists() {
+ toml_path = config.src.join(toml_path);
+ }
+
+ // Give a hard error if `--config` or `RUST_BOOTSTRAP_CONFIG` are set to a missing path,
+ // but not if `config.toml` hasn't been created.
+ let mut toml = if !using_default_path || toml_path.exists() {
+ get_toml(&toml_path)
+ } else {
+ TomlConfig::default()
+ };
if let Some(include) = &toml.profile {
let mut include_path = config.src.clone();
// not needed and contains symlinks which rustup currently
// chokes on when unpacking.
"library/backtrace/crates",
+ // these are 30MB combined and aren't necessary for building
+ // the standard library.
+ "library/stdarch/crates/Cargo.toml",
+ "library/stdarch/crates/stdarch-verify",
+ "library/stdarch/crates/intrinsic-test",
],
&dst_src,
);
}
}
-/// Tarball containing a prebuilt version of the build-manifest tool, intented to be used by the
+/// Tarball containing a prebuilt version of the build-manifest tool, intended to be used by the
/// release process to avoid cloning the monorepo and building stuff.
///
/// Should not be considered stable by end users.
let stage = self.stage;
let target = self.target;
builder.info(&format!("Documenting stage{} std ({})", stage, target));
+ if builder.no_std(target) == Some(true) {
+ panic!(
+ "building std documentation for no_std target {target} is not supported\n\
+ Set `docs = false` in the config to disable documentation."
+ );
+ }
let out = builder.doc_out(target);
t!(fs::create_dir_all(&out));
let compiler = builder.compiler(stage, builder.config.build);
(Some(Mode::Std), "no_global_oom_handling", None),
(Some(Mode::Std), "freebsd12", None),
(Some(Mode::Std), "backtrace_in_libstd", None),
+ /* Extra values not defined in the built-in targets yet, but used in std */
+ (Some(Mode::Std), "target_env", Some(&["libnx"])),
+ (Some(Mode::Std), "target_os", Some(&["watchos"])),
+ (
+ Some(Mode::Std),
+ "target_arch",
+ Some(&["asmjs", "spirv", "nvptx", "nvptx64", "le32", "xtensa"]),
+ ),
+ /* Extra names used by dependencies */
// FIXME: Used by rustfmt is their test but is invalid (neither cargo nor bootstrap ever set
// this config) should probably by removed or use a allow attribute.
(Some(Mode::ToolRustc), "release", None),
/// Gets the space-separated set of activated features for the compiler.
fn rustc_features(&self, kind: Kind) -> String {
- let mut features = String::new();
+ let mut features = vec![];
if self.config.jemalloc {
- features.push_str("jemalloc");
+ features.push("jemalloc");
}
if self.config.llvm_enabled() || kind == Kind::Check {
- features.push_str(" llvm");
+ features.push("llvm");
+ }
+ // keep in sync with `bootstrap/compile.rs:rustc_cargo_env`
+ if self.config.rustc_parallel {
+ features.push("rustc_use_parallel_compiler");
}
// If debug logging is on, then we want the default for tracing:
// if its unset, if debug_assertions is on, then debug_logging will also be on
// as well as tracing *ignoring* this feature when debug_assertions is on
if !self.config.rust_debug_logging {
- features.push_str(" max_level_info");
+ features.push("max_level_info");
}
- features
+ features.join(" ")
}
/// Component directory that Cargo will produce output into (e.g.
// For distribution we want the LLVM tools to be *statically* linked to libstdc++.
// We also do this if the user explicitly requested static libstdc++.
- if builder.config.llvm_tools_enabled || builder.config.llvm_static_stdcpp {
+ if builder.config.llvm_static_stdcpp {
if !target.contains("msvc") && !target.contains("netbsd") {
- if target.contains("apple") {
+ if target.contains("apple") || target.contains("windows") {
ldflags.push_all("-static-libstdc++");
} else {
ldflags.push_all("-Wl,-Bsymbolic -static-libstdc++");
}
fn stage_dir_exists(stage_path: &str) -> bool {
- match fs::create_dir(&stage_path[..]) {
+ match fs::create_dir(&stage_path) {
Ok(_) => true,
- Err(_) => Path::new(&stage_path[..]).exists(),
+ Err(_) => Path::new(&stage_path).exists(),
}
}
return;
}
- if try_link_toolchain(&stage_path[..]) {
+ if try_link_toolchain(&stage_path) {
println!(
"Added `stage1` rustup toolchain; try `cargo +stage1 build` on a separate rust project to run a newly-built toolchain"
);
println!(
"To manually link stage 1 build to `stage1` toolchain, run:\n
`rustup toolchain link stage1 {}`",
- &stage_path[..]
+ &stage_path
);
}
}
fn try_link_toolchain(stage_path: &str) -> bool {
Command::new("rustup")
.stdout(std::process::Stdio::null())
- .args(&["toolchain", "link", "stage1", &stage_path[..]])
+ .args(&["toolchain", "link", "stage1", &stage_path])
.output()
.map_or(false, |output| output.status.success())
}
}
}
-fn check_if_browser_ui_test_is_installed_global(npm: &Path, global: bool) -> bool {
+fn get_browser_ui_test_version_inner(npm: &Path, global: bool) -> Option<String> {
let mut command = Command::new(&npm);
- command.arg("list").arg("--depth=0");
+ command.arg("list").arg("--parseable").arg("--long").arg("--depth=0");
if global {
command.arg("--global");
}
.output()
.map(|output| String::from_utf8_lossy(&output.stdout).into_owned())
.unwrap_or(String::new());
- lines.contains(&" browser-ui-test@")
+ lines.lines().find_map(|l| l.split(":browser-ui-test@").skip(1).next()).map(|v| v.to_owned())
}
-fn check_if_browser_ui_test_is_installed(npm: &Path) -> bool {
- check_if_browser_ui_test_is_installed_global(npm, false)
- || check_if_browser_ui_test_is_installed_global(npm, true)
+fn get_browser_ui_test_version(npm: &Path) -> Option<String> {
+ get_browser_ui_test_version_inner(npm, false)
+ .or_else(|| get_browser_ui_test_version_inner(npm, true))
+}
+
+fn compare_browser_ui_test_version(installed_version: &str, src: &Path) {
+ match fs::read_to_string(
+ src.join("src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version"),
+ ) {
+ Ok(v) => {
+ if v.trim() != installed_version {
+ eprintln!(
+ "⚠️ Installed version of browser-ui-test (`{}`) is different than the \
+ one used in the CI (`{}`)",
+ installed_version, v
+ );
+ }
+ }
+ Err(e) => eprintln!("Couldn't find the CI browser-ui-test version: {:?}", e),
+ }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
.config
.npm
.as_ref()
- .map(|p| check_if_browser_ui_test_is_installed(p))
+ .map(|p| get_browser_ui_test_version(p).is_some())
.unwrap_or(false)
}))
}
// The goal here is to check if the necessary packages are installed, and if not, we
// panic.
- if !check_if_browser_ui_test_is_installed(&npm) {
- eprintln!(
- "error: rustdoc-gui test suite cannot be run because npm `browser-ui-test` \
- dependency is missing",
- );
- eprintln!(
- "If you want to install the `{0}` dependency, run `npm install {0}`",
- "browser-ui-test",
- );
- panic!("Cannot run rustdoc-gui tests");
+ match get_browser_ui_test_version(&npm) {
+ Some(version) => {
+ // We also check the version currently used in CI and emit a warning if it's not the
+ // same one.
+ compare_browser_ui_test_version(&version, &builder.build.src);
+ }
+ None => {
+ eprintln!(
+ "error: rustdoc-gui test suite cannot be run because npm `browser-ui-test` \
+ dependency is missing",
+ );
+ eprintln!(
+ "If you want to install the `{0}` dependency, run `npm install {0}`",
+ "browser-ui-test",
+ );
+ panic!("Cannot run rustdoc-gui tests");
+ }
}
let out_dir = builder.test_out(self.target).join("rustdoc-gui");
add_dylib_path(
vec![
PathBuf::from(&builder.sysroot_libdir(compiler, compiler.host)),
- PathBuf::from(builder.rustc_libdir(compiler)),
+ builder.rustc_libdir(compiler),
],
&mut cmd,
);
dpkg --add-architecture $APT_ARCH
apt-get update
apt-get download $(apt-cache depends --recurse --no-replaces \
- libc:$APT_ARCH \
- libm-dev:$APT_ARCH \
- libpthread:$APT_ARCH \
- libresolv:$APT_ARCH \
- librt:$APT_ARCH \
- libsocket:$APT_ARCH \
- system-crt:$APT_ARCH \
- system-header:$APT_ARCH \
+ libc:$APT_ARCH \
+ liblgrp-dev:$APT_ARCH \
+ liblgrp:$APT_ARCH \
+ libm-dev:$APT_ARCH \
+ libpthread:$APT_ARCH \
+ libresolv:$APT_ARCH \
+ librt:$APT_ARCH \
+ libsendfile-dev:$APT_ARCH \
+ libsendfile:$APT_ARCH \
+ libsocket:$APT_ARCH \
+ system-crt:$APT_ARCH \
+ system-header:$APT_ARCH \
| grep "^\w")
for deb in *$APT_ARCH.deb; do
COPY host-x86_64/x86_64-gnu-tools/checktools.sh /tmp/
RUN curl -sL https://nodejs.org/dist/v14.4.0/node-v14.4.0-linux-x64.tar.xz | tar -xJ
-ENV PATH="/node-v14.4.0-linux-x64/bin:${PATH}"
+ENV NODE_FOLDER=/node-v14.4.0-linux-x64/bin
+ENV PATH="$NODE_FOLDER:${PATH}"
+
+COPY host-x86_64/x86_64-gnu-tools/browser-ui-test.version /tmp/
# For now, we need to use `--unsafe-perm=true` to go around an issue when npm tries
# to create a new folder. For reference:
# https://github.com/puppeteer/puppeteer/issues/375
#
# We also specify the version in case we need to update it to go around cache limitations.
-RUN npm install -g browser-ui-test@0.8.0 --unsafe-perm=true
+#
+# The `browser-ui-test.version` file is also used by bootstrap to emit warnings in case
+# the local version of the package is different from the one used by the CI.
+RUN npm install -g browser-ui-test@$(head -n 1 /tmp/browser-ui-test.version) --unsafe-perm=true
ENV RUST_CONFIGURE_ARGS \
--build=x86_64-unknown-linux-gnu \
--- /dev/null
+0.8.4
\ No newline at end of file
set +x
on_err="
echo ERROR: An error was encountered with the build.
-cat /tmp/build.log
+cat /tmp/cmake_build.log
exit 1
"
trap "$on_err" ERR
bash -c "while true; do sleep 30; echo \$(date) - building ...; done" &
PING_LOOP_PID=$!
- "$@" &> /tmp/build.log
+ "$@" &> /tmp/cmake_build.log
trap - ERR
kill $PING_LOOP_PID
- rm /tmp/build.log
+ rm /tmp/cmake_build.log
set -x
}
set -euxo pipefail
+# Compile several crates to gather execution PGO profiles.
+# $1 => builds (Debug, Opt)
+# $2 => runs (Full, IncrFull, All)
+# $3 => crates (syn, cargo, ...)
+gather_profiles () {
+ cd /checkout/obj
+
+ # Compile libcore, both in opt-level=0 and opt-level=3
+ RUSTC_BOOTSTRAP=1 ./build/$PGO_HOST/stage2/bin/rustc \
+ --edition=2021 --crate-type=lib ../library/core/src/lib.rs
+ RUSTC_BOOTSTRAP=1 ./build/$PGO_HOST/stage2/bin/rustc \
+ --edition=2021 --crate-type=lib -Copt-level=3 ../library/core/src/lib.rs
+
+ cd rustc-perf
+
+ # Run rustc-perf benchmarks
+ # Benchmark using profile_local with eprintln, which essentially just means
+ # don't actually benchmark -- just make sure we run rustc a bunch of times.
+ RUST_LOG=collector=debug \
+ RUSTC=/checkout/obj/build/$PGO_HOST/stage0/bin/rustc \
+ RUSTC_BOOTSTRAP=1 \
+ /checkout/obj/build/$PGO_HOST/stage0/bin/cargo run -p collector --bin collector -- \
+ profile_local \
+ eprintln \
+ /checkout/obj/build/$PGO_HOST/stage2/bin/rustc \
+ Test \
+ --builds $1 \
+ --cargo /checkout/obj/build/$PGO_HOST/stage0/bin/cargo \
+ --runs $2 \
+ --include $3
+
+ cd /checkout/obj
+}
+
rm -rf /tmp/rustc-pgo
# We collect LLVM profiling information and rustc profiling information in
RUSTC_BOOTSTRAP=1 ./build/$PGO_HOST/stage2/bin/rustc \
--edition=2021 --crate-type=lib -Copt-level=3 ../library/core/src/lib.rs
+# Compile rustc perf
+cp -r /tmp/rustc-perf ./
+chown -R $(whoami): ./rustc-perf
+cd rustc-perf
+
+# Build the collector ahead of time, which is needed to make sure the rustc-fake
+# binary used by the collector is present.
+RUSTC=/checkout/obj/build/$PGO_HOST/stage0/bin/rustc \
+RUSTC_BOOTSTRAP=1 \
+/checkout/obj/build/$PGO_HOST/stage0/bin/cargo build -p collector
+
+gather_profiles "Debug,Opt" "Full" "syn,cargo,serde,ripgrep,regex,clap-rs,hyper-2"
+
# Merge the profile data we gathered for LLVM
# Note that this uses the profdata from the clang we used to build LLVM,
# which likely has a different version than our in-tree clang.
--stage 2 library/std \
--rust-profile-generate=/tmp/rustc-pgo
-# Profile libcore compilation in opt-level=0 and opt-level=3
-RUSTC_BOOTSTRAP=1 ./build/$PGO_HOST/stage2/bin/rustc \
- --edition=2021 --crate-type=lib ../library/core/src/lib.rs
-RUSTC_BOOTSTRAP=1 ./build/$PGO_HOST/stage2/bin/rustc \
- --edition=2021 --crate-type=lib -Copt-level=3 ../library/core/src/lib.rs
-
-cp -r /tmp/rustc-perf ./
-chown -R $(whoami): ./rustc-perf
-cd rustc-perf
-
-# Build the collector ahead of time, which is needed to make sure the rustc-fake
-# binary used by the collector is present.
-RUSTC=/checkout/obj/build/$PGO_HOST/stage0/bin/rustc \
-RUSTC_BOOTSTRAP=1 \
-/checkout/obj/build/$PGO_HOST/stage0/bin/cargo build -p collector
-
-# benchmark using profile_local with eprintln, which essentially just means
-# don't actually benchmark -- just make sure we run rustc a bunch of times.
-RUST_LOG=collector=debug \
-RUSTC=/checkout/obj/build/$PGO_HOST/stage0/bin/rustc \
-RUSTC_BOOTSTRAP=1 \
-/checkout/obj/build/$PGO_HOST/stage0/bin/cargo run -p collector --bin collector -- \
- profile_local \
- eprintln \
- /checkout/obj/build/$PGO_HOST/stage2/bin/rustc \
- Test \
- --builds Check,Debug,Opt \
- --cargo /checkout/obj/build/$PGO_HOST/stage0/bin/cargo \
- --runs All \
- --include externs,ctfe-stress-4,inflate,cargo,token-stream-stress,match-stress-enum
-
-cd /checkout/obj
+gather_profiles "Check,Debug,Opt" "All" \
+ "externs,ctfe-stress-4,inflate,cargo,token-stream-stress,match-stress-enum"
# Merge the profile data we gathered
./build/$PGO_HOST/llvm/bin/llvm-profdata \
elif ! (git diff --quiet "$BASE_COMMIT" -- \
src/test/rustdoc-gui \
src/librustdoc \
+ src/ci/docker/host-x86_64/x86_64-gnu-tools/Dockerfile \
+ src/ci/docker/host-x86_64/x86_64-gnu-tools/browser-ui-test.version \
src/tools/rustdoc-gui); then
# There was a change in either rustdoc or in its GUI tests.
echo "Rustdoc was updated"
-Subproject commit 3f255ed40b8c82a0434088568fbed270dc31bf00
+Subproject commit ea90bbaf53ba64ef4e2da9ac2352b298aec6bec8
-Subproject commit d5fc1bce3f8eb398f9c25f1b15e0257d7537cd41
+Subproject commit a6de8b6e3ea5d4f0de8b7b9a7e5c1405dc2c2ddb
-Subproject commit f6d6126fc96ecf4a7f7d22da330df9506293b0d0
+Subproject commit 11f1165e8a2f5840467e748c8108dc53c948ee9a
-Subproject commit 9d289c05fce7254b99c6a0d354d84abb7fd7a032
+Subproject commit c97d14fa6fed0baa9255432b8a93cb70614f80e3
-Subproject commit 2a928483a20bb306a7399c0468234db90d89afb5
+Subproject commit ec954f35eedf592cd173b21c05a7f80a65b61d8a
-Subproject commit 32f2a5b4e7545318846185198542230170dd8a42
+Subproject commit 155126b1d2e2cb01ddb1d7ba9489b90d7cd173ad
- [aarch64-apple-ios-sim](platform-support/aarch64-apple-ios-sim.md)
- [armv7-unknown-linux-uclibceabi](platform-support/armv7-unknown-linux-uclibceabi.md)
- [armv7-unknown-linux-uclibceabihf](platform-support/armv7-unknown-linux-uclibceabihf.md)
- - [aarch64-unknown-none-hermitkernel](platform-support/aarch64-unknown-none-hermitkernel.md)
- [\*-kmc-solid_\*](platform-support/kmc-solid.md)
- [*-unknown-openbsd](platform-support/openbsd.md)
- [x86_64-unknown-none](platform-support/x86_64-unknown-none.md)
- `llvm-ir` — Generates a file containing [LLVM IR]. The default output
filename is `CRATE_NAME.ll`.
- `metadata` — Generates a file containing metadata about the crate. The
- default output filename is `CRATE_NAME.rmeta`.
+ default output filename is `libCRATE_NAME.rmeta`.
- `mir` — Generates a file containing rustc's mid-level intermediate
representation. The default output filename is `CRATE_NAME.mir`.
- `obj` — Generates a native object file. The default output filename is
[`aarch64-kmc-solid_asp3`](platform-support/kmc-solid.md) | ✓ | | ARM64 SOLID with TOPPERS/ASP3
`aarch64-unknown-freebsd` | ✓ | ✓ | ARM64 FreeBSD
`aarch64-unknown-hermit` | ✓ | | ARM64 HermitCore
-[`aarch64-unknown-none-hermitkernel`](platform-support/aarch64-unknown-none-hermitkernel.md) | * | | ARM64 HermitCore kernel
`aarch64-unknown-uefi` | * | | ARM64 UEFI
`aarch64-unknown-linux-gnu_ilp32` | ✓ | ✓ | ARM64 Linux (ILP32 ABI)
`aarch64-unknown-netbsd` | ✓ | ✓ |
`powerpc64le-unknown-linux-musl` | ? | |
`riscv32gc-unknown-linux-gnu` | | | RISC-V Linux (kernel 5.4, glibc 2.33)
`riscv32gc-unknown-linux-musl` | | | RISC-V Linux (kernel 5.4, musl + RISCV32 support patches)
+`riscv32im-unknown-none-elf` | * | | Bare RISC-V (RV32IM ISA)
`riscv32imc-esp-espidf` | ✓ | | RISC-V ESP-IDF
`riscv64gc-unknown-freebsd` | | | RISC-V FreeBSD
`riscv64gc-unknown-linux-musl` | | | RISC-V Linux (kernel 4.20, musl 1.2.0)
`x86_64-unknown-hermit` | ✓ | | HermitCore
`x86_64-unknown-l4re-uclibc` | ? | |
[`x86_64-unknown-none`](platform-support/x86_64-unknown-none.md) | * | | Freestanding/bare-metal x86_64, softfloat
-`x86_64-unknown-none-hermitkernel` | * | | HermitCore kernel
`x86_64-unknown-none-linuxkernel` | * | | Linux kernel modules
[`x86_64-unknown-openbsd`](platform-support/openbsd.md) | ✓ | ✓ | 64-bit OpenBSD
`x86_64-unknown-uefi` | * | | 64-bit UEFI
+++ /dev/null
-# `aarch64-unknown-none-hermitkernel`
-
-**Tier: 3**
-
-Required to build the kernel for [HermitCore](https://github.com/hermitcore/hermit-playground)
-or [RustyHermit](https://github.com/hermitcore/rusty-hermit).
-The result is a bare-metal aarch64 binary in ELF format.
-
-## Target maintainers
-
-- Stefan Lankes, https://github.com/stlankes
-
-## Requirements
-
-This target is cross-compiled. There is no support for `std`, but the
-library operating system provides a simple allocator to use `alloc`.
-
-By default, Rust code generated for this target does not use any vector or
-floating-point registers. This allows the generated code to build the library
-operaring system, which may need to avoid the use of such
-registers or which may have special considerations about the use of such
-registers (e.g. saving and restoring them to avoid breaking userspace code
-using the same registers). In contrast to `aarch64-unknown-none-softfloat`,
-the target is completly relocatable, which is a required feature of RustyHermit.
-
-By default, code generated with this target should run on any `aarch64`
-hardware; enabling additional target features may raise this baseline.
-On `aarch64-unknown-none-hermitkernel`, `extern "C"` uses the [standard System V calling
-convention](https://github.com/ARM-software/abi-aa/releases/download/2021Q3/sysvabi64.pdf),
-without red zones.
-
-This target generated binaries in the ELF format.
-
-## Building the target
-
-Typical you should not use the target directly. The target `aarch64-unknown-hermit`
-builds the _user space_ of RustyHermit and supports red zones and floating-point
-operations.
-To build and link the kernel to the application, the crate
-[hermit-sys](https://github.com/hermitcore/rusty-hermit/tree/master/hermit-sys)
-should be used by adding the following lines to the `Cargo.toml` file of
-your application.
-
-```toml
-[target.'cfg(target_os = "hermit")'.dependencies]
-hermit-sys = "0.1.*"
-```
-
-The crate `hermit-sys` uses the target `aarch64-unknown-none-hermitkernel`
-to build the kernel.
-
-## Building Rust programs
-
-Rust does not yet ship pre-compiled artifacts for this target. To compile for
-this target, you need to build the crate `hermit-sys` (see
-"Building the target" above).
-
-## Testing
-
-As `aarch64-unknown-none-hermitkernel` does not support `std`
-and does not support running any Rust testsuite.
-
-## Cross-compilation toolchains and C code
-
-If you want to compile C code along with Rust you will need an
-appropriate `aarch64` toolchain.
-
-Rust *may* be able to use an `aarch64-linux-gnu-` toolchain with appropriate
-standalone flags to build for this toolchain (depending on the assumptions of
-that toolchain, see below), or you may wish to use a separate
-`aarch64-unknown-none` (or `aarch64-elf-`) toolchain.
-
-On some `aarch64` hosts that use ELF binaries, you *may* be able to use the host
-C toolchain, if it does not introduce assumptions about the host environment
-that don't match the expectations of a standalone environment. Otherwise, you
-may need a separate toolchain for standalone/freestanding development, just as
-when cross-compiling from a non-`aarch64` platform.
cc = "/path/to/arm-unknown-linux-uclibcgnueabi-gcc"
cxx = "/path/to/arm-unknown-linux-uclibcgnueabi-g++"
ar = "path/to/arm-unknown-linux-uclibcgnueabi-ar"
-ranlib = "path/to/arm-unknown-linux-uclibcgnueabi-"
-linker = "/path/to/arm-unknown-linux-uclibcgnueabi-"
+ranlib = "path/to/arm-unknown-linux-uclibcgnueabi-ranlib"
+linker = "/path/to/arm-unknown-linux-uclibcgnueabi-gcc"
```
## Building Rust programs
```
* Build with:
```text
- CC=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
- CXX=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-g++ \
- AR=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-ar \
+ CC_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
+ CXX_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-g++ \
+ AR_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-ar \
+ CFLAGS_armv7_unknown_linux_uclibceabi="-march=armv7-a -mtune=cortex-a9" \
+ CXXFLAGS_armv7_unknown_linux_uclibceabi="-march=armv7-a -mtune=cortex-a9" \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_UCLIBCEABI_LINKER=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_UCLIBCEABI_RUSTFLAGS='-Clink-arg=-s -Clink-arg=-Wl,--dynamic-linker=/mmc/lib/ld-uClibc.so.1 -Clink-arg=-Wl,-rpath,/mmc/lib' \
- cargo +stage2 build --target armv7-unknown-linux-uclibceabi --release
+ cargo +stage2 \
+ build \
+ --target armv7-unknown-linux-uclibceabi \
+ --release
```
* Copy the binary to your target device and run.
-We specify `CC`, `CXX`, and `AR` because somtimes a project or a subproject requires the use of your `'C'` cross toolchain. Since Tomatoware has a modified sysroot we also pass via RUSTFLAGS the location of the dynamic-linker and rpath.
+We specify `CC`, `CXX`, `AR`, `CFLAGS`, and `CXXFLAGS` environment variables because sometimes a project or a subproject requires the use of your `'C'` cross toolchain. Since Tomatoware has a modified sysroot we also pass via RUSTFLAGS the location of the dynamic-linker and rpath.
### Test with QEMU
To test a cross-compiled binary on your build system follow the instructions for `Cross Compilation`, install `qemu-arm-static`, and run with the following.
```text
-CC=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
-CXX=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-g++ \
-AR=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-ar \
+CC_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
+CXX_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-g++ \
+AR_armv7_unknown_linux_uclibceabi=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-ar \
+CFLAGS_armv7_unknown_linux_uclibceabi="-march=armv7-a -mtune=cortex-a9" \
+CXXFLAGS_armv7_unknown_linux_uclibceabi="-march=armv7-a -mtune=cortex-a9" \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_UCLIBCEABI_LINKER=/opt/tomatoware/arm-soft-mmc/bin/arm-linux-gcc \
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_UCLIBCEABI_RUNNER="qemu-arm-static -L /opt/tomatoware/arm-soft-mmc/arm-tomatoware-linux-uclibcgnueabi/sysroot/" \
-cargo +stage2 run --target armv7-unknown-linux-uclibceabi --release
+cargo +stage2 \
+run \
+--target armv7-unknown-linux-uclibceabi \
+--release
```
### Run in a chroot
list of expected values. If `"value"` is not in it, then `rustc` will report an `unexpected_cfgs`
lint diagnostic. The default diagnostic level for this lint is `Warn`.
-The form `values()` is an error, because it does not specify a condition name.
-
To enable checking of values, but to provide an empty set of valid values, use this form:
```bash
different names. If it is repeated for the same condition name, then the sets of values for that
condition are merged together.
+If `values()` is specified, then `rustc` will enable the checking of well-known values defined
+by itself. Note that it's necessary to specify the `values()` form to enable the checking of
+well-known values; specifying the other forms doesn't implicitly enable it.
+
## Examples
Consider this command line:
```bash
rustc --check-cfg 'names(feature)' \
- --check-cfg 'values(feature,"lion","zebra")' \
+ --check-cfg 'values(feature, "lion", "zebra")' \
--cfg 'feature="lion"' -Z unstable-options \
example.rs
```
#[naked]
pub extern "C" fn add_two(x: i32) {
- // x + 2 preceeded by a landing pad/nop block
+ // x + 2 preceded by a landing pad/nop block
unsafe {
asm!(
"
for where_predicate in generics["where_predicates"]:
if "bound_predicate" in where_predicate:
pred = where_predicate["bound_predicate"]
- check_type(pred["ty"])
+ check_type(pred["type"])
for bound in pred["bounds"]:
check_generic_bound(bound)
elif "region_predicate" in where_predicate:
for bound in item["inner"]["bounds"]:
check_generic_bound(bound)
work_list |= (
- set(item["inner"]["items"]) | set(item["inner"]["implementors"])
+ set(item["inner"]["items"]) | set(item["inner"]["implementations"])
) - visited
elif item["kind"] == "impl":
check_generics(item["inner"]["generics"])
fn build_external_function(cx: &mut DocContext<'_>, did: DefId) -> clean::Function {
let sig = cx.tcx.fn_sig(did);
- let constness =
- if cx.tcx.is_const_fn_raw(did) { hir::Constness::Const } else { hir::Constness::NotConst };
- let asyncness = cx.tcx.asyncness(did);
let predicates = cx.tcx.predicates_of(did);
let (generics, decl) = clean::enter_impl_trait(cx, |cx| {
// NOTE: generics need to be cleaned before the decl!
let decl = clean_fn_decl_from_did_and_sig(cx, Some(did), sig);
(generics, decl)
});
- clean::Function {
- decl,
- generics,
- header: hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness, asyncness },
- }
+ clean::Function { decl, generics }
}
fn build_enum(cx: &mut DocContext<'_>, did: DefId) -> clean::Enum {
clean::Enum {
generics: clean_ty_generics(cx, cx.tcx.generics_of(did), predicates),
variants_stripped: false,
- variants: cx.tcx.adt_def(did).variants.iter().map(|v| v.clean(cx)).collect(),
+ variants: cx.tcx.adt_def(did).variants().iter().map(|v| v.clean(cx)).collect(),
}
}
/// Inline an `impl`, inherent or of a trait. The `did` must be for an `impl`.
crate fn build_impl(
cx: &mut DocContext<'_>,
- parent_module: impl Into<Option<DefId>>,
+ parent_module: Option<DefId>,
did: DefId,
attrs: Option<Attrs<'_>>,
ret: &mut Vec<clean::Item>,
record_extern_trait(cx, did);
}
- let (merged_attrs, cfg) = merge_attrs(cx, parent_module.into(), load_attrs(cx, did), attrs);
+ let (merged_attrs, cfg) = merge_attrs(cx, parent_module, load_attrs(cx, did), attrs);
trace!("merged_attrs={:?}", merged_attrs);
trace!(
use rustc_ast as ast;
use rustc_attr as attr;
-use rustc_const_eval::const_eval::is_unstable_const_fn;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, DefKind, Res};
use rustc_span::hygiene::{AstPass, MacroKind};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{self, ExpnKind};
-use rustc_target::spec::abi::Abi;
-use rustc_typeck::check::intrinsic::intrinsic_operation_unsafety;
use rustc_typeck::hir_ty_to_ty;
use std::assert_matches::assert_matches;
let trait_ref = ty::TraitRef::identity(cx.tcx, def_id).skip_binder();
let generic_args = generic_args.clean(cx);
- let bindings = match generic_args {
- GenericArgs::AngleBracketed { bindings, .. } => bindings,
- _ => bug!("clean: parenthesized `GenericBound::LangItemTrait`"),
+ let GenericArgs::AngleBracketed { bindings, .. } = generic_args
+ else {
+ bug!("clean: parenthesized `GenericBound::LangItemTrait`");
};
let trait_ = clean_trait_ref_with_bindings(cx, trait_ref, &bindings);
hir::GenericBound::Trait(ref t, modifier) => {
// `T: ~const Drop` is not equivalent to `T: Drop`, and we don't currently document `~const` bounds
// because of its experimental status, so just don't show these.
- if Some(t.trait_ref.trait_def_id().unwrap()) == cx.tcx.lang_items().drop_trait()
- && hir::TraitBoundModifier::MaybeConst == modifier
+ // `T: ~const Destruct` is hidden because `T: Destruct` is a no-op.
+ if modifier == hir::TraitBoundModifier::MaybeConst
+ && [cx.tcx.lang_items().drop_trait(), cx.tcx.lang_items().destruct_trait()]
+ .iter()
+ .any(|tr| *tr == Some(t.trait_ref.trait_def_id().unwrap()))
{
return None;
}
+
+ #[cfg(bootstrap)]
+ {
+ // FIXME: remove `lang_items().drop_trait()` from above logic,
+ // as well as the comment about `~const Drop` because it was renamed to `Destruct`.
+ }
GenericBound::TraitBound(t.clean(cx), modifier)
}
})
fn clean(&self, cx: &mut DocContext<'_>) -> Option<WherePredicate> {
// `T: ~const Drop` is not equivalent to `T: Drop`, and we don't currently document `~const` bounds
// because of its experimental status, so just don't show these.
+ // `T: ~const Destruct` is hidden because `T: Destruct` is a no-op.
if self.skip_binder().constness == ty::BoundConstness::ConstIfConst
- && Some(self.skip_binder().trait_ref.def_id) == cx.tcx.lang_items().drop_trait()
+ && [cx.tcx.lang_items().drop_trait(), cx.tcx.lang_items().destruct_trait()]
+ .iter()
+ .any(|tr| *tr == Some(self.skip_binder().def_id()))
{
return None;
}
+ #[cfg(bootstrap)]
+ {
+ // FIXME: remove `lang_items().drop_trait()` from above logic,
+ // as well as the comment about `~const Drop` because it was renamed to `Destruct`.
+ }
+
let poly_trait_ref = self.map_bound(|pred| pred.trait_ref);
Some(WherePredicate::BoundPredicate {
ty: poly_trait_ref.skip_binder().self_ty().clean(cx),
}
None => {
let mut func = clean_function(cx, sig, generics, body_id);
- let def_id = item.def_id.to_def_id();
- func.header.constness =
- if cx.tcx.is_const_fn(def_id) && is_unstable_const_fn(cx.tcx, def_id).is_none() {
- hir::Constness::Const
- } else {
- hir::Constness::NotConst
- };
clean_fn_decl_legacy_const_generics(&mut func, attrs);
FunctionItem(func)
}
let decl = clean_fn_decl_with_args(cx, sig.decl, args);
(generics, decl)
});
- Function { decl, generics, header: sig.header }
+ Function { decl, generics }
}
fn clean_args_from_types_and_names(
AssocConstItem(ty.clean(cx), default)
}
hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
- let mut m = clean_function(cx, sig, &self.generics, body);
- if m.header.constness == hir::Constness::Const
- && is_unstable_const_fn(cx.tcx, local_did).is_some()
- {
- m.header.constness = hir::Constness::NotConst;
- }
+ let m = clean_function(cx, sig, &self.generics, body);
MethodItem(m, None)
}
hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Required(names)) => {
let decl = clean_fn_decl_with_args(cx, sig.decl, args);
(generics, decl)
});
- let mut t = Function { header: sig.header, decl, generics };
- if t.header.constness == hir::Constness::Const
- && is_unstable_const_fn(cx.tcx, local_did).is_some()
- {
- t.header.constness = hir::Constness::NotConst;
- }
- TyMethodItem(t)
+ TyMethodItem(Function { decl, generics })
}
hir::TraitItemKind::Type(bounds, ref default) => {
let generics = enter_impl_trait(cx, |cx| self.generics.clean(cx));
AssocConstItem(ty.clean(cx), default)
}
hir::ImplItemKind::Fn(ref sig, body) => {
- let mut m = clean_function(cx, sig, &self.generics, body);
- if m.header.constness == hir::Constness::Const
- && is_unstable_const_fn(cx.tcx, local_did).is_some()
- {
- m.header.constness = hir::Constness::NotConst;
- }
+ let m = clean_function(cx, sig, &self.generics, body);
let defaultness = cx.tcx.associated_item(self.def_id).defaultness;
MethodItem(m, Some(defaultness))
}
ty::TraitContainer(_) => self.defaultness.has_value(),
};
if provided {
- let constness = if tcx.is_const_fn_raw(self.def_id) {
- hir::Constness::Const
- } else {
- hir::Constness::NotConst
- };
- let asyncness = tcx.asyncness(self.def_id);
let defaultness = match self.container {
ty::ImplContainer(_) => Some(self.defaultness),
ty::TraitContainer(_) => None,
};
- MethodItem(
- Function {
- generics,
- decl,
- header: hir::FnHeader {
- unsafety: sig.unsafety(),
- abi: sig.abi(),
- constness,
- asyncness,
- },
- },
- defaultness,
- )
+ MethodItem(Function { generics, decl }, defaultness)
} else {
- TyMethodItem(Function {
- generics,
- decl,
- header: hir::FnHeader {
- unsafety: sig.unsafety(),
- abi: sig.abi(),
- constness: hir::Constness::NotConst,
- asyncness: hir::IsAsync::NotAsync,
- },
- })
+ TyMethodItem(Function { generics, decl })
}
}
ty::AssocKind::Type => {
fn clean_qpath(hir_ty: &hir::Ty<'_>, cx: &mut DocContext<'_>) -> Type {
let hir::Ty { hir_id: _, span, ref kind } = *hir_ty;
- let qpath = match kind {
- hir::TyKind::Path(qpath) => qpath,
- _ => unreachable!(),
- };
+ let hir::TyKind::Path(qpath) = kind else { unreachable!() };
match qpath {
hir::QPath::Resolved(None, ref path) => {
})
}
ty::Adt(def, substs) => {
- let did = def.did;
+ let did = def.did();
let kind = match def.adt_kind() {
AdtKind::Struct => ItemType::Struct,
AdtKind::Union => ItemType::Union,
cx.with_param_env(def_id, |cx| {
let kind = match item.kind {
hir::ForeignItemKind::Fn(decl, names, ref generics) => {
- let abi = cx.tcx.hir().get_foreign_abi(item.hir_id());
let (generics, decl) = enter_impl_trait(cx, |cx| {
// NOTE: generics must be cleaned before args
let generics = generics.clean(cx);
let decl = clean_fn_decl_with_args(cx, decl, args);
(generics, decl)
});
- ForeignFunctionItem(Function {
- decl,
- generics,
- header: hir::FnHeader {
- unsafety: if abi == Abi::RustIntrinsic {
- intrinsic_operation_unsafety(item.ident.name)
- } else {
- hir::Unsafety::Unsafe
- },
- abi,
- constness: hir::Constness::NotConst,
- asyncness: hir::IsAsync::NotAsync,
- },
- })
+ ForeignFunctionItem(Function { decl, generics })
}
hir::ForeignItemKind::Static(ref ty, mutability) => {
ForeignStaticItem(Static { type_: ty.clean(cx), mutability, expr: None })
let Some((self_, trait_did, name)) = lhs.projection() else {
return true;
};
- let generic = match self_ {
- clean::Generic(s) => s,
- _ => return true,
- };
- let (bounds, _) = match params.get_mut(generic) {
- Some(bound) => bound,
- None => return true,
- };
+ let clean::Generic(generic) = self_ else { return true };
+ let Some((bounds, _)) = params.get_mut(generic) else { return true };
merge_bounds(cx, bounds, trait_did, name, rhs)
});
use rustc_ast::util::comments::beautify_doc_string;
use rustc_ast::{self as ast, AttrStyle};
use rustc_attr::{ConstStability, Deprecation, Stability, StabilityLevel};
+use rustc_const_eval::const_eval::is_unstable_const_fn;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::thin_vec::ThinVec;
use rustc_hir as hir;
use rustc_span::{self, FileName, Loc};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi::Abi;
+use rustc_typeck::check::intrinsic::intrinsic_operation_unsafety;
use crate::clean::cfg::Cfg;
use crate::clean::external_path;
_ => false,
}
}
+
+ /// Returns a `FnHeader` if `self` is a function item, otherwise returns `None`.
+ crate fn fn_header(&self, tcx: TyCtxt<'_>) -> Option<hir::FnHeader> {
+ fn build_fn_header(
+ def_id: DefId,
+ tcx: TyCtxt<'_>,
+ asyncness: hir::IsAsync,
+ ) -> hir::FnHeader {
+ let sig = tcx.fn_sig(def_id);
+ let constness =
+ if tcx.is_const_fn(def_id) && is_unstable_const_fn(tcx, def_id).is_none() {
+ hir::Constness::Const
+ } else {
+ hir::Constness::NotConst
+ };
+ hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness, asyncness }
+ }
+ let header = match *self.kind {
+ ItemKind::ForeignFunctionItem(_) => {
+ let abi = tcx.fn_sig(self.def_id.as_def_id().unwrap()).abi();
+ hir::FnHeader {
+ unsafety: if abi == Abi::RustIntrinsic {
+ intrinsic_operation_unsafety(self.name.unwrap())
+ } else {
+ hir::Unsafety::Unsafe
+ },
+ abi,
+ constness: hir::Constness::NotConst,
+ asyncness: hir::IsAsync::NotAsync,
+ }
+ }
+ ItemKind::FunctionItem(_) | ItemKind::MethodItem(_, _) => {
+ let def_id = self.def_id.as_def_id().unwrap();
+ build_fn_header(def_id, tcx, tcx.asyncness(def_id))
+ }
+ ItemKind::TyMethodItem(_) => {
+ build_fn_header(self.def_id.as_def_id().unwrap(), tcx, hir::IsAsync::NotAsync)
+ }
+ _ => return None,
+ };
+ Some(header)
+ }
}
#[derive(Clone, Debug)]
{
match Cfg::parse(cfg_mi) {
Ok(new_cfg) => cfg &= new_cfg,
- Err(e) => sess.span_err(e.span, e.msg),
+ Err(e) => {
+ sess.span_err(e.span, e.msg);
+ }
}
}
}
crate struct Function {
crate decl: FnDecl,
crate generics: Generics,
- crate header: hir::FnHeader,
}
#[derive(Clone, PartialEq, Eq, Debug, Hash)]
/// alias instead of the final type. This will always have the final type, regardless of whether
/// `type_` came from HIR or from metadata.
///
- /// If `item_type.is_none()`, `type_` is guarenteed to come from metadata (and therefore hold the
+ /// If `item_type.is_none()`, `type_` is guaranteed to come from metadata (and therefore hold the
/// final type).
crate item_type: Option<Type>,
}
let primitives = local_crate.primitives(cx.tcx);
let keywords = local_crate.keywords(cx.tcx);
{
- let m = match *module.kind {
- ItemKind::ModuleItem(ref mut m) => m,
- _ => unreachable!(),
- };
+ let ItemKind::ModuleItem(ref mut m) = *module.kind
+ else { unreachable!() };
m.items.extend(primitives.iter().map(|&(def_id, prim)| {
Item::from_def_id_and_parts(
def_id,
let edition = config::parse_crate_edition(matches);
let mut id_map = html::markdown::IdMap::new();
- let external_html = match ExternalHtml::load(
+ let Some(external_html) = ExternalHtml::load(
&matches.opt_strs("html-in-header"),
&matches.opt_strs("html-before-content"),
&matches.opt_strs("html-after-content"),
&mut id_map,
edition,
&None,
- ) {
- Some(eh) => eh,
- None => return Err(3),
+ ) else {
+ return Err(3);
};
match matches.opt_str("r").as_deref() {
}
}
- if tcx.sess.diagnostic().has_errors_or_lint_errors() {
+ if tcx.sess.diagnostic().has_errors_or_lint_errors().is_some() {
rustc_errors::FatalError.raise();
}
collector
});
- if compiler.session().diagnostic().has_errors_or_lint_errors() {
+ if compiler.session().diagnostic().has_errors_or_lint_errors().is_some() {
FatalError.raise();
}
eprint!("{}", self.0);
}
}
- let mut out_lines = str::from_utf8(&output.stderr)
+ let mut out = str::from_utf8(&output.stderr)
.unwrap()
.lines()
.filter(|l| {
true
}
})
- .collect::<Vec<_>>();
+ .intersperse_with(|| "\n")
+ .collect::<String>();
// Add a \n to the end to properly terminate the last line,
// but only if there was output to be printed
- if !out_lines.is_empty() {
- out_lines.push("");
+ if !out.is_empty() {
+ out.push('\n');
}
- let out = out_lines.join("\n");
let _bomb = Bomb(&out);
match (output.status.success(), lang_string.compile_fail) {
(true, true) => {
(found_main, found_extern_crate, found_macro)
})
});
- let (already_has_main, already_has_extern_crate, found_macro) = match result {
- Ok(result) => result,
- Err(ErrorGuaranteed) => {
- // If the parser panicked due to a fatal error, pass the test code through unchanged.
- // The error will be reported during compilation.
- return (s.to_owned(), 0, false);
- }
+ let Ok((already_has_main, already_has_extern_crate, found_macro)) = result
+ else {
+ // If the parser panicked due to a fatal error, pass the test code through unchanged.
+ // The error will be reported during compilation.
+ return (s.to_owned(), 0, false);
};
// If a doctest's `fn main` is being masked by a wrapper macro, the parsing loop above won't
fn load_external_files(names: &[String], diag: &rustc_errors::Handler) -> Option<String> {
let mut out = String::new();
for name in names {
- let s = match load_string(name, diag) {
- Ok(s) => s,
- Err(_) => return None,
- };
+ let Ok(s) = load_string(name, diag) else { return None };
out.push_str(&s);
out.push('\n');
}
/// Gives a description of the renderer. Used for performance profiling.
fn descr() -> &'static str;
- /// Whether to call `item` recursivly for modules
+ /// Whether to call `item` recursively for modules
///
/// This is true for html, and false for json. See #80664
const RUN_ON_MODULE: bool;
prof.generic_activity_with_arg("render_mod_item", item.name.unwrap().to_string());
cx.mod_item_in(&item)?;
- let module = match *item.kind {
- clean::StrippedItem(box clean::ModuleItem(m)) | clean::ModuleItem(m) => m,
- _ => unreachable!(),
- };
+ let (clean::StrippedItem(box clean::ModuleItem(module)) | clean::ModuleItem(module)) = *item.kind
+ else { unreachable!() };
for it in module.items {
debug!("Adding {:?} to worklist", it.name);
work.push((cx.make_child_renderer(), it));
klass: Option<Class>,
context_info: &Option<ContextInfo<'_, '_, '_>>,
) {
- let klass = match klass {
- None => return write!(out, "{}", text),
- Some(klass) => klass,
- };
- let def_span = match klass.get_span() {
- Some(d) => d,
- None => {
- write!(out, "<span class=\"{}\">{}</span>", klass.as_html(), text);
- return;
- }
+ let Some(klass) = klass
+ else { return write!(out, "{}", text) };
+ let Some(def_span) = klass.get_span()
+ else {
+ write!(out, "<span class=\"{}\">{}</span>", klass.as_html(), text);
+ return;
};
let mut text_s = text.to_string();
if text_s.contains("::") {
// Render sidebar-items.js used throughout this module.
if !self.render_redirect_pages {
- let module = match *item.kind {
- clean::StrippedItem(box clean::ModuleItem(ref m)) | clean::ModuleItem(ref m) => m,
- _ => unreachable!(),
- };
+ let (clean::StrippedItem(box clean::ModuleItem(ref module)) | clean::ModuleItem(ref module)) = *item.kind
+ else { unreachable!() };
let items = self.build_sidebar_items(module);
let js_dst = self.dst.join(&format!("sidebar-items{}.js", self.shared.resource_suffix));
let v = format!("initSidebarItems({});", serde_json::to_string(&items).unwrap());
use rustc_ast_pretty::pprust;
use rustc_attr::{ConstStability, Deprecation, StabilityLevel};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir as hir;
use rustc_hir::def::CtorKind;
use rustc_hir::def_id::DefId;
use rustc_hir::Mutability;
// Render the list of items inside one of the sections "Trait Implementations",
// "Auto Trait Implementations," "Blanket Trait Implementations" (on struct/enum pages).
-fn render_impls(cx: &Context<'_>, w: &mut Buffer, impls: &[&&Impl], containing_item: &clean::Item) {
+fn render_impls(
+ cx: &Context<'_>,
+ w: &mut Buffer,
+ impls: &[&&Impl],
+ containing_item: &clean::Item,
+ toggle_open_by_default: bool,
+) {
let tcx = cx.tcx();
let mut rendered_impls = impls
.iter()
is_on_foreign_type: false,
show_default_items: true,
show_non_assoc_items: true,
- toggle_open_by_default: true,
+ toggle_open_by_default,
},
);
buffer.into_inner()
fn assoc_method(
w: &mut Buffer,
meth: &clean::Item,
- header: hir::FnHeader,
g: &clean::Generics,
d: &clean::FnDecl,
link: AssocItemLink<'_>,
cx: &Context<'_>,
render_mode: RenderMode,
) {
+ let header = meth.fn_header(cx.tcx()).expect("Trying to get header from a non-function item");
let name = meth.name.as_ref().unwrap();
let href = match link {
AssocItemLink::Anchor(Some(ref id)) => Some(format!("#{}", id)),
match *item.kind {
clean::StrippedItem(..) => {}
clean::TyMethodItem(ref m) => {
- assoc_method(w, item, m.header, &m.generics, &m.decl, link, parent, cx, render_mode)
+ assoc_method(w, item, &m.generics, &m.decl, link, parent, cx, render_mode)
}
clean::MethodItem(ref m, _) => {
- assoc_method(w, item, m.header, &m.generics, &m.decl, link, parent, cx, render_mode)
+ assoc_method(w, item, &m.generics, &m.decl, link, parent, cx, render_mode)
}
clean::AssocConstItem(ref ty, _) => {
assoc_const(w, item, ty, link, if parent == ItemType::Trait { " " } else { "" }, cx)
) {
info!("Documenting associated items of {:?}", containing_item.name);
let cache = cx.cache();
- let v = match cache.impls.get(&it) {
- Some(v) => v,
- None => return,
- };
+ let Some(v) = cache.impls.get(&it) else { return };
let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| i.inner_impl().trait_.is_none());
if !non_trait.is_empty() {
let mut tmp_buf = Buffer::empty_from(w);
concrete.into_iter().partition(|t| t.inner_impl().kind.is_blanket());
let mut impls = Buffer::empty_from(w);
- render_impls(cx, &mut impls, &concrete, containing_item);
+ render_impls(cx, &mut impls, &concrete, containing_item, true);
let impls = impls.into_inner();
if !impls.is_empty() {
write!(
</h2>\
<div id=\"synthetic-implementations-list\">",
);
- render_impls(cx, w, &synthetic, containing_item);
+ render_impls(cx, w, &synthetic, containing_item, false);
w.write_str("</div>");
}
</h2>\
<div id=\"blanket-implementations-list\">",
);
- render_impls(cx, w, &blanket_impl, containing_item);
+ render_impls(cx, w, &blanket_impl, containing_item, false);
w.write_str("</div>");
}
}
let tcx = cx.tcx();
let def_id = item.def_id.expect_def_id();
let key = tcx.def_path_hash(def_id);
- let call_locations = match cx.shared.call_locations.get(&key) {
- Some(call_locations) => call_locations,
- _ => {
- return;
- }
- };
+ let Some(call_locations) = cx.shared.call_locations.get(&key) else { return };
// Generate a unique ID so users can link to this section for a given method
let id = cx.id_map.borrow_mut().derive("scraped-examples");
let mut it = ordered_locations.into_iter().peekable();
// An example may fail to write if its source can't be read for some reason, so this method
- // continues iterating until a write suceeds
+ // continues iterating until a write succeeds
let write_and_skip_failure = |w: &mut Buffer, it: &mut Peekable<_>| {
while let Some(example) = it.next() {
if write_example(&mut *w, example) {
}
let unsafety_flag = match *myitem.kind {
- clean::FunctionItem(ref func) | clean::ForeignFunctionItem(ref func)
- if func.header.unsafety == hir::Unsafety::Unsafe =>
+ clean::FunctionItem(_) | clean::ForeignFunctionItem(_)
+ if myitem.fn_header(cx.tcx()).unwrap().unsafety
+ == hir::Unsafety::Unsafe =>
{
"<a title=\"unsafe function\" href=\"#\"><sup>⚠</sup></a>"
}
let stab = myitem.stability_class(cx.tcx());
let add = if stab.is_some() { " " } else { "" };
+ let visibility_emoji = match myitem.visibility {
+ clean::Visibility::Restricted(_) => {
+ "<span title=\"Restricted Visibility\"> 🔒</span> "
+ }
+ _ => "",
+ };
+
let doc_value = myitem.doc_value().unwrap_or_default();
w.write_str(ITEM_TABLE_ROW_OPEN);
write!(
w,
"<div class=\"item-left {stab}{add}module-item\">\
- <a class=\"{class}\" href=\"{href}\" title=\"{title}\">{name}</a>\
- {unsafety_flag}\
- {stab_tags}\
+ <a class=\"{class}\" href=\"{href}\" title=\"{title}\">{name}</a>\
+ {visibility_emoji}\
+ {unsafety_flag}\
+ {stab_tags}\
</div>\
<div class=\"item-right docblock-short\">{docs}</div>",
name = myitem.name.unwrap(),
+ visibility_emoji = visibility_emoji,
stab_tags = extra_info_tags(myitem, item, cx.tcx()),
docs = MarkdownSummaryLine(&doc_value, &myitem.links(cx)).into_string(),
class = myitem.type_(),
}
fn item_function(w: &mut Buffer, cx: &Context<'_>, it: &clean::Item, f: &clean::Function) {
- let vis = it.visibility.print_with_space(it.def_id, cx).to_string();
- let constness = print_constness_with_space(&f.header.constness, it.const_stability(cx.tcx()));
- let asyncness = f.header.asyncness.print_with_space();
- let unsafety = f.header.unsafety.print_with_space();
- let abi = print_abi_with_space(f.header.abi).to_string();
+ let header = it.fn_header(cx.tcx()).expect("printing a function which isn't a function");
+ let constness = print_constness_with_space(&header.constness, it.const_stability(cx.tcx()));
+ let unsafety = header.unsafety.print_with_space().to_string();
+ let abi = print_abi_with_space(header.abi).to_string();
+ let asyncness = header.asyncness.print_with_space();
+ let visibility = it.visibility.print_with_space(it.def_id, cx).to_string();
let name = it.name.unwrap();
let generics_len = format!("{:#}", f.generics.print(cx)).len();
let header_len = "fn ".len()
- + vis.len()
+ + visibility.len()
+ constness.len()
+ asyncness.len()
+ unsafety.len()
w,
"{vis}{constness}{asyncness}{unsafety}{abi}fn \
{name}{generics}{decl}{notable_traits}{where_clause}",
- vis = vis,
+ vis = visibility,
constness = constness,
asyncness = asyncness,
unsafety = unsafety,
name = name,
generics = f.generics.print(cx),
where_clause = print_where_clause(&f.generics, cx, 0, true),
- decl = f.decl.full_print(header_len, 0, f.header.asyncness, cx),
+ decl = f.decl.full_print(header_len, 0, header.asyncness, cx),
notable_traits = notable_traits_decl(&f.decl, cx),
);
});
};
for (index, layout) in variants.iter_enumerated() {
- let name = adt.variants[index].name;
+ let name = adt.variant(index).name;
write!(w, "<li><code>{name}</code>: ", name = name);
write_size_of_layout(w, *layout, tag_size);
writeln!(w, "</li>");
overflow-y: hidden;
}
-.source .sidebar > *:not(:first-child) {
- transition: opacity 0.5s;
+.source .sidebar > *:not(#sidebar-toggle) {
opacity: 0;
visibility: hidden;
}
overflow-y: auto;
}
-.source .sidebar.expanded > * {
+.source .sidebar.expanded > *:not(#sidebar-toggle) {
opacity: 1;
visibility: visible;
}
display: none;
}
- /* It doesn't render well on mobile because of the layout, so better only have the transition
- on desktop. */
- .rustdoc.source .sidebar {
- transition: width .5s;
- }
-
.source .sidebar.expanded {
width: 300px;
}
use clean::ItemKind::*;
let name = item.name;
let is_crate = item.is_crate();
+ let header = item.fn_header(tcx);
+
match *item.kind {
ModuleItem(m) => ItemEnum::Module(Module { is_crate, items: ids(m.items) }),
ImportItem(i) => ItemEnum::Import(i.into_tcx(tcx)),
StructFieldItem(f) => ItemEnum::StructField(f.into_tcx(tcx)),
EnumItem(e) => ItemEnum::Enum(e.into_tcx(tcx)),
VariantItem(v) => ItemEnum::Variant(v.into_tcx(tcx)),
- FunctionItem(f) => ItemEnum::Function(f.into_tcx(tcx)),
- ForeignFunctionItem(f) => ItemEnum::Function(f.into_tcx(tcx)),
+ FunctionItem(f) => ItemEnum::Function(from_function(f, header.unwrap(), tcx)),
+ ForeignFunctionItem(f) => ItemEnum::Function(from_function(f, header.unwrap(), tcx)),
TraitItem(t) => ItemEnum::Trait(t.into_tcx(tcx)),
TraitAliasItem(t) => ItemEnum::TraitAlias(t.into_tcx(tcx)),
- MethodItem(m, _) => ItemEnum::Method(from_function_method(m, true, tcx)),
- TyMethodItem(m) => ItemEnum::Method(from_function_method(m, false, tcx)),
+ MethodItem(m, _) => ItemEnum::Method(from_function_method(m, true, header.unwrap(), tcx)),
+ TyMethodItem(m) => ItemEnum::Method(from_function_method(m, false, header.unwrap(), tcx)),
ImplItem(i) => ItemEnum::Impl(i.into_tcx(tcx)),
StaticItem(s) => ItemEnum::Static(s.into_tcx(tcx)),
ForeignStaticItem(s) => ItemEnum::Static(s.into_tcx(tcx)),
}
}
-impl FromWithTcx<clean::Function> for Function {
- fn from_tcx(function: clean::Function, tcx: TyCtxt<'_>) -> Self {
- let clean::Function { decl, generics, header } = function;
- Function {
- decl: decl.into_tcx(tcx),
- generics: generics.into_tcx(tcx),
- header: from_fn_header(&header),
- }
- }
-}
-
impl FromWithTcx<clean::Generics> for Generics {
fn from_tcx(generics: clean::Generics, tcx: TyCtxt<'_>) -> Self {
Generics {
Lifetime { outlives } => GenericParamDefKind::Lifetime {
outlives: outlives.into_iter().map(|lt| lt.0.to_string()).collect(),
},
- Type { did: _, bounds, default, synthetic: _ } => GenericParamDefKind::Type {
+ Type { did: _, bounds, default, synthetic } => GenericParamDefKind::Type {
bounds: bounds.into_iter().map(|x| x.into_tcx(tcx)).collect(),
default: default.map(|x| (*x).into_tcx(tcx)),
+ synthetic,
+ },
+ Const { did: _, ty, default } => GenericParamDefKind::Const {
+ type_: (*ty).into_tcx(tcx),
+ default: default.map(|x| *x),
},
- Const { did: _, ty, default } => {
- GenericParamDefKind::Const { ty: (*ty).into_tcx(tcx), default: default.map(|x| *x) }
- }
}
}
}
use clean::WherePredicate::*;
match predicate {
BoundPredicate { ty, bounds, .. } => WherePredicate::BoundPredicate {
- ty: ty.into_tcx(tcx),
+ type_: ty.into_tcx(tcx),
bounds: bounds.into_iter().map(|x| x.into_tcx(tcx)).collect(),
// FIXME: add `bound_params` to rustdoc-json-params?
},
items: ids(items),
generics: generics.into_tcx(tcx),
bounds: bounds.into_iter().map(|x| x.into_tcx(tcx)).collect(),
- implementors: Vec::new(), // Added in JsonRenderer::item
+ implementations: Vec::new(), // Added in JsonRenderer::item
}
}
}
}
}
+crate fn from_function(
+ function: clean::Function,
+ header: rustc_hir::FnHeader,
+ tcx: TyCtxt<'_>,
+) -> Function {
+ let clean::Function { decl, generics } = function;
+ Function {
+ decl: decl.into_tcx(tcx),
+ generics: generics.into_tcx(tcx),
+ header: from_fn_header(&header),
+ }
+}
+
crate fn from_function_method(
function: clean::Function,
has_body: bool,
+ header: rustc_hir::FnHeader,
tcx: TyCtxt<'_>,
) -> Method {
- let clean::Function { header, decl, generics } = function;
+ let clean::Function { decl, generics } = function;
Method {
decl: decl.into_tcx(tcx),
generics: generics.into_tcx(tcx),
let id = item.def_id;
if let Some(mut new_item) = self.convert_item(item) {
if let types::ItemEnum::Trait(ref mut t) = new_item.inner {
- t.implementors = self.get_trait_implementors(id.expect_def_id())
+ t.implementations = self.get_trait_implementors(id.expect_def_id())
} else if let types::ItemEnum::Struct(ref mut s) = new_item.inner {
s.impls = self.get_impls(id.expect_def_id())
} else if let types::ItemEnum::Enum(ref mut e) = new_item.inner {
let exit_code = rustc_driver::catch_with_exit_code(|| match get_args() {
Some(args) => main_args(&args),
- _ => Err(ErrorGuaranteed),
+ _ => Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()),
});
process::exit(exit_code);
}
// codes from `from_matches` here.
let options = match config::Options::from_matches(&matches) {
Ok(opts) => opts,
- Err(code) => return if code == 0 { Ok(()) } else { Err(ErrorGuaranteed) },
+ Err(code) => {
+ return if code == 0 {
+ Ok(())
+ } else {
+ Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ };
+ }
};
rustc_interface::util::run_in_thread_pool_with_globals(
options.edition,
match res {
Ok(()) => Ok(()),
Err(err) => {
- diag.struct_err(&err).emit();
- Err(ErrorGuaranteed)
+ let reported = diag.struct_err(&err).emit();
+ Err(reported)
}
}
}
(resolver.clone(), resolver_caches)
};
- if sess.diagnostic().has_errors_or_lint_errors() {
+ if sess.diagnostic().has_errors_or_lint_errors().is_some() {
sess.fatal("Compilation failed, aborting rustdoc");
}
impl<'a, 'tcx> DocVisitor for BareUrlsLinter<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
- let hir_id = match DocContext::as_local_hir_id(self.cx.tcx, item.def_id) {
- Some(hir_id) => hir_id,
- None => {
- // If non-local, no need to check anything.
- return;
- }
+ let Some(hir_id) = DocContext::as_local_hir_id(self.cx.tcx, item.def_id)
+ else {
+ // If non-local, no need to check anything.
+ return;
};
let dox = item.attrs.collapsed_doc_value().unwrap_or_default();
if !dox.is_empty() {
format!("<{}>", url),
Applicability::MachineApplicable,
)
- .emit()
+ .emit();
});
};
return;
}
- let local_id = match item.def_id.as_def_id().and_then(|x| x.as_local()) {
- Some(id) => id,
+ let Some(local_id) = item.def_id.as_def_id().and_then(|x| x.as_local())
+ else {
// We don't need to check the syntax for other crates so returning
// without doing anything should not be a problem.
- None => return,
+ return;
};
let hir_id = self.cx.tcx.hir().local_def_id_to_hir_id(local_id);
// lambda that will use the lint to start a new diagnostic and add
// a suggestion to it when needed.
- let diag_builder = |lint: LintDiagnosticBuilder<'_>| {
+ let diag_builder = |lint: LintDiagnosticBuilder<'_, ()>| {
let explanation = if is_ignore {
"`ignore` code blocks require valid Rust code for syntax highlighting; \
mark blocks that do not contain Rust code as text"
}
crate fn look_for_tests<'tcx>(cx: &DocContext<'tcx>, dox: &str, item: &Item) {
- let hir_id = match DocContext::as_local_hir_id(cx.tcx, item.def_id) {
- Some(hir_id) => hir_id,
- None => {
- // If non-local, no need to check anything.
- return;
- }
+ let Some(hir_id) = DocContext::as_local_hir_id(cx.tcx, item.def_id)
+ else {
+ // If non-local, no need to check anything.
+ return;
};
let mut tests = Tests { found_tests: 0 };
crate::lint::MISSING_DOC_CODE_EXAMPLES,
hir_id,
sp,
- |lint| lint.build("missing code example in this documentation").emit(),
+ |lint| {
+ lint.build("missing code example in this documentation").emit();
+ },
);
}
} else if tests.found_tests > 0
crate::lint::PRIVATE_DOC_TESTS,
hir_id,
item.attr_span(cx.tcx),
- |lint| lint.build("documentation test in private item").emit(),
+ |lint| {
+ lint.build("documentation test in private item").emit();
+ },
);
}
}
//!
//! [RFC 1946]: https://github.com/rust-lang/rfcs/blob/master/text/1946-intra-rustdoc-links.md
-use rustc_data_structures::{fx::FxHashMap, stable_set::FxHashSet};
+use rustc_data_structures::{fx::FxHashMap, intern::Interned, stable_set::FxHashSet};
use rustc_errors::{Applicability, Diagnostic};
use rustc_hir::def::{
DefKind,
Err(ResolutionFailure::NotResolved {
item_id,
module_id,
- partial_res: Some(Res::Def(DefKind::Enum, def.did)),
+ partial_res: Some(Res::Def(DefKind::Enum, def.did())),
unresolved: variant_field_name.to_string().into(),
}
.into())
module_id: DefId,
) -> Result<Res, ResolutionFailure<'a>> {
self.cx.enter_resolver(|resolver| {
- // NOTE: this needs 2 separate lookups because `resolve_str_path_error` doesn't take
+ // NOTE: this needs 2 separate lookups because `resolve_rustdoc_path` doesn't take
// lexical scope into account (it ignores all macros not defined at the mod-level)
debug!("resolving {} as a macro in the module {:?}", path_str, module_id);
- if let Ok((_, res)) =
- resolver.resolve_str_path_error(DUMMY_SP, path_str, MacroNS, module_id)
- {
+ if let Some(res) = resolver.resolve_rustdoc_path(path_str, MacroNS, module_id) {
// don't resolve builtins like `#[derive]`
if let Ok(res) = res.try_into() {
return Ok(res);
})
}
- /// Convenience wrapper around `resolve_str_path_error`.
+ /// Convenience wrapper around `resolve_rustdoc_path`.
///
/// This also handles resolving `true` and `false` as booleans.
- /// NOTE: `resolve_str_path_error` knows only about paths, not about types.
+ /// NOTE: `resolve_rustdoc_path` knows only about paths, not about types.
/// Associated items will never be resolved by this function.
fn resolve_path(
&self,
return res;
}
- let result = self.cx.enter_resolver(|resolver| {
- resolver
- .resolve_str_path_error(DUMMY_SP, path_str, ns, module_id)
- .and_then(|(_, res)| res.try_into())
- });
+ // Resolver doesn't know about true, false, and types that aren't paths (e.g. `()`).
+ let result = self
+ .cx
+ .enter_resolver(|resolver| resolver.resolve_rustdoc_path(path_str, ns, module_id))
+ .and_then(|res| res.try_into().ok())
+ .or_else(|| resolve_primitive(path_str, ns));
debug!("{} resolved to {:?} in namespace {:?}", path_str, result, ns);
- match result {
- // resolver doesn't know about true, false, and types that aren't paths (e.g. `()`)
- // manually as bool
- Err(()) => resolve_primitive(path_str, ns),
- Ok(res) => Some(res),
- }
+ result
}
/// Resolves a string as a path within a particular namespace. Returns an
ty::FnDef(..) => panic!("type alias to a function definition"),
ty::FnPtr(_) => Res::Primitive(Fn),
ty::Never => Res::Primitive(Never),
- ty::Adt(&ty::AdtDef { did, .. }, _) | ty::Foreign(did) => {
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did, .. }, _)), _) | ty::Foreign(did) => {
Res::Def(self.cx.tcx.def_kind(did), did)
}
ty::Projection(_)
if ns == TypeNS && def_kind == DefKind::Enum {
match tcx.type_of(did).kind() {
ty::Adt(adt_def, _) => {
- for variant in &adt_def.variants {
+ for variant in adt_def.variants() {
if variant.name == item_name {
return Some((
root_res,
///
/// This function returns `None` if no associated item was found in the impl.
/// This can occur when the trait associated item has a default value that is
-/// not overriden in the impl.
+/// not overridden in the impl.
///
/// This is just a wrapper around [`TyCtxt::impl_item_implementor_ids()`] and
/// [`TyCtxt::associated_item()`] (with some helpful logging added).
let saw_impl = impl_type == ty
|| match (impl_type.kind(), ty.kind()) {
(ty::Adt(impl_def, _), ty::Adt(ty_def, _)) => {
- debug!("impl def_id: {:?}, ty def_id: {:?}", impl_def.did, ty_def.did);
- impl_def.did == ty_def.did
+ debug!("impl def_id: {:?}, ty def_id: {:?}", impl_def.did(), ty_def.did());
+ impl_def.did() == ty_def.did()
}
_ => false,
};
DiagnosticInfo { item, ori_link: _, dox, link_range }: &DiagnosticInfo<'_>,
decorate: impl FnOnce(&mut Diagnostic, Option<rustc_span::Span>),
) {
- let hir_id = match DocContext::as_local_hir_id(tcx, item.def_id) {
- Some(hir_id) => hir_id,
- None => {
- // If non-local, no need to check anything.
- info!("ignoring warning from parent crate: {}", msg);
- return;
- }
+ let Some(hir_id) = DocContext::as_local_hir_id(tcx, item.def_id)
+ else {
+ // If non-local, no need to check anything.
+ info!("ignoring warning from parent crate: {}", msg);
+ return;
};
let sp = item.attr_span(tcx);
use rustc_middle::ty::{DefIdTree, Visibility};
use rustc_resolve::{ParentScope, Resolver};
use rustc_session::config::Externs;
-use rustc_span::{Span, SyntaxContext, DUMMY_SP};
+use rustc_span::SyntaxContext;
use std::collections::hash_map::Entry;
use std::mem;
// Overridden `visit_item` below doesn't apply to the crate root,
// so we have to visit its attributes and reexports separately.
- loader.load_links_in_attrs(&krate.attrs, krate.span);
+ loader.load_links_in_attrs(&krate.attrs);
loader.process_module_children_or_reexports(CRATE_DEF_ID.to_def_id());
visit::walk_crate(&mut loader, krate);
loader.add_foreign_traits_in_scope();
// DO NOT REMOVE THIS without first testing on the reproducer in
// https://github.com/jyn514/objr/commit/edcee7b8124abf0e4c63873e8422ff81beb11ebb
for (extern_name, _) in externs.iter().filter(|(_, entry)| entry.add_prelude) {
- let _ = loader.resolver.resolve_str_path_error(
- DUMMY_SP,
- extern_name,
- TypeNS,
- CRATE_DEF_ID.to_def_id(),
- );
+ loader.resolver.resolve_rustdoc_path(extern_name, TypeNS, CRATE_DEF_ID.to_def_id());
}
ResolverCaches {
}
}
- fn load_links_in_attrs(&mut self, attrs: &[ast::Attribute], span: Span) {
+ fn load_links_in_attrs(&mut self, attrs: &[ast::Attribute]) {
// FIXME: this needs to consider reexport inlining.
let attrs = clean::Attributes::from_ast(attrs, None);
for (parent_module, doc) in attrs.collapsed_doc_value_by_module_level() {
} else {
continue;
};
- let _ = self.resolver.resolve_str_path_error(span, &path_str, TypeNS, module_id);
+ self.resolver.resolve_rustdoc_path(&path_str, TypeNS, module_id);
}
}
}
// loaded, even if the module itself has no doc comments.
self.add_traits_in_parent_scope(self.current_mod.to_def_id());
- self.load_links_in_attrs(&item.attrs, item.span);
+ self.load_links_in_attrs(&item.attrs);
self.process_module_children_or_reexports(self.current_mod.to_def_id());
visit::walk_item(self, item);
}
_ => {}
}
- self.load_links_in_attrs(&item.attrs, item.span);
+ self.load_links_in_attrs(&item.attrs);
visit::walk_item(self, item);
}
}
fn visit_assoc_item(&mut self, item: &ast::AssocItem, ctxt: AssocCtxt) {
- self.load_links_in_attrs(&item.attrs, item.span);
+ self.load_links_in_attrs(&item.attrs);
visit::walk_assoc_item(self, item, ctxt)
}
fn visit_foreign_item(&mut self, item: &ast::ForeignItem) {
- self.load_links_in_attrs(&item.attrs, item.span);
+ self.load_links_in_attrs(&item.attrs);
visit::walk_foreign_item(self, item)
}
fn visit_variant(&mut self, v: &ast::Variant) {
- self.load_links_in_attrs(&v.attrs, v.span);
+ self.load_links_in_attrs(&v.attrs);
visit::walk_variant(self, v)
}
fn visit_field_def(&mut self, field: &ast::FieldDef) {
- self.load_links_in_attrs(&field.attrs, field.span);
+ self.load_links_in_attrs(&field.attrs);
visit::walk_field_def(self, field)
}
impl<'a, 'tcx> DocVisitor for InvalidHtmlTagsLinter<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
let tcx = self.cx.tcx;
- let hir_id = match DocContext::as_local_hir_id(tcx, item.def_id) {
- Some(hir_id) => hir_id,
- None => {
- // If non-local, no need to check anything.
- return;
- }
- };
+ let Some(hir_id) = DocContext::as_local_hir_id(tcx, item.def_id)
+ // If non-local, no need to check anything.
+ else { return };
let dox = item.attrs.collapsed_doc_value().unwrap_or_default();
if !dox.is_empty() {
let report_diag = |msg: &str, range: &Range<usize>, is_open_tag: bool| {
// In here, the `min_indent` is 1 (because non-sugared fragment are always counted with minimum
// 1 whitespace), meaning that "hello!" will be considered a codeblock because it starts with 4
// (5 - 1) whitespaces.
- let min_indent = match docs
+ let Some(min_indent) = docs
.iter()
.map(|fragment| {
fragment.doc.as_str().lines().fold(usize::MAX, |min_indent, line| {
})
})
.min()
- {
- Some(x) => x,
- None => return,
+ else {
+ return;
};
for fragment in docs {
om.items.push((item, renamed))
}
hir::ItemKind::Macro(ref macro_def, _) => {
- // `#[macro_export] macro_rules!` items are handled seperately in `visit()`,
+ // `#[macro_export] macro_rules!` items are handled separately in `visit()`,
// above, since they need to be documented at the module top level. Accordingly,
// we only want to handle macros if one of three conditions holds:
//
-Subproject commit c8eccf626fb5bb851b2ade93af8851ca1523807f
+Subproject commit 9168e236c548d1d0e9938ee6dd4cdbd308fdfd72
use serde::{Deserialize, Serialize};
/// rustdoc format-version.
-pub const FORMAT_VERSION: u32 = 12;
+pub const FORMAT_VERSION: u32 = 14;
/// A `Crate` is the root of the emitted JSON blob. It contains all type/documentation information
/// about the language items in the local crate, as well as info about external items to allow
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum GenericParamDefKind {
- Lifetime { outlives: Vec<String> },
- Type { bounds: Vec<GenericBound>, default: Option<Type> },
- Const { ty: Type, default: Option<String> },
+ Lifetime {
+ outlives: Vec<String>,
+ },
+ Type {
+ bounds: Vec<GenericBound>,
+ default: Option<Type>,
+ /// This is normally `false`, which means that this generic parameter is
+ /// declared in the Rust source text.
+ ///
+ /// If it is `true`, this generic parameter has been introduced by the
+ /// compiler behind the scenes.
+ ///
+ /// # Example
+ ///
+ /// Consider
+ ///
+ /// ```ignore (pseudo-rust)
+ /// pub fn f(_: impl Trait) {}
+ /// ```
+ ///
+ /// The compiler will transform this behind the scenes to
+ ///
+ /// ```ignore (pseudo-rust)
+ /// pub fn f<impl Trait: Trait>(_: impl Trait) {}
+ /// ```
+ ///
+ /// In this example, the generic parameter named `impl Trait` (and which
+ /// is bound by `Trait`) is synthetic, because it was not originally in
+ /// the Rust source text.
+ synthetic: bool,
+ },
+ Const {
+ #[serde(rename = "type")]
+ type_: Type,
+ default: Option<String>,
+ },
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum WherePredicate {
- BoundPredicate { ty: Type, bounds: Vec<GenericBound> },
- RegionPredicate { lifetime: String, bounds: Vec<GenericBound> },
- EqPredicate { lhs: Type, rhs: Term },
+ BoundPredicate {
+ #[serde(rename = "type")]
+ type_: Type,
+ bounds: Vec<GenericBound>,
+ },
+ RegionPredicate {
+ lifetime: String,
+ bounds: Vec<GenericBound>,
+ },
+ EqPredicate {
+ lhs: Type,
+ rhs: Term,
+ },
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub items: Vec<Id>,
pub generics: Generics,
pub bounds: Vec<GenericBound>,
- pub implementors: Vec<Id>,
+ pub implementations: Vec<Id>,
}
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
// FIXME: No way to reliably check the filename.
-// CHECK-DAG: [[ASYNC_FN:!.*]] = !DINamespace(name: "async_fn_test"
-// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_union_type, name: "async_fn_env$0"
+// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_union_type, name: "enum$<async_fn_debug_msvc::async_fn_test::async_fn_env$0>",
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "variant0", scope: [[GEN]],
// For brevity, we only check the struct name and members of the last variant.
// CHECK-SAME: file: [[FILE:![0-9]*]], line: 11,
// CHECK-SAME: baseType: [[VARIANT:![0-9]*]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: [[S1:!.*]] = !DICompositeType(tag: DW_TAG_structure_type, name: "Suspend1", scope: [[GEN]],
+// CHECK: [[VARIANT]] = !DICompositeType(tag: DW_TAG_structure_type, name: "Suspend1", scope: [[GEN]],
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[S1]]
+// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[VARIANT]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "discriminant", scope: [[GEN]],
// CHECK-DAG: [[ASYNC_FN:!.*]] = !DINamespace(name: "async_fn_test"
// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_structure_type, name: "{async_fn_env#0}", scope: [[ASYNC_FN]]
-// CHECK: [[VARIANT:!.*]] = !DICompositeType(tag: DW_TAG_variant_part, scope: [[ASYNC_FN]],
+// CHECK: [[VARIANT:!.*]] = !DICompositeType(tag: DW_TAG_variant_part, scope: [[GEN]],
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: discriminator: [[DISC:![0-9]*]]
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "0", scope: [[VARIANT]],
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[S1]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: [[DISC]] = !DIDerivedType(tag: DW_TAG_member, name: "__state", scope: [[ASYNC_FN]],
+// CHECK: [[DISC]] = !DIDerivedType(tag: DW_TAG_member, name: "__state", scope: [[GEN]],
// CHECK-SAME: flags: DIFlagArtificial
fn main() {
// compile-flags: -O
-// On x86 the closure is inlined in foo() producting something like
+// On x86 the closure is inlined in foo() producing something like
// define i32 @foo() [...] {
// tail call void @bar() [...]
// ret i32 0
// CHECK: @STATIC = {{.*}}, align 4
// This checks the constants from inline_enum_const
-// CHECK: @alloc9 = {{.*}}, align 2
+// CHECK: @alloc12 = {{.*}}, align 2
// This checks the constants from {low,high}_align_const, they share the same
// constant, but the alignment differs, so the higher one should be used
-// Verify that debuginfo column nubmers are 1-based byte offsets.
+// Verify that debuginfo column numbers are 1-based byte offsets.
//
// ignore-windows
// compile-flags: -C debuginfo=2
// FIXME: No way to reliably check the filename.
-// CHECK-DAG: [[GEN_FN:!.*]] = !DINamespace(name: "generator_test"
-// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_union_type, name: "generator_env$0"
+// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_union_type, name: "enum$<generator_debug_msvc::generator_test::generator_env$0>"
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "variant0", scope: [[GEN]],
// For brevity, we only check the struct name and members of the last variant.
// CHECK-SAME: file: [[FILE:![0-9]*]], line: 14,
// CHECK-SAME: baseType: [[VARIANT:![0-9]*]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: [[S1:!.*]] = !DICompositeType(tag: DW_TAG_structure_type, name: "Suspend1", scope: [[GEN]],
+// CHECK: [[VARIANT]] = !DICompositeType(tag: DW_TAG_structure_type, name: "Suspend1", scope: [[GEN]],
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[S1]]
+// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[VARIANT]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "discriminant", scope: [[GEN]],
// CHECK-DAG: [[GEN_FN:!.*]] = !DINamespace(name: "generator_test"
// CHECK-DAG: [[GEN:!.*]] = !DICompositeType(tag: DW_TAG_structure_type, name: "{generator_env#0}", scope: [[GEN_FN]]
-// CHECK: [[VARIANT:!.*]] = !DICompositeType(tag: DW_TAG_variant_part, scope: [[GEN_FN]],
+// CHECK: [[VARIANT:!.*]] = !DICompositeType(tag: DW_TAG_variant_part, scope: [[GEN]],
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: discriminator: [[DISC:![0-9]*]]
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "0", scope: [[VARIANT]],
// CHECK: {{!.*}} = !DIDerivedType(tag: DW_TAG_member, name: "s", scope: [[S1]]
// CHECK-NOT: flags: DIFlagArtificial
// CHECK-SAME: )
-// CHECK: [[DISC]] = !DIDerivedType(tag: DW_TAG_member, name: "__state", scope: [[GEN_FN]],
+// CHECK: [[DISC]] = !DIDerivedType(tag: DW_TAG_member, name: "__state", scope: [[GEN]],
// CHECK-SAME: flags: DIFlagArtificial
fn main() {
-// Gdb doesn't know about UTF-32 character encoding and will print a rust char as only
-// its numerical value.
-
// compile-flags:-g
// min-lldb-version: 310
// gdb-command:print *int_ref
// gdb-check:$2 = -1
-// gdb-command:print *char_ref
+// gdb-command:print/d *char_ref
// gdb-check:$3 = 97
// gdb-command:print *i8_ref
// min-lldb-version: 310
-// Gdb doesn't know about UTF-32 character encoding and will print a rust char as only
-// its numerical value.
-
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:print *int_ref
// gdb-check:$2 = -1
-// gdb-command:print *char_ref
+// gdb-command:print/d *char_ref
// gdb-check:$3 = 97
// gdb-command:print/d *i8_ref
--- /dev/null
+// GDB got support for DW_ATE_UTF in 11.2, see
+// https://sourceware.org/bugzilla/show_bug.cgi?id=28637.
+
+// min-gdb-version: 11.2
+// compile-flags: -g
+
+// === GDB TESTS ===================================================================================
+
+// gdb-command:run
+// gdb-command:print ch
+// gdb-check:$1 = 97 'a'
+
+#![allow(unused_variables)]
+#![feature(omit_gdb_pretty_printer_section)]
+#![omit_gdb_pretty_printer_section]
+
+fn main() {
+ let ch: char = 'a';
+
+ zzz(); // #break
+}
+
+fn zzz() {()}
// lldb-command:print b
// lldbg-check:(generator_objects::main::{generator_env#0}) $3 =
+// === CDB TESTS ===================================================================================
+
+// cdb-command: g
+// cdb-command: dx b
+// cdb-check: b : Unresumed [Type: enum$<generator_objects::main::generator_env$0>]
+// cdb-check: [variant] : Unresumed
+// cdb-check: [+0x[...]] _ref__a : 0x[...] : 5 [Type: int *]
+
+// cdb-command: g
+// cdb-command: dx b
+// cdb-check: b : Suspend0 [Type: enum$<generator_objects::main::generator_env$0>]
+// cdb-check: [variant] : Suspend0
+// cdb-check: [+0x[...]] c : 6 [Type: int]
+// cdb-check: [+0x[...]] d : 7 [Type: int]
+// cdb-check: [+0x[...]] _ref__a : 0x[...] : 5 [Type: int *]
+
+// cdb-command: g
+// cdb-command: dx b
+// cdb-check: b : Suspend1 [Type: enum$<generator_objects::main::generator_env$0>]
+// cdb-check: [variant] : Suspend1
+// cdb-check: [+0x[...]] c : 7 [Type: int]
+// cdb-check: [+0x[...]] d : 8 [Type: int]
+// cdb-check: [+0x[...]] _ref__a : 0x[...] : 6 [Type: int *]
+
+// cdb-command: g
+// cdb-command: dx b
+// cdb-check: b : Returned [Type: enum$<generator_objects::main::generator_env$0>]
+// cdb-check: [<Raw View>] [Type: enum$<generator_objects::main::generator_env$0>]
+// cdb-check: [variant] : Returned
+// cdb-check: [+0x[...]] _ref__a : 0x[...] : 6 [Type: int *]
+
#![feature(omit_gdb_pretty_printer_section, generators, generator_trait)]
#![omit_gdb_pretty_printer_section]
_zzz(); // #break
}
+#[inline(never)]
fn _zzz() {
()
}
// cdb-command: g
// cdb-command: dx a
-// cdb-check:a : Some({...}) [Type: enum$<core::option::Option<enum$<msvc_pretty_enums::CStyleEnum> >, 2, 16, Some>]
-// cdb-check: [<Raw View>] [Type: enum$<core::option::Option<enum$<msvc_pretty_enums::CStyleEnum> >, 2, 16, Some>]
+// cdb-check:a : Some({...}) [Type: enum$<core::option::Option<msvc_pretty_enums::CStyleEnum>, 2, 16, Some>]
+// cdb-check: [<Raw View>] [Type: enum$<core::option::Option<msvc_pretty_enums::CStyleEnum>, 2, 16, Some>]
// cdb-check: [variant] : Some
// cdb-check: [+0x000] __0 : Low (0x2) [Type: msvc_pretty_enums::CStyleEnum]
// cdb-command: dx b
-// cdb-check:b : None [Type: enum$<core::option::Option<enum$<msvc_pretty_enums::CStyleEnum> >, 2, 16, Some>]
-// cdb-check: [<Raw View>] [Type: enum$<core::option::Option<enum$<msvc_pretty_enums::CStyleEnum> >, 2, 16, Some>]
+// cdb-check:b : None [Type: enum$<core::option::Option<msvc_pretty_enums::CStyleEnum>, 2, 16, Some>]
+// cdb-check: [<Raw View>] [Type: enum$<core::option::Option<msvc_pretty_enums::CStyleEnum>, 2, 16, Some>]
// cdb-check: [variant] : None
// cdb-command: dx c
Tag2,
}
-pub enum Empty { }
+pub enum Empty {}
fn main() {
let a = Some(CStyleEnum::Low);
zzz(); // #break
}
-fn zzz() { () }
+fn zzz() {
+ ()
+}
// gdb-check:type = type_names::mod1::Enum2
// gdb-command:whatis generic_enum_1
-// gdb-check:type = type_names::mod1::mod2::Enum3
+// gdb-check:type = type_names::mod1::mod2::Enum3<type_names::mod1::Struct2>
// gdb-command:whatis generic_enum_2
-// gdb-check:type = type_names::mod1::mod2::Enum3
+// gdb-check:type = type_names::mod1::mod2::Enum3<type_names::Struct1>
// TUPLES
// gdb-command:whatis tuple1
// gdb-command:whatis has_associated_type_trait
// gdb-check:type = &(dyn type_names::Trait3<u32, AssocType=isize> + core::marker::Send)
+// gdb-command:whatis has_associated_type_but_no_generics_trait
+// gdb-check:type = &dyn type_names::TraitNoGenericsButWithAssocType<Output=isize>
+
// BARE FUNCTIONS
// gdb-command:whatis rust_fn
// gdb-check:type = (fn(core::option::Option<isize>, core::option::Option<&type_names::mod1::Struct2>), usize)
// FOREIGN TYPES
// gdb-command:whatis foreign1
-// gdb-check:type = *mut ForeignType1
+// gdb-check:type = *mut type_names::{extern#0}::ForeignType1
// gdb-command:whatis foreign2
-// gdb-check:type = *mut ForeignType2
+// gdb-check:type = *mut type_names::mod1::{extern#0}::ForeignType2
// === CDB TESTS ==================================================================================
// cdb-command:dv /t *_enum_*
// cdb-check:union enum$<type_names::Enum1> simple_enum_1 = [...]
// cdb-check:union enum$<type_names::Enum1> simple_enum_2 = [...]
-// cdb-check:type_names::mod1::Enum2 simple_enum_3 = [...]
-// cdb-check:type_names::mod1::mod2::Enum3 generic_enum_1 = [...]
-// cdb-check:type_names::mod1::mod2::Enum3 generic_enum_2 = [...]
+// cdb-check:union enum$<type_names::mod1::Enum2> simple_enum_3 = [...]
+// cdb-check:union enum$<type_names::mod1::mod2::Enum3<type_names::mod1::Struct2> > generic_enum_1 = [...]
+// cdb-check:union enum$<type_names::mod1::mod2::Enum3<type_names::Struct1> > generic_enum_2 = [...]
// TUPLES
// cdb-command:dv /t tuple*
// cdb-check:struct ref_mut$<dyn$<type_names::Trait1> > mut_ref_trait = [...]
// cdb-check:struct alloc::boxed::Box<dyn$<core::marker::Send,core::marker::Sync>,alloc::alloc::Global> no_principal_trait = [...]
// cdb-check:struct ref$<dyn$<type_names::Trait3<u32,assoc$<AssocType,isize> >,core::marker::Send> > has_associated_type_trait = struct ref$<dyn$<type_names::Trait3<u32,assoc$<AssocType,isize> >,core::marker::Send> >
+// cdb-check:struct ref$<dyn$<type_names::TraitNoGenericsButWithAssocType<assoc$<Output,isize> > > > has_associated_type_but_no_generics_trait = struct ref$<dyn$<type_names::TraitNoGenericsButWithAssocType<assoc$<Output,isize> > > >
// BARE FUNCTIONS
// cdb-command:dv /t *_fn*
// FOREIGN TYPES
// cdb-command:dv /t foreign*
-// cdb-check:struct ForeignType2 * foreign2 = [...]
-// cdb-check:struct ForeignType1 * foreign1 = [...]
+// cdb-check:struct type_names::mod1::extern$0::ForeignType2 * foreign2 = [...]
+// cdb-check:struct type_names::extern$0::ForeignType1 * foreign1 = [...]
#![allow(unused_variables)]
#![feature(omit_gdb_pretty_printer_section)]
}
mod mod1 {
- pub use self::Enum2::{Variant1, Variant2};
pub struct Struct2;
pub enum Enum2 {
panic!()
}
}
+trait TraitNoGenericsButWithAssocType {
+ type Output;
+ fn foo(&self) -> Self::Output;
+}
impl Trait1 for isize {}
impl<T1, T2> Trait2<T1, T2> for isize {}
impl<T> Trait3<T> for isize {
type AssocType = isize;
}
+impl TraitNoGenericsButWithAssocType for isize {
+ type Output = isize;
+ fn foo(&self) -> Self::Output {
+ *self
+ }
+}
fn rust_fn(_: Option<isize>, _: Option<&mod1::Struct2>) {}
extern "C" fn extern_c_fn(_: isize) {}
// Enums
let simple_enum_1 = Variant1;
let simple_enum_2 = Variant2(0);
- let simple_enum_3 = mod1::Variant2(Struct1);
+ let simple_enum_3 = mod1::Enum2::Variant2(Struct1);
let generic_enum_1: mod1::mod2::Enum3<mod1::Struct2> = mod1::mod2::Variant1;
let generic_enum_2 = mod1::mod2::Variant2(Struct1);
// Tuples
let tuple1 = (8u32, Struct1, mod1::mod2::Variant2(mod1::Struct2));
- let tuple2 = ((Struct1, mod1::mod2::Struct3), mod1::Variant1, 'x');
+ let tuple2 = ((Struct1, mod1::mod2::Struct3), mod1::Enum2::Variant1, 'x');
// Box
let box1 = (Box::new(1f32), 0i32);
let vec1 = vec![0_usize, 2, 3];
let slice1 = &*vec1;
- let vec2 = vec![mod1::Variant2(Struct1)];
+ let vec2 = vec![mod1::Enum2::Variant2(Struct1)];
let slice2 = &*vec2;
// Trait Objects
let mut_ref_trait = (&mut mut_int1) as &mut dyn Trait1;
let no_principal_trait = Box::new(0_isize) as Box<(dyn Send + Sync)>;
let has_associated_type_trait = &0_isize as &(dyn Trait3<u32, AssocType = isize> + Send);
+ let has_associated_type_but_no_generics_trait =
+ &0_isize as &dyn TraitNoGenericsButWithAssocType<Output = isize>;
let generic_box_trait = Box::new(0_isize) as Box<dyn Trait2<i32, mod1::Struct2>>;
let generic_ref_trait = (&0_isize) as &dyn Trait2<Struct1, Struct1>;
// gdbg-check:$3 = {pointer = [...], vtable = [...]}
// gdbr-check:$3 = &unsized::Foo<dyn core::fmt::Debug> {pointer: [...], vtable: [...]}
+// gdb-command:print _box
+// gdbg-check:$4 = {pointer = [...], vtable = [...]}
+// gdbr-check:$4 = alloc::boxed::Box<unsized::Foo<dyn core::fmt::Debug>, alloc::alloc::Global> {pointer: [...], vtable: [...]}
+
// gdb-command:print tuple_slice
-// gdbg-check:$4 = {data_ptr = [...], length = 2}
-// gdbr-check:$4 = &(i32, i32, [i32]) {data_ptr: [...], length: 2}
+// gdbg-check:$5 = {data_ptr = [...], length = 2}
+// gdbr-check:$5 = &(i32, i32, [i32]) {data_ptr: [...], length: 2}
// gdb-command:print tuple_dyn
-// gdbg-check:$5 = {pointer = [...], vtable = [...]}
-// gdbr-check:$5 = &(i32, i32, dyn core::fmt::Debug) {pointer: [...], vtable: [...]}
+// gdbg-check:$6 = {pointer = [...], vtable = [...]}
+// gdbr-check:$6 = &(i32, i32, dyn core::fmt::Debug) {pointer: [...], vtable: [...]}
// === CDB TESTS ===================================================================================
// cdb-check: [+0x000] pointer : 0x[...] [Type: unsized::Foo<dyn$<core::fmt::Debug> > *]
// cdb-check: [...] vtable : 0x[...] [Type: unsigned [...]int[...] (*)[3]]
+// cdb-command:dx _box
+// cdb-check:
+// cdb-check:_box [Type: alloc::boxed::Box<unsized::Foo<dyn$<core::fmt::Debug> >,alloc::alloc::Global>]
+// cdb-check:[+0x000] pointer : 0x[...] [Type: unsized::Foo<dyn$<core::fmt::Debug> > *]
+// cdb-check:[...] vtable : 0x[...] [Type: unsigned [...]int[...] (*)[3]]
+
// cdb-command:dx tuple_slice
// cdb-check:tuple_slice [Type: ref$<tuple$<i32,i32,slice$<i32> > >]
// cdb-check: [+0x000] data_ptr : 0x[...] [Type: tuple$<i32,i32,slice$<i32> > *]
let a: &Foo<[u8]> = &foo.value;
let b: &Foo<Foo<[u8]>> = &foo;
let c: &Foo<dyn std::fmt::Debug> = &Foo { value: 7i32 };
+ let _box: Box<Foo<dyn std::fmt::Debug>> = Box::new(Foo { value: 8i32 });
// Also check unsized tuples
let tuple_slice: &(i32, i32, [i32]) = &(0, 1, [2, 3]);
// pp-exact:dollar-crate.pp
fn main() {
- { ::std::io::_print(::core::fmt::Arguments::new_v1(&["rust\n"], &[])); };
+ ::std::io::_print(::core::fmt::Arguments::new_v1(&["rust\n"], &[]));
}
-include ../tools.mk
all:
- $(RUSTC) --edition=2018 --crate-type=rlib ../../../../library/alloc/src/lib.rs --cfg feature=\"external_crate\" --cfg no_global_oom_handling
+ $(RUSTC) --edition=2021 --crate-type=rlib ../../../../library/alloc/src/lib.rs --cfg no_global_oom_handling
116| 1|
117| 1| let
118| 1| _unused_closure
- 119| | =
- 120| | |
+ 119| 1| =
+ 120| 1| |
121| | mut countdown
122| | |
123| 0| {
169| | ;
170| |
171| 1| let short_used_not_covered_closure_line_break_no_block_embedded_branch =
- 172| | | _unused_arg: u8 |
+ 172| 1| | _unused_arg: u8 |
173| 0| println!(
174| 0| "not called: {}",
175| 0| if is_true { "check" } else { "me" }
187| | ;
188| |
189| 1| let short_used_covered_closure_line_break_no_block_embedded_branch =
- 190| 1| | _unused_arg: u8 |
+ 190| | | _unused_arg: u8 |
191| 1| println!(
192| 1| "not called: {}",
193| 1| if is_true { "check" } else { "me" }
click: ".srclink"
wait-for: "#sidebar-toggle"
click: "#sidebar-toggle"
-wait-for: 500
fail: true
assert-css: ("#source-sidebar", { "left": "-300px" })
// check.
goto: file://|DOC_PATH|/settings/index.html
// Wait a bit to be sure the default theme is applied.
-wait-for: 1000
-assert-css: ("body", {"background-color": "rgb(15, 20, 25)"})
+// If the theme isn't applied, the command will time out.
+wait-for-css: ("body", {"background-color": "rgb(15, 20, 25)"})
wait-for: "#search h1" // The search element is empty before the first search
assert-attribute: ("#search", {"class": "content"})
assert-attribute: ("#main-content", {"class": "content hidden"})
+assert-document-property: ({"URL": "index.html?search=test"}, ENDS_WITH)
press-key: "Escape"
assert-attribute: ("#search", {"class": "content hidden"})
assert-attribute: ("#main-content", {"class": "content"})
+assert-document-property: ({"URL": "index.html"}, [ENDS_WITH])
// Check that focusing the search input brings back the search results
focus: ".search-input"
assert-attribute: ("#search", {"class": "content"})
assert-attribute: ("#main-content", {"class": "content hidden"})
+assert-document-property: ({"URL": "index.html?search=test"}, ENDS_WITH)
// Now let's check that when the help popup is displayed and we press Escape, it doesn't
// hide the search results too.
click: "#help-button"
+assert-document-property: ({"URL": "index.html?search=test"}, [ENDS_WITH])
assert-attribute: ("#help", {"class": ""})
press-key: "Escape"
assert-attribute: ("#help", {"class": "hidden"})
assert-attribute: ("#search", {"class": "content"})
assert-attribute: ("#main-content", {"class": "content hidden"})
+assert-document-property: ({"URL": "index.html?search=test"}, [ENDS_WITH])
// Check that Escape hides the search results when a search result is focused.
focus: ".search-input"
press-key: "Enter"
// Waiting for the search results to appear...
wait-for: "#titles"
+assert-document-property: ({"URL": "&filter-crate="}, CONTAINS)
// We check that there is no more "test_docs" appearing.
assert-false: "#results .externcrate"
// We also check that "lib2" is the filter crate.
// Now we check that leaving the search results and putting them back keeps the
// crate filtering.
press-key: "Escape"
-wait-for: 100
-assert-css: ("#main-content", {"display": "block"})
+wait-for-css: ("#main-content", {"display": "block"})
focus: ".search-input"
-wait-for: 100
-assert-css: ("#main-content", {"display": "none"})
+wait-for-css: ("#main-content", {"display": "none"})
// We check that there is no more "test_docs" appearing.
assert-false: "#results .externcrate"
assert-property: ("#crate-search", {"value": "lib2"})
"//*[@class='result-name']//*[text()='(keyword)']",
{"color": "rgb(0, 0, 0)"},
)
+
+// Check the alias more specifically in the dark theme.
+goto: file://|DOC_PATH|/test_docs/index.html
+// We set the theme so we're sure that the correct values will be used, whatever the computer
+// this test is running on.
+local-storage: {
+ "rustdoc-theme": "dark",
+ "rustdoc-preferred-dark-theme": "dark",
+ "rustdoc-use-system-theme": "false",
+}
+// If the text isn't displayed, the browser doesn't compute color style correctly...
+show-text: true
+// We reload the page so the local storage settings are being used.
+reload:
+write: (".search-input", "thisisanalias")
+// Waiting for the search results to appear...
+wait-for: "#titles"
+// Checking that the colors for the alias element are the ones expected.
+assert-css: (".result-name > .alias", {"color": "rgb(255, 255, 255)"})
+assert-css: (".result-name > .alias > .grey", {"color": "rgb(204, 204, 204)"})
+++ /dev/null
-// Checks that the result colors are as expected.
-goto: file://|DOC_PATH|/test_docs/index.html
-// We set the theme so we're sure that the correct values will be used, whatever the computer
-// this test is running on.
-local-storage: {
- "rustdoc-theme": "dark",
- "rustdoc-preferred-dark-theme": "dark",
- "rustdoc-use-system-theme": "false",
-}
-// If the text isn't displayed, the browser doesn't compute color style correctly...
-show-text: true
-// We reload the page so the local storage settings are being used.
-reload:
-write: (".search-input", "thisisanalias")
-// Waiting for the search results to appear...
-wait-for: "#titles"
-// Checking that the colors for the alias element are the ones expected.
-assert-css: (".result-name > .alias", {"color": "rgb(255, 255, 255)"})
-assert-css: (".result-name > .alias > .grey", {"color": "rgb(204, 204, 204)"})
// Now we can check that the feature is working as expected!
goto: file://|DOC_PATH|/test_docs/index.html?search=struct%3AFoo&go_to_first=true
// Waiting for the page to load...
-wait-for: 500
-assert-text: (".fqn .in-band", "Struct test_docs::Foo")
+wait-for-text: (".fqn .in-band", "Struct test_docs::Foo")
--- /dev/null
+// This test ensures that the elements in the sidebar are displayed correctly.
+javascript: false
+goto: file://|DOC_PATH|/src/test_docs/lib.rs.html
+// Since the javascript is disabled, there shouldn't be a toggle.
+assert-false: "#sidebar-toggle"
+// For some reason, we need to wait a bit here because it seems like the transition on opacity
+// is being applied whereas it can't be reproduced in a browser...
+wait-for-css: (".sidebar > *", {"visibility": "hidden", "opacity": 0})
+
+// Let's retry with javascript enabled.
+javascript: true
+reload:
+wait-for: "#sidebar-toggle"
+assert-css: ("#sidebar-toggle", {"visibility": "visible", "opacity": 1})
+assert-css: (".sidebar > *:not(#sidebar-toggle)", {"visibility": "hidden", "opacity": 0})
+// Let's expand the sidebar now.
+click: "#sidebar-toggle"
+// Because of the transition CSS, better wait a second before checking.
+wait-for-css: ("#sidebar-toggle", {"visibility": "visible", "opacity": 1})
+assert-css: (".sidebar > *:not(#sidebar-toggle)", {"visibility": "visible", "opacity": 1})
assert-css: ("nav.sidebar", {"width": "50px"})
// We now click on the button to expand the sidebar.
click: (10, 10)
-// We wait for the sidebar to be expanded (there is a 0.5s animation).
-wait-for: 600
-assert-css: ("nav.sidebar.expanded", {"width": "300px"})
+// We wait for the sidebar to be expanded.
+wait-for-css: ("nav.sidebar.expanded", {"width": "300px"})
assert-css: ("nav.sidebar.expanded a", {"font-size": "14px"})
// We collapse the sidebar.
click: (10, 10)
-// We wait for the sidebar to be collapsed (there is a 0.5s animation).
-wait-for: 600
// We ensure that the class has been removed.
-assert-false: "nav.sidebar.expanded"
+wait-for: "nav.sidebar:not(.expanded)"
assert: "nav.sidebar"
// We now switch to mobile mode.
goto: file://|DOC_PATH|/test_docs/index.html
click: "#theme-picker"
click: "#theme-choices > button:first-child"
-wait-for: 500
// should be the ayu theme so let's check the color
-assert-css: ("body", { "background-color": "rgb(15, 20, 25)" })
+wait-for-css: ("body", { "background-color": "rgb(15, 20, 25)" })
click: "#theme-choices > button:last-child"
-wait-for: 500
// should be the light theme so let's check the color
-assert-css: ("body", { "background-color": "rgb(255, 255, 255)" })
+wait-for-css: ("body", { "background-color": "rgb(255, 255, 255)" })
goto: file://|DOC_PATH|/settings.html
click: "#theme-light"
-wait-for: 500
-assert-css: ("body", { "background-color": "rgb(255, 255, 255)" })
+wait-for-css: ("body", { "background-color": "rgb(255, 255, 255)" })
assert-local-storage: { "rustdoc-theme": "light" }
click: "#theme-dark"
-wait-for: 500
-assert-css: ("body", { "background-color": "rgb(53, 53, 53)" })
+wait-for-css: ("body", { "background-color": "rgb(53, 53, 53)" })
assert-local-storage: { "rustdoc-theme": "dark" }
click: "#theme-ayu"
-wait-for: 500
-assert-css: ("body", { "background-color": "rgb(15, 20, 25)" })
+wait-for-css: ("body", { "background-color": "rgb(15, 20, 25)" })
assert-local-storage: { "rustdoc-theme": "ayu" }
wait-for: ".settings"
// We change the theme to "light".
click: "#theme-light"
-wait-for: 250
-assert-css: ("body", { "background-color": "rgb(255, 255, 255)" })
+wait-for-css: ("body", { "background-color": "rgb(255, 255, 255)" })
assert-local-storage: { "rustdoc-theme": "light" }
// We go back in history.
assert-attribute-false: ("#main-content > details.top-doc", {"open": ""})
assert-text: ("#toggle-all-docs", "[+]")
click: "#toggle-all-docs"
-wait-for: 50
// Not collapsed anymore so the "open" attribute should be back.
-assert-attribute: ("#main-content > details.top-doc", {"open": ""})
+wait-for-attribute: ("#main-content > details.top-doc", {"open": ""})
assert-text: ("#toggle-all-docs", "[−]")
// Check that it works on non-module pages as well.
goto: file://|DOC_PATH|/test_docs/struct.Foo.html
// We first check that everything is visible.
assert-text: ("#toggle-all-docs", "[−]")
-assert-attribute: ("details.rustdoc-toggle", {"open": ""}, ALL)
+assert-attribute: ("#implementations-list details.rustdoc-toggle", {"open": ""}, ALL)
+assert-attribute: ("#trait-implementations-list details.rustdoc-toggle", {"open": ""}, ALL)
+assert-attribute-false: (
+ "#blanket-implementations-list > details.rustdoc-toggle",
+ {"open": ""},
+ ALL,
+)
+
// We collapse them all.
click: "#toggle-all-docs"
-wait-for: 50
-assert-text: ("#toggle-all-docs", "[+]")
+wait-for-text: ("#toggle-all-docs", "[+]")
// We check that all <details> are collapsed (except for the impl block ones).
assert-attribute-false: ("details.rustdoc-toggle:not(.implementors-toggle)", {"open": ""}, ALL)
assert-attribute: ("#implementations-list > details.implementors-toggle", {"open": ""})
)
// We open them all again.
click: "#toggle-all-docs"
-wait-for: 50
-assert-text: ("#toggle-all-docs", "[−]")
+wait-for-text: ("#toggle-all-docs", "[−]")
assert-attribute: ("details.rustdoc-toggle", {"open": ""}, ALL)
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![no_core]
+
+// @set foo = generic_args.json "$.index[*][?(@.name=='Foo')].id"
+pub trait Foo {}
+
+// @is - "$.index[*][?(@.name=='generics')].inner.generics.where_predicates" "[]"
+// @count - "$.index[*][?(@.name=='generics')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='generics')].inner.generics.params[0].name" '"F"'
+// @is - "$.index[*][?(@.name=='generics')].inner.generics.params[0].kind.type.default" 'null'
+// @count - "$.index[*][?(@.name=='generics')].inner.generics.params[0].kind.type.bounds[*]" 1
+// @is - "$.index[*][?(@.name=='generics')].inner.generics.params[0].kind.type.bounds[0].trait_bound.trait.inner.id" '$foo'
+// @count - "$.index[*][?(@.name=='generics')].inner.decl.inputs[*]" 1
+// @is - "$.index[*][?(@.name=='generics')].inner.decl.inputs[0][0]" '"f"'
+// @is - "$.index[*][?(@.name=='generics')].inner.decl.inputs[0][1].kind" '"generic"'
+// @is - "$.index[*][?(@.name=='generics')].inner.decl.inputs[0][1].inner" '"F"'
+pub fn generics<F: Foo>(f: F) {}
+
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.generics.where_predicates" "[]"
+// @count - "$.index[*][?(@.name=='impl_trait')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.generics.params[0].name" '"impl Foo"'
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.generics.params[0].kind.type.bounds[0].trait_bound.trait.inner.id" $foo
+// @count - "$.index[*][?(@.name=='impl_trait')].inner.decl.inputs[*]" 1
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.decl.inputs[0][0]" '"f"'
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.decl.inputs[0][1].kind" '"impl_trait"'
+// @count - "$.index[*][?(@.name=='impl_trait')].inner.decl.inputs[0][1].inner[*]" 1
+// @is - "$.index[*][?(@.name=='impl_trait')].inner.decl.inputs[0][1].inner[0].trait_bound.trait.inner.id" $foo
+pub fn impl_trait(f: impl Foo) {}
+
+// @count - "$.index[*][?(@.name=='where_clase')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='where_clase')].inner.generics.params[0].name" '"F"'
+// @is - "$.index[*][?(@.name=='where_clase')].inner.generics.params[0].kind" '{"type": {"bounds": [], "default": null, "synthetic": false}}'
+// @count - "$.index[*][?(@.name=='where_clase')].inner.decl.inputs[*]" 1
+// @is - "$.index[*][?(@.name=='where_clase')].inner.decl.inputs[0][0]" '"f"'
+// @is - "$.index[*][?(@.name=='where_clase')].inner.decl.inputs[0][1].kind" '"generic"'
+// @is - "$.index[*][?(@.name=='where_clase')].inner.decl.inputs[0][1].inner" '"F"'
+// @count - "$.index[*][?(@.name=='where_clase')].inner.generics.where_predicates[*]" 1
+// @is - "$.index[*][?(@.name=='where_clase')].inner.generics.where_predicates[0].bound_predicate.type" '{"inner": "F", "kind": "generic"}'
+// @count - "$.index[*][?(@.name=='where_clase')].inner.generics.where_predicates[0].bound_predicate.bounds[*]" 1
+// @is - "$.index[*][?(@.name=='where_clase')].inner.generics.where_predicates[0].bound_predicate.bounds[0].trait_bound.trait.inner.id" $foo
+pub fn where_clase<F>(f: F)
+where
+ F: Foo,
+{
+}
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![no_core]
+
+// @count generic_returns.json "$.index[*][?(@.name=='generic_returns')].inner.items[*]" 2
+
+// @set foo = - "$.index[*][?(@.name=='Foo')].id"
+pub trait Foo {}
+
+// @is - "$.index[*][?(@.name=='get_foo')].inner.decl.inputs" []
+// @is - "$.index[*][?(@.name=='get_foo')].inner.decl.output.kind" '"impl_trait"'
+// @count - "$.index[*][?(@.name=='get_foo')].inner.decl.output.inner[*]" 1
+// @is - "$.index[*][?(@.name=='get_foo')].inner.decl.output.inner[0].trait_bound.trait.inner.id" $foo
+pub fn get_foo() -> impl Foo {
+ Fooer {}
+}
+
+struct Fooer {}
+
+impl Foo for Fooer {}
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![no_core]
+
+// @set wham_id = generics.json "$.index[*][?(@.name=='Wham')].id"
+pub trait Wham {}
+
+// @is - "$.index[*][?(@.name=='one_generic_param_fn')].inner.generics.where_predicates" []
+// @count - "$.index[*][?(@.name=='one_generic_param_fn')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='one_generic_param_fn')].inner.generics.params[0].name" '"T"'
+// @has - "$.index[*][?(@.name=='one_generic_param_fn')].inner.generics.params[0].kind.type.synthetic" false
+// @has - "$.index[*][?(@.name=='one_generic_param_fn')].inner.generics.params[0].kind.type.bounds[0].trait_bound.trait.inner.id" $wham_id
+// @is - "$.index[*][?(@.name=='one_generic_param_fn')].inner.decl.inputs" '[["w", {"inner": "T", "kind": "generic"}]]'
+pub fn one_generic_param_fn<T: Wham>(w: T) {}
+
+// @is - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.generics.where_predicates" []
+// @count - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.generics.params[0].name" '"impl Wham"'
+// @has - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.generics.params[0].kind.type.synthetic" true
+// @has - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.generics.params[0].kind.type.bounds[0].trait_bound.trait.inner.id" $wham_id
+// @count - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.decl.inputs[*]" 1
+// @is - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.decl.inputs[0][0]" '"w"'
+// @is - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.decl.inputs[0][1].kind" '"impl_trait"'
+// @is - "$.index[*][?(@.name=='one_synthetic_generic_param_fn')].inner.decl.inputs[0][1].inner[0].trait_bound.trait.inner.id" $wham_id
+pub fn one_synthetic_generic_param_fn(w: impl Wham) {}
// @count - "$.index[*][?(@.name=='LendingItem')].inner.generics.params[*]" 1
// @is - "$.index[*][?(@.name=='LendingItem')].inner.generics.params[*].name" \"\'a\"
// @count - "$.index[*][?(@.name=='LendingItem')].inner.generics.where_predicates[*]" 1
- // @is - "$.index[*][?(@.name=='LendingItem')].inner.generics.where_predicates[*].bound_predicate.ty.inner" \"Self\"
+ // @is - "$.index[*][?(@.name=='LendingItem')].inner.generics.where_predicates[*].bound_predicate.type.inner" \"Self\"
// @is - "$.index[*][?(@.name=='LendingItem')].inner.generics.where_predicates[*].bound_predicate.bounds[*].outlives" \"\'a\"
// @count - "$.index[*][?(@.name=='LendingItem')].inner.bounds[*]" 1
- type LendingItem<'a>: Display where Self: 'a;
+ type LendingItem<'a>: Display
+ where
+ Self: 'a;
// @is - "$.index[*][?(@.name=='lending_next')].inner.decl.output.kind" \"qualified_path\"
// @count - "$.index[*][?(@.name=='lending_next')].inner.decl.output.inner.args.angle_bracketed.args[*]" 1
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![no_core]
+
+// @is longest.json "$.index[*][?(@.name=='longest')].inner.generics.params[0].name" \"\'a\"
+// @is - "$.index[*][?(@.name=='longest')].inner.generics.params[0].kind" '{"lifetime": {"outlives": []}}'
+// @is - "$.index[*][?(@.name=='longest')].inner.generics.params[0].kind" '{"lifetime": {"outlives": []}}'
+// @count - "$.index[*][?(@.name=='longest')].inner.generics.params[*]" 1
+// @is - "$.index[*][?(@.name=='longest')].inner.generics.where_predicates" []
+
+// @count - "$.index[*][?(@.name=='longest')].inner.decl.inputs[*]" 2
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[0][0]" '"l"'
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[1][0]" '"r"'
+
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[0][1].kind" '"borrowed_ref"'
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[0][1].inner.lifetime" \"\'a\"
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[0][1].inner.mutable" false
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[0][1].inner.type" '{"inner": "str", "kind": "primitive"}'
+
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[1][1].kind" '"borrowed_ref"'
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[1][1].inner.lifetime" \"\'a\"
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[1][1].inner.mutable" false
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.inputs[1][1].inner.type" '{"inner": "str", "kind": "primitive"}'
+
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.output.kind" '"borrowed_ref"'
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.output.inner.lifetime" \"\'a\"
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.output.inner.mutable" false
+// @is - "$.index[*][?(@.name=='longest')].inner.decl.output.inner.type" '{"inner": "str", "kind": "primitive"}'
+
+pub fn longest<'a>(l: &'a str, r: &'a str) -> &'a str {
+ if l.len() > r.len() { l } else { r }
+}
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![no_core]
+
+// @count outlives.json "$.index[*][?(@.name=='foo')].inner.generics.params[*]" 3
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.where_predicates" []
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[0].name" \"\'a\"
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[1].name" \"\'b\"
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[2].name" '"T"'
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[0].kind.lifetime.outlives" []
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[1].kind.lifetime.outlives" [\"\'a\"]
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[2].kind.type.default" null
+// @count - "$.index[*][?(@.name=='foo')].inner.generics.params[2].kind.type.bounds[*]" 1
+// @is - "$.index[*][?(@.name=='foo')].inner.generics.params[2].kind.type.bounds[0].outlives" \"\'b\"
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].kind" '"borrowed_ref"'
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.lifetime" \"\'a\"
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.mutable" false
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.type.kind" '"borrowed_ref"'
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.type.inner.lifetime" \"\'b\"
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.type.inner.mutable" false
+// @is - "$.index[*][?(@.name=='foo')].inner.decl.inputs[0][1].inner.type.inner.type" '{"inner": "T", "kind": "generic"}'
+pub fn foo<'a, 'b: 'a, T: 'b>(_: &'a &'b T) {}
--- /dev/null
+#![feature(no_core)]
+#![no_core]
+
+// @set wham = implementors.json "$.index[*][?(@.name=='Wham')].id"
+// @count - "$.index[*][?(@.name=='Wham')].inner.implementations[*]" 1
+// @set gmWham = - "$.index[*][?(@.name=='Wham')].inner.implementations[0]"
+pub trait Wham {}
+
+// @count - "$.index[*][?(@.name=='GeorgeMichael')].inner.impls[*]" 1
+// @is - "$.index[*][?(@.name=='GeorgeMichael')].inner.impls[0]" $gmWham
+// @set gm = - "$.index[*][?(@.name=='Wham')].id"
+
+// jsonpath_lib isn't expressive enough (for now) to get the "impl" item, so we
+// just check it isn't pointing to the type, but when you port to jsondocck-ng
+// check what the impl item is
+// @!is - "$.index[*][?(@.name=='Wham')].inner.implementations[0]" $gm
+pub struct GeorgeMichael {}
+
+impl Wham for GeorgeMichael {}
--- /dev/null
+// ignore-tidy-linelength
+
+#![feature(no_core)]
+#![feature(lang_items)]
+#![no_core]
+
+// @set loud_id = supertrait.json "$.index[*][?(@.name=='Loud')].id"
+pub trait Loud {}
+
+// @set very_loud_id = - "$.index[*][?(@.name=='VeryLoud')].id"
+// @count - "$.index[*][?(@.name=='VeryLoud')].inner.bounds[*]" 1
+// @is - "$.index[*][?(@.name=='VeryLoud')].inner.bounds[0].trait_bound.trait.inner.id" $loud_id
+pub trait VeryLoud: Loud {}
+
+// @set sounds_good_id = - "$.index[*][?(@.name=='SoundsGood')].id"
+pub trait SoundsGood {}
+
+// @count - "$.index[*][?(@.name=='MetalBand')].inner.bounds[*]" 2
+// @is - "$.index[*][?(@.name=='MetalBand')].inner.bounds[0].trait_bound.trait.inner.id" $very_loud_id
+// @is - "$.index[*][?(@.name=='MetalBand')].inner.bounds[1].trait_bound.trait.inner.id" $sounds_good_id
+pub trait MetalBand: VeryLoud + SoundsGood {}
+
+// @count - "$.index[*][?(@.name=='DnabLatem')].inner.bounds[*]" 2
+// @is - "$.index[*][?(@.name=='DnabLatem')].inner.bounds[1].trait_bound.trait.inner.id" $very_loud_id
+// @is - "$.index[*][?(@.name=='DnabLatem')].inner.bounds[0].trait_bound.trait.inner.id" $sounds_good_id
+pub trait DnabLatem: SoundsGood + VeryLoud {}
LL | | //! ```
| |_______^
|
-note: the lint level is defined here
- --> $DIR/check-fail.rs:4:9
- |
-LL | #![deny(rustdoc::all)]
- | ^^^^^^^^^^^^
= note: `#[deny(rustdoc::invalid_codeblock_attributes)]` implied by `#[deny(rustdoc::all)]`
= help: the code block will either not be tested if not marked as a rust one or the code will be wrapped inside a main function
LL | | pub fn foo() {}
| |_______________^
|
-note: the lint level is defined here
- --> $DIR/check.rs:8:9
- |
-LL | #![warn(rustdoc::all)]
- | ^^^^^^^^^^^^
= note: `#[warn(rustdoc::missing_doc_code_examples)]` implied by `#[warn(rustdoc::all)]`
warning: missing code example in this documentation
-// Test that `--show-output` has an effect and `allow(unused)` can be overriden.
+// Test that `--show-output` has an effect and `allow(unused)` can be overridden.
// check-pass
// edition:2018
LL | fn foo(x: &dyn std::fmt::Display) {}
| ^^^
|
-note: the lint level is defined here
- --> $DIR/display-output.rs:9:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(dead_code)]` implied by `#[warn(unused)]`
warning: 3 warnings emitted
LL | | /// ```
| |_______^
|
-note: the lint level is defined here
- --> $DIR/lint-group.rs:7:9
- |
-LL | #![deny(rustdoc::all)]
- | ^^^^^^^^^^^^
= note: `#[deny(rustdoc::private_doc_tests)]` implied by `#[deny(rustdoc::all)]`
error: missing code example in this documentation
LL | /// what up, let's make an [error]
| ^^^^^ no item named `error` in scope
|
-note: the lint level is defined here
- --> $DIR/lint-group.rs:7:9
- |
-LL | #![deny(rustdoc::all)]
- | ^^^^^^^^^^^^
= note: `#[deny(rustdoc::broken_intra_doc_links)]` implied by `#[deny(rustdoc::all)]`
= help: to escape `[` and `]` characters, add '\' before them like `\[` or `\]`
LL | /// <unknown>
| ^^^^^^^^^
|
-note: the lint level is defined here
- --> $DIR/lint-group.rs:7:9
- |
-LL | #![deny(rustdoc::all)]
- | ^^^^^^^^^^^^
= note: `#[deny(rustdoc::invalid_html_tags)]` implied by `#[deny(rustdoc::all)]`
error: aborting due to 5 previous errors
--- /dev/null
+#![crate_name = "foo"]
+
+// @has 'foo/struct.Foo.html'
+// @has - '//*[@id="impl-Send"]' 'impl !Send for Foo'
+// @has - '//*[@id="impl-Sync"]' 'impl !Sync for Foo'
+pub struct Foo(*const i8);
+pub trait Whatever: Send {}
+impl<T: Send + ?Sized> Whatever for T {}
--- /dev/null
+// aux-build:all-item-types.rs
+
+// This test is to ensure there is no problem on handling foreign functions
+// coming from a dependency.
+
+#![crate_name = "foo"]
+
+extern crate all_item_types;
+
+// @has 'foo/fn.foo_ffn.html'
+// @has - '//*[@class="docblock item-decl"]//code' 'pub unsafe extern "C" fn foo_ffn()'
+pub use all_item_types::foo_ffn;
#![crate_name = "foo"]
+// @!has 'foo/index.html' '//a[@href="struct.FooPublic.html"]/..' 'FooPublic 🔒'
// @has 'foo/struct.FooPublic.html' '//pre' 'pub struct FooPublic'
pub struct FooPublic;
+// @has 'foo/index.html' '//a[@href="struct.FooJustCrate.html"]/..' 'FooJustCrate 🔒'
// @has 'foo/struct.FooJustCrate.html' '//pre' 'pub(crate) struct FooJustCrate'
crate struct FooJustCrate;
+// @has 'foo/index.html' '//a[@href="struct.FooPubCrate.html"]/..' 'FooPubCrate 🔒'
// @has 'foo/struct.FooPubCrate.html' '//pre' 'pub(crate) struct FooPubCrate'
pub(crate) struct FooPubCrate;
+// @has 'foo/index.html' '//a[@href="struct.FooSelf.html"]/..' 'FooSelf 🔒'
// @has 'foo/struct.FooSelf.html' '//pre' 'pub(crate) struct FooSelf'
pub(self) struct FooSelf;
+// @has 'foo/index.html' '//a[@href="struct.FooInSelf.html"]/..' 'FooInSelf 🔒'
// @has 'foo/struct.FooInSelf.html' '//pre' 'pub(crate) struct FooInSelf'
pub(in self) struct FooInSelf;
+// @has 'foo/index.html' '//a[@href="struct.FooPriv.html"]/..' 'FooPriv 🔒'
// @has 'foo/struct.FooPriv.html' '//pre' 'pub(crate) struct FooPriv'
struct FooPriv;
+// @!has 'foo/index.html' '//a[@href="pub_mod/index.html"]/..' 'pub_mod 🔒'
+pub mod pub_mod {}
+
+// @has 'foo/index.html' '//a[@href="pub_crate_mod/index.html"]/..' 'pub_crate_mod 🔒'
+pub(crate) mod pub_crate_mod {}
+
+// @has 'foo/index.html' '//a[@href="a/index.html"]/..' 'a 🔒'
mod a {
+ // @has 'foo/a/index.html' '//a[@href="struct.FooASuper.html"]/..' 'FooASuper 🔒'
// @has 'foo/a/struct.FooASuper.html' '//pre' 'pub(crate) struct FooASuper'
pub(super) struct FooASuper;
+ // @has 'foo/a/index.html' '//a[@href="struct.FooAInSuper.html"]/..' 'FooAInSuper 🔒'
// @has 'foo/a/struct.FooAInSuper.html' '//pre' 'pub(crate) struct FooAInSuper'
pub(in super) struct FooAInSuper;
+ // @has 'foo/a/index.html' '//a[@href="struct.FooAInA.html"]/..' 'FooAInA 🔒'
// @has 'foo/a/struct.FooAInA.html' '//pre' 'struct FooAInA'
// @!has 'foo/a/struct.FooAInA.html' '//pre' 'pub'
pub(in a) struct FooAInA;
+ // @has 'foo/a/index.html' '//a[@href="struct.FooAPriv.html"]/..' 'FooAPriv 🔒'
// @has 'foo/a/struct.FooAPriv.html' '//pre' 'struct FooAPriv'
// @!has 'foo/a/struct.FooAPriv.html' '//pre' 'pub'
struct FooAPriv;
+ // @has 'foo/a/index.html' '//a[@href="b/index.html"]/..' 'b 🔒'
mod b {
+ // @has 'foo/a/b/index.html' '//a[@href="struct.FooBSuper.html"]/..' 'FooBSuper 🔒'
// @has 'foo/a/b/struct.FooBSuper.html' '//pre' 'pub(super) struct FooBSuper'
pub(super) struct FooBSuper;
+ // @has 'foo/a/b/index.html' '//a[@href="struct.FooBInSuperSuper.html"]/..' 'FooBInSuperSuper 🔒'
// @has 'foo/a/b/struct.FooBInSuperSuper.html' '//pre' 'pub(crate) struct FooBInSuperSuper'
pub(in super::super) struct FooBInSuperSuper;
+ // @has 'foo/a/b/index.html' '//a[@href="struct.FooBInAB.html"]/..' 'FooBInAB 🔒'
// @has 'foo/a/b/struct.FooBInAB.html' '//pre' 'struct FooBInAB'
// @!has 'foo/a/b/struct.FooBInAB.html' '//pre' 'pub'
pub(in a::b) struct FooBInAB;
+ // @has 'foo/a/b/index.html' '//a[@href="struct.FooBPriv.html"]/..' 'FooBPriv 🔒'
// @has 'foo/a/b/struct.FooBPriv.html' '//pre' 'struct FooBPriv'
// @!has 'foo/a/b/struct.FooBPriv.html' '//pre' 'pub'
struct FooBPriv;
+
+ // @!has 'foo/a/b/index.html' '//a[@href="struct.FooBPub.html"]/..' 'FooBPub 🔒'
+ // @has 'foo/a/b/struct.FooBPub.html' '//pre' 'pub struct FooBPub'
+ pub struct FooBPub;
}
}
//
// @has 'foo/trait.PubTrait.html' '//pre' 'fn function();'
// @!has 'foo/trait.PubTrait.html' '//pre' 'pub fn function();'
+//
+// @!has 'foo/index.html' '//a[@href="trait.PubTrait.html"]/..' 'PubTrait 🔒'
pub trait PubTrait {
type Type;
fn function();
}
+// @has 'foo/index.html' '//a[@href="trait.PrivTrait.html"]/..' 'PrivTrait 🔒'
+trait PrivTrait {}
+
// @has 'foo/struct.FooPublic.html' '//h4[@class="code-header"]' 'type Type'
// @!has 'foo/struct.FooPublic.html' '//h4[@class="code-header"]' 'pub type Type'
//
let allowed = |attr| pprust::attribute_to_string(attr).contains("allowed_attr");
if !cx.tcx.hir().attrs(item.hir_id()).iter().any(allowed) {
cx.lint(MISSING_ALLOWED_ATTR, |lint| {
- lint.build("Missing 'allowed_attr' attribute").set_span(span).emit()
+ lint.build("Missing 'allowed_attr' attribute").set_span(span).emit();
});
}
}
if !cx.sess().contains_name(attrs, $attr) {
cx.lint(CRATE_NOT_OKAY, |lint| {
let msg = format!("crate is not marked with #![{}]", $attr);
- lint.build(&msg).set_span(span).emit()
+ lint.build(&msg).set_span(span).emit();
});
}
)*
let span = cx.tcx.def_span(CRATE_DEF_ID);
if !cx.sess().contains_name(attrs, Symbol::intern("crate_okay")) {
cx.lint(CRATE_NOT_OKAY, |lint| {
- lint.build("crate is not marked with #![crate_okay]").set_span(span).emit()
+ lint.build("crate is not marked with #![crate_okay]").set_span(span).emit();
});
}
}
fn check_item(&mut self, cx: &LateContext, it: &rustc_hir::Item) {
match it.ident.as_str() {
"lintme" => cx.lint(TEST_LINT, |lint| {
- lint.build("item is named 'lintme'").set_span(it.span).emit()
+ lint.build("item is named 'lintme'").set_span(it.span).emit();
}),
"pleaselintme" => cx.lint(PLEASE_LINT, |lint| {
- lint.build("item is named 'pleaselintme'").set_span(it.span).emit()
+ lint.build("item is named 'pleaselintme'").set_span(it.span).emit();
}),
_ => {}
}
fn check_item(&mut self, cx: &EarlyContext, it: &ast::Item) {
if it.ident.name.as_str() == "lintme" {
cx.lint(TEST_LINT, |lint| {
- lint.build("item is named 'lintme'").set_span(it.span).emit()
+ lint.build("item is named 'lintme'").set_span(it.span).emit();
});
}
}
fn check_item(&mut self, cx: &EarlyContext, it: &ast::Item) {
if it.ident.name.as_str() == "lintme" {
cx.lint(TEST_LINT, |lint| {
- lint.build("item is named 'lintme'").set_span(it.span).emit()
+ lint.build("item is named 'lintme'").set_span(it.span).emit();
});
}
if it.ident.name.as_str() == "lintmetoo" {
cx.lint(TEST_GROUP, |lint| {
- lint.build("item is named 'lintmetoo'").set_span(it.span).emit()
+ lint.build("item is named 'lintmetoo'").set_span(it.span).emit();
});
}
}
// ignore-stage1
-
+// compile-flags: -Zdeduplicate-diagnostics=yes
extern crate rustc_data_structures;
//~^ use of unstable library feature 'rustc_private'
extern crate rustc_macros;
LL | fn lintmetoo() { }
| ^^^^^^^^^^^^^^^^^^
|
-note: the lint level is defined here
- --> $DIR/lint-tool-test.rs:14:9
- |
-LL | #![deny(clippy_group)]
- | ^^^^^^^^^^^^
= note: `#[deny(clippy::test_group)]` implied by `#[deny(clippy::group)]`
warning: lint name `test_group` is deprecated and may not have an effect in the future.
// ignore-android
// ignore-arm
// ignore-aarch64
+// needs-asm-support
#![feature(asm_sym)]
#[cfg(target_arch = "x86_64")]
+++ /dev/null
-#![crate_name = "anonexternmod"]
-#![feature(rustc_private)]
-
-extern crate libc;
-
-#[link(name = "rust_test_helpers", kind = "static")]
-extern "C" {
- pub fn rust_get_test_int() -> libc::intptr_t;
-}
+++ /dev/null
-#![crate_name = "foreign_lib"]
-#![feature(rustc_private)]
-
-pub mod rustrt {
- extern crate libc;
-
- #[link(name = "rust_test_helpers", kind = "static")]
- extern "C" {
- pub fn rust_get_test_int() -> libc::intptr_t;
- }
-}
-
-pub mod rustrt2 {
- extern crate libc;
-
- extern "C" {
- pub fn rust_get_test_int() -> libc::intptr_t;
- }
-}
-
-pub mod rustrt3 {
- // Different type, but same ABI (on all supported platforms).
- // Ensures that we don't ICE or trigger LLVM asserts when
- // importing the same symbol under different types.
- // See https://github.com/rust-lang/rust/issues/32740.
- extern "C" {
- pub fn rust_get_test_int() -> *const u8;
- }
-}
-
-pub fn local_uses() {
- unsafe {
- let x = rustrt::rust_get_test_int();
- assert_eq!(x, rustrt2::rust_get_test_int());
- assert_eq!(x as *const _, rustrt3::rust_get_test_int());
- }
-}
+++ /dev/null
-#![crate_name = "anonexternmod"]
-#![feature(rustc_private)]
-
-extern crate libc;
-
-#[link(name = "rust_test_helpers", kind = "static")]
-extern "C" {
- pub fn rust_get_test_int() -> libc::intptr_t;
-}
--- /dev/null
+// run-pass
+// aux-build:anon-extern-mod-cross-crate-1.rs
+// aux-build:anon-extern-mod-cross-crate-1.rs
+// pretty-expanded FIXME #23616
+// ignore-wasm32-bare no libc to test ffi with
+
+extern crate anonexternmod;
+
+pub fn main() { }
+++ /dev/null
-// run-pass
-// aux-build:anon-extern-mod-cross-crate-1.rs
-// aux-build:anon-extern-mod-cross-crate-1.rs
-// pretty-expanded FIXME #23616
-// ignore-wasm32-bare no libc to test ffi with
-
-extern crate anonexternmod;
-
-pub fn main() { }
--- /dev/null
+// run-pass
+// aux-build:foreign_lib.rs
+// ignore-wasm32-bare no libc to test ffi with
+
+// The purpose of this test is to check that we can
+// successfully (and safely) invoke external, cdecl
+// functions from outside the crate.
+
+// pretty-expanded FIXME #23616
+
+extern crate foreign_lib;
+
+pub fn main() {
+ unsafe {
+ let _foo = foreign_lib::rustrt::rust_get_test_int();
+ }
+}
+++ /dev/null
-// run-pass
-// aux-build:foreign_lib.rs
-// ignore-wasm32-bare no libc to test ffi with
-
-// The purpose of this test is to check that we can
-// successfully (and safely) invoke external, cdecl
-// functions from outside the crate.
-
-// pretty-expanded FIXME #23616
-
-extern crate foreign_lib;
-
-pub fn main() {
- unsafe {
- let _foo = foreign_lib::rustrt::rust_get_test_int();
- }
-}
// only-aarch64
-// compile-flags: -C target-feature=+fp
+// compile-flags: -C target-feature=+neon
#![feature(asm_const, asm_sym)]
// only-aarch64
// run-pass
+// needs-asm-support
// revisions: mirunsafeck thirunsafeck
// [thirunsafeck]compile-flags: -Z thir-unsafeck
// only-aarch64
+// needs-asm-support
// run-rustfix
use std::arch::asm;
// only-aarch64
+// needs-asm-support
// run-rustfix
use std::arch::asm;
error: the `nomem` option was already provided
- --> $DIR/duplicate-options.rs:8:33
+ --> $DIR/duplicate-options.rs:9:33
|
LL | asm!("", options(nomem, nomem));
| ^^^^^ this option was already provided
error: the `preserves_flags` option was already provided
- --> $DIR/duplicate-options.rs:10:43
+ --> $DIR/duplicate-options.rs:11:43
|
LL | asm!("", options(preserves_flags, preserves_flags));
| ^^^^^^^^^^^^^^^ this option was already provided
error: the `nostack` option was already provided
- --> $DIR/duplicate-options.rs:12:61
+ --> $DIR/duplicate-options.rs:13:61
|
LL | asm!("", options(nostack, preserves_flags), options(nostack));
| ^^^^^^^ this option was already provided
error: the `nostack` option was already provided
- --> $DIR/duplicate-options.rs:14:35
+ --> $DIR/duplicate-options.rs:15:35
|
LL | asm!("", options(nostack, nostack), options(nostack), options(nostack));
| ^^^^^^^ this option was already provided
error: the `nostack` option was already provided
- --> $DIR/duplicate-options.rs:14:53
+ --> $DIR/duplicate-options.rs:15:53
|
LL | asm!("", options(nostack, nostack), options(nostack), options(nostack));
| ^^^^^^^ this option was already provided
error: the `nostack` option was already provided
- --> $DIR/duplicate-options.rs:14:71
+ --> $DIR/duplicate-options.rs:15:71
|
LL | asm!("", options(nostack, nostack), options(nostack), options(nostack));
| ^^^^^^^ this option was already provided
error: the `noreturn` option was already provided
- --> $DIR/duplicate-options.rs:21:38
+ --> $DIR/duplicate-options.rs:22:38
|
LL | options(preserves_flags, noreturn),
| ^^^^^^^^ this option was already provided
error: the `nomem` option was already provided
- --> $DIR/duplicate-options.rs:22:21
+ --> $DIR/duplicate-options.rs:23:21
|
LL | options(nomem, nostack),
| ^^^^^ this option was already provided
error: the `noreturn` option was already provided
- --> $DIR/duplicate-options.rs:23:21
+ --> $DIR/duplicate-options.rs:24:21
|
LL | options(noreturn),
| ^^^^^^^^ this option was already provided
// only-aarch64
-
+// needs-asm-support
use std::arch::asm;
macro_rules! m {
// only-aarch64
// build-fail
+// needs-asm-support
// compile-flags: -Ccodegen-units=1
use std::arch::asm;
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:11:15
+ --> $DIR/srcloc.rs:12:15
|
LL | asm!("invalid_instruction");
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:15:13
+ --> $DIR/srcloc.rs:16:13
|
LL | invalid_instruction
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:20:13
+ --> $DIR/srcloc.rs:21:13
|
LL | invalid_instruction
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:26:13
+ --> $DIR/srcloc.rs:27:13
|
LL | invalid_instruction
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:33:13
+ --> $DIR/srcloc.rs:34:13
|
LL | invalid_instruction
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:38:14
+ --> $DIR/srcloc.rs:39:14
|
LL | asm!(concat!("invalid", "_", "instruction"));
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:42:14
+ --> $DIR/srcloc.rs:43:14
|
LL | "invalid_instruction",
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:48:14
+ --> $DIR/srcloc.rs:49:14
|
LL | "invalid_instruction",
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:55:14
+ --> $DIR/srcloc.rs:56:14
|
LL | "invalid_instruction",
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:62:13
+ --> $DIR/srcloc.rs:63:13
|
LL | concat!("invalid", "_", "instruction"),
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:69:13
+ --> $DIR/srcloc.rs:70:13
|
LL | concat!("invalid", "_", "instruction"),
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:76:14
+ --> $DIR/srcloc.rs:77:14
|
LL | "invalid_instruction1",
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:77:14
+ --> $DIR/srcloc.rs:78:14
|
LL | "invalid_instruction2",
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:83:13
+ --> $DIR/srcloc.rs:84:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:83:13
+ --> $DIR/srcloc.rs:84:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:92:13
+ --> $DIR/srcloc.rs:93:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:92:13
+ --> $DIR/srcloc.rs:93:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:96:13
+ --> $DIR/srcloc.rs:97:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:96:13
+ --> $DIR/srcloc.rs:97:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:107:13
+ --> $DIR/srcloc.rs:108:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:107:13
+ --> $DIR/srcloc.rs:108:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:111:13
+ --> $DIR/srcloc.rs:112:13
|
LL | concat!(
| ^
| ^
error: unrecognized instruction mnemonic
- --> $DIR/srcloc.rs:111:13
+ --> $DIR/srcloc.rs:112:13
|
LL | concat!(
| ^
// only-aarch64
// only-linux
+// needs-asm-support
// run-pass
#![feature(thread_local, asm_sym)]
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:31:15
+ --> $DIR/inline-syntax.rs:32:15
|
LL | asm!(".intel_syntax noprefix", "nop");
| ^
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:34:15
+ --> $DIR/inline-syntax.rs:35:15
|
LL | asm!(".intel_syntax aaa noprefix", "nop");
| ^
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:37:15
+ --> $DIR/inline-syntax.rs:38:15
|
LL | asm!(".att_syntax noprefix", "nop");
| ^
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:40:15
+ --> $DIR/inline-syntax.rs:41:15
|
LL | asm!(".att_syntax bbb noprefix", "nop");
| ^
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:43:15
+ --> $DIR/inline-syntax.rs:44:15
|
LL | asm!(".intel_syntax noprefix; nop");
| ^
| ^
error: unknown directive
- --> $DIR/inline-syntax.rs:49:13
+ --> $DIR/inline-syntax.rs:50:13
|
LL | .intel_syntax noprefix
| ^
//[arm] compile-flags: --target armv7-unknown-linux-gnueabihf
//[arm] build-fail
//[arm] needs-llvm-components: arm
+// needs-asm-support
#![feature(no_core, lang_items, rustc_attrs)]
#![crate_type = "rlib"]
warning: avoid using `.intel_syntax`, Intel syntax is the default
- --> $DIR/inline-syntax.rs:57:14
+ --> $DIR/inline-syntax.rs:58:14
|
LL | global_asm!(".intel_syntax noprefix", "nop");
| ^^^^^^^^^^^^^^^^^^^^^^
= note: `#[warn(bad_asm_style)]` on by default
warning: avoid using `.intel_syntax`, Intel syntax is the default
- --> $DIR/inline-syntax.rs:31:15
+ --> $DIR/inline-syntax.rs:32:15
|
LL | asm!(".intel_syntax noprefix", "nop");
| ^^^^^^^^^^^^^^^^^^^^^^
warning: avoid using `.intel_syntax`, Intel syntax is the default
- --> $DIR/inline-syntax.rs:34:15
+ --> $DIR/inline-syntax.rs:35:15
|
LL | asm!(".intel_syntax aaa noprefix", "nop");
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
warning: avoid using `.att_syntax`, prefer using `options(att_syntax)` instead
- --> $DIR/inline-syntax.rs:37:15
+ --> $DIR/inline-syntax.rs:38:15
|
LL | asm!(".att_syntax noprefix", "nop");
| ^^^^^^^^^^^^^^^^^^^^
warning: avoid using `.att_syntax`, prefer using `options(att_syntax)` instead
- --> $DIR/inline-syntax.rs:40:15
+ --> $DIR/inline-syntax.rs:41:15
|
LL | asm!(".att_syntax bbb noprefix", "nop");
| ^^^^^^^^^^^^^^^^^^^^^^^^
warning: avoid using `.intel_syntax`, Intel syntax is the default
- --> $DIR/inline-syntax.rs:43:15
+ --> $DIR/inline-syntax.rs:44:15
|
LL | asm!(".intel_syntax noprefix; nop");
| ^^^^^^^^^^^^^^^^^^^^^^
warning: avoid using `.intel_syntax`, Intel syntax is the default
- --> $DIR/inline-syntax.rs:49:13
+ --> $DIR/inline-syntax.rs:50:13
|
LL | .intel_syntax noprefix
| ^^^^^^^^^^^^^^^^^^^^^^
// compile-flags: --target armv5te-unknown-linux-gnueabi
// needs-llvm-components: arm
+// needs-asm-support
// build-pass
#![feature(no_core, lang_items, rustc_attrs, isa_attribute)]
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:16:32
+ --> $DIR/naked-functions-unused.rs:17:32
|
LL | pub extern "C" fn function(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
|
note: the lint level is defined here
- --> $DIR/naked-functions-unused.rs:4:9
+ --> $DIR/naked-functions-unused.rs:5:9
|
LL | #![deny(unused)]
| ^^^^^^
= note: `#[deny(unused_variables)]` implied by `#[deny(unused)]`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:16:42
+ --> $DIR/naked-functions-unused.rs:17:42
|
LL | pub extern "C" fn function(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:25:38
+ --> $DIR/naked-functions-unused.rs:26:38
|
LL | pub extern "C" fn associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:25:48
+ --> $DIR/naked-functions-unused.rs:26:48
|
LL | pub extern "C" fn associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:31:41
+ --> $DIR/naked-functions-unused.rs:32:41
|
LL | pub extern "C" fn method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:31:51
+ --> $DIR/naked-functions-unused.rs:32:51
|
LL | pub extern "C" fn method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:39:40
+ --> $DIR/naked-functions-unused.rs:40:40
|
LL | extern "C" fn trait_associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:39:50
+ --> $DIR/naked-functions-unused.rs:40:50
|
LL | extern "C" fn trait_associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:45:43
+ --> $DIR/naked-functions-unused.rs:46:43
|
LL | extern "C" fn trait_method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:45:53
+ --> $DIR/naked-functions-unused.rs:46:53
|
LL | extern "C" fn trait_method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
// revisions: x86_64 aarch64
+// needs-asm-support
//[x86_64] only-x86_64
//[aarch64] only-aarch64
#![deny(unused)]
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:16:32
+ --> $DIR/naked-functions-unused.rs:17:32
|
LL | pub extern "C" fn function(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
|
note: the lint level is defined here
- --> $DIR/naked-functions-unused.rs:4:9
+ --> $DIR/naked-functions-unused.rs:5:9
|
LL | #![deny(unused)]
| ^^^^^^
= note: `#[deny(unused_variables)]` implied by `#[deny(unused)]`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:16:42
+ --> $DIR/naked-functions-unused.rs:17:42
|
LL | pub extern "C" fn function(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:25:38
+ --> $DIR/naked-functions-unused.rs:26:38
|
LL | pub extern "C" fn associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:25:48
+ --> $DIR/naked-functions-unused.rs:26:48
|
LL | pub extern "C" fn associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:31:41
+ --> $DIR/naked-functions-unused.rs:32:41
|
LL | pub extern "C" fn method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:31:51
+ --> $DIR/naked-functions-unused.rs:32:51
|
LL | pub extern "C" fn method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:39:40
+ --> $DIR/naked-functions-unused.rs:40:40
|
LL | extern "C" fn trait_associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:39:50
+ --> $DIR/naked-functions-unused.rs:40:50
|
LL | extern "C" fn trait_associated(a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
error: unused variable: `a`
- --> $DIR/naked-functions-unused.rs:45:43
+ --> $DIR/naked-functions-unused.rs:46:43
|
LL | extern "C" fn trait_method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_a`
error: unused variable: `b`
- --> $DIR/naked-functions-unused.rs:45:53
+ --> $DIR/naked-functions-unused.rs:46:53
|
LL | extern "C" fn trait_method(&self, a: usize, b: usize) -> usize {
| ^ help: if this is intentional, prefix it with an underscore: `_b`
// only-x86_64
// run-pass
+// needs-asm-support
// revisions: mirunsafeck thirunsafeck
// [thirunsafeck]compile-flags: -Z thir-unsafeck
// build-pass
+// needs-asm-support
// only-x86_64
#![feature(target_feature_11)]
// min-llvm-version: 12.0.1
// only-x86_64
// only-linux
+// needs-asm-support
// run-pass
#![feature(thread_local, asm_sym)]
const BAR: u32;
}
-const IMPL_REF_BAR: u32 = GlobalImplRef::BAR; //~ ERROR E0391
+const IMPL_REF_BAR: u32 = GlobalImplRef::BAR;
struct GlobalImplRef;
impl GlobalImplRef {
- const BAR: u32 = IMPL_REF_BAR;
+ const BAR: u32 = IMPL_REF_BAR; //~ ERROR E0391
}
fn main() {}
-error[E0391]: cycle detected when simplifying constant for the type system `IMPL_REF_BAR`
+error[E0391]: cycle detected when elaborating drops for `<impl at $DIR/issue-24949-assoc-const-static-recursion-impl.rs:11:1: 13:2>::BAR`
+ --> $DIR/issue-24949-assoc-const-static-recursion-impl.rs:12:5
+ |
+LL | const BAR: u32 = IMPL_REF_BAR;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: ...which requires normalizing `IMPL_REF_BAR`...
+note: ...which requires simplifying constant for the type system `IMPL_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-impl.rs:7:1
|
LL | const IMPL_REF_BAR: u32 = GlobalImplRef::BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
note: ...which requires simplifying constant for the type system `IMPL_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-impl.rs:7:1
|
|
LL | const BAR: u32 = IMPL_REF_BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: ...which requires normalizing `IMPL_REF_BAR`...
- = note: ...which again requires simplifying constant for the type system `IMPL_REF_BAR`, completing the cycle
+ = note: ...which again requires elaborating drops for `<impl at $DIR/issue-24949-assoc-const-static-recursion-impl.rs:11:1: 13:2>::BAR`, completing the cycle
= note: cycle used when running analysis passes on this crate
error: aborting due to previous error
}
trait FooDefault {
- const BAR: u32 = DEFAULT_REF_BAR;
+ const BAR: u32 = DEFAULT_REF_BAR; //~ ERROR E0391
}
-const DEFAULT_REF_BAR: u32 = <GlobalDefaultRef>::BAR; //~ ERROR E0391
+const DEFAULT_REF_BAR: u32 = <GlobalDefaultRef>::BAR;
struct GlobalDefaultRef;
-error[E0391]: cycle detected when simplifying constant for the type system `DEFAULT_REF_BAR`
+error[E0391]: cycle detected when elaborating drops for `FooDefault::BAR`
+ --> $DIR/issue-24949-assoc-const-static-recursion-trait-default.rs:8:5
+ |
+LL | const BAR: u32 = DEFAULT_REF_BAR;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: ...which requires normalizing `DEFAULT_REF_BAR`...
+note: ...which requires simplifying constant for the type system `DEFAULT_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-trait-default.rs:11:1
|
LL | const DEFAULT_REF_BAR: u32 = <GlobalDefaultRef>::BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
note: ...which requires simplifying constant for the type system `DEFAULT_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-trait-default.rs:11:1
|
|
LL | const BAR: u32 = DEFAULT_REF_BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: ...which requires normalizing `DEFAULT_REF_BAR`...
- = note: ...which again requires simplifying constant for the type system `DEFAULT_REF_BAR`, completing the cycle
+ = note: ...which again requires elaborating drops for `FooDefault::BAR`, completing the cycle
= note: cycle used when running analysis passes on this crate
error: aborting due to previous error
const BAR: u32;
}
-const TRAIT_REF_BAR: u32 = <GlobalTraitRef>::BAR; //~ ERROR E0391
+const TRAIT_REF_BAR: u32 = <GlobalTraitRef>::BAR;
struct GlobalTraitRef;
impl Foo for GlobalTraitRef {
- const BAR: u32 = TRAIT_REF_BAR;
+ const BAR: u32 = TRAIT_REF_BAR; //~ ERROR E0391
}
fn main() {}
-error[E0391]: cycle detected when simplifying constant for the type system `TRAIT_REF_BAR`
+error[E0391]: cycle detected when elaborating drops for `<impl at $DIR/issue-24949-assoc-const-static-recursion-trait.rs:11:1: 13:2>::BAR`
+ --> $DIR/issue-24949-assoc-const-static-recursion-trait.rs:12:5
+ |
+LL | const BAR: u32 = TRAIT_REF_BAR;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: ...which requires normalizing `TRAIT_REF_BAR`...
+note: ...which requires simplifying constant for the type system `TRAIT_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-trait.rs:7:1
|
LL | const TRAIT_REF_BAR: u32 = <GlobalTraitRef>::BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
note: ...which requires simplifying constant for the type system `TRAIT_REF_BAR`...
--> $DIR/issue-24949-assoc-const-static-recursion-trait.rs:7:1
|
|
LL | const BAR: u32 = TRAIT_REF_BAR;
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: ...which requires normalizing `TRAIT_REF_BAR`...
- = note: ...which again requires simplifying constant for the type system `TRAIT_REF_BAR`, completing the cycle
+ = note: ...which again requires elaborating drops for `<impl at $DIR/issue-24949-assoc-const-static-recursion-trait.rs:11:1: 13:2>::BAR`, completing the cycle
= note: cycle used when running analysis passes on this crate
error: aborting due to previous error
-{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"crate_type","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":{"variant":"Eq","fields":[{"lo":0,"hi":0},{"kind":{"variant":"Interpolated","fields":[{"variant":"NtExpr","fields":[{"id":0,"kind":{"variant":"Lit","fields":[{"token":{"kind":"Str","symbol":"lib","suffix":null},"kind":{"variant":"Str","fields":["lib","Cooked"]},"span":{"lo":0,"hi":0}}]},"span":{"lo":0,"hi":0},"attrs":{"0":null},"tokens":{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}}]}]},"span":{"lo":0,"hi":0}}]},"tokens":null},{"0":[[{"variant":"Token","fields":[{"kind":"Pound","span":{"lo":0,"hi":0}}]},"Joint"],[{"variant":"Token","fields":[{"kind":"Not","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Delimited","fields":[{"open":{"lo":0,"hi":0},"close":{"lo":0,"hi":0}},"Bracket",{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Ident","fields":["crate_type",false]},"span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":"Eq","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}]},"Alone"]]}]},"id":null,"style":"Inner","span":{"lo":0,"hi":0}}],"items":[{"attrs":[],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"core","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null}],"span":{"lo":0,"hi":0},"id":0,"is_placeholder":false}
+{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"crate_type","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":{"variant":"Eq","fields":[{"lo":0,"hi":0},{"kind":{"variant":"Interpolated","fields":[{"variant":"NtExpr","fields":[{"id":0,"kind":{"variant":"Lit","fields":[{"token":{"kind":"Str","symbol":"lib","suffix":null},"kind":{"variant":"Str","fields":["lib","Cooked"]},"span":{"lo":0,"hi":0}}]},"span":{"lo":0,"hi":0},"attrs":{"0":null},"tokens":{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}}]}]},"span":{"lo":0,"hi":0}}]},"tokens":null},{"0":[[{"variant":"Token","fields":[{"kind":"Pound","span":{"lo":0,"hi":0}}]},"Joint"],[{"variant":"Token","fields":[{"kind":"Not","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Delimited","fields":[{"open":{"lo":0,"hi":0},"close":{"lo":0,"hi":0}},"Bracket",{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Ident","fields":["crate_type",false]},"span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":"Eq","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}]},"Alone"]]}]},"id":null,"style":"Inner","span":{"lo":0,"hi":0}}],"items":[{"attrs":[],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"core","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null}],"spans":{"inner_span":{"lo":0,"hi":0},"inject_use_span":{"lo":0,"hi":0}},"id":0,"is_placeholder":false}
-{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"crate_type","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":{"variant":"Eq","fields":[{"lo":0,"hi":0},{"kind":{"variant":"Interpolated","fields":[{"variant":"NtExpr","fields":[{"id":0,"kind":{"variant":"Lit","fields":[{"token":{"kind":"Str","symbol":"lib","suffix":null},"kind":{"variant":"Str","fields":["lib","Cooked"]},"span":{"lo":0,"hi":0}}]},"span":{"lo":0,"hi":0},"attrs":{"0":null},"tokens":{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}}]}]},"span":{"lo":0,"hi":0}}]},"tokens":null},{"0":[[{"variant":"Token","fields":[{"kind":"Pound","span":{"lo":0,"hi":0}}]},"Joint"],[{"variant":"Token","fields":[{"kind":"Not","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Delimited","fields":[{"open":{"lo":0,"hi":0},"close":{"lo":0,"hi":0}},"Bracket",{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Ident","fields":["crate_type",false]},"span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":"Eq","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}]},"Alone"]]}]},"id":null,"style":"Inner","span":{"lo":0,"hi":0}}],"items":[{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"prelude_import","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":"Empty","tokens":null},null]},"id":null,"style":"Outer","span":{"lo":0,"hi":0}}],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"","span":{"lo":0,"hi":0}},"kind":{"variant":"Use","fields":[{"prefix":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"{{root}}","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"
name":"std","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"name":"prelude","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"name":"rust_2015","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"kind":"Glob","span":{"lo":0,"hi":0}}]},"tokens":null},{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"macro_use","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":"Empty","tokens":null},null]},"id":null,"style":"Outer","span":{"lo":0,"hi":0}}],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"std","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null},{"attrs":[],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"core","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null}],"span":{"lo":0,"hi":0},"id":0,"is_placeholder":false}
+{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"crate_type","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":{"variant":"Eq","fields":[{"lo":0,"hi":0},{"kind":{"variant":"Interpolated","fields":[{"variant":"NtExpr","fields":[{"id":0,"kind":{"variant":"Lit","fields":[{"token":{"kind":"Str","symbol":"lib","suffix":null},"kind":{"variant":"Str","fields":["lib","Cooked"]},"span":{"lo":0,"hi":0}}]},"span":{"lo":0,"hi":0},"attrs":{"0":null},"tokens":{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}}]}]},"span":{"lo":0,"hi":0}}]},"tokens":null},{"0":[[{"variant":"Token","fields":[{"kind":"Pound","span":{"lo":0,"hi":0}}]},"Joint"],[{"variant":"Token","fields":[{"kind":"Not","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Delimited","fields":[{"open":{"lo":0,"hi":0},"close":{"lo":0,"hi":0}},"Bracket",{"0":[[{"variant":"Token","fields":[{"kind":{"variant":"Ident","fields":["crate_type",false]},"span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":"Eq","span":{"lo":0,"hi":0}}]},"Alone"],[{"variant":"Token","fields":[{"kind":{"variant":"Literal","fields":[{"kind":"Str","symbol":"lib","suffix":null}]},"span":{"lo":0,"hi":0}}]},"Alone"]]}]},"Alone"]]}]},"id":null,"style":"Inner","span":{"lo":0,"hi":0}}],"items":[{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"prelude_import","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":"Empty","tokens":null},null]},"id":null,"style":"Outer","span":{"lo":0,"hi":0}}],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"","span":{"lo":0,"hi":0}},"kind":{"variant":"Use","fields":[{"prefix":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"{{root}}","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"
name":"std","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"name":"prelude","span":{"lo":0,"hi":0}},"id":0,"args":null},{"ident":{"name":"rust_2015","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"kind":"Glob","span":{"lo":0,"hi":0}}]},"tokens":null},{"attrs":[{"kind":{"variant":"Normal","fields":[{"path":{"span":{"lo":0,"hi":0},"segments":[{"ident":{"name":"macro_use","span":{"lo":0,"hi":0}},"id":0,"args":null}],"tokens":null},"args":"Empty","tokens":null},null]},"id":null,"style":"Outer","span":{"lo":0,"hi":0}}],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"std","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null},{"attrs":[],"id":0,"span":{"lo":0,"hi":0},"vis":{"kind":"Inherited","span":{"lo":0,"hi":0},"tokens":null},"ident":{"name":"core","span":{"lo":0,"hi":0}},"kind":{"variant":"ExternCrate","fields":[null]},"tokens":null}],"spans":{"inner_span":{"lo":0,"hi":0},"inject_use_span":{"lo":0,"hi":0}},"id":0,"is_placeholder":false}
return 0u8;
};
let _: &dyn Future<Output = ()> = █
- //~^ ERROR type mismatch resolving `<impl Future<Output = [async output]> as Future>::Output == ()`
+ //~^ ERROR type mismatch resolving `<impl Future as Future>::Output == ()`
}
fn no_break_in_async_block() {
LL | | }
| |_^ expected `u8`, found `()`
-error[E0271]: type mismatch resolving `<impl Future<Output = [async output]> as Future>::Output == ()`
+error[E0271]: type mismatch resolving `<impl Future as Future>::Output == ()`
--> $DIR/async-block-control-flow-static-semantics.rs:26:39
|
LL | let _: &dyn Future<Output = ()> = █
| |
| implicitly returns `()` as its body has no tail or `return` expression
-error[E0271]: type mismatch resolving `<impl Future<Output = [async output]> as Future>::Output == ()`
+error[E0271]: type mismatch resolving `<impl Future as Future>::Output == ()`
--> $DIR/async-block-control-flow-static-semantics.rs:17:39
|
LL | let _: &dyn Future<Output = ()> = █
impl IntoFuture for AwaitMe {
type Output = i32;
- type Future = Pin<Box<dyn Future<Output = i32>>>;
+ type IntoFuture = Pin<Box<dyn Future<Output = i32>>>;
- fn into_future(self) -> Self::Future {
+ fn into_future(self) -> Self::IntoFuture {
Box::pin(me())
}
}
| the expected opaque type
| the found opaque type
|
- = note: expected opaque type `impl Future<Output = [async output]>` (`async` closure body)
- found opaque type `impl Future<Output = [async output]>` (`async` closure body)
+ = note: expected opaque type `impl Future` (`async` closure body)
+ found opaque type `impl Future` (`async` closure body)
error: aborting due to 3 previous errors
async fn match_() {
match tuple() { //~ HELP consider `await`ing on the `Future`
+ //~^ NOTE this expression has type `impl Future<Output = Tuple>`
Tuple(_) => {} //~ ERROR mismatched types
//~^ NOTE expected opaque type, found struct `Tuple`
//~| NOTE expected opaque type `impl Future<Output = Tuple>`
| ++++++
error[E0308]: mismatched types
- --> $DIR/issue-61076.rs:90:9
+ --> $DIR/issue-61076.rs:91:9
|
+LL | match tuple() {
+ | ------- this expression has type `impl Future<Output = Tuple>`
+LL |
LL | Tuple(_) => {}
| ^^^^^^^^ expected opaque type, found struct `Tuple`
|
LL | spawn(async {
| ^^^^^ future created by async block is not `Send`
|
- = help: within `impl Future<Output = [async output]>`, the trait `Send` is not implemented for `*mut ()`
+ = help: within `impl Future`, the trait `Send` is not implemented for `*mut ()`
note: future is not `Send` as this value is used across an await
--> $DIR/issue-67252-unnamed-future.rs:20:16
|
= note: required because of the requirements on the impl of `Send` for `Arc<RefCell<i32>>`
= note: required because it appears within the type `[static generator@$DIR/issue-68112.rs:47:31: 47:36]`
= note: required because it appears within the type `from_generator::GenFuture<[static generator@$DIR/issue-68112.rs:47:31: 47:36]>`
- = note: required because it appears within the type `impl Future<Output = [async output]>`
+ = note: required because it appears within the type `impl Future`
= note: required because it appears within the type `impl Future<Output = Arc<RefCell<i32>>>`
= note: required because it appears within the type `impl Future<Output = Arc<RefCell<i32>>>`
= note: required because it appears within the type `{ResumeTy, impl Future<Output = Arc<RefCell<i32>>>, (), i32, Ready<i32>}`
= note: required because it appears within the type `[static generator@$DIR/issue-68112.rs:55:26: 59:6]`
= note: required because it appears within the type `from_generator::GenFuture<[static generator@$DIR/issue-68112.rs:55:26: 59:6]>`
- = note: required because it appears within the type `impl Future<Output = [async output]>`
+ = note: required because it appears within the type `impl Future`
note: required by a bound in `require_send`
--> $DIR/issue-68112.rs:11:25
|
LL | assert_send(async {
| ^^^^^^^^^^^ future created by async block is not `Send`
|
- = help: within `impl Future<Output = [async output]>`, the trait `Send` is not implemented for `*const u8`
+ = help: within `impl Future`, the trait `Send` is not implemented for `*const u8`
note: future is not `Send` as this value is used across an await
--> $DIR/issue-65436-raw-ptr-not-send.rs:14:35
|
= note: required because it appears within the type `{ResumeTy, (NotSend,), impl Future<Output = ()>, ()}`
= note: required because it appears within the type `[static generator@$DIR/partial-drop-partial-reinit.rs:22:16: 27:2]`
= note: required because it appears within the type `from_generator::GenFuture<[static generator@$DIR/partial-drop-partial-reinit.rs:22:16: 27:2]>`
- = note: required because it appears within the type `impl Future<Output = [async output]>`
+ = note: required because it appears within the type `impl Future`
= note: required because it appears within the type `impl Future<Output = ()>`
note: required by a bound in `gimme_send`
--> $DIR/partial-drop-partial-reinit.rs:10:18
error[E0308]: mismatched types
--> $DIR/suggest-missing-await.rs:53:9
|
+LL | let _x = match dummy() {
+ | ------- this expression has type `impl Future<Output = ()>`
LL | () => {}
| ^^ expected opaque type, found `()`
|
error[E0308]: mismatched types
--> $DIR/suggest-missing-await.rs:67:9
|
+LL | match dummy_result() {
+ | -------------- this expression has type `impl Future<Output = Result<(), ()>>`
+...
LL | Ok(_) => {}
| ^^^^^ expected opaque type, found enum `Result`
|
error[E0308]: mismatched types
--> $DIR/suggest-missing-await.rs:69:9
|
+LL | match dummy_result() {
+ | -------------- this expression has type `impl Future<Output = Result<(), ()>>`
+...
LL | Err(_) => {}
| ^^^^^^ expected opaque type, found enum `Result`
|
//~^ ERROR
//~| WARNING this will change its meaning
+pub struct WithLifetime<'a, T>(&'a (), T);
+unsafe impl<T> Send for WithLifetime<'static, T> {} // ok
+unsafe impl<T> Sync for WithLifetime<'static, Vec<T>> {}
+//~^ ERROR
+//~| WARNING this will change its meaning
+
fn main() {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: `*const T` is not a generic parameter
-error: aborting due to 4 previous errors
+error: cross-crate traits with a default impl, like `Sync`, should not be specialized
+ --> $DIR/suspicious-impls-lint.rs:46:1
+ |
+LL | unsafe impl<T> Sync for WithLifetime<'static, Vec<T>> {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = warning: this will change its meaning in a future release!
+ = note: for more information, see issue #93367 <https://github.com/rust-lang/rust/issues/93367>
+note: try using the same sequence of generic parameters as the struct definition
+ --> $DIR/suspicious-impls-lint.rs:44:1
+ |
+LL | pub struct WithLifetime<'a, T>(&'a (), T);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: `Vec<T>` is not a generic parameter
+
+error: aborting due to 5 previous errors
// Test that the recursion limit can be changed. In this case, we have
// deeply nested types that will fail the `Send` check by overflow
// when the recursion limit is set very low.
+// compile-flags: -Zdeduplicate-diagnostics=yes
+
#![allow(dead_code)]
-#![recursion_limit="10"]
+#![recursion_limit = "10"]
macro_rules! link {
($outer:ident, $inner:ident) => {
struct $outer($inner);
&self.0
}
}
- }
+ };
}
+
struct Bottom;
+
impl Bottom {
fn new() -> Bottom {
Bottom
}
}
+
link!(Top, A);
link!(A, B);
link!(B, C);
link!(I, J);
link!(J, K);
link!(K, Bottom);
+
fn main() {
let t = Top::new();
let x: &Bottom = &t;
error[E0055]: reached the recursion limit while auto-dereferencing `J`
- --> $DIR/issue-38940.rs:43:22
+ --> $DIR/issue-38940.rs:49:22
|
LL | let x: &Bottom = &t;
| ^^ deref recursion limit reached
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "20"]` attribute to your crate (`issue_38940`)
error[E0308]: mismatched types
- --> $DIR/issue-38940.rs:43:22
+ --> $DIR/issue-38940.rs:49:22
|
LL | let x: &Bottom = &t;
| ------- ^^ expected struct `Bottom`, found struct `Top`
+++ /dev/null
-pub struct XEmpty1 {}
-pub struct XEmpty2;
-pub struct XEmpty6();
-
-pub enum XE {
- XEmpty3 {},
- XEmpty4,
- XEmpty5(),
-}
+++ /dev/null
-// compile-flags:--cfg foo
-
-#![cfg_attr(foo, unstable(feature = "unstable_test_feature", issue = "none"))]
-#![cfg_attr(not(foo), stable(feature = "test_feature", since = "1.0.0"))]
-#![feature(staged_api)]
+++ /dev/null
-// no-prefer-dynamic
-
-// This aux-file will require the eh_personality function to be codegen'd, but
-// it hasn't been defined just yet. Make sure we don't explode.
-
-#![no_std]
-#![crate_type = "rlib"]
-
-struct A;
-
-impl core::ops::Drop for A {
- fn drop(&mut self) {}
-}
-
-pub fn foo() {
- let _a = A;
- panic!("wut");
-}
-
-mod std {
- pub use core::{option, fmt};
-}
assert!(!s.contains("stack backtrace") && !s.contains(" - foo"),
"bad output3: {}", s);
- // Make sure a stack trace is printed
- let p = template(me).arg("double-fail").spawn().unwrap();
- let out = p.wait_with_output().unwrap();
- assert!(!out.status.success());
- let s = str::from_utf8(&out.stderr).unwrap();
- // loosened the following from double::h to double:: due to
- // spurious failures on mac, 32bit, optimized
- assert!(s.contains("stack backtrace") && contains_verbose_expected(s, "double"),
- "bad output3: {}", s);
+ #[cfg(not(panic = "abort"))]
+ {
+ // Make sure a stack trace is printed
+ let p = template(me).arg("double-fail").spawn().unwrap();
+ let out = p.wait_with_output().unwrap();
+ assert!(!out.status.success());
+ let s = str::from_utf8(&out.stderr).unwrap();
+ // loosened the following from double::h to double:: due to
+ // spurious failures on mac, 32bit, optimized
+ assert!(s.contains("stack backtrace") && contains_verbose_expected(s, "double"),
+ "bad output3: {}", s);
- // Make sure a stack trace isn't printed too many times
- let p = template(me).arg("double-fail")
- .env("RUST_BACKTRACE", "1").spawn().unwrap();
- let out = p.wait_with_output().unwrap();
- assert!(!out.status.success());
- let s = str::from_utf8(&out.stderr).unwrap();
- let mut i = 0;
- for _ in 0..2 {
- i += s[i + 10..].find("stack backtrace").unwrap() + 10;
+ // Make sure a stack trace isn't printed too many times
+ let p = template(me).arg("double-fail")
+ .env("RUST_BACKTRACE", "1").spawn().unwrap();
+ let out = p.wait_with_output().unwrap();
+ assert!(!out.status.success());
+ let s = str::from_utf8(&out.stderr).unwrap();
+ let mut i = 0;
+ for _ in 0..2 {
+ i += s[i + 10..].find("stack backtrace").unwrap() + 10;
+ }
+ assert!(s[i + 10..].find("stack backtrace").is_none(),
+ "bad output4: {}", s);
}
- assert!(s[i + 10..].find("stack backtrace").is_none(),
- "bad output4: {}", s);
}
fn main() {
| --------------- unit struct defined here
...
LL | let bar = 5;
- | ^^^
+ | ^^^ - this expression has type `{integer}`
| |
| expected integer, found struct `bar`
| `bar` is interpreted as a unit struct, not a new binding
--> $DIR/issue-20862.rs:2:5
|
LL | fn foo(x: i32) {
- | - possibly return type missing here?
+ | - help: a return type might be missing here: `-> _`
LL | |y| x + y
| ^^^^^^^^^ expected `()`, found closure
|
--- /dev/null
+// compile-flags: -O
+// build-pass
+
+#![feature(allocator_api, bench_black_box)]
+
+#[inline(never)]
+pub fn by_ref(node: &mut Box<[u8; 1], &std::alloc::Global>) {
+ node[0] = 9u8;
+}
+
+pub fn main() {
+ let mut node = Box::new_in([5u8], &std::alloc::Global);
+ node[0] = 7u8;
+
+ std::hint::black_box(node);
+
+ let mut node = Box::new_in([5u8], &std::alloc::Global);
+
+ by_ref(&mut node);
+
+ std::hint::black_box(node);
+}
// build-pass
#![feature(allocator_api)]
+#![allow(unused_must_use)]
use std::alloc::Allocator;
fn main() {
Box::new_in((), &std::alloc::Global);
Box::new_in((), BigAllocator([0; 2]));
+ generic_function(0);
+}
+
+fn generic_function<T>(val: T) {
+ *Box::new_in(val, &std::alloc::Global);
}
--- /dev/null
+// check-fail
+
+#[cfg(any(foo, foo::bar))]
+//~^ERROR `cfg` predicate key must be an identifier
+fn foo1() {}
+
+#[cfg(any(foo::bar, foo))]
+//~^ERROR `cfg` predicate key must be an identifier
+fn foo2() {}
+
+#[cfg(all(foo, foo::bar))]
+//~^ERROR `cfg` predicate key must be an identifier
+fn foo3() {}
+
+#[cfg(all(foo::bar, foo))]
+//~^ERROR `cfg` predicate key must be an identifier
+fn foo4() {}
+
+fn main() {}
--- /dev/null
+error: `cfg` predicate key must be an identifier
+ --> $DIR/cfg-path-error.rs:3:16
+ |
+LL | #[cfg(any(foo, foo::bar))]
+ | ^^^^^^^^
+
+error: `cfg` predicate key must be an identifier
+ --> $DIR/cfg-path-error.rs:7:11
+ |
+LL | #[cfg(any(foo::bar, foo))]
+ | ^^^^^^^^
+
+error: `cfg` predicate key must be an identifier
+ --> $DIR/cfg-path-error.rs:11:16
+ |
+LL | #[cfg(all(foo, foo::bar))]
+ | ^^^^^^^^
+
+error: `cfg` predicate key must be an identifier
+ --> $DIR/cfg-path-error.rs:15:11
+ |
+LL | #[cfg(all(foo::bar, foo))]
+ | ^^^^^^^^
+
+error: aborting due to 4 previous errors
+
LL | T: Generator<ResumeTy, Yield = ()>,
| ^^^^^^^^^^ required by this bound in `from_generator`
-error[E0280]: the requirement `<impl Future<Output = [async output]> as Future>::Output == u32` is not satisfied
+error[E0280]: the requirement `<impl Future as Future>::Output == u32` is not satisfied
--> $DIR/async.rs:7:25
|
LL | async fn foo(x: u32) -> u32 {
|
LL | a = 1;
| ^
+help: consider mutably borrowing `b`
+ |
+LL | let mut c = &mut b;
+ | ++++
error: aborting due to previous error
//~^ WARNING unexpected `cfg` condition name
cfg!(any(feature = "bad", windows));
//~^ WARNING unexpected `cfg` condition value
+ cfg!(any(windows, xxx));
+ //~^ WARNING unexpected `cfg` condition name
+ cfg!(all(unix, xxx));
+ //~^ WARNING unexpected `cfg` condition name
+ cfg!(all(aa, bb));
+ //~^ WARNING unexpected `cfg` condition name
+ //~| WARNING unexpected `cfg` condition name
+ cfg!(any(aa, bb));
+ //~^ WARNING unexpected `cfg` condition name
+ //~| WARNING unexpected `cfg` condition name
+ cfg!(any(unix, feature = "zebra"));
+ //~^ WARNING unexpected `cfg` condition value
+ cfg!(any(xxx, feature = "zebra"));
+ //~^ WARNING unexpected `cfg` condition name
+ //~| WARNING unexpected `cfg` condition value
+ cfg!(any(xxx, unix, xxx));
+ //~^ WARNING unexpected `cfg` condition name
+ //~| WARNING unexpected `cfg` condition name
+ cfg!(all(feature = "zebra", feature = "zebra", feature = "zebra"));
+ //~^ WARNING unexpected `cfg` condition value
+ //~| WARNING unexpected `cfg` condition value
+ //~| WARNING unexpected `cfg` condition value
}
fn main() {}
|
= note: expected values for `feature` are: bar, foo
-warning: 9 warnings emitted
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:48:23
+ |
+LL | cfg!(any(windows, xxx));
+ | ^^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:50:20
+ |
+LL | cfg!(all(unix, xxx));
+ | ^^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:52:14
+ |
+LL | cfg!(all(aa, bb));
+ | ^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:52:18
+ |
+LL | cfg!(all(aa, bb));
+ | ^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:55:14
+ |
+LL | cfg!(any(aa, bb));
+ | ^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:55:18
+ |
+LL | cfg!(any(aa, bb));
+ | ^^
+
+warning: unexpected `cfg` condition value
+ --> $DIR/mix.rs:58:20
+ |
+LL | cfg!(any(unix, feature = "zebra"));
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: expected values for `feature` are: bar, foo
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:60:14
+ |
+LL | cfg!(any(xxx, feature = "zebra"));
+ | ^^^
+
+warning: unexpected `cfg` condition value
+ --> $DIR/mix.rs:60:19
+ |
+LL | cfg!(any(xxx, feature = "zebra"));
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: expected values for `feature` are: bar, foo
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:63:14
+ |
+LL | cfg!(any(xxx, unix, xxx));
+ | ^^^
+
+warning: unexpected `cfg` condition name
+ --> $DIR/mix.rs:63:25
+ |
+LL | cfg!(any(xxx, unix, xxx));
+ | ^^^
+
+warning: unexpected `cfg` condition value
+ --> $DIR/mix.rs:66:14
+ |
+LL | cfg!(all(feature = "zebra", feature = "zebra", feature = "zebra"));
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: expected values for `feature` are: bar, foo
+
+warning: unexpected `cfg` condition value
+ --> $DIR/mix.rs:66:33
+ |
+LL | cfg!(all(feature = "zebra", feature = "zebra", feature = "zebra"));
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: expected values for `feature` are: bar, foo
+
+warning: unexpected `cfg` condition value
+ --> $DIR/mix.rs:66:52
+ |
+LL | cfg!(all(feature = "zebra", feature = "zebra", feature = "zebra"));
+ | ^^^^^^^^^^^^^^^^^
+ |
+ = note: expected values for `feature` are: bar, foo
+
+warning: 23 warnings emitted
|
LL | if let MultiVariant::Point(ref mut x, _) = point {
| ^^^^^
+help: consider mutably borrowing `c`
+ |
+LL | let a = &mut c;
+ | ++++
error: aborting due to previous error
|
LL | let SingleVariant::Point(ref mut x, _) = point;
| ^^^^^
+help: consider mutably borrowing `c`
+ |
+LL | let b = &mut c;
+ | ++++
error: aborting due to previous error
|
LL | x.y.a += 1;
| ^^^^^
+help: consider mutably borrowing `hello`
+ |
+LL | let b = &mut hello;
+ | ++++
error: aborting due to previous error
|
LL | x.0 += 1;
| ^^^
+help: consider mutably borrowing `hello`
+ |
+LL | let b = &mut hello;
+ | ++++
error: aborting due to previous error
LL | a += 1;
| ^
|
-note: the lint level is defined here
- --> $DIR/liveness.rs:5:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(unused_variables)]` implied by `#[warn(unused)]`
= help: did you mean to capture by reference instead?
LL | a = s;
| ^
|
-note: the lint level is defined here
- --> $DIR/liveness_unintentional_copy.rs:4:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(unused_variables)]` implied by `#[warn(unused)]`
= help: did you mean to capture by reference instead?
--- /dev/null
+// check-pass
+
+#![feature(negative_impls)]
+#![feature(rustc_attrs)]
+#![feature(with_negative_coherence)]
+
+trait Foo {}
+
+impl !Foo for u32 {}
+
+#[rustc_strict_coherence]
+struct MyStruct<T>(T);
+
+impl MyStruct<u32> {
+ fn method(&self) {}
+}
+
+impl<T> MyStruct<T>
+where
+ T: Foo,
+{
+ fn method(&self) {}
+}
+
+fn main() {}
--- /dev/null
+// check-pass
+
+#![feature(negative_impls)]
+#![feature(rustc_attrs)]
+#![feature(with_negative_coherence)]
+
+#[rustc_strict_coherence]
+trait Foo {}
+
+impl !Foo for u32 {}
+
+struct MyStruct<T>(T);
+
+impl<T: Foo> MyStruct<T> {
+ fn method(&self) {}
+}
+
+impl MyStruct<u32> {
+ fn method(&self) {}
+}
+
+fn main() {}
--- /dev/null
+// check-pass
+
+#![feature(negative_impls)]
+#![feature(with_negative_coherence)]
+
+trait A {}
+trait B: A {}
+
+impl !A for u32 {}
+impl !B for u32 {}
+
+fn main() {}
+// check-pass
+
#![feature(negative_impls)]
#![feature(rustc_attrs)]
#![feature(trait_alias)]
trait C {}
impl<T: AB> C for T {}
impl C for u32 {}
-//~^ ERROR: conflicting implementations of trait `C` for type `u32` [E0119]
-// FIXME this should work, we should implement an `assemble_neg_candidates` fn
fn main() {}
+++ /dev/null
-error[E0119]: conflicting implementations of trait `C` for type `u32`
- --> $DIR/coherence-overlap-negate-alias-strict.rs:15:1
- |
-LL | impl<T: AB> C for T {}
- | ------------------- first implementation here
-LL | impl C for u32 {}
- | ^^^^^^^^^^^^^^ conflicting implementation for `u32`
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0119`.
--- /dev/null
+// check-pass
+
+#![feature(negative_impls)]
+#![feature(rustc_attrs)]
+#![feature(with_negative_coherence)]
+
+trait Trait1: Trait2 {}
+trait Trait2 {}
+
+struct MyType {}
+impl !Trait2 for MyType {}
+
+#[rustc_strict_coherence]
+trait Foo {}
+impl<T: Trait1> Foo for T {}
+impl Foo for MyType {}
+
+fn main() {}
--- /dev/null
+#![feature(extern_types)]
+
+extern "Rust" {
+ type OpaqueListContents;
+}
+
+pub struct ListS<T> {
+ len: usize,
+ data: [T; 0],
+ opaque: OpaqueListContents,
+}
+
+pub struct Interned<'a, T>(&'a T);
+
+impl<'a, T> Clone for Interned<'a, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'a, T> Copy for Interned<'a, T> {}
+
+pub struct List<'tcx, T>(Interned<'tcx, ListS<T>>);
+//~^ NOTE this field does not implement `Copy`
+//~| NOTE the `Copy` impl for `Interned<'tcx, ListS<T>>` requires that `OpaqueListContents: Sized`
+
+impl<'tcx, T> Clone for List<'tcx, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'tcx, T> Copy for List<'tcx, T> {}
+//~^ ERROR the trait `Copy` may not be implemented for this type
+
+fn assert_is_copy<T: Copy>() {}
+
+fn main() {
+ assert_is_copy::<List<'static, ()>>();
+}
--- /dev/null
+error[E0204]: the trait `Copy` may not be implemented for this type
+ --> $DIR/deep-bad-copy-reason.rs:33:15
+ |
+LL | pub struct List<'tcx, T>(Interned<'tcx, ListS<T>>);
+ | ------------------------ this field does not implement `Copy`
+...
+LL | impl<'tcx, T> Copy for List<'tcx, T> {}
+ | ^^^^
+ |
+note: the `Copy` impl for `Interned<'tcx, ListS<T>>` requires that `OpaqueListContents: Sized`
+ --> $DIR/deep-bad-copy-reason.rs:23:26
+ |
+LL | pub struct List<'tcx, T>(Interned<'tcx, ListS<T>>);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0204`.
--- /dev/null
+fn foo<const N: i32>() -> i32 {
+ N
+}
+
+const fn bar(n: i32, m: i32) -> i32 {
+ n
+}
+
+const fn baz() -> i32 {
+ 1
+}
+
+const FOO: i32 = 3;
+
+fn main() {
+ foo::<baz()>(); //~ ERROR expected type, found function `baz`
+ //~| ERROR unresolved item provided when a constant was expected
+ foo::<bar(bar(1, 1), bar(1, 1))>(); //~ ERROR expected type, found `1`
+ foo::<bar(1, 1)>(); //~ ERROR expected type, found `1`
+ foo::<bar(FOO, 2)>(); //~ ERROR expected type, found `2`
+}
--- /dev/null
+error: expected type, found `1`
+ --> $DIR/const-generic-function.rs:18:19
+ |
+LL | foo::<bar(bar(1, 1), bar(1, 1))>();
+ | ^ expected type
+ |
+help: expressions must be enclosed in braces to be used as const generic arguments
+ |
+LL | foo::<{ bar(bar(1, 1), bar(1, 1)) }>();
+ | + +
+
+error: expected type, found `1`
+ --> $DIR/const-generic-function.rs:19:15
+ |
+LL | foo::<bar(1, 1)>();
+ | ^ expected type
+ |
+help: expressions must be enclosed in braces to be used as const generic arguments
+ |
+LL | foo::<{ bar(1, 1) }>();
+ | + +
+
+error: expected type, found `2`
+ --> $DIR/const-generic-function.rs:20:20
+ |
+LL | foo::<bar(FOO, 2)>();
+ | ^ expected type
+ |
+help: expressions must be enclosed in braces to be used as const generic arguments
+ |
+LL | foo::<{ bar(FOO, 2) }>();
+ | + +
+
+error[E0573]: expected type, found function `baz`
+ --> $DIR/const-generic-function.rs:16:11
+ |
+LL | foo::<baz()>();
+ | ^^^^^ not a type
+
+error[E0747]: unresolved item provided when a constant was expected
+ --> $DIR/const-generic-function.rs:16:11
+ |
+LL | foo::<baz()>();
+ | ^^^^^
+ |
+help: if this generic argument was intended as a const parameter, surround it with braces
+ |
+LL | foo::<{ baz() }>();
+ | + +
+
+error: aborting due to 5 previous errors
+
+Some errors have detailed explanations: E0573, E0747.
+For more information about an error, try `rustc --explain E0573`.
--- /dev/null
+// build-pass
+
+#![feature(generic_const_exprs)]
+//~^ WARNING the feature `generic_const_exprs` is incomplete
+
+trait Generic {
+ const ASSOC: usize;
+}
+
+impl Generic for u8 {
+ const ASSOC: usize = 17;
+}
+impl Generic for u16 {
+ const ASSOC: usize = 13;
+}
+
+
+fn uses_assoc_type<T: Generic, const N: usize>() -> [u8; N + T::ASSOC] {
+ [0; N + T::ASSOC]
+}
+
+fn only_generic_n<const N: usize>() -> [u8; N + 13] {
+ uses_assoc_type::<u16, N>()
+}
+
+fn main() {}
--- /dev/null
+warning: the feature `generic_const_exprs` is incomplete and may not be safe to use and/or cause compiler crashes
+ --> $DIR/eval-try-unify.rs:3:12
+ |
+LL | #![feature(generic_const_exprs)]
+ | ^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(incomplete_features)]` on by default
+ = note: see issue #76560 <https://github.com/rust-lang/rust/issues/76560> for more information
+
+warning: 1 warning emitted
+
--- /dev/null
+// #95163
+fn return_ty() -> impl Into<<() as Reexported;
+//~^ ERROR expected one of `(`, `::`, `<`, or `>`, found `;`
+
+fn main() {}
--- /dev/null
+error: expected one of `(`, `::`, `<`, or `>`, found `;`
+ --> $DIR/ice-const-generic-function-return-ty.rs:2:46
+ |
+LL | fn return_ty() -> impl Into<<() as Reexported;
+ | ^ expected one of `(`, `::`, `<`, or `>`
+
+error: aborting due to previous error
+
#![allow(incomplete_features)]
trait TensorDimension {
- const DIM : usize;
- const ISSCALAR : bool = Self::DIM == 0;
- fn is_scalar(&self) -> bool {Self::ISSCALAR}
+ const DIM: usize;
+ //~^ ERROR cycle detected when resolving instance
+ // FIXME Given the current state of the compiler its expected that we cycle here,
+ // but the cycle is still wrong.
+ const ISSCALAR: bool = Self::DIM == 0;
+ fn is_scalar(&self) -> bool {
+ Self::ISSCALAR
+ }
}
-trait TensorSize : TensorDimension {
- fn size(&self) -> [usize;Self::DIM];
- fn inbounds(&self,index : [usize;Self::DIM]) -> bool {
- index.iter().zip(self.size().iter()).all(|(i,s)| i < s)
+trait TensorSize: TensorDimension {
+ fn size(&self) -> [usize; Self::DIM];
+ fn inbounds(&self, index: [usize; Self::DIM]) -> bool {
+ index.iter().zip(self.size().iter()).all(|(i, s)| i < s)
}
}
-
trait Broadcastable: TensorSize + Sized {
type Element;
- fn bget(&self, index:[usize;Self::DIM]) -> Option<Self::Element>;
- fn lazy_updim<const NEWDIM : usize>(&self, size : [usize;NEWDIM] ) ->
- LazyUpdim<Self,{Self::DIM},NEWDIM>
- {
- assert!(NEWDIM >= Self::DIM,
- "Updimmed tensor cannot have fewer indices than the initial one.");
- LazyUpdim {size,reference:&self}
+ fn bget(&self, index: [usize; Self::DIM]) -> Option<Self::Element>;
+ fn lazy_updim<const NEWDIM: usize>(
+ &self,
+ size: [usize; NEWDIM],
+ ) -> LazyUpdim<Self, { Self::DIM }, NEWDIM> {
+ assert!(
+ NEWDIM >= Self::DIM,
+ "Updimmed tensor cannot have fewer indices than the initial one."
+ );
+ LazyUpdim { size, reference: &self }
}
- fn bmap<T,F :Fn(Self::Element) -> T>(&self,foo : F) -> BMap<T,Self,F,{Self::DIM}>{
- BMap {reference:self,closure : foo}
+ fn bmap<T, F: Fn(Self::Element) -> T>(&self, foo: F) -> BMap<T, Self, F, { Self::DIM }> {
+ BMap { reference: self, closure: foo }
}
}
-
-struct LazyUpdim<'a,T : Broadcastable,const OLDDIM : usize, const DIM : usize> {
- size : [usize;DIM],
- reference : &'a T
+struct LazyUpdim<'a, T: Broadcastable, const OLDDIM: usize, const DIM: usize> {
+ size: [usize; DIM],
+ reference: &'a T,
}
-impl<'a,T : Broadcastable,const DIM : usize> TensorDimension for LazyUpdim<'a,T,{T::DIM},DIM> {
- const DIM : usize = DIM;
+impl<'a, T: Broadcastable, const DIM: usize> TensorDimension for LazyUpdim<'a, T, { T::DIM }, DIM> {
+ const DIM: usize = DIM;
}
-impl<'a,T : Broadcastable,const DIM : usize> TensorSize for LazyUpdim<'a,T,{T::DIM},DIM> {
- fn size(&self) -> [usize;DIM] {self.size}
- //~^ ERROR method not compatible with trait
+impl<'a, T: Broadcastable, const DIM: usize> TensorSize for LazyUpdim<'a, T, { T::DIM }, DIM> {
+ fn size(&self) -> [usize; DIM] {
+ self.size
+ }
}
-impl<'a,T : Broadcastable,const DIM : usize> Broadcastable for LazyUpdim<'a,T,{T::DIM},DIM>
-{
+impl<'a, T: Broadcastable, const DIM: usize> Broadcastable for LazyUpdim<'a, T, { T::DIM }, DIM> {
type Element = T::Element;
- fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
- //~^ ERROR method not compatible with trait
+ fn bget(&self, index: [usize; DIM]) -> Option<Self::Element> {
assert!(DIM >= T::DIM);
- if !self.inbounds(index) {return None}
- //~^ ERROR unconstrained generic constant
- //~| ERROR mismatched types
+ if !self.inbounds(index) {
+ return None;
+ }
let size = self.size();
- //~^ ERROR unconstrained generic constant
- let newindex : [usize;T::DIM] = Default::default();
- //~^ ERROR the trait bound `[usize; _]: Default` is not satisfied
+ let newindex: [usize; T::DIM] = Default::default();
self.reference.bget(newindex)
}
}
-struct BMap<'a,R, T : Broadcastable, F : Fn(T::Element) -> R , const DIM: usize> {
- reference : &'a T,
- closure : F
+struct BMap<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> {
+ reference: &'a T,
+ closure: F,
}
-impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R,
- const DIM: usize> TensorDimension for BMap<'a,R,T,F,DIM> {
-
- const DIM : usize = DIM;
+impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> TensorDimension
+ for BMap<'a, R, T, F, DIM>
+{
+ const DIM: usize = DIM;
}
-impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R ,
- const DIM: usize> TensorSize for BMap<'a,R,T,F,DIM> {
-
- fn size(&self) -> [usize;DIM] {self.reference.size()}
- //~^ ERROR unconstrained generic constant
- //~| ERROR mismatched types
- //~| ERROR method not compatible with trait
+impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> TensorSize
+ for BMap<'a, R, T, F, DIM>
+{
+ fn size(&self) -> [usize; DIM] {
+ self.reference.size()
+ }
}
-impl<'a,R, T : Broadcastable, F : Fn(T::Element) -> R ,
- const DIM: usize> Broadcastable for BMap<'a,R,T,F,DIM> {
-
+impl<'a, R, T: Broadcastable, F: Fn(T::Element) -> R, const DIM: usize> Broadcastable
+ for BMap<'a, R, T, F, DIM>
+{
type Element = R;
- fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
- //~^ ERROR method not compatible with trait
+ fn bget(&self, index: [usize; DIM]) -> Option<Self::Element> {
self.reference.bget(index).map(&self.closure)
- //~^ ERROR unconstrained generic constant
- //~| ERROR mismatched types
}
}
impl<T> TensorDimension for Vec<T> {
- const DIM : usize = 1;
+ const DIM: usize = 1;
}
impl<T> TensorSize for Vec<T> {
- fn size(&self) -> [usize;1] {[self.len()]}
+ fn size(&self) -> [usize; 1] {
+ [self.len()]
+ }
}
impl<T: Clone> Broadcastable for Vec<T> {
type Element = T;
- fn bget(& self,index : [usize;1]) -> Option<T> {
+ fn bget(&self, index: [usize; 1]) -> Option<T> {
self.get(index[0]).cloned()
}
}
fn main() {
- let v = vec![1,2,3];
- let bv = v.lazy_updim([3,4]);
- let bbv = bv.bmap(|x| x*x);
+ let v = vec![1, 2, 3];
+ let bv = v.lazy_updim([3, 4]);
+ let bbv = bv.bmap(|x| x * x);
- println!("The size of v is {:?}",bbv.bget([0,2]).expect("Out of bounds."));
+ println!("The size of v is {:?}", bbv.bget([0, 2]).expect("Out of bounds."));
}
-error[E0308]: method not compatible with trait
- --> $DIR/issue-83765.rs:44:5
+error[E0391]: cycle detected when resolving instance `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`
+ --> $DIR/issue-83765.rs:5:5
|
-LL | fn size(&self) -> [usize;DIM] {self.size}
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
+LL | const DIM: usize;
+ | ^^^^^^^^^^^^^^^^^
|
- = note: expected type `Self::DIM`
- found type `DIM`
-
-error[E0308]: method not compatible with trait
- --> $DIR/issue-83765.rs:51:5
- |
-LL | fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
- |
- = note: expected type `Self::DIM`
- found type `DIM`
-
-error[E0308]: method not compatible with trait
- --> $DIR/issue-83765.rs:78:5
- |
-LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
- |
- = note: expected type `Self::DIM`
- found type `DIM`
-
-error[E0308]: method not compatible with trait
- --> $DIR/issue-83765.rs:88:5
- |
-LL | fn bget(&self,index:[usize;DIM]) -> Option<Self::Element> {
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Self::DIM`, found `DIM`
- |
- = note: expected type `Self::DIM`
- found type `DIM`
-
-error: unconstrained generic constant
- --> $DIR/issue-83765.rs:54:18
- |
-LL | if !self.inbounds(index) {return None}
- | ^^^^^^^^
- |
- = help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
-note: required by a bound in `TensorSize::inbounds`
- --> $DIR/issue-83765.rs:12:38
- |
-LL | fn inbounds(&self,index : [usize;Self::DIM]) -> bool {
- | ^^^^^^^^^ required by this bound in `TensorSize::inbounds`
-
-error[E0308]: mismatched types
- --> $DIR/issue-83765.rs:54:27
- |
-LL | if !self.inbounds(index) {return None}
- | ^^^^^ expected `Self::DIM`, found `DIM`
- |
- = note: expected type `Self::DIM`
- found type `DIM`
-
-error: unconstrained generic constant
- --> $DIR/issue-83765.rs:57:25
- |
-LL | let size = self.size();
- | ^^^^
- |
- = help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
-note: required by a bound in `TensorSize::size`
- --> $DIR/issue-83765.rs:11:30
- |
-LL | fn size(&self) -> [usize;Self::DIM];
- | ^^^^^^^^^ required by this bound in `TensorSize::size`
-
-error[E0277]: the trait bound `[usize; _]: Default` is not satisfied
- --> $DIR/issue-83765.rs:59:41
- |
-LL | let newindex : [usize;T::DIM] = Default::default();
- | ^^^^^^^^^^^^^^^^ the trait `Default` is not implemented for `[usize; _]`
- |
-help: consider introducing a `where` bound, but there might be an alternative better way to express this requirement
- |
-LL | impl<'a,T : Broadcastable,const DIM : usize> Broadcastable for LazyUpdim<'a,T,{T::DIM},DIM> where [usize; _]: Default
- | +++++++++++++++++++++++++
-
-error: unconstrained generic constant
- --> $DIR/issue-83765.rs:78:51
- |
-LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
- | ^^^^
- |
- = help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
-note: required by a bound in `TensorSize::size`
- --> $DIR/issue-83765.rs:11:30
- |
-LL | fn size(&self) -> [usize;Self::DIM];
- | ^^^^^^^^^ required by this bound in `TensorSize::size`
-
-error[E0308]: mismatched types
- --> $DIR/issue-83765.rs:78:36
- |
-LL | fn size(&self) -> [usize;DIM] {self.reference.size()}
- | ^^^^^^^^^^^^^^^^^^^^^ expected `DIM`, found `Self::DIM`
- |
- = note: expected type `DIM`
- found type `Self::DIM`
-
-error: unconstrained generic constant
- --> $DIR/issue-83765.rs:90:24
- |
-LL | self.reference.bget(index).map(&self.closure)
- | ^^^^
- |
- = help: try adding a `where` bound using this expression: `where [(); Self::DIM]:`
-note: required by a bound in `Broadcastable::bget`
- --> $DIR/issue-83765.rs:20:33
- |
-LL | fn bget(&self, index:[usize;Self::DIM]) -> Option<Self::Element>;
- | ^^^^^^^^^ required by this bound in `Broadcastable::bget`
-
-error[E0308]: mismatched types
- --> $DIR/issue-83765.rs:90:29
- |
-LL | self.reference.bget(index).map(&self.closure)
- | ^^^^^ expected `Self::DIM`, found `DIM`
+note: ...which requires checking if `TensorDimension` fulfills its obligations...
+ --> $DIR/issue-83765.rs:4:1
|
- = note: expected type `Self::DIM`
- found type `DIM`
+LL | trait TensorDimension {
+ | ^^^^^^^^^^^^^^^^^^^^^
+ = note: ...which again requires resolving instance `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`, completing the cycle
+ = note: cycle used when normalizing `<LazyUpdim<T, { T::DIM }, DIM> as TensorDimension>::DIM`
-error: aborting due to 12 previous errors
+error: aborting due to previous error
-Some errors have detailed explanations: E0277, E0308.
-For more information about an error, try `rustc --explain E0277`.
+For more information about this error, try `rustc --explain E0391`.
// error-pattern: evaluation of constant value failed
#![feature(const_ptr_read)]
-#![feature(const_ptr_offset)]
fn main() {
use std::ptr;
| memory access failed: alloc7 has size 4, so pointer to 4 bytes starting at offset 4 is out-of-bounds
| inside `std::ptr::read::<u32>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
|
- ::: $DIR/out_of_bounds_read.rs:13:33
+ ::: $DIR/out_of_bounds_read.rs:12:33
|
LL | const _READ: u32 = unsafe { ptr::read(PAST_END_PTR) };
- | ----------------------- inside `_READ` at $DIR/out_of_bounds_read.rs:13:33
+ | ----------------------- inside `_READ` at $DIR/out_of_bounds_read.rs:12:33
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
LL | unsafe { read(self) }
| ---------- inside `ptr::const_ptr::<impl *const u32>::read` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/out_of_bounds_read.rs:14:39
+ ::: $DIR/out_of_bounds_read.rs:13:39
|
LL | const _CONST_READ: u32 = unsafe { PAST_END_PTR.read() };
- | ------------------- inside `_CONST_READ` at $DIR/out_of_bounds_read.rs:14:39
+ | ------------------- inside `_CONST_READ` at $DIR/out_of_bounds_read.rs:13:39
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/mod.rs:LL:COL
LL | unsafe { read(self) }
| ---------- inside `ptr::mut_ptr::<impl *mut u32>::read` at $SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL
|
- ::: $DIR/out_of_bounds_read.rs:15:37
+ ::: $DIR/out_of_bounds_read.rs:14:37
|
LL | const _MUT_READ: u32 = unsafe { (PAST_END_PTR as *mut u32).read() };
- | --------------------------------- inside `_MUT_READ` at $DIR/out_of_bounds_read.rs:15:37
+ | --------------------------------- inside `_MUT_READ` at $DIR/out_of_bounds_read.rs:14:37
error: aborting due to 3 previous errors
-// build-fail
+// build-pass
// compile-flags: -Zmir-opt-level=3
+// Overflow can't be detected by const prop
+// could only be detected after optimizations
#![deny(warnings)]
fn main() {
let _ = add(u8::MAX, 1);
- //~^ NOTE in this expansion of inlined source
- //~| NOTE in this expansion of inlined source
}
#[inline(always)]
fn add(x: u8, y: u8) -> u8 {
x + y
- //~^ ERROR this arithmetic operation will overflow
- //~| NOTE attempt to compute `u8::MAX + 1_u8`, which would overflow
- //~| NOTE `#[deny(arithmetic_overflow)]` on by default
}
+++ /dev/null
-error: this arithmetic operation will overflow
- --> $DIR/inline_spans.rs:14:5
- |
-LL | let _ = add(u8::MAX, 1);
- | --------------- in this inlined function call
-...
-LL | x + y
- | ^^^^^ attempt to compute `u8::MAX + 1_u8`, which would overflow
- |
- = note: `#[deny(arithmetic_overflow)]` on by default
-
-error: aborting due to previous error
-
+++ /dev/null
-pub fn foo(x: &usize) -> usize {
- *x
-}
+++ /dev/null
-pub extern "C" fn bar() {
-}
-
-pub const foopy: &'static str = "hi there";
-pub const uint_val: usize = 12;
-pub const uint_expr: usize = (1 << uint_val) - 1;
#![allow(unused)]
#![feature(const_trait_impl, inline_const, negative_impls)]
-const fn f<T: ~const Drop>(x: T) {}
+use std::marker::Destruct;
+
+const fn f<T: ~const Destruct>(x: T) {}
struct UnconstDrop;
fn main() {
const {
f(UnconstDrop);
- //~^ ERROR the trait bound `UnconstDrop: ~const Drop` is not satisfied
+ //~^ ERROR can't drop
f(NonDrop);
- //~^ ERROR the trait bound `NonDrop: ~const Drop` is not satisfied
+ //~^ ERROR can't drop
}
}
-error[E0277]: the trait bound `UnconstDrop: ~const Drop` is not satisfied
- --> $DIR/const-block-const-bound.rs:18:11
+error[E0277]: can't drop `UnconstDrop` in const contexts
+ --> $DIR/const-block-const-bound.rs:20:11
|
LL | f(UnconstDrop);
- | - ^^^^^^^^^^^ expected an implementor of trait `~const Drop`
+ | - ^^^^^^^^^^^ expected an implementor of trait `~const Destruct`
| |
| required by a bound introduced by this call
|
+ = note: the trait bound `UnconstDrop: ~const Destruct` is not satisfied
note: required by a bound in `f`
- --> $DIR/const-block-const-bound.rs:4:15
+ --> $DIR/const-block-const-bound.rs:6:15
|
-LL | const fn f<T: ~const Drop>(x: T) {}
- | ^^^^^^^^^^^ required by this bound in `f`
+LL | const fn f<T: ~const Destruct>(x: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `f`
help: consider borrowing here
|
LL | f(&UnconstDrop);
LL | f(&mut UnconstDrop);
| ++++
-error[E0277]: the trait bound `NonDrop: ~const Drop` is not satisfied
- --> $DIR/const-block-const-bound.rs:20:11
+error[E0277]: can't drop `NonDrop` in const contexts
+ --> $DIR/const-block-const-bound.rs:22:11
|
LL | f(NonDrop);
- | - ^^^^^^^ expected an implementor of trait `~const Drop`
+ | - ^^^^^^^ expected an implementor of trait `~const Destruct`
| |
| required by a bound introduced by this call
|
+ = note: the trait bound `NonDrop: ~const Destruct` is not satisfied
note: required by a bound in `f`
- --> $DIR/const-block-const-bound.rs:4:15
+ --> $DIR/const-block-const-bound.rs:6:15
|
-LL | const fn f<T: ~const Drop>(x: T) {}
- | ^^^^^^^^^^^ required by this bound in `f`
+LL | const fn f<T: ~const Destruct>(x: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `f`
help: consider borrowing here
|
LL | f(&NonDrop);
+++ /dev/null
-// run-pass
-// aux-build:cci_const.rs
-#![allow(non_upper_case_globals)]
-
-extern crate cci_const;
-static foo: &'static str = cci_const::foopy;
-static a: usize = cci_const::uint_val;
-static b: usize = cci_const::uint_expr + 5;
-
-pub fn main() {
- assert_eq!(a, 12);
- let foo2 = a;
- assert_eq!(foo2, cci_const::uint_val);
- assert_eq!(b, cci_const::uint_expr + 5);
- assert_eq!(foo, cci_const::foopy);
-}
+++ /dev/null
-// run-pass
-// aux-build:cci_const.rs
-#![allow(non_upper_case_globals)]
-
-extern crate cci_const;
-use cci_const::bar;
-static foo: extern "C" fn() = bar;
-
-pub fn main() {
- assert!(foo == bar);
-}
--> $DIR/const-deref-ptr.rs:4:29
|
LL | static C: u64 = unsafe {*(0xdeadbeef as *const u64)};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 0xdeadbeef is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ dereferencing pointer failed: 0xdeadbeef is not a valid pointer
error: aborting due to previous error
| ^ referenced constant has errors
query stack during panic:
#0 [try_normalize_mir_const_after_erasing_regions] normalizing `main::promoted[1]`
-#1 [optimized_mir] optimizing MIR for `main`
-#2 [collect_and_partition_mono_items] collect_and_partition_mono_items
+#1 [mir_drops_elaborated_and_const_checked] elaborating drops for `main`
+#2 [optimized_mir] optimizing MIR for `main`
+#3 [collect_and_partition_mono_items] collect_and_partition_mono_items
end of query stack
--> $DIR/const_raw_ptr_ops2.rs:7:26
|
LL | const Z2: i32 = unsafe { *(42 as *const i32) };
- | ^^^^^^^^^^^^^^^^^^^ 0x2a is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^ dereferencing pointer failed: 0x2a is not a valid pointer
error[E0080]: evaluation of constant value failed
--> $DIR/const_raw_ptr_ops2.rs:9:26
|
LL | const Z3: i32 = unsafe { *(44 as *const i32) };
- | ^^^^^^^^^^^^^^^^^^^ 0x2c is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^ dereferencing pointer failed: 0x2c is not a valid pointer
error: aborting due to 2 previous errors
--> $DIR/issue-49296.rs:9:16
|
LL | const X: u64 = *wat(42);
- | ^^^^^^^^ pointer to alloc2 was dereferenced after this allocation got freed
+ | ^^^^^^^^ pointer to alloc3 was dereferenced after this allocation got freed
error: aborting due to previous error
fn main() {
let _ = PrintName::VOID;
+ //~^ ERROR erroneous constant used [E0080]
}
|
= note: this error originates in the macro `$crate::panic::panic_2015` (in Nightly builds, run with -Z macro-backtrace for more info)
-error: aborting due to previous error
+error[E0080]: erroneous constant used
+ --> $DIR/panic-assoc-never-type.rs:15:13
+ |
+LL | let _ = PrintName::VOID;
+ | ^^^^^^^^^^^^^^^ referenced constant has errors
+
+error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0080`.
+warning: this arithmetic operation will overflow
+ --> $DIR/promoted_errors.rs:15:5
+ |
+LL | 0 - 1
+ | ^^^^^ attempt to compute `0_u32 - 1_u32`, which would overflow
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:20
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:21:5
+ |
+LL | 1 / 0
+ | ^^^^^ attempt to divide `1_i32` by zero
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:41
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:27:5
+ |
+LL | 1 / (1 - 1)
+ | ^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:31:5
+ |
+LL | 1 / (false as i32)
+ | ^^^^^^^^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:35:5
+ |
+LL | [1, 2, 3][4]
+ | ^^^^^^^^^^^^ index out of bounds: the length is 3 but the index is 4
+
warning: any use of this value will cause an error
--> $DIR/promoted_errors.rs:15:5
|
| |
| attempt to compute `0_u32 - 1_u32`, which would overflow
| inside `overflow` at $DIR/promoted_errors.rs:15:5
- | inside `X` at $DIR/promoted_errors.rs:38:29
+ | inside `X` at $DIR/promoted_errors.rs:43:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:38:28
+ --> $DIR/promoted_errors.rs:43:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
-warning: 2 warnings emitted
+warning: 7 warnings emitted
+warning: this arithmetic operation will overflow
+ --> $DIR/promoted_errors.rs:15:5
+ |
+LL | 0 - 1
+ | ^^^^^ attempt to compute `0_u32 - 1_u32`, which would overflow
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:20
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:21:5
+ |
+LL | 1 / 0
+ | ^^^^^ attempt to divide `1_i32` by zero
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:41
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:27:5
+ |
+LL | 1 / (1 - 1)
+ | ^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:31:5
+ |
+LL | 1 / (false as i32)
+ | ^^^^^^^^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:35:5
+ |
+LL | [1, 2, 3][4]
+ | ^^^^^^^^^^^^ index out of bounds: the length is 3 but the index is 4
+
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:20:5
+ --> $DIR/promoted_errors.rs:21:5
|
LL | 1 / 0
| ^^^^^
| |
| attempt to divide `1_i32` by zero
- | inside `div_by_zero1` at $DIR/promoted_errors.rs:20:5
- | inside `X` at $DIR/promoted_errors.rs:41:29
+ | inside `div_by_zero1` at $DIR/promoted_errors.rs:21:5
+ | inside `X` at $DIR/promoted_errors.rs:46:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:41:28
+ --> $DIR/promoted_errors.rs:46:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
-warning: 2 warnings emitted
+warning: 7 warnings emitted
+warning: this arithmetic operation will overflow
+ --> $DIR/promoted_errors.rs:15:5
+ |
+LL | 0 - 1
+ | ^^^^^ attempt to compute `0_u32 - 1_u32`, which would overflow
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:20
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:21:5
+ |
+LL | 1 / 0
+ | ^^^^^ attempt to divide `1_i32` by zero
+ |
+note: the lint level is defined here
+ --> $DIR/promoted_errors.rs:11:41
+ |
+LL | #![warn(const_err, arithmetic_overflow, unconditional_panic)]
+ | ^^^^^^^^^^^^^^^^^^^
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:27:5
+ |
+LL | 1 / (1 - 1)
+ | ^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:31:5
+ |
+LL | 1 / (false as i32)
+ | ^^^^^^^^^^^^^^^^^^ attempt to divide `1_i32` by zero
+
+warning: this operation will panic at runtime
+ --> $DIR/promoted_errors.rs:35:5
+ |
+LL | [1, 2, 3][4]
+ | ^^^^^^^^^^^^ index out of bounds: the length is 3 but the index is 4
+
warning: any use of this value will cause an error
--> $DIR/promoted_errors.rs:15:5
|
| |
| attempt to compute `0_u32 - 1_u32`, which would overflow
| inside `overflow` at $DIR/promoted_errors.rs:15:5
- | inside `X` at $DIR/promoted_errors.rs:38:29
+ | inside `X` at $DIR/promoted_errors.rs:43:29
...
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
warning: any use of this value will cause an error
- --> $DIR/promoted_errors.rs:38:28
+ --> $DIR/promoted_errors.rs:43:28
|
LL | / const X: () = {
LL | | let _x: &'static u32 = &overflow();
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
-warning: 2 warnings emitted
+warning: 7 warnings emitted
0 - 1
//[opt_with_overflow_checks,noopt]~^ WARN any use of this value will cause an error
//[opt_with_overflow_checks,noopt]~| WARN this was previously accepted by the compiler
+ //~^^^ WARN this arithmetic operation will overflow
}
const fn div_by_zero1() -> i32 {
1 / 0
//[opt]~^ WARN any use of this value will cause an error
//[opt]~| WARN this was previously accepted by the compiler but is being phased out
+ //~^^^ WARN this operation will panic at runtime
}
const fn div_by_zero2() -> i32 {
1 / (1 - 1)
+ //~^ WARN this operation will panic at runtime
}
const fn div_by_zero3() -> i32 {
1 / (false as i32)
+ //~^ WARN this operation will panic at runtime
}
const fn oob() -> i32 {
[1, 2, 3][4]
+ //~^ WARN this operation will panic at runtime
}
// An unused constant containing failing promoteds.
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not a valid pointer for this operation
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access failed: null pointer is not a valid pointer
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5
--> $DIR/ub-wide-ptr.rs:135:5
|
LL | mem::transmute::<_, &dyn Trait>((&92u8, 0usize))
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not a valid pointer for this operation
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ memory access failed: null pointer is not a valid pointer
error[E0080]: could not evaluate static initializer
--> $DIR/ub-wide-ptr.rs:139:5
const FIELD3: Field3 = unsafe { UNION.field3 };
//~^ ERROR it is undefined behavior to use this value
FIELD3
+ //~^ ERROR erroneous constant used [E0080]
}
fn main() {
__ __ __ __ __ __ __ __ │ ░░░░░░░░
}
-error: aborting due to previous error
+error[E0080]: erroneous constant used
+ --> $DIR/union-const-eval-field.rs:30:5
+ |
+LL | FIELD3
+ | ^^^^^^ referenced constant has errors
+
+error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0080`.
LL | Some(&mut *(42 as *mut i32))
| ^^^^^^^^^^^^^^^^^^^^^^
| |
- | 0x2a is not a valid pointer
+ | dereferencing pointer failed: 0x2a is not a valid pointer
| inside `helper` at $DIR/mut_ref_in_final_dynamic_check.rs:13:10
...
LL | const A: Option<&mut i32> = helper();
// ignore-tidy-linelength
#![feature(intrinsics, staged_api)]
-#![feature(const_mut_refs, const_intrinsic_copy, const_ptr_offset)]
+#![feature(const_mut_refs, const_intrinsic_copy)]
use std::mem;
extern "rust-intrinsic" {
error[E0080]: it is undefined behavior to use this value
- --> $DIR/invalid-union.rs:41:1
+ --> $DIR/invalid-union.rs:40:1
|
LL | fn main() {
| ^^^^^^^^^ type validation failed at .<deref>.y.<enum-variant(B)>.0: encountered `UnsafeCell` in a `const`
}
error: erroneous constant used
- --> $DIR/invalid-union.rs:42:25
+ --> $DIR/invalid-union.rs:41:25
|
LL | let _: &'static _ = &C;
| ^^ referenced constant has errors
error[E0080]: it is undefined behavior to use this value
- --> $DIR/invalid-union.rs:41:1
+ --> $DIR/invalid-union.rs:40:1
|
LL | fn main() {
| ^^^^^^^^^ type validation failed at .<deref>.y.<enum-variant(B)>.0: encountered `UnsafeCell` in a `const`
}
error: erroneous constant used
- --> $DIR/invalid-union.rs:42:25
+ --> $DIR/invalid-union.rs:41:25
|
LL | let _: &'static _ = &C;
| ^^ referenced constant has errors
// build-fail
// stderr-per-bitwidth
#![feature(const_mut_refs)]
-#![feature(const_ptr_offset)]
#![feature(untagged_unions)]
use std::cell::Cell;
const fn foo() { (||{})() }
//~^ ERROR cannot call non-const closure
+//~| ERROR erroneous constant used [const_err]
+//~| WARNING this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
const fn bad(input: fn()) {
input()
= note: calls in constant functions are limited to constant functions, tuple structs and tuple variants
error: function pointers are not allowed in const fn
- --> $DIR/issue-56164.rs:5:5
+ --> $DIR/issue-56164.rs:7:5
|
LL | input()
| ^^^^^^^
-error: aborting due to 2 previous errors
+error: erroneous constant used
+ --> $DIR/issue-56164.rs:1:18
+ |
+LL | const fn foo() { (||{})() }
+ | ^^^^^^ referenced constant has errors
+ |
+ = note: `#[deny(const_err)]` on by default
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
+
+error: aborting due to 3 previous errors
For more information about this error, try `rustc --explain E0015`.
//~^ ERROR: argument to `panic!()` in a const context must have type `&str`
const fn _foo() {
- panic!(&1); //~ ERROR: argument to `panic!()` in a const context must have type `&str`
+ panic!(&1);
+ //~^ ERROR: argument to `panic!()` in a const context must have type `&str`
+ //~| ERROR: erroneous constant used [const_err]
+ //~| WARNING: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
}
// ensure that conforming panics don't cause an error
|
= note: this error originates in the macro `$crate::panic::panic_2015` (in Nightly builds, run with -Z macro-backtrace for more info)
-error: aborting due to 3 previous errors
+error: erroneous constant used
+ --> $DIR/issue-66693.rs:11:12
+ |
+LL | panic!(&1);
+ | ^^ referenced constant has errors
+ |
+ = note: `#[deny(const_err)]` on by default
+ = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ = note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
+
+error: aborting due to 4 previous errors
// error-pattern unable to turn pointer into raw bytes
#![feature(const_ptr_read)]
-#![feature(const_ptr_offset)]
const C: () = unsafe {
let foo = Some(&42 as *const i32);
| unable to turn pointer into raw bytes
| inside `std::ptr::read::<u8>` at $SRC_DIR/core/src/ptr/mod.rs:LL:COL
| inside `ptr::const_ptr::<impl *const u8>::read` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
- | inside `C` at $DIR/issue-miri-1910.rs:8:5
+ | inside `C` at $DIR/issue-miri-1910.rs:7:5
|
- ::: $DIR/issue-miri-1910.rs:5:1
+ ::: $DIR/issue-miri-1910.rs:4:1
|
LL | / const C: () = unsafe {
LL | | let foo = Some(&42 as *const i32);
// run-pass
-#![feature(const_ptr_offset)]
#![feature(const_ptr_offset_from)]
use std::ptr;
//~| 0x10 is not a valid pointer
};
+const OUT_OF_BOUNDS_1: isize = {
+ let start_ptr = &4 as *const _ as *const u8;
+ let length = 10;
+ let end_ptr = (start_ptr).wrapping_add(length);
+ // First ptr is out of bounds
+ unsafe { ptr_offset_from(end_ptr, start_ptr) } //~ERROR evaluation of constant value failed
+ //~| pointer at offset 10 is out-of-bounds
+};
+
+const OUT_OF_BOUNDS_2: isize = {
+ let start_ptr = &4 as *const _ as *const u8;
+ let length = 10;
+ let end_ptr = (start_ptr).wrapping_add(length);
+ // Second ptr is out of bounds
+ unsafe { ptr_offset_from(start_ptr, end_ptr) } //~ERROR evaluation of constant value failed
+ //~| pointer at offset 10 is out-of-bounds
+};
+
+const OUT_OF_BOUNDS_SAME: isize = {
+ let start_ptr = &4 as *const _ as *const u8;
+ let length = 10;
+ let end_ptr = (start_ptr).wrapping_add(length);
+ unsafe { ptr_offset_from(end_ptr, end_ptr) } //~ERROR evaluation of constant value failed
+ //~| pointer at offset 10 is out-of-bounds
+};
+
fn main() {}
LL | unsafe { intrinsics::ptr_offset_from(self, origin) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | 0x2a is not a valid pointer
+ | out-of-bounds offset_from: 0x2a is not a valid pointer
| inside `ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
::: $DIR/offset_from_ub.rs:23:14
--> $DIR/offset_from_ub.rs:36:14
|
LL | unsafe { ptr_offset_from(ptr, ptr) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ null pointer is not a valid pointer for this operation
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ out-of-bounds offset_from: null pointer is not a valid pointer
error[E0080]: evaluation of constant value failed
--> $DIR/offset_from_ub.rs:43:14
|
LL | unsafe { ptr_offset_from(ptr2, ptr1) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 0x10 is not a valid pointer
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ out-of-bounds offset_from: 0x10 is not a valid pointer
-error: aborting due to 5 previous errors
+error[E0080]: evaluation of constant value failed
+ --> $DIR/offset_from_ub.rs:52:14
+ |
+LL | unsafe { ptr_offset_from(end_ptr, start_ptr) }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ out-of-bounds offset_from: alloc18 has size 4, so pointer at offset 10 is out-of-bounds
+
+error[E0080]: evaluation of constant value failed
+ --> $DIR/offset_from_ub.rs:61:14
+ |
+LL | unsafe { ptr_offset_from(start_ptr, end_ptr) }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ out-of-bounds offset_from: alloc21 has size 4, so pointer at offset 10 is out-of-bounds
+
+error[E0080]: evaluation of constant value failed
+ --> $DIR/offset_from_ub.rs:69:14
+ |
+LL | unsafe { ptr_offset_from(end_ptr, end_ptr) }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ out-of-bounds offset_from: alloc24 has size 4, so pointer at offset 10 is out-of-bounds
+
+error: aborting due to 8 previous errors
For more information about this error, try `rustc --explain E0080`.
-#![feature(const_ptr_offset)]
use std::ptr;
// normalize-stderr-test "alloc\d+" -> "allocN"
| overflowing in-bounds pointer arithmetic
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:8:46
+ ::: $DIR/offset_ub.rs:7:46
|
LL | pub const BEFORE_START: *const u8 = unsafe { (&0u8 as *const u8).offset(-1) };
- | ------------------------------ inside `BEFORE_START` at $DIR/offset_ub.rs:8:46
+ | ------------------------------ inside `BEFORE_START` at $DIR/offset_ub.rs:7:46
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| pointer arithmetic failed: allocN has size 1, so pointer to 2 bytes starting at offset 0 is out-of-bounds
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:9:43
+ ::: $DIR/offset_ub.rs:8:43
|
LL | pub const AFTER_END: *const u8 = unsafe { (&0u8 as *const u8).offset(2) };
- | ----------------------------- inside `AFTER_END` at $DIR/offset_ub.rs:9:43
+ | ----------------------------- inside `AFTER_END` at $DIR/offset_ub.rs:8:43
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| pointer arithmetic failed: allocN has size 100, so pointer to 101 bytes starting at offset 0 is out-of-bounds
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:10:45
+ ::: $DIR/offset_ub.rs:9:45
|
LL | pub const AFTER_ARRAY: *const u8 = unsafe { [0u8; 100].as_ptr().offset(101) };
- | ------------------------------- inside `AFTER_ARRAY` at $DIR/offset_ub.rs:10:45
+ | ------------------------------- inside `AFTER_ARRAY` at $DIR/offset_ub.rs:9:45
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| overflowing in-bounds pointer arithmetic
| inside `ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:12:43
+ ::: $DIR/offset_ub.rs:11:43
|
LL | pub const OVERFLOW: *const u16 = unsafe { [0u16; 1].as_ptr().offset(isize::MAX) };
- | ------------------------------------- inside `OVERFLOW` at $DIR/offset_ub.rs:12:43
+ | ------------------------------------- inside `OVERFLOW` at $DIR/offset_ub.rs:11:43
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| overflowing in-bounds pointer arithmetic
| inside `ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:13:44
+ ::: $DIR/offset_ub.rs:12:44
|
LL | pub const UNDERFLOW: *const u16 = unsafe { [0u16; 1].as_ptr().offset(isize::MIN) };
- | ------------------------------------- inside `UNDERFLOW` at $DIR/offset_ub.rs:13:44
+ | ------------------------------------- inside `UNDERFLOW` at $DIR/offset_ub.rs:12:44
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| overflowing in-bounds pointer arithmetic
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:14:56
+ ::: $DIR/offset_ub.rs:13:56
|
LL | pub const OVERFLOW_ADDRESS_SPACE: *const u8 = unsafe { (usize::MAX as *const u8).offset(2) };
- | ----------------------------------- inside `OVERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:14:56
+ | ----------------------------------- inside `OVERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:13:56
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| overflowing in-bounds pointer arithmetic
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:15:57
+ ::: $DIR/offset_ub.rs:14:57
|
LL | pub const UNDERFLOW_ADDRESS_SPACE: *const u8 = unsafe { (1 as *const u8).offset(-2) };
- | --------------------------- inside `UNDERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:15:57
+ | --------------------------- inside `UNDERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:14:57
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| pointer arithmetic failed: allocN has size 1, so pointer to 2 bytes starting at offset -4 is out-of-bounds
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:16:49
+ ::: $DIR/offset_ub.rs:15:49
|
LL | pub const NEGATIVE_OFFSET: *const u8 = unsafe { [0u8; 1].as_ptr().wrapping_offset(-2).offset(-2) };
- | ------------------------------------------------ inside `NEGATIVE_OFFSET` at $DIR/offset_ub.rs:16:49
+ | ------------------------------------------------ inside `NEGATIVE_OFFSET` at $DIR/offset_ub.rs:15:49
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
| pointer arithmetic failed: allocN has size 0, so pointer to 1 byte starting at offset 0 is out-of-bounds
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:18:50
+ ::: $DIR/offset_ub.rs:17:50
|
LL | pub const ZERO_SIZED_ALLOC: *const u8 = unsafe { [0u8; 0].as_ptr().offset(1) };
- | --------------------------- inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:18:50
+ | --------------------------- inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:17:50
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL
LL | unsafe { intrinsics::offset(self, count) as *mut T }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | 0x1 is not a valid pointer
+ | pointer arithmetic failed: 0x1 is not a valid pointer
| inside `ptr::mut_ptr::<impl *mut u8>::offset` at $SRC_DIR/core/src/ptr/mut_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:19:42
+ ::: $DIR/offset_ub.rs:18:42
|
LL | pub const DANGLING: *const u8 = unsafe { ptr::NonNull::<u8>::dangling().as_ptr().offset(4) };
- | ------------------------------------------------- inside `DANGLING` at $DIR/offset_ub.rs:19:42
+ | ------------------------------------------------- inside `DANGLING` at $DIR/offset_ub.rs:18:42
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | pointer arithmetic failed: 0x0 is not a valid pointer
+ | pointer arithmetic failed: null pointer is not a valid pointer
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:22:50
+ ::: $DIR/offset_ub.rs:21:50
|
LL | pub const NULL_OFFSET_ZERO: *const u8 = unsafe { ptr::null::<u8>().offset(0) };
- | --------------------------- inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:22:50
+ | --------------------------- inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:21:50
error[E0080]: evaluation of constant value failed
--> $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
LL | unsafe { intrinsics::offset(self, count) }
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
| |
- | 0x7f..f is not a valid pointer
+ | pointer arithmetic failed: 0x7f..f is not a valid pointer
| inside `ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/offset_ub.rs:25:47
+ ::: $DIR/offset_ub.rs:24:47
|
LL | pub const UNDERFLOW_ABS: *const u8 = unsafe { (usize::MAX as *const u8).offset(isize::MIN) };
- | -------------------------------------------- inside `UNDERFLOW_ABS` at $DIR/offset_ub.rs:25:47
+ | -------------------------------------------- inside `UNDERFLOW_ABS` at $DIR/offset_ub.rs:24:47
error: aborting due to 12 previous errors
//[opt_with_overflow_checks]compile-flags: -C overflow-checks=on -O
// build-pass
-#[allow(arithmetic_overflow)]
const fn assert_static<T>(_: &'static T) {}
-const fn fail() -> i32 { 1/0 }
+#[allow(unconditional_panic)]
+const fn fail() -> i32 {
+ 1/0
+}
const C: i32 = {
// Promoted that fails to evaluate in dead code -- this must work
// (for backwards compatibility reasons).
#![feature(
core_intrinsics,
const_raw_ptr_comparison,
- const_ptr_offset,
)]
const FOO: &usize = &42;
| pointer arithmetic failed: alloc3 has size $WORD, so pointer to $TWO_WORDS bytes starting at offset 0 is out-of-bounds
| inside `ptr::const_ptr::<impl *const usize>::offset` at $SRC_DIR/core/src/ptr/const_ptr.rs:LL:COL
|
- ::: $DIR/ptr_comparisons.rs:59:34
+ ::: $DIR/ptr_comparisons.rs:58:34
|
LL | const _: *const usize = unsafe { (FOO as *const usize).offset(2) };
- | ------------------------------- inside `_` at $DIR/ptr_comparisons.rs:59:34
+ | ------------------------------- inside `_` at $DIR/ptr_comparisons.rs:58:34
error[E0080]: evaluation of constant value failed
- --> $DIR/ptr_comparisons.rs:62:33
+ --> $DIR/ptr_comparisons.rs:61:33
|
LL | unsafe { std::ptr::addr_of!((*(FOO as *const usize as *const [u8; 1000]))[999]) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ dereferencing pointer failed: alloc3 has size $WORD, so pointer to 1000 bytes starting at offset 0 is out-of-bounds
error: any use of this value will cause an error
- --> $DIR/ptr_comparisons.rs:66:27
+ --> $DIR/ptr_comparisons.rs:65:27
|
LL | const _: usize = unsafe { std::mem::transmute::<*const usize, usize>(FOO) + 4 };
| --------------------------^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^---
= note: for more information, see issue #71800 <https://github.com/rust-lang/rust/issues/71800>
error: any use of this value will cause an error
- --> $DIR/ptr_comparisons.rs:71:27
+ --> $DIR/ptr_comparisons.rs:70:27
|
LL | const _: usize = unsafe { *std::mem::transmute::<&&usize, &usize>(&FOO) + 4 };
| --------------------------^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^---
| ^^^^^^^^^^^^^^^^
|
= note: `#[warn(incomplete_features)]` on by default
- = note: see issue #44580 <https://github.com/rust-lang/rust/issues/44580> for more information
+ = note: see issue #95174 <https://github.com/rust-lang/rust/issues/95174> for more information
warning: 1 warning emitted
+++ /dev/null
-pub struct Foo {
- pub x: isize
-}
-
-impl Foo {
- pub fn new() -> Foo {
- Foo { x: 3 }
- }
-}
+++ /dev/null
-#![crate_type="lib"]
-
-pub struct Au(pub isize);
+++ /dev/null
-#![crate_type = "lib"]
-
-// used by the rpass test
-
-#[derive(Copy, Clone)]
-pub struct Struct;
-
-#[derive(Copy, Clone)]
-pub enum Unit {
- UnitVariant,
- Argument(Struct)
-}
-
-#[derive(Copy, Clone)]
-pub struct TupleStruct(pub usize, pub &'static str);
-
-// used by the cfail test
-
-#[derive(Copy, Clone)]
-pub struct StructWithFields {
- foo: isize,
-}
-
-#[derive(Copy, Clone)]
-pub enum EnumWithVariants {
- EnumVariant,
- EnumVariantArg(isize)
-}
--- /dev/null
+// run-pass
+// aux-build:cci_const.rs
+#![allow(non_upper_case_globals)]
+
+extern crate cci_const;
+static foo: &'static str = cci_const::foopy;
+static a: usize = cci_const::uint_val;
+static b: usize = cci_const::uint_expr + 5;
+
+pub fn main() {
+ assert_eq!(a, 12);
+ let foo2 = a;
+ assert_eq!(foo2, cci_const::uint_val);
+ assert_eq!(b, cci_const::uint_expr + 5);
+ assert_eq!(foo, cci_const::foopy);
+}
--- /dev/null
+// run-pass
+// aux-build:cci_const.rs
+#![allow(non_upper_case_globals)]
+
+extern crate cci_const;
+use cci_const::bar;
+static foo: extern "C" fn() = bar;
+
+pub fn main() {
+ assert!(foo == bar);
+}
+++ /dev/null
-// run-pass
-// aux-build:newtype_struct_xc.rs
-
-
-extern crate newtype_struct_xc;
-
-pub fn main() {
- let x = newtype_struct_xc::Au(21);
- match x {
- newtype_struct_xc::Au(n) => assert_eq!(n, 21)
- }
-}
+++ /dev/null
-// run-pass
-// aux-build:xcrate_unit_struct.rs
-// pretty-expanded FIXME #23616
-#![allow(non_upper_case_globals)]
-
-extern crate xcrate_unit_struct;
-
-const s1: xcrate_unit_struct::Struct = xcrate_unit_struct::Struct;
-static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::UnitVariant;
-static s3: xcrate_unit_struct::Unit =
- xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct);
-static s4: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::Argument(s1);
-static s5: xcrate_unit_struct::TupleStruct = xcrate_unit_struct::TupleStruct(20, "foo");
-
-fn f1(_: xcrate_unit_struct::Struct) {}
-fn f2(_: xcrate_unit_struct::Unit) {}
-fn f3(_: xcrate_unit_struct::TupleStruct) {}
-
-pub fn main() {
- f1(xcrate_unit_struct::Struct);
- f2(xcrate_unit_struct::Unit::UnitVariant);
- f2(xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct));
- f3(xcrate_unit_struct::TupleStruct(10, "bar"));
-
- f1(s1);
- f2(s2);
- f2(s3);
- f2(s4);
- f3(s5);
-}
--- /dev/null
+// Make sure the compiler does not ICE when trying to generate the debuginfo name of a type that
+// causes a layout error. See https://github.com/rust-lang/rust/issues/94961.
+
+// compile-flags:-C debuginfo=2
+// build-fail
+// error-pattern: too big for the current architecture
+// normalize-stderr-64bit "18446744073709551615" -> "SIZE"
+// normalize-stderr-32bit "4294967295" -> "SIZE"
+
+#![crate_type = "rlib"]
+
+pub struct Foo<T>([T; usize::MAX]);
+
+pub fn foo() -> usize {
+ std::mem::size_of::<Foo<u8>>()
+}
--- /dev/null
+error: values of the type `[u8; SIZE]` are too big for the current architecture
+
+error: aborting due to previous error
+
--- /dev/null
+// Make sure the compiler does not ICE when trying to generate the debuginfo name of a type that
+// causes a layout error.
+// This version of the test already ICE'd before the commit that introduce the ICE described in
+// https://github.com/rust-lang/rust/issues/94961.
+
+// compile-flags:-C debuginfo=2
+// build-fail
+// error-pattern: too big for the current architecture
+// normalize-stderr-64bit "18446744073709551615" -> "SIZE"
+// normalize-stderr-32bit "4294967295" -> "SIZE"
+
+#![crate_type = "rlib"]
+
+pub enum Foo<T> {
+ Bar([T; usize::MAX]),
+}
+
+pub fn foo() -> usize {
+ std::mem::size_of::<Foo<u8>>()
+}
--- /dev/null
+error: values of the type `[u8; SIZE]` are too big for the current architecture
+
+error: aborting due to previous error
+
--- /dev/null
+// build-pass
+// compile-flags: -Cdebuginfo=2 --crate-type=rlib
+// Fixes issue #94998
+
+pub trait Trait {}
+
+pub fn run(_: &dyn FnOnce(&()) -> Box<dyn Trait + '_>) {}
| ^^^^^^^^^^^^^^^^^^
|
= help: add `#![feature(deprecated_suggestion)]` to the crate root
- = note: see #XXX for more details
+ = note: see #94785 for more details
error: aborting due to previous error
LL | Bar::<NotClone> { x: 1 }.clone();
| ^^^^^ method cannot be called on `Bar<NotClone>` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `Clone` for `_`:
- `NotClone: Clone`
+note: trait bound `NotClone: Clone` was not satisfied
--> $DIR/derive-assoc-type-not-impl.rs:6:10
|
LL | #[derive(Clone)]
- | ^^^^^
+ | ^^^^^ unsatisfied trait bound introduced in this `derive` macro
+ = note: the following trait bounds were not satisfied:
+ `NotClone: Clone`
+ which is required by `Bar<NotClone>: Clone`
= help: items from traits can only be used if the trait is implemented and in scope
= note: the following trait defines an item `clone`, perhaps you need to implement it:
candidate #1: `Clone`
- = note: this error originates in the derive macro `Clone` (in Nightly builds, run with -Z macro-backtrace for more info)
help: consider annotating `NotClone` with `#[derive(Clone)]`
|
LL | #[derive(Clone)]
--> $DIR/default-match-bindings-forbidden.rs:4:5
|
LL | (x, y) = &(1, 2);
- | ^^^^^^ expected reference, found tuple
+ | ^^^^^^ ------- this expression has type `&({integer}, {integer})`
+ | |
+ | expected reference, found tuple
|
= note: expected type `&({integer}, {integer})`
found tuple `(_, _)`
--> $DIR/tuple_destructure_fail.rs:6:5
|
LL | (a, a, b) = (1, 2);
- | ^^^^^^^^^ expected a tuple with 2 elements, found one with 3 elements
+ | ^^^^^^^^^ ------ this expression has type `({integer}, {integer})`
+ | |
+ | expected a tuple with 2 elements, found one with 3 elements
|
= note: expected type `({integer}, {integer})`
found tuple `(_, _, _)`
--> $DIR/tuple_destructure_fail.rs:8:5
|
LL | (_,) = (1, 2);
- | ^^^^ expected a tuple with 2 elements, found one with 1 element
+ | ^^^^ ------ this expression has type `({integer}, {integer})`
+ | |
+ | expected a tuple with 2 elements, found one with 1 element
|
= note: expected type `({integer}, {integer})`
found tuple `(_,)`
// Test that the recursion limit can be changed and that the compiler
// suggests a fix. In this case, we have a long chain of Deref impls
// which will cause an overflow during the autoderef loop.
+// compile-flags: -Zdeduplicate-diagnostics=yes
#![allow(dead_code)]
#![recursion_limit="10"]
error[E0055]: reached the recursion limit while auto-dereferencing `J`
- --> $DIR/recursion_limit_deref.rs:50:22
+ --> $DIR/recursion_limit_deref.rs:51:22
|
LL | let x: &Bottom = &t;
| ^^ deref recursion limit reached
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "20"]` attribute to your crate (`recursion_limit_deref`)
error[E0308]: mismatched types
- --> $DIR/recursion_limit_deref.rs:50:22
+ --> $DIR/recursion_limit_deref.rs:51:22
|
LL | let x: &Bottom = &t;
| ------- ^^ expected struct `Bottom`, found struct `Top`
--- /dev/null
+// aux-build:empty-struct.rs
+
+extern crate empty_struct;
+
+fn main() {
+ let empty_struct::XEmpty2 = (); //~ ERROR mismatched types
+ let empty_struct::XEmpty6(..) = (); //~ ERROR mismatched types
+}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/issue-37026.rs:6:9
+ |
+LL | let empty_struct::XEmpty2 = ();
+ | ^^^^^^^^^^^^^^^^^^^^^ -- this expression has type `()`
+ | |
+ | expected `()`, found struct `XEmpty2`
+
+error[E0308]: mismatched types
+ --> $DIR/issue-37026.rs:7:9
+ |
+LL | let empty_struct::XEmpty6(..) = ();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ -- this expression has type `()`
+ | |
+ | expected `()`, found struct `XEmpty6`
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0308`.
--- /dev/null
+// check-pass
+// aux-build:empty-struct.rs
+
+#[no_link]
+extern crate empty_struct;
+
+fn main() {
+ empty_struct::XEmpty1 {};
+}
--> $DIR/E0396-fixed.rs:5:28
|
LL | const VALUE: u8 = unsafe { *REG_ADDR };
- | ^^^^^^^^^ 0x5f3759df is not a valid pointer
+ | ^^^^^^^^^ dereferencing pointer failed: 0x5f3759df is not a valid pointer
error: aborting due to previous error
| ^^^^^^^^^^^^^^^^
|
= note: `#[warn(incomplete_features)]` on by default
- = note: see issue #44580 <https://github.com/rust-lang/rust/issues/44580> for more information
+ = note: see issue #95174 <https://github.com/rust-lang/rust/issues/95174> for more information
error[E0771]: use of non-static lifetime `'a` in const generic
--> $DIR/E0771.rs:4:41
}
// The limit for "too big for the current architecture" is dependent on the target pointer size
-// however it's artifically limited on 64 bits
+// however it's artificially limited on 64 bits
// logic copied from rustc_target::abi::TargetDataLayout::obj_size_bound()
const fn max_size() -> usize {
#[cfg(target_pointer_width = "16")]
--- /dev/null
+#[deprecated_safe(since = "TBD", note = "...")] //~ ERROR: the `#[deprecated_safe]` attribute is an experimental feature
+unsafe fn deprecated_safe_fn() {}
+
+#[deprecated_safe(since = "TBD", note = "...")] //~ ERROR: the `#[deprecated_safe]` attribute is an experimental feature
+unsafe trait DeprecatedSafeTrait {}
+
+fn main() {}
--- /dev/null
+error[E0658]: the `#[deprecated_safe]` attribute is an experimental feature
+ --> $DIR/feature-gate-deprecated_safe.rs:1:1
+ |
+LL | #[deprecated_safe(since = "TBD", note = "...")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: see issue #94978 <https://github.com/rust-lang/rust/issues/94978> for more information
+ = help: add `#![feature(deprecated_safe)]` to the crate attributes to enable
+
+error[E0658]: the `#[deprecated_safe]` attribute is an experimental feature
+ --> $DIR/feature-gate-deprecated_safe.rs:4:1
+ |
+LL | #[deprecated_safe(since = "TBD", note = "...")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: see issue #94978 <https://github.com/rust-lang/rust/issues/94978> for more information
+ = help: add `#![feature(deprecated_safe)]` to the crate attributes to enable
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+// check-pass
+// compile-flags: -Zdrop-tracking
+#![feature(generators, negative_impls)]
+
+struct Client;
+
+impl !Sync for Client {}
+
+fn status(_client_status: &Client) -> i16 {
+ 200
+}
+
+fn assert_send<T: Send>(_thing: T) {}
+
+// This is the same bug as issue 57017, but using yield instead of await
+fn main() {
+ let client = Client;
+ let g = move || match status(&client) {
+ _status => yield,
+ };
+ assert_send(g);
+}
-error[E0271]: type mismatch resolving `<impl Future<Output = [async output]> as Future>::Output == impl Stream<Item = Repr>`
+error[E0271]: type mismatch resolving `<impl Future as Future>::Output == impl Stream<Item = Repr>`
--> $DIR/issue-89008.rs:39:43
|
LL | type LineStream<'a, Repr> = impl Stream<Item = Repr>;
}
impl<T: X<Y<i32> = i32>> M for T {}
-//~^ NOTE the following trait bounds were not satisfied
+//~^ NOTE trait bound `<S as X>::Y<i32> = i32` was not satisfied
+//~| NOTE unsatisfied trait bound introduced here
+//~| NOTE
+//~| NOTE
struct S;
//~^ NOTE method `f` not found for this
error[E0599]: the method `f` exists for struct `S`, but its trait bounds were not satisfied
- --> $DIR/method-unsatified-assoc-type-predicate.rs:27:7
+ --> $DIR/method-unsatified-assoc-type-predicate.rs:30:7
|
LL | struct S;
| ---------
LL | a.f();
| ^ method cannot be called on `S` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `M` for `_`:
- `<S as X>::Y<i32> = i32`
- --> $DIR/method-unsatified-assoc-type-predicate.rs:14:26
+note: trait bound `<S as X>::Y<i32> = i32` was not satisfied
+ --> $DIR/method-unsatified-assoc-type-predicate.rs:14:11
|
LL | impl<T: X<Y<i32> = i32>> M for T {}
- | ^ ^
+ | ^^^^^^^^^^^^ - -
+ | |
+ | unsatisfied trait bound introduced here
error: aborting due to previous error
| - ^ expected one of `,`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | fn f2<'a>(arg : Box<dyn X< { 1 }> = 32 >>) {}
+ | +
error: aborting due to 2 previous errors
| - ^ expected one of 8 possible tokens
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | fn f1<'a>(arg : Box<dyn X<X::Y> = u32>>) {}
+ | +
error: expected one of `,`, `::`, `:`, or `>`, found `=`
--> $DIR/trait-path-segments.rs:19:35
| - ^ expected one of `,`, `::`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | impl<T : X<<Self as X>::Y<'a>> = &'a u32>> Z for T {}
+ | +
error: expected one of `!`, `+`, `,`, `::`, `:`, or `>`, found `=`
--> $DIR/trait-path-segments.rs:30:25
| - ^ expected one of `!`, `+`, `,`, `::`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | impl<T : X<X::Y<'a>> = &'a u32>> Z for T {}
+ | +
error: aborting due to 3 previous errors
| - ^ expected one of `,`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | fn f<'a>(arg : Box<dyn X< [u8; 1]> = u32>>) {}
+ | +
error: expected one of `,`, `:`, or `>`, found `=`
--> $DIR/trait-path-types.rs:13:37
| - ^ expected one of `,`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | fn f1<'a>(arg : Box<dyn X<(Y<'a>)> = &'a ()>>) {}
+ | +
error: expected one of `,`, `:`, or `>`, found `=`
--> $DIR/trait-path-types.rs:18:33
| -- ^ expected one of `,`, `:`, or `>`
| |
| maybe try to close unmatched angle bracket
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | fn f1<'a>(arg : Box<dyn X< 'a> = u32 >>) {}
+ | +
error: aborting due to 3 previous errors
--- /dev/null
+// run-rustfix
+
+#[allow(unused)]
+struct Struct<T>(T);
+
+impl<T: Iterator> Struct<T> where <T as std:: iter::Iterator>::Item: std::fmt::Display {
+//~^ ERROR expected `:` followed by trait or lifetime
+//~| HELP use single colon
+}
+
+fn main() {}
--- /dev/null
+// run-rustfix
+
+#[allow(unused)]
+struct Struct<T>(T);
+
+impl<T: Iterator> Struct<T> where <T as std:: iter::Iterator>::Item:: std::fmt::Display {
+//~^ ERROR expected `:` followed by trait or lifetime
+//~| HELP use single colon
+}
+
+fn main() {}
--- /dev/null
+error: expected `:` followed by trait or lifetime
+ --> $DIR/issue-95208-ignore-qself.rs:6:88
+ |
+LL | impl<T: Iterator> Struct<T> where <T as std:: iter::Iterator>::Item:: std::fmt::Display {
+ | --- ^
+ | |
+ | help: use single colon: `:`
+
+error: aborting due to previous error
+
--- /dev/null
+// run-rustfix
+
+#[allow(unused)]
+struct Struct<T>(T);
+
+impl<T> Struct<T> where T: std::fmt::Display {
+//~^ ERROR expected `:` followed by trait or lifetime
+//~| HELP use single colon
+}
+
+fn main() {}
--- /dev/null
+// run-rustfix
+
+#[allow(unused)]
+struct Struct<T>(T);
+
+impl<T> Struct<T> where T:: std::fmt::Display {
+//~^ ERROR expected `:` followed by trait or lifetime
+//~| HELP use single colon
+}
+
+fn main() {}
--- /dev/null
+error: expected `:` followed by trait or lifetime
+ --> $DIR/issue-95208.rs:6:46
+ |
+LL | impl<T> Struct<T> where T:: std::fmt::Display {
+ | --- ^
+ | |
+ | help: use single colon: `:`
+
+error: aborting due to previous error
+
--- /dev/null
+pub mod foo {
+ pub mod bar {
+ pub struct A;
+ }
+}
+
+pub struct Foo {
+ a: Vec<foo::bar:A>,
+ //~^ ERROR expected
+ //~| HELP path separator
+}
+
+fn main() {}
--- /dev/null
+error: expected one of `,` or `>`, found `:`
+ --> $DIR/single-colon-path-not-const-generics.rs:8:18
+ |
+LL | a: Vec<foo::bar:A>,
+ | ^
+ | |
+ | expected one of `,` or `>`
+ | help: write a path separator here: `::`
+
+error: aborting due to previous error
+
error[E0308]: mismatched types
--> $DIR/exclusive_range_pattern_syntax_collision.rs:6:13
|
+LL | match [5..4, 99..105, 43..44] {
+ | ----------------------- this expression has type `[std::ops::Range<{integer}>; 3]`
LL | [_, 99.., _] => {},
| ^^ expected struct `std::ops::Range`, found integer
|
error[E0308]: mismatched types
--> $DIR/exclusive_range_pattern_syntax_collision2.rs:6:13
|
+LL | match [5..4, 99..105, 43..44] {
+ | ----------------------- this expression has type `[std::ops::Range<{integer}>; 3]`
LL | [_, 99..] => {},
| ^^ expected struct `std::ops::Range`, found integer
|
error[E0308]: mismatched types
--> $DIR/exclusive_range_pattern_syntax_collision3.rs:6:12
|
+LL | match [5..4, 99..105, 43..44] {
+ | ----------------------- this expression has type `[std::ops::Range<{integer}>; 3]`
LL | [..9, 99..100, _] => {},
| ^ expected struct `std::ops::Range`, found integer
|
error[E0308]: mismatched types
--> $DIR/exclusive_range_pattern_syntax_collision3.rs:6:15
|
+LL | match [5..4, 99..105, 43..44] {
+ | ----------------------- this expression has type `[std::ops::Range<{integer}>; 3]`
LL | [..9, 99..100, _] => {},
| ^^ --- this is of type `{integer}`
| |
error[E0308]: mismatched types
--> $DIR/exclusive_range_pattern_syntax_collision3.rs:6:19
|
+LL | match [5..4, 99..105, 43..44] {
+ | ----------------------- this expression has type `[std::ops::Range<{integer}>; 3]`
LL | [..9, 99..100, _] => {},
| -- ^^^ expected struct `std::ops::Range`, found integer
| |
LL ~ '\u{10fffe}'..='\u{10ffff}' => todo!() }
|
-error[E0004]: non-exhaustive patterns: `'\u{0}'` not covered
+error[E0004]: non-exhaustive patterns: `'\0'` not covered
--> $DIR/half-open-range-pats-exhaustive-fail.rs:28:8
|
LL | m!('a', ALMOST_MIN..);
- | ^^^ pattern `'\u{0}'` not covered
+ | ^^^ pattern `'\0'` not covered
|
= note: the matched value is of type `char`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
LL ~ match $s { $($t)+ => {}
-LL ~ '\u{0}' => todo!() }
+LL ~ '\0' => todo!() }
|
error[E0004]: non-exhaustive patterns: `'\u{10ffff}'` not covered
error[E0308]: mismatched types
--> $DIR/pat-tuple-5.rs:8:10
|
+LL | match (0, 1) {
+ | ------ this expression has type `({integer}, {integer})`
LL | (PAT ..) => {}
| ^^^ expected tuple, found `u8`
|
LL | let filter = map.filterx(|x: &_| true);
| ^^^^^^^ method cannot be called on `Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `StreamExt` for `_`:
- `&'a mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
+note: the following trait bounds were not satisfied:
`&'a mut &Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
`&'a mut &mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
- --> $DIR/issue-30786.rs:105:9
+ `&'a mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
+ --> $DIR/issue-30786.rs:105:50
|
LL | impl<T> StreamExt for T where for<'a> &'a mut T: Stream {}
- | ^^^^^^^^^ ^
+ | --------- - ^^^^^^ unsatisfied trait bound introduced here
error[E0599]: the method `countx` exists for struct `Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>`, but its trait bounds were not satisfied
--> $DIR/issue-30786.rs:140:24
LL | let count = filter.countx();
| ^^^^^^ method cannot be called on `Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `StreamExt` for `_`:
- `&'a mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
+note: the following trait bounds were not satisfied:
`&'a mut &Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
`&'a mut &mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
- --> $DIR/issue-30786.rs:105:9
+ `&'a mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
+ --> $DIR/issue-30786.rs:105:50
|
LL | impl<T> StreamExt for T where for<'a> &'a mut T: Stream {}
- | ^^^^^^^^^ ^
+ | --------- - ^^^^^^ unsatisfied trait bound introduced here
error: aborting due to 2 previous errors
LL | let filter = map.filterx(|x: &_| true);
| ^^^^^^^ method cannot be called on `Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `StreamExt` for `_`:
- `&'a mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
+note: the following trait bounds were not satisfied:
`&'a mut &Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
`&'a mut &mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
- --> $DIR/issue-30786.rs:105:9
+ `&'a mut Map<Repeat, [closure@$DIR/issue-30786.rs:126:27: 126:36]>: Stream`
+ --> $DIR/issue-30786.rs:105:50
|
LL | impl<T> StreamExt for T where for<'a> &'a mut T: Stream {}
- | ^^^^^^^^^ ^
+ | --------- - ^^^^^^ unsatisfied trait bound introduced here
error[E0599]: the method `countx` exists for struct `Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>`, but its trait bounds were not satisfied
--> $DIR/issue-30786.rs:140:24
LL | let count = filter.countx();
| ^^^^^^ method cannot be called on `Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `StreamExt` for `_`:
- `&'a mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
+note: the following trait bounds were not satisfied:
`&'a mut &Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
`&'a mut &mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
- --> $DIR/issue-30786.rs:105:9
+ `&'a mut Filter<Map<Repeat, for<'r> fn(&'r u64) -> &'r u64 {identity::<u64>}>, [closure@$DIR/issue-30786.rs:139:30: 139:42]>: Stream`
+ --> $DIR/issue-30786.rs:105:50
|
LL | impl<T> StreamExt for T where for<'a> &'a mut T: Stream {}
- | ^^^^^^^^^ ^
+ | --------- - ^^^^^^ unsatisfied trait bound introduced here
error: aborting due to 2 previous errors
//~| expected `i32`, found `u32`
}
-fn sum_to(n: u32) -> impl Foo { //~ ERROR type annotations needed
+fn sum_to(n: u32) -> impl Foo {
if n == 0 {
0
} else {
|
= help: the trait `Add<impl Foo>` is not implemented for `u32`
-error[E0283]: type annotations needed
- --> $DIR/equality.rs:20:22
- |
-LL | fn sum_to(n: u32) -> impl Foo {
- | ^^^^^^^^ cannot infer type for type `{integer}`
- |
- = note: multiple `impl`s satisfying `{integer}: ToString` found in the `alloc` crate:
- - impl ToString for i8;
- - impl ToString for u8;
-note: required because of the requirements on the impl of `Foo` for `{integer}`
- --> $DIR/equality.rs:5:26
- |
-LL | impl<T: Copy + ToString> Foo for T {}
- | ^^^ ^
-
-error: aborting due to 3 previous errors; 1 warning emitted
+error: aborting due to 2 previous errors; 1 warning emitted
-Some errors have detailed explanations: E0277, E0283, E0308.
+Some errors have detailed explanations: E0277, E0308.
For more information about an error, try `rustc --explain E0277`.
error: aborting due to 5 previous errors
-For more information about this error, try `rustc --explain E0223`.
+Some errors have detailed explanations: E0223, E0667.
+For more information about an error, try `rustc --explain E0223`.
type E = impl std::marker::Copy;
fn foo<T>() -> Self::E {
//~^ ERROR type parameter `T` is part of concrete type but not used in parameter list for the `impl Trait` type alias
- //~| ERROR the trait bound `impl Future<Output = [async output]>: Copy` is not satisfied
+ //~| ERROR the trait bound `impl Future: Copy` is not satisfied
async {}
}
}
-error[E0277]: the trait bound `impl Future<Output = [async output]>: Copy` is not satisfied
+error[E0277]: the trait bound `impl Future: Copy` is not satisfied
--> $DIR/issue-55872-2.rs:13:20
|
LL | fn foo<T>() -> Self::E {
- | ^^^^^^^ the trait `Copy` is not implemented for `impl Future<Output = [async output]>`
+ | ^^^^^^^ the trait `Copy` is not implemented for `impl Future`
error: type parameter `T` is part of concrete type but not used in parameter list for the `impl Trait` type alias
--> $DIR/issue-55872-2.rs:13:28
error: aborting due to previous error
+For more information about this error, try `rustc --explain E0667`.
| ------------------------------- the found opaque type
|
= note: expected opaque type `impl Future<Output = u8>`
- found opaque type `impl Future<Output = [async output]>`
+ found opaque type `impl Future`
= note: distinct uses of `impl Trait` result in different opaque types
error: aborting due to previous error
-// Test that multiple liftimes are allowed in impl trait types.
+// Test that multiple lifetimes are allowed in impl trait types.
// build-pass (FIXME(62277): could be check-pass?)
trait X<'x>: Sized {}
// error-pattern: reached the recursion limit while auto-dereferencing
-
-
+// compile-flags: -Zdeduplicate-diagnostics=yes
use std::ops::Deref;
error[E0308]: mismatched types
- --> $DIR/infinite-autoderef.rs:20:13
+ --> $DIR/infinite-autoderef.rs:19:13
|
LL | x = Box::new(x);
| ^^^^^^^^^^^ cyclic type of infinite size
| +
error[E0055]: reached the recursion limit while auto-dereferencing `Foo`
- --> $DIR/infinite-autoderef.rs:25:5
+ --> $DIR/infinite-autoderef.rs:24:5
|
LL | Foo.foo;
| ^^^^^^^ deref recursion limit reached
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`infinite_autoderef`)
error[E0055]: reached the recursion limit while auto-dereferencing `Foo`
- --> $DIR/infinite-autoderef.rs:25:9
+ --> $DIR/infinite-autoderef.rs:24:9
|
LL | Foo.foo;
| ^^^ deref recursion limit reached
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`infinite_autoderef`)
error[E0609]: no field `foo` on type `Foo`
- --> $DIR/infinite-autoderef.rs:25:9
+ --> $DIR/infinite-autoderef.rs:24:9
|
LL | Foo.foo;
| ^^^ unknown field
error[E0055]: reached the recursion limit while auto-dereferencing `Foo`
- --> $DIR/infinite-autoderef.rs:26:9
+ --> $DIR/infinite-autoderef.rs:25:9
|
LL | Foo.bar();
| ^^^ deref recursion limit reached
= help: consider increasing the recursion limit by adding a `#![recursion_limit = "256"]` attribute to your crate (`infinite_autoderef`)
error[E0599]: no method named `bar` found for struct `Foo` in the current scope
- --> $DIR/infinite-autoderef.rs:26:9
+ --> $DIR/infinite-autoderef.rs:25:9
|
LL | struct Foo;
| ----------- method `bar` not found for this
note: required by a bound in `const_eval_select`
--> $SRC_DIR/core/src/intrinsics.rs:LL:COL
|
-LL | G: FnOnce<ARG, Output = RET> + ~const Drop,
+LL | G: FnOnce<ARG, Output = RET> + ~const Drop + ~const Destruct,
| ^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `const_eval_select`
error[E0271]: type mismatch resolving `<fn(i32) -> bool {bar} as FnOnce<(i32,)>>::Output == i32`
note: required by a bound in `const_eval_select`
--> $SRC_DIR/core/src/intrinsics.rs:LL:COL
|
-LL | G: FnOnce<ARG, Output = RET> + ~const Drop,
+LL | G: FnOnce<ARG, Output = RET> + ~const Drop + ~const Destruct,
| ^^^^^^^^^^^^ required by this bound in `const_eval_select`
error[E0631]: type mismatch in function arguments
+++ /dev/null
-pub struct XEmpty1 {}
-pub struct XEmpty2;
-pub struct XEmpty6();
-
-pub enum XE {
- XEmpty3 {},
- XEmpty4,
- XEmpty5(),
-}
error[E0308]: mismatched types
--> $DIR/issue-11844.rs:6:9
|
+LL | match a {
+ | - this expression has type `Option<Box<{integer}>>`
LL | Ok(a) =>
| ^^^^^ expected enum `Option`, found enum `Result`
|
error[E0308]: mismatched types
--> $DIR/issue-12552.rs:6:5
|
+LL | match t {
+ | - this expression has type `Result<_, {integer}>`
LL | Some(k) => match k {
| ^^^^^^^ expected enum `Result`, found enum `Option`
|
error[E0308]: mismatched types
--> $DIR/issue-12552.rs:9:5
|
+LL | match t {
+ | - this expression has type `Result<_, {integer}>`
+...
LL | None => ()
| ^^^^ expected enum `Result`, found enum `Option`
|
error[E0308]: mismatched types
--> $DIR/issue-13466.rs:8:9
|
+LL | let _x: usize = match Some(1) {
+ | ------- this expression has type `Option<{integer}>`
LL | Ok(u) => u,
| ^^^^^ expected enum `Option`, found enum `Result`
|
error[E0308]: mismatched types
--> $DIR/issue-13466.rs:14:9
|
+LL | let _x: usize = match Some(1) {
+ | ------- this expression has type `Option<{integer}>`
+...
LL | Err(e) => panic!(e)
| ^^^^^^ expected enum `Option`, found enum `Result`
|
// check-pass
-#[derive(Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+#[derive(Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Clone, Copy)]
struct Array<T> {
f00: [T; 00],
f01: [T; 01],
f32: [T; 32],
}
-// FIXME(#44580): merge with `Array` once `[T; N]: Clone` where `T: Clone`
-#[derive(Clone, Copy)]
-struct CopyArray<T: Copy> {
- f00: [T; 00],
- f01: [T; 01],
- f02: [T; 02],
- f03: [T; 03],
- f04: [T; 04],
- f05: [T; 05],
- f06: [T; 06],
- f07: [T; 07],
- f08: [T; 08],
- f09: [T; 09],
- f10: [T; 10],
- f11: [T; 11],
- f12: [T; 12],
- f13: [T; 13],
- f14: [T; 14],
- f15: [T; 15],
- f16: [T; 16],
- f17: [T; 17],
- f18: [T; 18],
- f19: [T; 19],
- f20: [T; 20],
- f21: [T; 21],
- f22: [T; 22],
- f23: [T; 23],
- f24: [T; 24],
- f25: [T; 25],
- f26: [T; 26],
- f27: [T; 27],
- f28: [T; 28],
- f29: [T; 29],
- f30: [T; 30],
- f31: [T; 31],
- f32: [T; 32],
-}
-
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct Fn<A, B, C, D, E, F, G, H, I, J, K, L> {
f00: fn(),
| ------------ unit struct defined here
...
LL | let Test = 1;
- | ^^^^
+ | ^^^^ - this expression has type `{integer}`
| |
| expected integer, found struct `Test`
| `Test` is interpreted as a unit struct, not a new binding
+// compile-flags: -Zdeduplicate-diagnostics=yes
+
use std::collections::HashMap;
fn main() {
for _ in HashMap::new().iter().cloned() {} //~ ERROR type mismatch
//~^ ERROR type mismatch
+ //~| ERROR type mismatch
}
error[E0271]: type mismatch resolving `<std::collections::hash_map::Iter<'_, _, _> as Iterator>::Item == &_`
- --> $DIR/issue-33941.rs:4:36
+ --> $DIR/issue-33941.rs:6:36
|
LL | for _ in HashMap::new().iter().cloned() {}
| ^^^^^^ expected reference, found tuple
| ^^^^^^^^^^^^ required by this bound in `cloned`
error[E0271]: type mismatch resolving `<std::collections::hash_map::Iter<'_, _, _> as Iterator>::Item == &_`
- --> $DIR/issue-33941.rs:4:14
+ --> $DIR/issue-33941.rs:6:14
|
LL | for _ in HashMap::new().iter().cloned() {}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected tuple, found reference
= note: required because of the requirements on the impl of `Iterator` for `Cloned<std::collections::hash_map::Iter<'_, _, _>>`
= note: required because of the requirements on the impl of `IntoIterator` for `Cloned<std::collections::hash_map::Iter<'_, _, _>>`
-error: aborting due to 2 previous errors
+error[E0271]: type mismatch resolving `<std::collections::hash_map::Iter<'_, _, _> as Iterator>::Item == &_`
+ --> $DIR/issue-33941.rs:6:14
+ |
+LL | for _ in HashMap::new().iter().cloned() {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected tuple, found reference
+ |
+ = note: expected tuple `(&_, &_)`
+ found reference `&_`
+ = note: required because of the requirements on the impl of `Iterator` for `Cloned<std::collections::hash_map::Iter<'_, _, _>>`
+
+error: aborting due to 3 previous errors
For more information about this error, try `rustc --explain E0271`.
| | |
| | maybe try to close unmatched angle bracket
| while parsing the type for `sr`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let sr: Vec<(u32, _, _)> = vec![];
+ | +
error[E0277]: a value of type `Vec<(u32, _, _)>` cannot be built from an iterator over elements of type `()`
--> $DIR/issue-34334.rs:5:87
error[E0308]: mismatched types
--> $DIR/issue-3680.rs:3:9
|
+LL | match None {
+ | ---- this expression has type `Option<_>`
LL | Err(_) => ()
| ^^^^^^ expected enum `Option`, found enum `Result`
|
+++ /dev/null
-// aux-build:empty-struct.rs
-
-extern crate empty_struct;
-
-fn main() {
- let empty_struct::XEmpty2 = (); //~ ERROR mismatched types
- let empty_struct::XEmpty6(..) = (); //~ ERROR mismatched types
-}
+++ /dev/null
-error[E0308]: mismatched types
- --> $DIR/issue-37026.rs:6:9
- |
-LL | let empty_struct::XEmpty2 = ();
- | ^^^^^^^^^^^^^^^^^^^^^ -- this expression has type `()`
- | |
- | expected `()`, found struct `XEmpty2`
-
-error[E0308]: mismatched types
- --> $DIR/issue-37026.rs:7:9
- |
-LL | let empty_struct::XEmpty6(..) = ();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ -- this expression has type `()`
- | |
- | expected `()`, found struct `XEmpty6`
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0308`.
| ------------------------------- constant defined here
LL | fn main() {
LL | match 42 { A => () }
- | ^
- | |
- | expected integer, found tuple
- | `A` is interpreted as a constant, not a new binding
- | help: introduce a new binding instead: `other_a`
+ | -- ^
+ | | |
+ | | expected integer, found tuple
+ | | `A` is interpreted as a constant, not a new binding
+ | | help: introduce a new binding instead: `other_a`
+ | this expression has type `{integer}`
|
= note: expected type `{integer}`
found tuple `(isize, isize)`
+++ /dev/null
-fn main() {
- let ref my_ref @ _ = 0;
- *my_ref = 0; //~ ERROR cannot assign to `*my_ref`, which is behind a `&` reference [E0594]
-}
+++ /dev/null
-error[E0594]: cannot assign to `*my_ref`, which is behind a `&` reference
- --> $DIR/issue-51244.rs:3:5
- |
-LL | let ref my_ref @ _ = 0;
- | -------------- help: consider changing this to be a mutable reference: `ref mut my_ref @ _`
-LL | *my_ref = 0;
- | ^^^^^^^^^^^ `my_ref` is a `&` reference, so the data it refers to cannot be written
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0594`.
--> $DIR/issue-66706.rs:2:5
|
LL | fn a() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> [i32; _]`
LL | [0; [|_: _ &_| ()].len()]
| ^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found array `[{integer}; _]`
--> $DIR/issue-66706.rs:14:5
|
LL | fn c() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> [i32; _]`
LL | [0; [|&_: _ &_| {}; 0 ].len()]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found array `[{integer}; _]`
--> $DIR/issue-66706.rs:20:5
|
LL | fn d() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> [i32; _]`
LL | [0; match [|f @ &ref _| () ] {} ]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found array `[{integer}; _]`
error[E0308]: mismatched types
--> $DIR/issue-72574-1.rs:4:9
|
+LL | match x {
+ | - this expression has type `({integer}, {integer}, {integer})`
LL | (_a, _x @ ..) => {}
| ^^^^^^^^^^^^^ expected a tuple with 3 elements, found one with 2 elements
|
fn arg_pat_closure_err() {
|x| String::from("x".as_ref()); //~ ERROR type annotations needed
- //~^ ERROR type annotations needed
//~| ERROR type annotations needed
}
LL | |x| String::from("x".as_ref());
| ^ consider giving this closure parameter a type
-error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:12:9
- |
-LL | |x| String::from("x".as_ref());
- | ^^^^^^^^^^^^ cannot infer type for reference `&_`
- |
- = note: multiple `impl`s satisfying `String: From<&_>` found in the `alloc` crate:
- - impl<> From<&String> for String;
- - impl<> From<&str> for String;
-
error[E0283]: type annotations needed
--> $DIR/issue-72690.rs:12:26
|
- impl AsRef<str> for str;
error[E0283]: type annotations needed for `&T`
- --> $DIR/issue-72690.rs:18:17
+ --> $DIR/issue-72690.rs:17:17
|
LL | let _ = "x".as_ref();
| - ^^^^^^ cannot infer type for type parameter `T` declared on the trait `AsRef`
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:22:5
+ --> $DIR/issue-72690.rs:21:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:22:22
+ --> $DIR/issue-72690.rs:21:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:29:5
+ --> $DIR/issue-72690.rs:28:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:29:22
+ --> $DIR/issue-72690.rs:28:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:38:5
+ --> $DIR/issue-72690.rs:37:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:38:22
+ --> $DIR/issue-72690.rs:37:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:47:5
+ --> $DIR/issue-72690.rs:46:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:47:22
+ --> $DIR/issue-72690.rs:46:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:54:5
+ --> $DIR/issue-72690.rs:53:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:54:22
+ --> $DIR/issue-72690.rs:53:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<str> for str;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:63:5
+ --> $DIR/issue-72690.rs:62:5
|
LL | String::from("x".as_ref());
| ^^^^^^^^^^^^ cannot infer type for reference `&_`
- impl<> From<&str> for String;
error[E0283]: type annotations needed
- --> $DIR/issue-72690.rs:63:22
+ --> $DIR/issue-72690.rs:62:22
|
LL | String::from("x".as_ref());
| ----^^^^^^--
- impl AsRef<[u8]> for str;
- impl AsRef<str> for str;
-error: aborting due to 18 previous errors
+error: aborting due to 17 previous errors
Some errors have detailed explanations: E0282, E0283.
For more information about an error, try `rustc --explain E0282`.
// run-rustfix
// rustfix-only-machine-applicable
+#[allow(unused_must_use)]
fn main() {
let small = [1, 2];
let big = [0u8; 33];
// run-rustfix
// rustfix-only-machine-applicable
+#[allow(unused_must_use)]
fn main() {
let small = [1, 2];
let big = [0u8; 33];
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:10:11
+ --> $DIR/into-iter-on-arrays-lint.rs:11:11
|
LL | small.into_iter();
| ^^^^^^^^^
| ++++++++++++++++++++++++ ~
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:13:12
+ --> $DIR/into-iter-on-arrays-lint.rs:14:12
|
LL | [1, 2].into_iter();
| ^^^^^^^^^
| ++++++++++++++++++++++++ ~
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:16:9
+ --> $DIR/into-iter-on-arrays-lint.rs:17:9
|
LL | big.into_iter();
| ^^^^^^^^^
| ++++++++++++++++++++++++ ~
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:19:15
+ --> $DIR/into-iter-on-arrays-lint.rs:20:15
|
LL | [0u8; 33].into_iter();
| ^^^^^^^^^
| ++++++++++++++++++++++++ ~
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:23:21
+ --> $DIR/into-iter-on-arrays-lint.rs:24:21
|
LL | Box::new(small).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:26:22
+ --> $DIR/into-iter-on-arrays-lint.rs:27:22
|
LL | Box::new([1, 2]).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:29:19
+ --> $DIR/into-iter-on-arrays-lint.rs:30:19
|
LL | Box::new(big).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:32:25
+ --> $DIR/into-iter-on-arrays-lint.rs:33:25
|
LL | Box::new([0u8; 33]).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:36:31
+ --> $DIR/into-iter-on-arrays-lint.rs:37:31
|
LL | Box::new(Box::new(small)).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:39:32
+ --> $DIR/into-iter-on-arrays-lint.rs:40:32
|
LL | Box::new(Box::new([1, 2])).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:42:29
+ --> $DIR/into-iter-on-arrays-lint.rs:43:29
|
LL | Box::new(Box::new(big)).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
= note: for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>
warning: this method call resolves to `<&[T; N] as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to <[T; N] as IntoIterator>::into_iter in Rust 2021
- --> $DIR/into-iter-on-arrays-lint.rs:45:35
+ --> $DIR/into-iter-on-arrays-lint.rs:46:35
|
LL | Box::new(Box::new([0u8; 33])).into_iter();
| ^^^^^^^^^ help: use `.iter()` instead of `.into_iter()` to avoid ambiguity: `iter`
--> $DIR/keyword-false-as-identifier.rs:2:9
|
LL | let false = 22;
- | ^^^^^ expected integer, found `bool`
+ | ^^^^^ -- this expression has type `{integer}`
+ | |
+ | expected integer, found `bool`
error: aborting due to previous error
--> $DIR/keyword-true-as-identifier.rs:2:9
|
LL | let true = 22;
- | ^^^^ expected integer, found `bool`
+ | ^^^^ -- this expression has type `{integer}`
+ | |
+ | expected integer, found `bool`
error: aborting due to previous error
--- /dev/null
+struct ErrorKind;
+struct Error(ErrorKind);
+impl Fn(&isize) for Error {
+ //~^ ERROR manual implementations of `Fn` are experimental [E0183]
+ //~^^ ERROR associated type bindings are not allowed here [E0229]
+ fn foo<const N: usize>(&self) -> Self::B<{N}>;
+ //~^ ERROR associated function in `impl` without body
+ //~^^ ERROR method `foo` is not a member of trait `Fn` [E0407]
+ //~^^^ ERROR associated type `B` not found for `Self` [E0220]
+}
+fn main() {}
--- /dev/null
+error: associated function in `impl` without body
+ --> $DIR/issue-95023.rs:6:5
+ |
+LL | fn foo<const N: usize>(&self) -> Self::B<{N}>;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^-
+ | |
+ | help: provide a definition for the function: `{ <body> }`
+
+error[E0407]: method `foo` is not a member of trait `Fn`
+ --> $DIR/issue-95023.rs:6:5
+ |
+LL | fn foo<const N: usize>(&self) -> Self::B<{N}>;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not a member of trait `Fn`
+
+error[E0183]: manual implementations of `Fn` are experimental
+ --> $DIR/issue-95023.rs:3:6
+ |
+LL | impl Fn(&isize) for Error {
+ | ^^^^^^^^^^ manual implementations of `Fn` are experimental
+ |
+ = help: add `#![feature(unboxed_closures)]` to the crate attributes to enable
+
+error[E0229]: associated type bindings are not allowed here
+ --> $DIR/issue-95023.rs:3:6
+ |
+LL | impl Fn(&isize) for Error {
+ | ^^^^^^^^^^ associated type not allowed here
+
+error[E0220]: associated type `B` not found for `Self`
+ --> $DIR/issue-95023.rs:6:44
+ |
+LL | fn foo<const N: usize>(&self) -> Self::B<{N}>;
+ | ^ associated type `B` not found
+
+error: aborting due to 5 previous errors
+
+Some errors have detailed explanations: E0183, E0220, E0229, E0407.
+For more information about an error, try `rustc --explain E0183`.
// Tests that subsequent lints specified via the command line override
-// each other, except for ForceWarn and Forbid, which cannot be overriden.
+// each other, except for ForceWarn and Forbid, which cannot be overridden.
//
// revisions: warn_deny forbid_warn force_warn_deny
//
LL | const foo: isize = 3;
| ^^^ help: convert the identifier to upper case (notice the capitalization): `FOO`
|
-note: the lint level is defined here
- --> $DIR/issue-17718-const-naming.rs:2:9
- |
-LL | #![deny(warnings)]
- | ^^^^^^^^
= note: `#[deny(non_upper_case_globals)]` implied by `#[deny(warnings)]`
error: aborting due to 2 previous errors
LL | static bad: isize = 1;
| ^^^ help: convert the identifier to upper case: `BAD`
|
-note: the lint level is defined here
- --> $DIR/lint-group-nonstandard-style.rs:10:14
- |
-LL | #[forbid(nonstandard_style)]
- | ^^^^^^^^^^^^^^^^^
= note: `#[forbid(non_upper_case_globals)]` implied by `#[forbid(nonstandard_style)]`
warning: function `CamelCase` should have a snake case name
LL | fn CamelCase() {}
| ^^^^^^^^^ help: convert the identifier to snake case: `camel_case`
|
-note: the lint level is defined here
- --> $DIR/lint-group-nonstandard-style.rs:18:17
- |
-LL | #![warn(nonstandard_style)]
- | ^^^^^^^^^^^^^^^^^
= note: `#[warn(non_snake_case)]` implied by `#[warn(nonstandard_style)]`
error: aborting due to 3 previous errors; 2 warnings emitted
--- /dev/null
+// check-pass
+// ignore-tidy-linelength
+
+#![feature(lint_reasons)]
+#![warn(unused_mut)]
+
+#![expect(unfulfilled_lint_expectations, reason = "idk why you would expect this")]
+//~^ WARNING this lint expectation is unfulfilled
+//~| NOTE `#[warn(unfulfilled_lint_expectations)]` on by default
+//~| NOTE idk why you would expect this
+//~| NOTE the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+
+#[expect(unfulfilled_lint_expectations, reason = "a local: idk why you would expect this")]
+//~^ WARNING this lint expectation is unfulfilled
+//~| NOTE a local: idk why you would expect this
+//~| NOTE the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+pub fn normal_test_fn() {
+ #[expect(unused_mut, reason = "this expectation will create a diagnostic with the default lint level")]
+ //~^ WARNING this lint expectation is unfulfilled
+ //~| NOTE this expectation will create a diagnostic with the default lint level
+ let mut v = vec![1, 1, 2, 3, 5];
+ v.sort();
+
+ // Check that lint lists including `unfulfilled_lint_expectations` are also handled correctly
+ #[expect(unused, unfulfilled_lint_expectations, reason = "the expectation for `unused` should be fulfilled")]
+ //~^ WARNING this lint expectation is unfulfilled
+ //~| NOTE the expectation for `unused` should be fulfilled
+ //~| NOTE the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+ let value = "I'm unused";
+}
+
+#[expect(warnings, reason = "this suppresses all warnings and also suppresses itself. No warning will be issued")]
+pub fn expect_warnings() {
+ // This lint trigger will be suppressed
+ #[warn(unused_mut)]
+ let mut v = vec![1, 1, 2, 3, 5];
+}
+
+fn main() {}
--- /dev/null
+warning: this lint expectation is unfulfilled
+ --> $DIR/expect_unfulfilled_expectation.rs:7:11
+ |
+LL | #![expect(unfulfilled_lint_expectations, reason = "idk why you would expect this")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(unfulfilled_lint_expectations)]` on by default
+ = note: idk why you would expect this
+ = note: the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+
+warning: this lint expectation is unfulfilled
+ --> $DIR/expect_unfulfilled_expectation.rs:13:10
+ |
+LL | #[expect(unfulfilled_lint_expectations, reason = "a local: idk why you would expect this")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: a local: idk why you would expect this
+ = note: the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+
+warning: this lint expectation is unfulfilled
+ --> $DIR/expect_unfulfilled_expectation.rs:18:14
+ |
+LL | #[expect(unused_mut, reason = "this expectation will create a diagnostic with the default lint level")]
+ | ^^^^^^^^^^
+ |
+ = note: this expectation will create a diagnostic with the default lint level
+
+warning: this lint expectation is unfulfilled
+ --> $DIR/expect_unfulfilled_expectation.rs:25:22
+ |
+LL | #[expect(unused, unfulfilled_lint_expectations, reason = "the expectation for `unused` should be fulfilled")]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: the expectation for `unused` should be fulfilled
+ = note: the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+
+warning: 4 warnings emitted
+
LL | hours_are_suns = false;
| ^^^^^^^^^^^^^^
|
-note: the lint level is defined here
- --> $DIR/issue-47390-unused-variable-in-struct-pattern.rs:5:9
- |
-LL | #![warn(unused)] // UI tests pass `-A unused` (#43896)
- | ^^^^^^
= note: `#[warn(unused_assignments)]` implied by `#[warn(unused)]`
= help: maybe it is overwritten before being read?
| |
| help: remove this `mut`
|
-note: the lint level is defined here
- --> $DIR/issue-47390-unused-variable-in-struct-pattern.rs:5:9
- |
-LL | #![warn(unused)] // UI tests pass `-A unused` (#43896)
- | ^^^^^^
= note: `#[warn(unused_mut)]` implied by `#[warn(unused)]`
warning: variable does not need to be mutable
LL | b += 1;
| ^
|
-note: the lint level is defined here
- --> $DIR/liveness-consts.rs:2:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(unused_assignments)]` implied by `#[warn(unused)]`
= help: maybe it is overwritten before being read?
LL | last = Some(s);
| ^^^^
|
-note: the lint level is defined here
- --> $DIR/liveness-upvars.rs:4:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(unused_variables)]` implied by `#[warn(unused)]`
= help: did you mean to capture by reference instead?
--- /dev/null
+// check-pass
+
+// This is a valid macro. Commit 4 in #95159 broke things such that it failed
+// with a "missing tokens in macro arguments" error, as reported in #95267.
+macro_rules! f {
+ (
+ /// ab
+ ) => {};
+}
+
+fn main() {
+ f!();
+}
--- /dev/null
+// run-pass
+
+#![feature(macro_metavar_expr)]
+
+fn main() {
+ macro_rules! one_nested_count_and_length {
+ ( $( [ $( $l:literal ),* ] ),* ) => {
+ [
+ // outer-most repetition
+ $(
+ // inner-most repetition
+ $(
+ ${ignore(l)} ${index()}, ${length()},
+ )*
+ ${count(l)}, ${index()}, ${length()},
+ )*
+ ${count(l)},
+ ]
+ };
+ }
+ assert_eq!(
+ one_nested_count_and_length!(["foo"], ["bar", "baz"]),
+ [
+ // # ["foo"]
+
+ // ## inner-most repetition (first iteration)
+ //
+ // `index` is 0 because this is the first inner-most iteration.
+ // `length` is 1 because there is only one inner-most repetition, "foo".
+ 0, 1,
+
+ // ## outer-most repetition (first iteration)
+ //
+            // `count` is 1 because of "foo", i.e. `$l` has only one repetition,
+ // `index` is 0 because this is the first outer-most iteration.
+ // `length` is 2 because there are 2 outer-most repetitions, ["foo"] and ["bar", "baz"]
+ 1, 0, 2,
+
+ // # ["bar", "baz"]
+
+ // ## inner-most repetition (first iteration)
+ //
+ // `index` is 0 because this is the first inner-most iteration
+            // `length` is 2 because there are two repetitions, "bar" and "baz"
+ 0, 2,
+
+ // ## inner-most repetition (second iteration)
+ //
+ // `index` is 1 because this is the second inner-most iteration
+            // `length` is 2 because there are two repetitions, "bar" and "baz"
+ 1, 2,
+
+ // ## outer-most repetition (second iteration)
+ //
+            // `count` is 2 because of "bar" and "baz", i.e. `$l` has two repetitions,
+ // `index` is 1 because this is the second outer-most iteration
+ // `length` is 2 because there are 2 outer-most repetitions, ["foo"] and ["bar", "baz"]
+ 2, 1, 2,
+
+ // # last count
+
+ // Because there are a total of 3 repetitions of `$l`, "foo", "bar" and "baz"
+ 3,
+ ]
+ );
+
+ // Based on the above explanation, the following macros should be straightforward
+
+ // Grouped from the outer-most to the inner-most
+ macro_rules! three_nested_count {
+ ( $( { $( [ $( ( $( $i:ident )* ) )* ] )* } )* ) => {
+ &[
+ $( $( $(
+ &[
+ ${ignore(i)} ${count(i, 0)},
+ ][..],
+ )* )* )*
+
+ $( $(
+ &[
+ ${ignore(i)} ${count(i, 0)},
+ ${ignore(i)} ${count(i, 1)},
+ ][..],
+ )* )*
+
+ $(
+ &[
+ ${ignore(i)} ${count(i, 0)},
+ ${ignore(i)} ${count(i, 1)},
+ ${ignore(i)} ${count(i, 2)},
+ ][..],
+ )*
+
+ &[
+ ${count(i, 0)},
+ ${count(i, 1)},
+ ${count(i, 2)},
+ ${count(i, 3)},
+ ][..]
+ ][..]
+ }
+ }
+ assert_eq!(
+ three_nested_count!(
+ {
+ [ (a b c) (d e f) ]
+ [ (g h) (i j k l m) ]
+ [ (n) ]
+ }
+ {
+ [ (o) (p q) (r s) ]
+ [ (t u v w x y z) ]
+ }
+ ),
+ &[
+ // a b c
+ &[3][..],
+ // d e f
+ &[3][..],
+ // g h
+ &[2][..],
+ // i j k l m
+ &[5][..],
+ // n
+ &[1][..],
+ // o
+ &[1][..],
+ // p q
+ &[2][..],
+ // r s
+ &[2][..],
+ // t u v w x y z
+ &[7][..],
+
+ // (a b c) (d e f)
+ &[2, 6][..],
+ // (g h) (i j k l m)
+ &[2, 7][..],
+ // (n)
+ &[1, 1][..],
+ // (o) (p q) (r s)
+ &[3, 5][..],
+ // (t u v w x y z)
+ &[1, 7][..],
+
+ // [ (a b c) (d e f) ]
+ // [ (g h) (i j k l m) ]
+ // [ (n) ]
+ &[3, 5, 14][..],
+ // [ (o) (p q) (r s) ]
+ // [ (t u v w x y z) ]
+ &[2, 4, 12][..],
+
+ // {
+ // [ (a b c) (d e f) ]
+ // [ (g h) (i j k l m) ]
+ // [ (n) ]
+ // }
+ // {
+ // [ (o) (p q) (r s) ]
+ // [ (t u v w x y z) ]
+ // }
+ &[2, 5, 9, 26][..]
+ ][..]
+ );
+
+ // Grouped from the outer-most to the inner-most
+ macro_rules! three_nested_length {
+ ( $( { $( [ $( ( $( $i:ident )* ) )* ] )* } )* ) => {
+ &[
+ $( $( $( $(
+ &[
+ ${ignore(i)} ${length(3)},
+ ${ignore(i)} ${length(2)},
+ ${ignore(i)} ${length(1)},
+ ${ignore(i)} ${length(0)},
+ ][..],
+ )* )* )* )*
+
+ $( $( $(
+ &[
+ ${ignore(i)} ${length(2)},
+ ${ignore(i)} ${length(1)},
+ ${ignore(i)} ${length(0)},
+ ][..],
+ )* )* )*
+
+ $( $(
+ &[
+ ${ignore(i)} ${length(1)},
+ ${ignore(i)} ${length(0)},
+ ][..],
+ )* )*
+
+ $(
+ &[
+ ${ignore(i)} ${length(0)},
+ ][..],
+ )*
+ ][..]
+ }
+ }
+ assert_eq!(
+ three_nested_length!(
+ {
+ [ (a b c) (d e f) ]
+ [ (g h) (i j k l m) ]
+ [ (n) ]
+ }
+ {
+ [ (o) (p q) (r s) ]
+ [ (t u v w x y z) ]
+ }
+ ),
+ &[
+ // a b c
+ &[2, 3, 2, 3][..], &[2, 3, 2, 3][..], &[2, 3, 2, 3][..],
+ // d e f
+ &[2, 3, 2, 3][..], &[2, 3, 2, 3][..], &[2, 3, 2, 3][..],
+ // g h
+ &[2, 3, 2, 2][..], &[2, 3, 2, 2][..],
+ // i j k l m
+ &[2, 3, 2, 5][..], &[2, 3, 2, 5][..], &[2, 3, 2, 5][..], &[2, 3, 2, 5][..],
+ &[2, 3, 2, 5][..],
+ // n
+ &[2, 3, 1, 1][..],
+ // o
+ &[2, 2, 3, 1][..],
+ // p q
+ &[2, 2, 3, 2][..], &[2, 2, 3, 2][..],
+ // r s
+ &[2, 2, 3, 2][..], &[2, 2, 3, 2][..],
+ // t u v w x y z
+ &[2, 2, 1, 7][..], &[2, 2, 1, 7][..], &[2, 2, 1, 7][..], &[2, 2, 1, 7][..],
+ &[2, 2, 1, 7][..], &[2, 2, 1, 7][..], &[2, 2, 1, 7][..],
+
+ // (a b c) (d e f)
+ &[2, 3, 2][..], &[2, 3, 2][..],
+ // (g h) (i j k l m)
+ &[2, 3, 2][..], &[2, 3, 2][..],
+ // (n)
+ &[2, 3, 1][..],
+ // (o) (p q) (r s)
+ &[2, 2, 3][..], &[2, 2, 3][..], &[2, 2, 3][..],
+ // (t u v w x y z)
+ &[2, 2, 1][..],
+
+ // [ (a b c) (d e f) ]
+ // [ (g h) (i j k l m) ]
+ // [ (n) ]
+ &[2, 3][..], &[2, 3][..], &[2, 3,][..],
+ // [ (o) (p q) (r s) ]
+ // [ (t u v w x y z) ]
+ &[2, 2][..], &[2, 2][..],
+
+ // {
+ // [ (a b c) (d e f) ]
+ // [ (g h) (i j k l m) ]
+ // [ (n) ]
+ // }
+ // {
+ // [ (o) (p q) (r s) ]
+ // [ (t u v w x y z) ]
+ // }
+ &[2][..], &[2][..]
+ ][..]
+ );
+
+ // It is possible to say, to some degree, that count is an "amalgamation" of length (see
+ // each length line result and compare them with the count results)
+}
#![feature(macro_metavar_expr)]
-macro_rules! ignore {
- ( $( $i:ident ),* ) => {{
- let array: [i32; 0] = [$( ${ignore(i)} )*];
- array
- }};
+/// Count the number of idents in a macro repetition.
+macro_rules! count_idents {
+ ( $( $i:ident ),* ) => {
+ ${count(i)}
+ };
}
+/// Count the number of idents in a 2-dimensional macro repetition.
+macro_rules! count_idents_2 {
+ ( $( [ $( $i:ident ),* ] ),* ) => {
+ ${count(i)}
+ };
+}
+
+/// Mostly counts the number of OUTER-MOST repetitions
+macro_rules! count_depth_limits {
+ ( $( { $( [ $( $outer:ident : ( $( $inner:ident )* ) )* ] )* } )* ) => {
+ (
+ (
+ ${count(inner)},
+ ${count(inner, 0)},
+ ${count(inner, 1)},
+ ${count(inner, 2)},
+ ${count(inner, 3)},
+ ),
+ (
+ ${count(outer)},
+ ${count(outer, 0)},
+ ${count(outer, 1)},
+ ${count(outer, 2)},
+ ),
+ )
+ };
+}
+
+/// Produce (index, length) pairs for literals in a macro repetition.
+/// The literal is not included in the output, so this macro uses the
+/// `ignore` meta-variable expression to create a non-expanding
+/// repetition binding.
+macro_rules! enumerate_literals {
+ ( $( ($l:stmt) ),* ) => {
+ [$( ${ignore(l)} (${index()}, ${length()}) ),*]
+ };
+}
+
+/// Produce index and length tuples for literals in a 2-dimensional
+/// macro repetition.
+macro_rules! enumerate_literals_2 {
+ ( $( [ $( ($l:literal) ),* ] ),* ) => {
+ [
+ $(
+ $(
+ (
+ ${index(1)},
+ ${length(1)},
+ ${index(0)},
+ ${length(0)},
+ $l
+ ),
+ )*
+ )*
+ ]
+ };
+}
+
+/// Generate macros that count idents and then add a constant number
+/// to the count.
+///
+/// This macro uses dollar escaping to make it unambiguous as to which
+/// macro the repetition belongs to.
+macro_rules! make_count_adders {
+ ( $( $i:ident, $b:literal );* ) => {
+ $(
+ macro_rules! $i {
+ ( $$( $$j:ident ),* ) => {
+ $b + $${count(j)}
+ };
+ }
+ )*
+ };
+}
+
+make_count_adders! { plus_one, 1; plus_five, 5 }
+
+/// Generate a macro that allows selection of a particular literal
+/// from a sequence of inputs by their identifier.
+///
+/// This macro uses dollar escaping to make it unambiguous as to which
+/// macro the repetition belongs to, and to allow expansion of an
+/// identifier the name of which is not known in the definition
+/// of `make_picker`.
+macro_rules! make_picker {
+ ( $m:ident => $( $i:ident ),* ; $p:ident ) => {
+ macro_rules! $m {
+ ( $( $$ $i:literal ),* ) => {
+ $$ $p
+ };
+ }
+ };
+}
+
+make_picker!(first => a, b; a);
+
+make_picker!(second => a, b; b);
+
fn main() {
- assert_eq!(ignore!(a, b, c), []);
+ assert_eq!(count_idents!(a, b, c), 3);
+ assert_eq!(count_idents_2!([a, b, c], [d, e], [f]), 6);
+ assert_eq!(
+ count_depth_limits! {
+ {
+ [ A: (a b c) D: (d e f) ]
+ [ G: (g h) I: (i j k l m) ]
+ [ N: (n) ]
+ }
+ {
+ [ O: (o) P: (p q) R: (r s) ]
+ [ T: (t u v w x y z) ]
+ }
+ },
+ ((26, 2, 5, 9, 26), (9, 2, 5, 9))
+ );
+ assert_eq!(enumerate_literals![("foo"), ("bar")], [(0, 2), (1, 2)]);
+ assert_eq!(
+ enumerate_literals_2![
+ [("foo"), ("bar"), ("baz")],
+ [("qux"), ("quux"), ("quuz"), ("xyzzy")]
+ ],
+ [
+ (0, 2, 0, 3, "foo"),
+ (0, 2, 1, 3, "bar"),
+ (0, 2, 2, 3, "baz"),
+
+ (1, 2, 0, 4, "qux"),
+ (1, 2, 1, 4, "quux"),
+ (1, 2, 2, 4, "quuz"),
+ (1, 2, 3, 4, "xyzzy"),
+ ]
+ );
+ assert_eq!(plus_one!(a, b, c), 4);
+ assert_eq!(plus_five!(a, b), 7);
+ assert_eq!(first!(1, 2), 1);
+ assert_eq!(second!(1, 2), 2);
}
--- /dev/null
+// run-pass
+
+#![feature(macro_metavar_expr)]
+
+#[derive(Debug)]
+struct Example<'a> {
+ _indexes: &'a [(u32, u32)],
+ _counts: &'a [u32],
+ _nested: Vec<Example<'a>>,
+}
+
+macro_rules! example {
+ ( $( [ $( ( $( $x:ident )* ) )* ] )* ) => {
+ Example {
+ _indexes: &[],
+ _counts: &[${count(x, 0)}, ${count(x, 1)}, ${count(x, 2)}],
+ _nested: vec![
+ $(
+ Example {
+ _indexes: &[(${index()}, ${length()})],
+ _counts: &[${count(x, 0)}, ${count(x, 1)}],
+ _nested: vec![
+ $(
+ Example {
+ _indexes: &[(${index(1)}, ${length(1)}), (${index()}, ${length()})],
+ _counts: &[${count(x)}],
+ _nested: vec![
+ $(
+ Example {
+ _indexes: &[
+ (${index(2)}, ${length(2)}),
+ (${index(1)}, ${length(1)}),
+ (${index()}, ${length()})
+ ],
+ _counts: &[],
+ _nested: vec![],
+ ${ignore(x)}
+ }
+ ),*
+ ]
+ }
+ ),*
+ ]
+ }
+ ),*
+ ]
+ }
+ };
+}
+
+static EXPECTED: &str = concat!(
+ "Example { _indexes: [], _counts: [2, 4, 13], _nested: [",
+ concat!(
+ "Example { _indexes: [(0, 2)], _counts: [3, 10], _nested: [",
+ concat!(
+ "Example { _indexes: [(0, 2), (0, 3)], _counts: [4], _nested: [",
+ concat!(
+ "Example { _indexes: [(0, 2), (0, 3), (0, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (0, 3), (1, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (0, 3), (2, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (0, 3), (3, 4)], _counts: [], _nested: [] }",
+ ),
+ "] }, ",
+ "Example { _indexes: [(0, 2), (1, 3)], _counts: [4], _nested: [",
+ concat!(
+ "Example { _indexes: [(0, 2), (1, 3), (0, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (1, 3), (1, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (1, 3), (2, 4)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (1, 3), (3, 4)], _counts: [], _nested: [] }",
+ ),
+ "] }, ",
+ "Example { _indexes: [(0, 2), (2, 3)], _counts: [2], _nested: [",
+ concat!(
+ "Example { _indexes: [(0, 2), (2, 3), (0, 2)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(0, 2), (2, 3), (1, 2)], _counts: [], _nested: [] }",
+ ),
+ "] }",
+ ),
+ "] }, ",
+ "Example { _indexes: [(1, 2)], _counts: [1, 3], _nested: [",
+ concat!(
+ "Example { _indexes: [(1, 2), (0, 1)], _counts: [3], _nested: [",
+ concat!(
+ "Example { _indexes: [(1, 2), (0, 1), (0, 3)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(1, 2), (0, 1), (1, 3)], _counts: [], _nested: [] }, ",
+ "Example { _indexes: [(1, 2), (0, 1), (2, 3)], _counts: [], _nested: [] }",
+ ),
+ "] }",
+ ),
+ "] }",
+ ),
+ "] }",
+);
+
+fn main() {
+ let e = example! {
+ [ ( A B C D ) ( E F G H ) ( I J ) ]
+ [ ( K L M ) ]
+ };
+ let debug = format!("{:?}", e);
+ assert_eq!(debug, EXPECTED);
+}
--- /dev/null
+#![feature(macro_metavar_expr)]
+
+macro_rules! a {
+ ( $( { $( [ $( ( $( $foo:ident )* ) )* ] )* } )* ) => {
+ (
+ ${count(foo, 0)},
+ ${count(foo, 10)},
+ //~^ ERROR count depth must be less than 4
+ )
+ };
+}
+
+macro_rules! b {
+ ( $( { $( [ $( $foo:ident )* ] )* } )* ) => {
+ (
+ $( $( $(
+ ${ignore(foo)}
+ ${index(0)},
+ ${index(10)},
+ //~^ ERROR index depth must be less than 3
+ )* )* )*
+ )
+ };
+}
+
+macro_rules! c {
+ ( $( { $( $foo:ident )* } )* ) => {
+ (
+ $( $(
+ ${ignore(foo)}
+ ${length(0)}
+ ${length(10)}
+ //~^ ERROR length depth must be less than 2
+ )* )*
+ )
+ };
+}
+
+
+fn main() {
+ a!( { [ (a) ] [ (b c) ] } );
+ b!( { [ a b ] } );
+ c!( { a } );
+}
--- /dev/null
+error: count depth must be less than 4
+ --> $DIR/out-of-bounds-arguments.rs:7:14
+ |
+LL | ${count(foo, 10)},
+ | ^^^^^^^^^^^^^^^^
+
+error: index depth must be less than 3
+ --> $DIR/out-of-bounds-arguments.rs:19:18
+ |
+LL | ${index(10)},
+ | ^^^^^^^^^^^
+
+error: length depth must be less than 2
+ --> $DIR/out-of-bounds-arguments.rs:32:18
+ |
+LL | ${length(10)}
+ | ^^^^^^^^^^^^
+
+error: aborting due to 3 previous errors
+
macro_rules! curly__no_rhs_dollar__no_round {
( $i:ident ) => { ${ count(i) } };
+ //~^ ERROR `count` can not be placed inside the inner-most repetition
}
macro_rules! curly__rhs_dollar__round {
//~| ERROR expected identifier
}
+macro_rules! unknown_count_ident {
+ ( $( $i:ident )* ) => {
+ ${count(foo)}
+ //~^ ERROR variable `foo` is not recognized in meta-variable expression
+ };
+}
+
+macro_rules! unknown_ignore_ident {
+ ( $( $i:ident )* ) => {
+ ${ignore(bar)}
+ //~^ ERROR variable `bar` is not recognized in meta-variable expression
+ };
+}
+
macro_rules! unknown_metavar {
( $( $i:ident ),* ) => { ${ aaaaaaaaaaaaaa(i) } };
//~^ ERROR unrecognized meta-variable expression
//~^ ERROR cannot find value `a` in this scope
extra_garbage_after_metavar!(a);
- unknown_metavar!(a);
- metavar_without_parens!(a);
- metavar_token_without_ident!(a);
metavar_depth_is_not_literal!(a);
+ metavar_token_without_ident!(a);
metavar_with_literal_suffix!(a);
- open_brackets_without_tokens!(a)
+ metavar_without_parens!(a);
+ open_brackets_without_tokens!(a);
+ unknown_count_ident!(a);
+ unknown_ignore_ident!(a);
+ unknown_metavar!(a);
}
error: expected identifier, found `$`
- --> $DIR/syntax-errors.rs:16:33
+ --> $DIR/syntax-errors.rs:17:33
|
LL | ( $( $i:ident ),* ) => { ${ count($i) } };
| ^^^^^ - help: try removing `$`
error: expected identifier, found `$`
- --> $DIR/syntax-errors.rs:22:26
+ --> $DIR/syntax-errors.rs:23:26
|
LL | ( $i:ident ) => { ${ count($i) } };
| ^^^^^ - help: try removing `$`
error: unexpected token: $
- --> $DIR/syntax-errors.rs:52:8
+ --> $DIR/syntax-errors.rs:53:8
|
LL | ( $$ $a:ident ) => {
| ^
note: `$$` and meta-variable expressions are not allowed inside macro parameter definitions
- --> $DIR/syntax-errors.rs:52:8
+ --> $DIR/syntax-errors.rs:53:8
|
LL | ( $$ $a:ident ) => {
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:59:19
+ --> $DIR/syntax-errors.rs:60:19
|
LL | ${count() a b c}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:59:19
+ --> $DIR/syntax-errors.rs:60:19
|
LL | ${count() a b c}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:62:19
+ --> $DIR/syntax-errors.rs:63:19
|
LL | ${count(i a b c)}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:62:19
+ --> $DIR/syntax-errors.rs:63:19
|
LL | ${count(i a b c)}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:64:22
+ --> $DIR/syntax-errors.rs:65:22
|
LL | ${count(i, 1 a b c)}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:64:22
+ --> $DIR/syntax-errors.rs:65:22
|
LL | ${count(i, 1 a b c)}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:66:20
+ --> $DIR/syntax-errors.rs:67:20
|
LL | ${count(i) a b c}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:66:20
+ --> $DIR/syntax-errors.rs:67:20
|
LL | ${count(i) a b c}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:69:21
+ --> $DIR/syntax-errors.rs:70:21
|
LL | ${ignore(i) a b c}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:69:21
+ --> $DIR/syntax-errors.rs:70:21
|
LL | ${ignore(i) a b c}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:71:20
+ --> $DIR/syntax-errors.rs:72:20
|
LL | ${ignore(i a b c)}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:71:20
+ --> $DIR/syntax-errors.rs:72:20
|
LL | ${ignore(i a b c)}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:74:19
+ --> $DIR/syntax-errors.rs:75:19
|
LL | ${index() a b c}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:74:19
+ --> $DIR/syntax-errors.rs:75:19
|
LL | ${index() a b c}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:76:19
+ --> $DIR/syntax-errors.rs:77:19
|
LL | ${index(1 a b c)}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:76:19
+ --> $DIR/syntax-errors.rs:77:19
|
LL | ${index(1 a b c)}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:79:19
+ --> $DIR/syntax-errors.rs:80:19
|
LL | ${index() a b c}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:79:19
+ --> $DIR/syntax-errors.rs:80:19
|
LL | ${index() a b c}
| ^
error: unexpected token: a
- --> $DIR/syntax-errors.rs:81:19
+ --> $DIR/syntax-errors.rs:82:19
|
LL | ${index(1 a b c)}
| ^
|
note: meta-variable expression must not have trailing tokens
- --> $DIR/syntax-errors.rs:81:19
+ --> $DIR/syntax-errors.rs:82:19
|
LL | ${index(1 a b c)}
| ^
error: meta-variable expression depth must be a literal
- --> $DIR/syntax-errors.rs:88:33
+ --> $DIR/syntax-errors.rs:89:33
|
LL | ( $( $i:ident ),* ) => { ${ index(IDX) } };
| ^^^^^
error: unexpected token: {
- --> $DIR/syntax-errors.rs:94:8
+ --> $DIR/syntax-errors.rs:95:8
|
LL | ( ${ length() } ) => {
| ^^^^^^^^^^^^
note: `$$` and meta-variable expressions are not allowed inside macro parameter definitions
- --> $DIR/syntax-errors.rs:94:8
+ --> $DIR/syntax-errors.rs:95:8
|
LL | ( ${ length() } ) => {
| ^^^^^^^^^^^^
error: expected one of: `*`, `+`, or `?`
- --> $DIR/syntax-errors.rs:94:8
+ --> $DIR/syntax-errors.rs:95:8
|
LL | ( ${ length() } ) => {
| ^^^^^^^^^^^^
error: expected identifier
- --> $DIR/syntax-errors.rs:101:33
+ --> $DIR/syntax-errors.rs:102:33
|
LL | ( $( $i:ident ),* ) => { ${ ignore() } };
| ^^^^^^
error: only unsuffixes integer literals are supported in meta-variable expressions
- --> $DIR/syntax-errors.rs:107:33
+ --> $DIR/syntax-errors.rs:108:33
|
LL | ( $( $i:ident ),* ) => { ${ index(1u32) } };
| ^^^^^
error: meta-variable expression parameter must be wrapped in parentheses
- --> $DIR/syntax-errors.rs:113:33
+ --> $DIR/syntax-errors.rs:114:33
|
LL | ( $( $i:ident ),* ) => { ${ count{i} } };
| ^^^^^
error: expected identifier
- --> $DIR/syntax-errors.rs:119:31
+ --> $DIR/syntax-errors.rs:120:31
|
LL | ( $( $i:ident ),* ) => { ${ {} } };
| ^^^^^^
error: unrecognized meta-variable expression
- --> $DIR/syntax-errors.rs:125:33
+ --> $DIR/syntax-errors.rs:140:33
|
LL | ( $( $i:ident ),* ) => { ${ aaaaaaaaaaaaaa(i) } };
| ^^^^^^^^^^^^^^ help: supported expressions are count, ignore, index and length
+error: `count` can not be placed inside the inner-most repetition
+ --> $DIR/syntax-errors.rs:12:24
+ |
+LL | ( $i:ident ) => { ${ count(i) } };
+ | ^^^^^^^^^^^^
+
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:16:30
+ --> $DIR/syntax-errors.rs:17:30
|
LL | ( $( $i:ident ),* ) => { ${ count($i) } };
| ^ expected expression
= note: this error originates in the macro `curly__rhs_dollar__round` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:22:23
+ --> $DIR/syntax-errors.rs:23:23
|
LL | ( $i:ident ) => { ${ count($i) } };
| ^ expected expression
= note: this error originates in the macro `curly__rhs_dollar__no_round` (in Nightly builds, run with -Z macro-backtrace for more info)
error: variable 'i' is still repeating at this depth
- --> $DIR/syntax-errors.rs:40:36
+ --> $DIR/syntax-errors.rs:41:36
|
LL | ( $( $i:ident ),* ) => { count($i) };
| ^^
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:59:9
+ --> $DIR/syntax-errors.rs:60:9
|
LL | ${count() a b c}
| ^ expected expression
= note: this error originates in the macro `extra_garbage_after_metavar` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:125:30
+ --> $DIR/syntax-errors.rs:89:30
|
-LL | ( $( $i:ident ),* ) => { ${ aaaaaaaaaaaaaa(i) } };
- | ^ expected expression
-...
-LL | unknown_metavar!(a);
- | ------------------- in this macro invocation
- |
- = note: this error originates in the macro `unknown_metavar` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:113:30
- |
-LL | ( $( $i:ident ),* ) => { ${ count{i} } };
+LL | ( $( $i:ident ),* ) => { ${ index(IDX) } };
| ^ expected expression
...
-LL | metavar_without_parens!(a);
- | -------------------------- in this macro invocation
+LL | metavar_depth_is_not_literal!(a);
+ | -------------------------------- in this macro invocation
|
- = note: this error originates in the macro `metavar_without_parens` (in Nightly builds, run with -Z macro-backtrace for more info)
+ = note: this error originates in the macro `metavar_depth_is_not_literal` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:101:30
+ --> $DIR/syntax-errors.rs:102:30
|
LL | ( $( $i:ident ),* ) => { ${ ignore() } };
| ^ expected expression
= note: this error originates in the macro `metavar_token_without_ident` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:88:30
+ --> $DIR/syntax-errors.rs:108:30
|
-LL | ( $( $i:ident ),* ) => { ${ index(IDX) } };
+LL | ( $( $i:ident ),* ) => { ${ index(1u32) } };
| ^ expected expression
...
-LL | metavar_depth_is_not_literal!(a);
- | -------------------------------- in this macro invocation
+LL | metavar_with_literal_suffix!(a);
+ | ------------------------------- in this macro invocation
|
- = note: this error originates in the macro `metavar_depth_is_not_literal` (in Nightly builds, run with -Z macro-backtrace for more info)
+ = note: this error originates in the macro `metavar_with_literal_suffix` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:107:30
+ --> $DIR/syntax-errors.rs:114:30
|
-LL | ( $( $i:ident ),* ) => { ${ index(1u32) } };
+LL | ( $( $i:ident ),* ) => { ${ count{i} } };
| ^ expected expression
...
-LL | metavar_with_literal_suffix!(a);
- | ------------------------------- in this macro invocation
+LL | metavar_without_parens!(a);
+ | -------------------------- in this macro invocation
|
- = note: this error originates in the macro `metavar_with_literal_suffix` (in Nightly builds, run with -Z macro-backtrace for more info)
+ = note: this error originates in the macro `metavar_without_parens` (in Nightly builds, run with -Z macro-backtrace for more info)
error: expected expression, found `$`
- --> $DIR/syntax-errors.rs:119:30
+ --> $DIR/syntax-errors.rs:120:30
|
LL | ( $( $i:ident ),* ) => { ${ {} } };
| ^ expected expression
...
-LL | open_brackets_without_tokens!(a)
+LL | open_brackets_without_tokens!(a);
| -------------------------------- in this macro invocation
|
= note: this error originates in the macro `open_brackets_without_tokens` (in Nightly builds, run with -Z macro-backtrace for more info)
+error: variable `foo` is not recognized in meta-variable expression
+ --> $DIR/syntax-errors.rs:127:17
+ |
+LL | ${count(foo)}
+ | ^^^
+
+error: variable `bar` is not recognized in meta-variable expression
+ --> $DIR/syntax-errors.rs:134:18
+ |
+LL | ${ignore(bar)}
+ | ^^^
+
+error: expected expression, found `$`
+ --> $DIR/syntax-errors.rs:140:30
+ |
+LL | ( $( $i:ident ),* ) => { ${ aaaaaaaaaaaaaa(i) } };
+ | ^ expected expression
+...
+LL | unknown_metavar!(a);
+ | ------------------- in this macro invocation
+ |
+ = note: this error originates in the macro `unknown_metavar` (in Nightly builds, run with -Z macro-backtrace for more info)
+
error[E0425]: cannot find function `count` in this scope
- --> $DIR/syntax-errors.rs:28:30
+ --> $DIR/syntax-errors.rs:29:30
|
LL | ( $( $i:ident ),* ) => { count(i) };
| ^^^^^ not found in this scope
= note: this error originates in the macro `no_curly__no_rhs_dollar__round` (in Nightly builds, run with -Z macro-backtrace for more info)
error[E0425]: cannot find value `i` in this scope
- --> $DIR/syntax-errors.rs:28:36
+ --> $DIR/syntax-errors.rs:29:36
|
LL | ( $( $i:ident ),* ) => { count(i) };
| ^ not found in this scope
= note: this error originates in the macro `no_curly__no_rhs_dollar__round` (in Nightly builds, run with -Z macro-backtrace for more info)
error[E0425]: cannot find function `count` in this scope
- --> $DIR/syntax-errors.rs:34:23
+ --> $DIR/syntax-errors.rs:35:23
|
LL | ( $i:ident ) => { count(i) };
| ^^^^^ not found in this scope
= note: this error originates in the macro `no_curly__no_rhs_dollar__no_round` (in Nightly builds, run with -Z macro-backtrace for more info)
error[E0425]: cannot find value `i` in this scope
- --> $DIR/syntax-errors.rs:34:29
+ --> $DIR/syntax-errors.rs:35:29
|
LL | ( $i:ident ) => { count(i) };
| ^ not found in this scope
= note: this error originates in the macro `no_curly__no_rhs_dollar__no_round` (in Nightly builds, run with -Z macro-backtrace for more info)
error[E0425]: cannot find function `count` in this scope
- --> $DIR/syntax-errors.rs:45:23
+ --> $DIR/syntax-errors.rs:46:23
|
LL | ( $i:ident ) => { count($i) };
| ^^^^^ not found in this scope
= note: this error originates in the macro `no_curly__rhs_dollar__no_round` (in Nightly builds, run with -Z macro-backtrace for more info)
error[E0425]: cannot find value `a` in this scope
- --> $DIR/syntax-errors.rs:138:37
+ --> $DIR/syntax-errors.rs:153:37
|
LL | no_curly__rhs_dollar__no_round!(a);
| ^ not found in this scope
-error: aborting due to 37 previous errors
+error: aborting due to 40 previous errors
For more information about this error, try `rustc --explain E0425`.
| ^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: expanding `println! { "Hello, World!" }`
- = note: to `{ $crate :: io :: _print($crate :: format_args_nl! ("Hello, World!")) ; }`
+ = note: to `$crate :: io :: _print($crate :: format_args_nl! ("Hello, World!"))`
error[E0308]: mismatched types
--> $DIR/match-range-fail.rs:18:9
|
+LL | match 5 {
+ | - this expression has type `{integer}`
LL | 'c' ..= 100 => { }
| ^^^ --- this is of type `{integer}`
| |
--- /dev/null
+fn main() {
+ let x: i32 = 1;
+ println!("{:?}", x.count()); //~ ERROR is not an iterator
+}
--- /dev/null
+error[E0599]: `i32` is not an iterator
+ --> $DIR/issue-84495.rs:3:24
+ |
+LL | println!("{:?}", x.count());
+ | ^^^^^ `i32` is not an iterator
+ |
+ = note: the following trait bounds were not satisfied:
+ `i32: Iterator`
+ which is required by `&mut i32: Iterator`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0599`.
// See `mir_drop_order.rs` for more information
#![feature(let_chains)]
+#![allow(irrefutable_let_patterns)]
use std::cell::RefCell;
use std::panic;
#![no_std]
#[lang = "owned_box"]
-pub struct Box<T: ?Sized>(*mut T);
+pub struct Box<T: ?Sized>(*mut T, ());
impl<T: ?Sized> Drop for Box<T> {
fn drop(&mut self) {
#[lang = "box_free"]
#[inline(always)]
-unsafe fn box_free<T: ?Sized>(ptr: *mut T) {
+unsafe fn box_free<T: ?Sized>(ptr: *mut T, _: ()) {
dealloc(ptr)
}
--- /dev/null
+// build-pass
+
+#![feature(adt_const_params)]
+#![allow(incomplete_features)]
+
+#[derive(PartialEq, Eq)]
+struct Yikes;
+
+impl Yikes {
+ fn mut_self(&mut self) {}
+}
+
+fn foo<const YIKES: Yikes>() {
+ YIKES.mut_self()
+ //~^ WARNING taking a mutable reference
+}
+
+fn main() {
+ foo::<{ Yikes }>()
+}
--- /dev/null
+warning: taking a mutable reference to a `const` item
+ --> $DIR/thir-constparam-temp.rs:14:5
+ |
+LL | YIKES.mut_self()
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(const_item_mutation)]` on by default
+ = note: each usage of a `const` item creates a new temporary
+ = note: the mutable reference will refer to this temporary, not the original `const` item
+note: mutable reference created due to call to this method
+ --> $DIR/thir-constparam-temp.rs:10:5
+ |
+LL | fn mut_self(&mut self) {}
+ | ^^^^^^^^^^^^^^^^^^^^^^
+note: `const` item defined here
+ --> $DIR/thir-constparam-temp.rs:13:14
+ |
+LL | fn foo<const YIKES: Yikes>() {
+ | ^^^^^
+
+warning: 1 warning emitted
+
error[E0308]: mismatched types
--> $DIR/E0409.rs:5:23
|
+LL | match x {
+ | - this expression has type `({integer}, {integer})`
LL | (0, ref y) | (y, 0) => {}
| ----- ^ expected `&{integer}`, found integer
| |
use std::iter::once;
fn main() {
once::<&str>("str").fuse().filter(|a: &str| true).count();
- //~^ ERROR not an iterator
+ //~^ ERROR the method
//~| ERROR type mismatch in closure arguments
}
LL | P: FnMut(&Self::Item) -> bool,
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ required by this bound in `filter`
-error[E0599]: `Filter<Fuse<std::iter::Once<&str>>, [closure@$DIR/issue-36053-2.rs:7:39: 7:53]>` is not an iterator
+error[E0599]: the method `count` exists for struct `Filter<Fuse<std::iter::Once<&str>>, [closure@$DIR/issue-36053-2.rs:7:39: 7:53]>`, but its trait bounds were not satisfied
--> $DIR/issue-36053-2.rs:7:55
|
LL | once::<&str>("str").fuse().filter(|a: &str| true).count();
- | -------------- ^^^^^ `Filter<Fuse<std::iter::Once<&str>>, [closure@$DIR/issue-36053-2.rs:7:39: 7:53]>` is not an iterator
+ | -------------- ^^^^^ method cannot be called on `Filter<Fuse<std::iter::Once<&str>>, [closure@$DIR/issue-36053-2.rs:7:39: 7:53]>` due to unsatisfied trait bounds
| |
| doesn't satisfy `<_ as FnOnce<(&&str,)>>::Output = bool`
| doesn't satisfy `_: FnMut<(&&str,)>`
LL | self.foo();
| ^^^ method cannot be called on `&Foo<T>` due to unsatisfied trait bounds
|
- = note: the following trait bounds were not satisfied:
- `T: Default`
- which is required by `Foo<T>: Bar`
- `T: Bar`
- which is required by `Foo<T>: Bar`
+note: trait bound `T: Default` was not satisfied
+ --> $DIR/missing-trait-bounds-for-method-call.rs:10:9
+ |
+LL | impl<T: Default + Bar> Bar for Foo<T> {}
+ | ^^^^^^^ --- ------
+ | |
+ | unsatisfied trait bound introduced here
+note: trait bound `T: Bar` was not satisfied
+ --> $DIR/missing-trait-bounds-for-method-call.rs:10:19
+ |
+LL | impl<T: Default + Bar> Bar for Foo<T> {}
+ | ^^^ --- ------
+ | |
+ | unsatisfied trait bound introduced here
help: consider restricting the type parameters to satisfy the trait bounds
|
LL | struct Foo<T> where T: Bar, T: Default {
LL | self.foo();
| ^^^ method cannot be called on `&Fin<T>` due to unsatisfied trait bounds
|
- = note: the following trait bounds were not satisfied:
- `T: Default`
- which is required by `Fin<T>: Bar`
+note: trait bound `T: Default` was not satisfied
+ --> $DIR/missing-trait-bounds-for-method-call.rs:23:9
+ |
+LL | impl<T: Default + Bar> Bar for Fin<T> {}
+ | ^^^^^^^ --- ------
+ | |
+ | unsatisfied trait bound introduced here
help: consider restricting the type parameter to satisfy the trait bound
|
LL | struct Fin<T> where T: Bar, T: Default {
--- /dev/null
+fn takes_fn(f: impl Fn()) {
+ loop {
+ takes_fnonce(f);
+ //~^ ERROR use of moved value
+ //~| HELP consider borrowing
+ }
+}
+
+fn takes_fn_mut(m: impl FnMut()) {
+ if maybe() {
+ takes_fnonce(m);
+ //~^ HELP consider mutably borrowing
+ }
+ takes_fnonce(m);
+ //~^ ERROR use of moved value
+}
+
+fn has_closure() {
+ let mut x = 0;
+ let mut closure = || {
+ x += 1;
+ };
+ takes_fnonce(closure);
+ //~^ HELP consider mutably borrowing
+ closure();
+ //~^ ERROR borrow of moved value
+}
+
+fn maybe() -> bool {
+ false
+}
+
+// Could also be Fn[Mut], here it doesn't matter
+fn takes_fnonce(_: impl FnOnce()) {}
+
+fn main() {}
--- /dev/null
+error[E0382]: use of moved value: `f`
+ --> $DIR/borrow-closures-instead-of-move.rs:3:22
+ |
+LL | fn takes_fn(f: impl Fn()) {
+ | - move occurs because `f` has type `impl Fn()`, which does not implement the `Copy` trait
+LL | loop {
+LL | takes_fnonce(f);
+ | ^ value moved here, in previous iteration of loop
+ |
+help: consider borrowing `f`
+ |
+LL | takes_fnonce(&f);
+ | +
+
+error[E0382]: use of moved value: `m`
+ --> $DIR/borrow-closures-instead-of-move.rs:14:18
+ |
+LL | fn takes_fn_mut(m: impl FnMut()) {
+ | - move occurs because `m` has type `impl FnMut()`, which does not implement the `Copy` trait
+LL | if maybe() {
+LL | takes_fnonce(m);
+ | - value moved here
+...
+LL | takes_fnonce(m);
+ | ^ value used here after move
+ |
+help: consider mutably borrowing `m`
+ |
+LL | takes_fnonce(&mut m);
+ | ++++
+
+error[E0382]: borrow of moved value: `closure`
+ --> $DIR/borrow-closures-instead-of-move.rs:25:5
+ |
+LL | takes_fnonce(closure);
+ | ------- value moved here
+LL |
+LL | closure();
+ | ^^^^^^^ value borrowed here after move
+ |
+note: closure cannot be moved more than once as it is not `Copy` due to moving the variable `x` out of its environment
+ --> $DIR/borrow-closures-instead-of-move.rs:21:9
+ |
+LL | x += 1;
+ | ^
+help: consider mutably borrowing `closure`
+ |
+LL | takes_fnonce(&mut closure);
+ | ++++
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0382`.
LL | f(&mut r, false)
| ^ value borrowed here after move
|
-help: consider further restricting this bound
+help: consider mutably borrowing `f`
|
-LL | fn conspirator<F>(mut f: F) where F: FnMut(&mut R, bool) + Copy {
- | ++++++
+LL | let mut r = R {c: Box::new(&mut f)};
+ | ++++
error: aborting due to 2 previous errors
|
LL | let &_
| ^^ types differ in mutability
+...
+LL | = foo;
+ | --- this expression has type `&mut {integer}`
|
= note: expected mutable reference `&mut {integer}`
found reference `&_`
|
LL | let &mut _
| ^^^^^^ types differ in mutability
+...
+LL | = bar;
+ | --- this expression has type `&{integer}`
|
= note: expected reference `&{integer}`
found mutable reference `&mut _`
error[E0308]: mismatched types
--> $DIR/diverging-tuple-parts-39485.rs:8:5
|
-LL | fn g() {
- | - possibly return type missing here?
LL | &panic!()
| ^^^^^^^^^ expected `()`, found reference
|
= note: expected unit type `()`
found reference `&_`
+help: a return type might be missing here
+ |
+LL | fn g() -> _ {
+ | ++++
help: consider removing the borrow
|
LL - &panic!()
LL | let x: ! = panic!("aah");
| ^ help: if this is intentional, prefix it with an underscore: `_x`
|
-note: the lint level is defined here
- --> $DIR/never-assign-dead-code.rs:6:9
- |
-LL | #![warn(unused)]
- | ^^^^^^
= note: `#[warn(unused_variables)]` implied by `#[warn(unused)]`
warning: 3 warnings emitted
+++ /dev/null
-// check-pass
-// aux-build:empty-struct.rs
-
-#[no_link]
-extern crate empty_struct;
-
-fn main() {
- empty_struct::XEmpty1 {};
-}
|
LL | a += 1;
| ^
+help: consider mutably borrowing `hello`
+ |
+LL | let b = &mut hello;
+ | ++++
error: aborting due to previous error
// run-pass
#![allow(unused_must_use)]
// ignore-emscripten no threads support
+// needs-unwind
#![feature(rustc_attrs)]
use std::thread;
--- /dev/null
+// compile-flags: -Z oom=panic
+// run-pass
+// no-prefer-dynamic
+// needs-unwind
+// only-linux
+
+#![feature(bench_black_box)]
+
+use std::hint::black_box;
+use std::mem::forget;
+use std::panic::catch_unwind;
+
+fn main() {
+ let panic = catch_unwind(|| {
+ // This is guaranteed to exceed even the size of the address space
+ for _ in 0..16 {
+ // Truncates to a suitable value for both 32-bit and 64-bit targets.
+ let alloc_size = 0x1000_0000_1000_0000u64 as usize;
+ forget(black_box(vec![0u8; alloc_size]));
+ }
+ });
+ assert!(panic.is_err());
+}
--> $DIR/already-bound-name.rs:30:32
|
LL | let (B(A(a, _) | B(a)) | A(a, A(a, _) | B(a))) = B(B(1));
- | - ^ expected integer, found enum `E`
- | |
+ | - ^ ------- this expression has type `E<E<{integer}>>`
+ | | |
+ | | expected integer, found enum `E`
| first introduced with type `{integer}` here
|
= note: expected type `{integer}`
--> $DIR/inconsistent-modes.rs:13:32
|
LL | let (Ok((ref a, b)) | Err((ref mut a, ref b))) = Ok((0, &0));
- | ----- ^^^^^^^^^ types differ in mutability
- | |
+ | ----- ^^^^^^^^^ ----------- this expression has type `Result<({integer}, &{integer}), (_, _)>`
+ | | |
+ | | types differ in mutability
| first introduced with type `&{integer}` here
|
= note: expected type `&{integer}`
--- /dev/null
+// run-pass
+// aux-build:weak-lang-items.rs
+
+// ignore-emscripten no threads support
+// pretty-expanded FIXME #23616
+
+extern crate weak_lang_items as other;
+
+use std::thread;
+
+fn main() {
+ let _ = thread::spawn(move|| {
+ other::foo()
+ });
+}
// run-pass
+// needs-unwind
// ignore-emscripten no subprocess support
#![feature(internal_output_capture)]
LL | trait T {
| - while parsing this item list starting here
LL | extern "Rust" unsafe fn foo();
- | ^^^^^^ expected `{`
+ | --------------^^^^^^
+ | | |
+ | | expected `{`
+ | help: `unsafe` must come before `extern "Rust"`: `unsafe extern "Rust"`
LL |
LL | }
| - the item list ends here
+ |
+ = note: keyword order for functions declaration is `default`, `pub`, `const`, `async`, `unsafe`, `extern`
error: aborting due to previous error
|
LL | type Type_2 = Type_1_<'static ()>;
| ^ expected one of `,`, `:`, `=`, or `>`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_2 = Type_1_<'static> ()>;
+ | +
error: aborting due to previous error
|
LL | type Type_3<T> = Box<T,,>;
| ^ expected one of `>`, a const expression, lifetime, or type
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_3<T> = Box<T>,,>;
+ | +
error: aborting due to previous error
|
LL | type Type_4<T> = Type_1_<'static,, T>;
| ^ expected one of `>`, a const expression, lifetime, or type
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_4<T> = Type_1_<'static>,, T>;
+ | +
error: aborting due to previous error
|
LL | type Type_5<'a> = Type_1_<'a, (),,>;
| ^ expected one of `>`, a const expression, lifetime, or type
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_5<'a> = Type_1_<'a, ()>,,>;
+ | +
error: aborting due to previous error
|
LL | type Type_6 = Type_5_<'a,,>;
| ^ expected one of `>`, a const expression, lifetime, or type
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_6 = Type_5_<'a>,,>;
+ | +
error: aborting due to previous error
|
LL | type Type_7 = Box<(),,>;
| ^ expected one of `>`, a const expression, lifetime, or type
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type Type_7 = Box<()>,,>;
+ | +
error: aborting due to previous error
|
LL | (vec![1, 2, 3]: Vec<i32>)[0];
| + +
+help: alternatively, remove the type ascription
+ |
+LL - vec![1, 2, 3]: Vec<i32>[0];
+LL + vec![1, 2, 3][0];
+ |
error: casts cannot be followed by indexing
--> $DIR/issue-35813-postfix-after-cast.rs:17:5
|
LL | ((&[0i32]): &[i32; 1])[0];
| + +
+help: alternatively, remove the type ascription
+ |
+LL - (&[0i32]): &[i32; 1][0];
+LL + (&[0i32])[0];
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:39:13
|
LL | let _ = (0i32: i32: i32).count_ones();
| + +
+help: alternatively, remove the type ascription
+ |
+LL - let _ = 0i32: i32: i32.count_ones();
+LL + let _ = 0i32: i32.count_ones();
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:41:13
|
LL | let _ = (0 as i32: i32).count_ones();
| + +
+help: alternatively, remove the type ascription
+ |
+LL - let _ = 0 as i32: i32.count_ones();
+LL + let _ = 0 as i32.count_ones();
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:43:13
|
LL | let _ = (0i32: i32).count_ones(): u32;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - let _ = 0i32: i32.count_ones(): u32;
+LL + let _ = 0i32.count_ones(): u32;
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:51:13
|
LL | let _ = (0i32: i32).count_ones() as u32;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - let _ = 0i32: i32.count_ones() as u32;
+LL + let _ = 0i32.count_ones() as u32;
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:55:13
|
LL | let _ = (0i32: i32: i32).count_ones() as u32 as i32;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - let _ = 0i32: i32: i32.count_ones() as u32 as i32;
+LL + let _ = 0i32: i32.count_ones() as u32 as i32;
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:62:13
|
LL | (0: i32).max(0);
| + +
+help: alternatively, remove the type ascription
+ |
+LL - 0: i32.max(0);
+LL + 0.max(0);
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:92:8
|
LL | if (5u64: u64).max(0) == 0 {
| + +
+help: alternatively, remove the type ascription
+ |
+LL - if 5u64: u64.max(0) == 0 {
+LL + if 5u64.max(0) == 0 {
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:102:9
|
LL | (5u64: u64).max(0) == 0
| + +
+help: alternatively, remove the type ascription
+ |
+LL - 5u64: u64.max(0) == 0
+LL + 5u64.max(0) == 0
+ |
error: casts cannot be followed by indexing
--> $DIR/issue-35813-postfix-after-cast.rs:111:24
|
LL | static bar2: &[i32] = &((&[1i32,2,3]: &[i32; 3])[0..1]);
| + +
+help: alternatively, remove the type ascription
+ |
+LL - static bar2: &[i32] = &(&[1i32,2,3]: &[i32; 3][0..1]);
+LL + static bar2: &[i32] = &(&[1i32,2,3][0..1]);
+ |
error: casts cannot be followed by `?`
--> $DIR/issue-35813-postfix-after-cast.rs:119:5
|
LL | (Err(0u64): Result<u64,u64>)?;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - Err(0u64): Result<u64,u64>?;
+LL + Err(0u64)?;
+ |
error: casts cannot be followed by a function call
--> $DIR/issue-35813-postfix-after-cast.rs:145:5
|
LL | (drop_ptr: fn(u8))(0);
| + +
+help: alternatively, remove the type ascription
+ |
+LL - drop_ptr: fn(u8)(0);
+LL + drop_ptr(0);
+ |
error: casts cannot be followed by `.await`
--> $DIR/issue-35813-postfix-after-cast.rs:152:5
|
LL | (Box::pin(noop()): Pin<Box<_>>).await;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - Box::pin(noop()): Pin<Box<_>>.await;
+LL + Box::pin(noop()).await;
+ |
error: casts cannot be followed by a field access
--> $DIR/issue-35813-postfix-after-cast.rs:167:5
|
LL | (Foo::default(): Foo).bar;
| + +
+help: alternatively, remove the type ascription
+ |
+LL - Foo::default(): Foo.bar;
+LL + Foo::default().bar;
+ |
error: casts cannot be followed by a method call
--> $DIR/issue-35813-postfix-after-cast.rs:84:9
|
LL | (if true { 33 } else { 44 }: i32).max(0)
| + +
+help: alternatively, remove the type ascription
+ |
+LL - if true { 33 } else { 44 }: i32.max(0)
+LL + if true { 33 } else { 44 }.max(0)
+ |
error[E0214]: parenthesized type parameters may only be used with a `Fn` trait
--> $DIR/issue-35813-postfix-after-cast.rs:131:13
|
LL | pub fn foo(_: i32, self: Box<Self) {}
| ^ expected one of 9 possible tokens
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | pub fn foo(_: i32, self: Box<Self>) {}
+ | +
error: aborting due to previous error
|
::: $SRC_DIR/core/src/macros/mod.rs:LL:COL
|
-LL | ($left:expr, $right:expr $(,)?) => ({
+LL | ($left:expr, $right:expr $(,)?) => {
| ---------- while parsing argument for this `expr` macro fragment
error: aborting due to 4 previous errors
--> $DIR/issue-84117.rs:2:67
|
LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str, }
- | ------------ ^ expected one of `>`, a const expression, lifetime, or type
- | | |
- | | help: use `=` if you meant to assign
+ | ----------- ^ expected one of `>`, a const expression, lifetime, or type
+ | |
| while parsing the type for `inner_local`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str>, }
+ | +
+help: use `=` if you meant to assign
+ |
+LL | let outer_local:e_outer<&str, { let inner_local =e_inner<&str, }
+ | ~
error: expected one of `!`, `.`, `::`, `;`, `?`, `else`, `{`, or an operator, found `,`
--> $DIR/issue-84117.rs:2:65
--> $DIR/issue-84117.rs:8:1
|
LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str, }
- | ------------ help: use `=` if you meant to assign - expected one of `,`, `:`, `=`, or `>`
- | |
- | while parsing the type for `outer_local`
+ | ----------- while parsing the type for `outer_local` - expected one of `,`, `:`, `=`, or `>`
...
LL | }
| ^ unexpected token
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str, }>
+ | +
+help: use `=` if you meant to assign
+ |
+LL | let outer_local =e_outer<&str, { let inner_local:e_inner<&str, }
+ | ~
error: expected one of `>`, a const expression, lifetime, or type, found `}`
--> $DIR/issue-84117.rs:2:67
|
LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str, }
- | ------------ ^ expected one of `>`, a const expression, lifetime, or type
- | | |
- | | help: use `=` if you meant to assign
+ | ----------- ^ expected one of `>`, a const expression, lifetime, or type
+ | |
| while parsing the type for `inner_local`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let outer_local:e_outer<&str, { let inner_local:e_inner<&str>, }
+ | +
+help: use `=` if you meant to assign
+ |
+LL | let outer_local:e_outer<&str, { let inner_local =e_inner<&str, }
+ | ~
error: expected one of `!`, `.`, `::`, `;`, `?`, `else`, `{`, or an operator, found `,`
--> $DIR/issue-84117.rs:2:65
--> $DIR/issue-87635.rs:4:5
|
LL | pub fn bar()
- | ^^^^^^^^^^^-
- | |
- | help: provide a definition for the function: `{ <body> }`
+ | ^^^^^^^^^^^^- help: provide a definition for the function: `{ <body> }`
error: aborting due to 2 previous errors
--- /dev/null
+// run-rustfix
+#![allow(unused)]
+struct Foo<'a, 'b> {
+ a: &'a &'b i32
+}
+
+fn foo<'a, 'b>(_x: &mut Foo<'a, 'b>) {}
+//~^ ERROR expected one of `,`, `:`, `=`, or `>`, found `;`
+
+fn main() {}
+// run-rustfix
+#![allow(unused)]
struct Foo<'a, 'b> {
a: &'a &'b i32
}
-fn foo<'a, 'b>(x: &mut Foo<'a; 'b>) {}
+fn foo<'a, 'b>(_x: &mut Foo<'a; 'b>) {}
//~^ ERROR expected one of `,`, `:`, `=`, or `>`, found `;`
fn main() {}
error: expected one of `,`, `:`, `=`, or `>`, found `;`
- --> $DIR/lifetime-semicolon.rs:5:30
+ --> $DIR/lifetime-semicolon.rs:7:31
|
-LL | fn foo<'a, 'b>(x: &mut Foo<'a; 'b>) {}
- | ^ expected one of `,`, `:`, `=`, or `>`
+LL | fn foo<'a, 'b>(_x: &mut Foo<'a; 'b>) {}
+ | ^ expected one of `,`, `:`, `=`, or `>`
+ |
+help: use a comma to separate type parameters
+ |
+LL | fn foo<'a, 'b>(_x: &mut Foo<'a, 'b>) {}
+ | ~
error: aborting due to previous error
| | |
| | maybe try to close unmatched angle bracket
| while parsing the type for `v`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let v : Vec<(u32,_)> = vec![];
+ | +
error: expected one of `!`, `(`, `+`, `,`, `::`, `<`, or `>`, found `{`
--> $DIR/missing-closing-angle-bracket-eq-constraint.rs:13:32
| --- ^ expected one of 7 possible tokens
| |
| while parsing the type for `foo`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let foo : Foo::<T1>, T2 = Foo {_a : arg1, _b : arg2};
+ | +
error: expected one of `,`, `:`, or `>`, found `=`
--> $DIR/missing-closing-angle-bracket-eq-constraint.rs:18:18
| | |
| | maybe try to close unmatched angle bracket
| while parsing the type for `v`
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | let v : Vec<'a> = vec![];
+ | +
error[E0282]: type annotations needed for `Vec<T>`
--> $DIR/missing-closing-angle-bracket-eq-constraint.rs:7:25
--- /dev/null
+// run-rustfix
+#![allow(unused)]
+use std::sync::{Arc, Mutex};
+
+pub struct Foo {
+ a: Mutex<usize>,
+ b: Arc<Mutex<usize>, //~ HELP you might have meant to end the type parameters here
+ c: Arc<Mutex<usize>>,
+} //~ ERROR expected one of
+
+fn main() {}
--- /dev/null
+error: expected one of `>`, a const expression, lifetime, or type, found `}`
+ --> $DIR/missing-closing-angle-bracket-struct-field-ty.rs:9:1
+ |
+LL | c: Arc<Mutex<usize>>,
+ | - expected one of `>`, a const expression, lifetime, or type
+LL | }
+ | ^ unexpected token
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | b: Arc<Mutex<usize>>,
+ | +
+
+error: aborting due to previous error
+
--- /dev/null
+struct TypedArenaChunk {
+ next: Option<String>>
+ //~^ ERROR unmatched angle bracket
+}
+
+fn main() {}
--- /dev/null
+error: unmatched angle bracket
+ --> $DIR/recover-field-extra-angle-brackets-in-struct-with-a-field.rs:2:25
+ |
+LL | next: Option<String>>
+ | _________________________^
+LL | |
+LL | | }
+ | |_ help: remove extra angle bracket
+
+error: aborting due to previous error
+
--> $DIR/recover-range-pats.rs:22:12
|
LL | if let .0..Y = 0 {}
- | ^^ - this is of type `u8`
- | |
+ | ^^ - - this expression has type `{integer}`
+ | | |
+ | | this is of type `u8`
| expected integer, found floating-point number
error[E0308]: mismatched types
--> $DIR/recover-range-pats.rs:35:12
|
LL | if let .0..=Y = 0 {}
- | ^^ - this is of type `u8`
- | |
+ | ^^ - - this expression has type `{integer}`
+ | | |
+ | | this is of type `u8`
| expected integer, found floating-point number
error[E0308]: mismatched types
--> $DIR/recover-range-pats.rs:60:12
|
LL | if let .0...Y = 0 {}
- | ^^ - this is of type `u8`
- | |
+ | ^^ - - this expression has type `{integer}`
+ | | |
+ | | this is of type `u8`
| expected integer, found floating-point number
error[E0308]: mismatched types
--> $DIR/recover-range-pats.rs:75:12
|
LL | if let .0.. = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error[E0029]: only `char` and numeric types are allowed in range patterns
--> $DIR/recover-range-pats.rs:83:12
--> $DIR/recover-range-pats.rs:85:12
|
LL | if let .0..= = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error[E0029]: only `char` and numeric types are allowed in range patterns
--> $DIR/recover-range-pats.rs:93:12
--> $DIR/recover-range-pats.rs:95:12
|
LL | if let .0... = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error[E0029]: only `char` and numeric types are allowed in range patterns
--> $DIR/recover-range-pats.rs:103:14
--> $DIR/recover-range-pats.rs:105:15
|
LL | if let .. .0 = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error[E0029]: only `char` and numeric types are allowed in range patterns
--> $DIR/recover-range-pats.rs:113:15
--> $DIR/recover-range-pats.rs:115:15
|
LL | if let ..=.0 = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error[E0029]: only `char` and numeric types are allowed in range patterns
--> $DIR/recover-range-pats.rs:125:15
--> $DIR/recover-range-pats.rs:128:15
|
LL | if let ....3 = 0 {}
- | ^^ expected integer, found floating-point number
+ | ^^ - this expression has type `{integer}`
+ | |
+ | expected integer, found floating-point number
error: aborting due to 60 previous errors
|
LL | type closure = Box<lt/fn()>;
| ^ expected one of 9 possible tokens
+ |
+help: you might have meant to end the type parameters here
+ |
+LL | type closure = Box<lt>/fn()>;
+ | +
error: aborting due to previous error
--> $DIR/issue-74702.rs:2:9
|
LL | let (foo @ ..,) = (0, 0);
- | ^^^^^^^^^^^ expected a tuple with 2 elements, found one with 1 element
+ | ^^^^^^^^^^^ ------ this expression has type `({integer}, {integer})`
+ | |
+ | expected a tuple with 2 elements, found one with 1 element
|
= note: expected tuple `({integer}, {integer})`
found tuple `(_,)`
LL | const { || {} } => {},
| ^^^^^^^^^^^^^^^
-error: `impl Future<Output = [async output]>` cannot be used in patterns
+error: `impl Future` cannot be used in patterns
--> $DIR/non-structural-match-types.rs:12:9
|
LL | const { async {} } => {},
error[E0308]: mismatched types
--> $DIR/pat-tuple-overfield.rs:19:9
|
+LL | match (1, 2, 3) {
+ | --------- this expression has type `({integer}, {integer}, {integer})`
LL | (1, 2, 3, 4) => {}
| ^^^^^^^^^^^^ expected a tuple with 3 elements, found one with 4 elements
|
error[E0308]: mismatched types
--> $DIR/pat-tuple-overfield.rs:20:9
|
+LL | match (1, 2, 3) {
+ | --------- this expression has type `({integer}, {integer}, {integer})`
+LL | (1, 2, 3, 4) => {}
LL | (1, 2, .., 3, 4) => {}
| ^^^^^^^^^^^^^^^^ expected a tuple with 3 elements, found one with 4 elements
|
-pub enum Foo {
+pub enum HiddenEnum {
A,
B,
#[doc(hidden)]
C,
}
+
+#[derive(Default)]
+pub struct HiddenStruct {
+ pub one: u8,
+ pub two: bool,
+ #[doc(hidden)]
+ pub hide: usize,
+}
#![stable(feature = "stable_test_feature", since = "1.0.0")]
#[stable(feature = "stable_test_feature", since = "1.0.0")]
-pub enum Foo {
+pub enum UnstableEnum {
#[stable(feature = "stable_test_feature", since = "1.0.0")]
Stable,
#[stable(feature = "stable_test_feature", since = "1.0.0")]
#[unstable(feature = "unstable_test_feature", issue = "none")]
Unstable,
}
+
+#[derive(Default)]
+#[stable(feature = "stable_test_feature", since = "1.0.0")]
+pub struct UnstableStruct {
+ #[stable(feature = "stable_test_feature", since = "1.0.0")]
+ pub stable: bool,
+ #[stable(feature = "stable_test_feature", since = "1.0.0")]
+ pub stable2: usize,
+ #[unstable(feature = "unstable_test_feature", issue = "none")]
+ pub unstable: u8,
+}
--- /dev/null
+// aux-build:hidden.rs
+
+extern crate hidden;
+
+use hidden::HiddenStruct;
+
+struct InCrate {
+ a: usize,
+ b: bool,
+ #[doc(hidden)]
+ im_hidden: u8
+}
+
+fn main() {
+ let HiddenStruct { one, two } = HiddenStruct::default();
+ //~^ pattern requires `..` due to inaccessible fields
+
+ let HiddenStruct { one } = HiddenStruct::default();
+ //~^ pattern does not mention field `two` and inaccessible fields
+
+ let HiddenStruct { one, hide } = HiddenStruct::default();
+ //~^ pattern does not mention field `two`
+
+ let InCrate { a, b } = InCrate { a: 0, b: false, im_hidden: 0 };
+ //~^ pattern does not mention field `im_hidden`
+}
--- /dev/null
+error: pattern requires `..` due to inaccessible fields
+ --> $DIR/doc-hidden-fields.rs:15:9
+ |
+LL | let HiddenStruct { one, two } = HiddenStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: ignore the inaccessible and unused fields
+ |
+LL | let HiddenStruct { one, two, .. } = HiddenStruct::default();
+ | ++++
+
+error[E0027]: pattern does not mention field `two` and inaccessible fields
+ --> $DIR/doc-hidden-fields.rs:18:9
+ |
+LL | let HiddenStruct { one } = HiddenStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^ missing field `two` and inaccessible fields
+ |
+help: include the missing field in the pattern and ignore the inaccessible fields
+ |
+LL | let HiddenStruct { one, two, .. } = HiddenStruct::default();
+ | ~~~~~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let HiddenStruct { one, .. } = HiddenStruct::default();
+ | ~~~~~~
+
+error[E0027]: pattern does not mention field `two`
+ --> $DIR/doc-hidden-fields.rs:21:9
+ |
+LL | let HiddenStruct { one, hide } = HiddenStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^ missing field `two`
+ |
+help: include the missing field in the pattern
+ |
+LL | let HiddenStruct { one, hide, two } = HiddenStruct::default();
+ | ~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let HiddenStruct { one, hide, .. } = HiddenStruct::default();
+ | ~~~~~~
+
+error[E0027]: pattern does not mention field `im_hidden`
+ --> $DIR/doc-hidden-fields.rs:24:9
+ |
+LL | let InCrate { a, b } = InCrate { a: 0, b: false, im_hidden: 0 };
+ | ^^^^^^^^^^^^^^^^ missing field `im_hidden`
+ |
+help: include the missing field in the pattern
+ |
+LL | let InCrate { a, b, im_hidden } = InCrate { a: 0, b: false, im_hidden: 0 };
+ | ~~~~~~~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let InCrate { a, b, .. } = InCrate { a: 0, b: false, im_hidden: 0 };
+ | ~~~~~~
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0027`.
extern crate hidden;
-use hidden::Foo;
+use hidden::HiddenEnum;
+
+enum InCrate {
+ A,
+ B,
+ #[doc(hidden)]
+ C,
+}
fn main() {
- match Foo::A {
- Foo::A => {}
- Foo::B => {}
+ match HiddenEnum::A {
+ HiddenEnum::A => {}
+ HiddenEnum::B => {}
}
//~^^^^ non-exhaustive patterns: `_` not covered
- match Foo::A {
- Foo::A => {}
- Foo::C => {}
+ match HiddenEnum::A {
+ HiddenEnum::A => {}
+ HiddenEnum::C => {}
}
//~^^^^ non-exhaustive patterns: `B` not covered
- match Foo::A {
- Foo::A => {}
+ match HiddenEnum::A {
+ HiddenEnum::A => {}
}
//~^^^ non-exhaustive patterns: `B` and `_` not covered
match None {
None => {}
- Some(Foo::A) => {}
+ Some(HiddenEnum::A) => {}
}
//~^^^^ non-exhaustive patterns: `Some(B)` and `Some(_)` not covered
+
+ match InCrate::A {
+ InCrate::A => {}
+ InCrate::B => {}
+ }
+ //~^^^^ non-exhaustive patterns: `C` not covered
}
error[E0004]: non-exhaustive patterns: `_` not covered
- --> $DIR/doc-hidden-non-exhaustive.rs:8:11
+ --> $DIR/doc-hidden-non-exhaustive.rs:15:11
|
-LL | match Foo::A {
- | ^^^^^^ pattern `_` not covered
+LL | match HiddenEnum::A {
+ | ^^^^^^^^^^^^^ pattern `_` not covered
|
-note: `Foo` defined here
+note: `HiddenEnum` defined here
--> $DIR/auxiliary/hidden.rs:1:1
|
-LL | / pub enum Foo {
+LL | / pub enum HiddenEnum {
LL | | A,
LL | | B,
LL | | #[doc(hidden)]
LL | | C,
LL | | }
| |_^
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `HiddenEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
-LL ~ Foo::B => {}
+LL ~ HiddenEnum::B => {}
LL + _ => todo!()
|
error[E0004]: non-exhaustive patterns: `B` not covered
- --> $DIR/doc-hidden-non-exhaustive.rs:14:11
+ --> $DIR/doc-hidden-non-exhaustive.rs:21:11
|
-LL | match Foo::A {
- | ^^^^^^ pattern `B` not covered
+LL | match HiddenEnum::A {
+ | ^^^^^^^^^^^^^ pattern `B` not covered
|
-note: `Foo` defined here
+note: `HiddenEnum` defined here
--> $DIR/auxiliary/hidden.rs:3:5
|
-LL | / pub enum Foo {
+LL | / pub enum HiddenEnum {
LL | | A,
LL | | B,
| | ^ not covered
LL | | C,
LL | | }
| |_-
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `HiddenEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
-LL ~ Foo::C => {}
+LL ~ HiddenEnum::C => {}
LL + B => todo!()
|
error[E0004]: non-exhaustive patterns: `B` and `_` not covered
- --> $DIR/doc-hidden-non-exhaustive.rs:20:11
+ --> $DIR/doc-hidden-non-exhaustive.rs:27:11
|
-LL | match Foo::A {
- | ^^^^^^ patterns `B` and `_` not covered
+LL | match HiddenEnum::A {
+ | ^^^^^^^^^^^^^ patterns `B` and `_` not covered
|
-note: `Foo` defined here
+note: `HiddenEnum` defined here
--> $DIR/auxiliary/hidden.rs:3:5
|
-LL | / pub enum Foo {
+LL | / pub enum HiddenEnum {
LL | | A,
LL | | B,
| | ^ not covered
LL | | C,
LL | | }
| |_-
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `HiddenEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern, a match arm with multiple or-patterns as shown, or multiple match arms
|
-LL ~ Foo::A => {}
+LL ~ HiddenEnum::A => {}
LL + B | _ => todo!()
|
error[E0004]: non-exhaustive patterns: `Some(B)` and `Some(_)` not covered
- --> $DIR/doc-hidden-non-exhaustive.rs:25:11
+ --> $DIR/doc-hidden-non-exhaustive.rs:32:11
|
LL | match None {
| ^^^^ patterns `Some(B)` and `Some(_)` not covered
|
-note: `Option<Foo>` defined here
+note: `Option<HiddenEnum>` defined here
--> $SRC_DIR/core/src/option.rs:LL:COL
|
LL | / pub enum Option<T> {
| | ^^^^ not covered
LL | | }
| |_-
- = note: the matched value is of type `Option<Foo>`
+ = note: the matched value is of type `Option<HiddenEnum>`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern, a match arm with multiple or-patterns as shown, or multiple match arms
|
-LL ~ Some(Foo::A) => {}
+LL ~ Some(HiddenEnum::A) => {}
LL + Some(B) | Some(_) => todo!()
|
-error: aborting due to 4 previous errors
+error[E0004]: non-exhaustive patterns: `C` not covered
+ --> $DIR/doc-hidden-non-exhaustive.rs:38:11
+ |
+LL | match InCrate::A {
+ | ^^^^^^^^^^ pattern `C` not covered
+ |
+note: `InCrate` defined here
+ --> $DIR/doc-hidden-non-exhaustive.rs:11:5
+ |
+LL | enum InCrate {
+ | -------
+...
+LL | C,
+ | ^ not covered
+ = note: the matched value is of type `InCrate`
+help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
+ |
+LL ~ InCrate::B => {}
+LL + C => todo!()
+ |
+
+error: aborting due to 5 previous errors
For more information about this error, try `rustc --explain E0004`.
--- /dev/null
+// aux-build:unstable.rs
+
+extern crate unstable;
+
+use unstable::UnstableStruct;
+
+fn main() {
+ let UnstableStruct { stable } = UnstableStruct::default();
+ //~^ pattern does not mention field `stable2` and inaccessible fields
+
+ let UnstableStruct { stable, stable2 } = UnstableStruct::default();
+ //~^ pattern requires `..` due to inaccessible fields
+
+ // OK: stable field is matched
+ let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+}
--- /dev/null
+error[E0027]: pattern does not mention field `stable2` and inaccessible fields
+ --> $DIR/stable-gated-fields.rs:8:9
+ |
+LL | let UnstableStruct { stable } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ missing field `stable2` and inaccessible fields
+ |
+help: include the missing field in the pattern and ignore the inaccessible fields
+ |
+LL | let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+ | ~~~~~~~~~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let UnstableStruct { stable, .. } = UnstableStruct::default();
+ | ~~~~~~
+
+error: pattern requires `..` due to inaccessible fields
+ --> $DIR/stable-gated-fields.rs:11:9
+ |
+LL | let UnstableStruct { stable, stable2 } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: ignore the inaccessible and unused fields
+ |
+LL | let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+ | ++++
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0027`.
extern crate unstable;
-use unstable::Foo;
+use unstable::UnstableEnum;
fn main() {
- match Foo::Stable {
- Foo::Stable => {}
+ match UnstableEnum::Stable {
+ UnstableEnum::Stable => {}
}
//~^^^ non-exhaustive patterns: `Stable2` and `_` not covered
- match Foo::Stable {
- Foo::Stable => {}
- Foo::Stable2 => {}
+ match UnstableEnum::Stable {
+ UnstableEnum::Stable => {}
+ UnstableEnum::Stable2 => {}
}
//~^^^^ non-exhaustive patterns: `_` not covered
}
error[E0004]: non-exhaustive patterns: `Stable2` and `_` not covered
--> $DIR/stable-gated-patterns.rs:8:11
|
-LL | match Foo::Stable {
- | ^^^^^^^^^^^ patterns `Stable2` and `_` not covered
+LL | match UnstableEnum::Stable {
+ | ^^^^^^^^^^^^^^^^^^^^ patterns `Stable2` and `_` not covered
|
-note: `Foo` defined here
+note: `UnstableEnum` defined here
--> $DIR/auxiliary/unstable.rs:9:5
|
-LL | / pub enum Foo {
+LL | / pub enum UnstableEnum {
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
LL | | Stable,
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
LL | | Unstable,
LL | | }
| |_-
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `UnstableEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern, a match arm with multiple or-patterns as shown, or multiple match arms
|
-LL ~ Foo::Stable => {}
+LL ~ UnstableEnum::Stable => {}
LL + Stable2 | _ => todo!()
|
error[E0004]: non-exhaustive patterns: `_` not covered
--> $DIR/stable-gated-patterns.rs:13:11
|
-LL | match Foo::Stable {
- | ^^^^^^^^^^^ pattern `_` not covered
+LL | match UnstableEnum::Stable {
+ | ^^^^^^^^^^^^^^^^^^^^ pattern `_` not covered
|
-note: `Foo` defined here
+note: `UnstableEnum` defined here
--> $DIR/auxiliary/unstable.rs:5:1
|
-LL | / pub enum Foo {
+LL | / pub enum UnstableEnum {
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
LL | | Stable,
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
LL | | Unstable,
LL | | }
| |_^
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `UnstableEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
-LL ~ Foo::Stable2 => {}
+LL ~ UnstableEnum::Stable2 => {}
LL + _ => todo!()
|
= note: the matched value is of type `Foo`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
-LL ~ Foo(2, b) => println!("{}", b)
+LL ~ Foo(2, b) => println!("{}", b),
LL + Foo(_, _) => todo!()
|
--- /dev/null
+#![feature(unstable_test_feature)]
+
+// aux-build:unstable.rs
+
+extern crate unstable;
+
+use unstable::UnstableStruct;
+
+fn main() {
+ let UnstableStruct { stable, stable2, } = UnstableStruct::default();
+ //~^ pattern does not mention field `unstable`
+
+ let UnstableStruct { stable, unstable, } = UnstableStruct::default();
+ //~^ pattern does not mention field `stable2`
+
+ // OK: stable field is matched
+ let UnstableStruct { stable, stable2, unstable } = UnstableStruct::default();
+}
--- /dev/null
+error[E0027]: pattern does not mention field `unstable`
+ --> $DIR/unstable-gated-fields.rs:10:9
+ |
+LL | let UnstableStruct { stable, stable2, } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing field `unstable`
+ |
+help: include the missing field in the pattern
+ |
+LL | let UnstableStruct { stable, stable2, unstable } = UnstableStruct::default();
+ | ~~~~~~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+ | ~~~~~~
+
+error[E0027]: pattern does not mention field `stable2`
+ --> $DIR/unstable-gated-fields.rs:13:9
+ |
+LL | let UnstableStruct { stable, unstable, } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing field `stable2`
+ |
+help: include the missing field in the pattern
+ |
+LL | let UnstableStruct { stable, unstable, stable2 } = UnstableStruct::default();
+ | ~~~~~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | let UnstableStruct { stable, unstable, .. } = UnstableStruct::default();
+ | ~~~~~~
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0027`.
extern crate unstable;
-use unstable::Foo;
+use unstable::UnstableEnum;
fn main() {
- match Foo::Stable {
- Foo::Stable => {}
- Foo::Stable2 => {}
+ match UnstableEnum::Stable {
+ UnstableEnum::Stable => {}
+ UnstableEnum::Stable2 => {}
}
//~^^^^ non-exhaustive patterns: `Unstable` not covered
// Ok: all variants are explicitly matched
- match Foo::Stable {
- Foo::Stable => {}
- Foo::Stable2 => {}
- Foo::Unstable => {}
+ match UnstableEnum::Stable {
+ UnstableEnum::Stable => {}
+ UnstableEnum::Stable2 => {}
+ UnstableEnum::Unstable => {}
}
}
error[E0004]: non-exhaustive patterns: `Unstable` not covered
--> $DIR/unstable-gated-patterns.rs:10:11
|
-LL | match Foo::Stable {
- | ^^^^^^^^^^^ pattern `Unstable` not covered
+LL | match UnstableEnum::Stable {
+ | ^^^^^^^^^^^^^^^^^^^^ pattern `Unstable` not covered
|
-note: `Foo` defined here
+note: `UnstableEnum` defined here
--> $DIR/auxiliary/unstable.rs:11:5
|
-LL | / pub enum Foo {
+LL | / pub enum UnstableEnum {
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
LL | | Stable,
LL | | #[stable(feature = "stable_test_feature", since = "1.0.0")]
| | ^^^^^^^^ not covered
LL | | }
| |_-
- = note: the matched value is of type `Foo`
+ = note: the matched value is of type `UnstableEnum`
help: ensure that all possible cases are being handled by adding a match arm with a wildcard pattern or an explicit pattern as shown
|
-LL ~ Foo::Stable2 => {}
+LL ~ UnstableEnum::Stable2 => {}
LL + Unstable => todo!()
|
--- /dev/null
+// aux-build:amputate-span.rs
+// run-rustfix
+// edition:2018
+// compile-flags: --extern amputate_span
+
+// This test has been crafted to ensure the following things:
+//
+// 1. There's a resolution error that prompts the compiler to suggest
+// adding a `use` item.
+//
+// 2. There are no `use` or `extern crate` items in the source
+// code. In fact, there is only one item, the `fn main`
+// declaration.
+//
+// 3. The single `fn main` declaration has an attribute attached to it
+// that just deletes the first token from the given item.
+//
+// You need all of these conditions to hold in order to replicate the
+// scenario that yielded issue 87613, where the compiler's suggestion
+// looks like:
+//
+// ```
+// help: consider importing this struct
+// |
+// 47 | hey */ async use std::process::Command;
+// | ++++++++++++++++++++++++++
+// ```
+//
+// The first condition is necessary to force the compiler issue a
+// suggestion. The second condition is necessary to force the
+// suggestion to be issued at a span associated with the sole
+// `fn`-item of this crate. The third condition is necessary in order
+// to yield the weird state where the associated span of the `fn`-item
+// does not actually cover all of the original source code of the
+// `fn`-item (which is why we are calling it an "amputated" span
+// here).
+//
+// Note that satisfying conditions 2 and 3 requires the use of the
+// `--extern` compile flag.
+//
+// You might ask yourself: What code would do such a thing? The
+// answer is: the #[tokio::main] attribute does *exactly* this (as
+// well as injecting some other code into the `fn main` that it
+// constructs).
+
+use std::process::Command;
+
+#[amputate_span::drop_first_token]
+/* what the
+hey */ async fn main() {
+ Command::new("git"); //~ ERROR [E0433]
+}
+
+// (The /* ... */ comment in the above is not part of the original
+// bug. It is just meant to illustrate one particular facet of the
+// original non-ideal behavior, where we were transcribing the
+// trailing comment as part of the emitted suggestion, for better or
+// for worse.)
+
+#[allow(dead_code)]
+mod inner {
+ use std::process::Command;
+
+#[amputate_span::drop_first_token]
+ /* another interesting
+ case */ async fn foo() {
+ Command::new("git"); //~ ERROR [E0433]
+ }
+}
--- /dev/null
+// aux-build:amputate-span.rs
+// run-rustfix
+// edition:2018
+// compile-flags: --extern amputate_span
+
+// This test has been crafted to ensure the following things:
+//
+// 1. There's a resolution error that prompts the compiler to suggest
+// adding a `use` item.
+//
+// 2. There are no `use` or `extern crate` items in the source
+// code. In fact, there is only one item, the `fn main`
+// declaration.
+//
+// 3. The single `fn main` declaration has an attribute attached to it
+// that just deletes the first token from the given item.
+//
+// You need all of these conditions to hold in order to replicate the
+// scenario that yielded issue 87613, where the compiler's suggestion
+// looks like:
+//
+// ```
+// help: consider importing this struct
+// |
+// 47 | hey */ async use std::process::Command;
+// | ++++++++++++++++++++++++++
+// ```
+//
+// The first condition is necessary to force the compiler issue a
+// suggestion. The second condition is necessary to force the
+// suggestion to be issued at a span associated with the sole
+// `fn`-item of this crate. The third condition is necessary in order
+// to yield the weird state where the associated span of the `fn`-item
+// does not actually cover all of the original source code of the
+// `fn`-item (which is why we are calling it an "amputated" span
+// here).
+//
+// Note that satisfying conditions 2 and 3 requires the use of the
+// `--extern` compile flag.
+//
+// You might ask yourself: What code would do such a thing? The
+// answer is: the #[tokio::main] attribute does *exactly* this (as
+// well as injecting some other code into the `fn main` that it
+// constructs).
+
+#[amputate_span::drop_first_token]
+/* what the
+hey */ async fn main() {
+ Command::new("git"); //~ ERROR [E0433]
+}
+
+// (The /* ... */ comment in the above is not part of the original
+// bug. It is just meant to illustrate one particular facet of the
+// original non-ideal behavior, where we were transcribing the
+// trailing comment as part of the emitted suggestion, for better or
+// for worse.)
+
+#[allow(dead_code)]
+mod inner {
+ #[amputate_span::drop_first_token]
+ /* another interesting
+ case */ async fn foo() {
+ Command::new("git"); //~ ERROR [E0433]
+ }
+}
--- /dev/null
+error[E0433]: failed to resolve: use of undeclared type `Command`
+ --> $DIR/amputate-span.rs:49:5
+ |
+LL | Command::new("git");
+ | ^^^^^^^ not found in this scope
+ |
+help: consider importing this struct
+ |
+LL | use std::process::Command;
+ |
+
+error[E0433]: failed to resolve: use of undeclared type `Command`
+ --> $DIR/amputate-span.rs:63:9
+ |
+LL | Command::new("git");
+ | ^^^^^^^ not found in this scope
+ |
+help: consider importing this struct
+ |
+LL | use std::process::Command;
+ |
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0433`.
--- /dev/null
+// force-host
+// no-prefer-dynamic
+
+#![crate_type = "proc-macro"]
+
+extern crate proc_macro;
+
+use proc_macro::TokenStream;
+
+#[proc_macro_attribute]
+pub fn drop_first_token(attr: TokenStream, input: TokenStream) -> TokenStream {
+ assert!(attr.is_empty());
+ input.into_iter().skip(1).collect()
+}
spacing: Alone,
span: $DIR/capture-macro-rules-invoke.rs:14:54: 14:55 (#8),
},
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "my_name",
- span: $DIR/capture-macro-rules-invoke.rs:42:13: 42:20 (#0),
- },
- ],
- span: $DIR/capture-macro-rules-invoke.rs:14:56: 14:62 (#8),
+ Ident {
+ ident: "my_name",
+ span: $DIR/capture-macro-rules-invoke.rs:42:13: 42:20 (#0),
},
Punct {
ch: ',',
LL | #[derive(generate_mod::CheckDerive)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
|
+ = note: `#[deny(proc_macro_derive_resolution_fallback)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83583 <https://github.com/rust-lang/rust/issues/83583>
= note: this error originates in the derive macro `generate_mod::CheckDerive` (in Nightly builds, run with -Z macro-backtrace for more info)
LL | #[derive(generate_mod::CheckDerive)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
|
+ = note: `#[deny(proc_macro_derive_resolution_fallback)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83583 <https://github.com/rust-lang/rust/issues/83583>
= note: this error originates in the derive macro `generate_mod::CheckDerive` (in Nightly builds, run with -Z macro-backtrace for more info)
LL | #[derive(generate_mod::CheckDerive)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
|
+ = note: `#[deny(proc_macro_derive_resolution_fallback)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83583 <https://github.com/rust-lang/rust/issues/83583>
= note: this error originates in the derive macro `generate_mod::CheckDerive` (in Nightly builds, run with -Z macro-backtrace for more info)
LL | #[derive(generate_mod::CheckDeriveLint)] // OK, lint is suppressed
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ names from parent modules are not accessible without an explicit import
|
+note: the lint level is defined here
+ --> $DIR/generate-mod.rs:30:10
+ |
+LL | #[derive(generate_mod::CheckDeriveLint)] // OK, lint is suppressed
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83583 <https://github.com/rust-lang/rust/issues/83583>
= note: this warning originates in the derive macro `generate_mod::CheckDeriveLint` (in Nightly builds, run with -Z macro-backtrace for more info)
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! tuple_from_req {
- ($T:ident) => {
- #[my_macro] struct Three($T);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! tuple_from_req {
- ($T:ident) => {
- #[my_macro] struct Three($T);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! tuple_from_req {
- ($T:ident) => {
- #[my_macro] struct Four($T);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! tuple_from_req {
- ($T:ident) => {
- #[my_macro] struct Four($T);
- }
-}
+++ /dev/null
-// force-host
-// no-prefer-dynamic
-
-#![crate_type = "proc-macro"]
-#![crate_name = "group_compat_hack"]
-
-// This file has an unusual name in order to trigger the back-compat
-// code in the compiler
-
-extern crate proc_macro;
-use proc_macro::TokenStream;
-
-#[proc_macro_attribute]
-pub fn my_macro(_attr: TokenStream, input: TokenStream) -> TokenStream {
- println!("Called proc_macro_hack with {:?}", input);
- input
-}
+++ /dev/null
-// aux-build:pin-project-internal-0.4.0.rs
-// compile-flags: -Z span-debug
-
-#![no_std] // Don't load unnecessary hygiene information from std
-extern crate std;
-
-#[macro_use] extern crate group_compat_hack;
-
-// Tests the backwards compatibility hack added for certain macros
-// When an attribute macro named `proc_macro_hack` or `wasm_bindgen`
-// has an `NtIdent` named `$name`, we pass a plain `Ident` token in
-// place of a `None`-delimited group. This allows us to maintain
-// backwards compatibility for older versions of these crates.
-
-mod no_version {
- include!("js-sys/src/lib.rs");
- include!("time-macros-impl/src/lib.rs");
-
- macro_rules! other {
- ($name:ident) => {
- #[my_macro] struct Three($name);
- }
- }
-
- struct Foo;
- impl_macros!(Foo); //~ ERROR using an old version
- //~| WARN this was previously
- arrays!(Foo);
- other!(Foo);
-}
-
-mod with_version {
- include!("js-sys-0.3.17/src/lib.rs");
- include!("time-macros-impl-0.1.0/src/lib.rs");
-
- macro_rules! other {
- ($name:ident) => {
- #[my_macro] struct Three($name);
- }
- }
-
- struct Foo;
- impl_macros!(Foo); //~ ERROR using an old version
- //~| WARN this was previously
- arrays!(Foo); //~ ERROR using an old version
- //~| WARN this was previously
- other!(Foo);
-}
-
-mod actix_web_test {
- include!("actix-web/src/extract.rs");
-
- struct Foo;
- tuple_from_req!(Foo); //~ ERROR using an old version
- //~| WARN this was previously
-}
-
-mod actix_web_version_test {
- include!("actix-web-2.0.0/src/extract.rs");
-
- struct Foo;
- tuple_from_req!(Foo); //~ ERROR using an old version
- //~| WARN this was previously
-}
-
-mod actori_web_test {
- include!("actori-web/src/extract.rs");
-
- struct Foo;
- tuple_from_req!(Foo);
-}
-
-mod actori_web_version_test {
- include!("actori-web-2.0.0/src/extract.rs");
-
- struct Foo;
- tuple_from_req!(Foo);
-}
-
-mod with_good_js_sys_version {
- include!("js-sys-0.3.40/src/lib.rs");
- struct Foo;
- arrays!(Foo);
-}
-
-
-fn main() {}
+++ /dev/null
-error: using an old version of `time-macros-impl`
- --> $DIR/time-macros-impl/src/lib.rs:5:32
- |
-LL | #[my_macro] struct One($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:26:5
- |
-LL | impl_macros!(Foo);
- | ----------------- in this macro invocation
- |
- = note: `#[deny(proc_macro_back_compat)]` on by default
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the `time-macros-impl` crate will stop compiling in futures version of Rust. Please update to the latest version of the `time` crate to avoid breakage
- = note: this error originates in the macro `impl_macros` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: using an old version of `time-macros-impl`
- --> $DIR/time-macros-impl-0.1.0/src/lib.rs:5:32
- |
-LL | #[my_macro] struct One($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:43:5
- |
-LL | impl_macros!(Foo);
- | ----------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the `time-macros-impl` crate will stop compiling in futures version of Rust. Please update to the latest version of the `time` crate to avoid breakage
- = note: this error originates in the macro `impl_macros` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: using an old version of `js-sys`
- --> $DIR/js-sys-0.3.17/src/lib.rs:5:32
- |
-LL | #[my_macro] struct Two($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:45:5
- |
-LL | arrays!(Foo);
- | ------------ in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: older versions of the `js-sys` crate will stop compiling in future versions of Rust; please update to `js-sys` v0.3.40 or above
- = note: this error originates in the macro `arrays` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: using an old version of `actix-web`
- --> $DIR/actix-web/src/extract.rs:5:34
- |
-LL | #[my_macro] struct Three($T);
- | ^^
- |
- ::: $DIR/group-compat-hack.rs:54:5
- |
-LL | tuple_from_req!(Foo);
- | -------------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the version of `actix-web` you are using might stop compiling in future versions of Rust; please update to the latest version of the `actix-web` crate to avoid breakage
- = note: this error originates in the macro `tuple_from_req` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: using an old version of `actix-web`
- --> $DIR/actix-web-2.0.0/src/extract.rs:5:34
- |
-LL | #[my_macro] struct Three($T);
- | ^^
- |
- ::: $DIR/group-compat-hack.rs:62:5
- |
-LL | tuple_from_req!(Foo);
- | -------------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the version of `actix-web` you are using might stop compiling in future versions of Rust; please update to the latest version of the `actix-web` crate to avoid breakage
- = note: this error originates in the macro `tuple_from_req` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-error: aborting due to 5 previous errors
-
-Future incompatibility report: Future breakage diagnostic:
-error: using an old version of `time-macros-impl`
- --> $DIR/time-macros-impl/src/lib.rs:5:32
- |
-LL | #[my_macro] struct One($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:26:5
- |
-LL | impl_macros!(Foo);
- | ----------------- in this macro invocation
- |
- = note: `#[deny(proc_macro_back_compat)]` on by default
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the `time-macros-impl` crate will stop compiling in futures version of Rust. Please update to the latest version of the `time` crate to avoid breakage
- = note: this error originates in the macro `impl_macros` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-Future breakage diagnostic:
-error: using an old version of `time-macros-impl`
- --> $DIR/time-macros-impl-0.1.0/src/lib.rs:5:32
- |
-LL | #[my_macro] struct One($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:43:5
- |
-LL | impl_macros!(Foo);
- | ----------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the `time-macros-impl` crate will stop compiling in futures version of Rust. Please update to the latest version of the `time` crate to avoid breakage
- = note: this error originates in the macro `impl_macros` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-Future breakage diagnostic:
-error: using an old version of `js-sys`
- --> $DIR/js-sys-0.3.17/src/lib.rs:5:32
- |
-LL | #[my_macro] struct Two($name);
- | ^^^^^
- |
- ::: $DIR/group-compat-hack.rs:45:5
- |
-LL | arrays!(Foo);
- | ------------ in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: older versions of the `js-sys` crate will stop compiling in future versions of Rust; please update to `js-sys` v0.3.40 or above
- = note: this error originates in the macro `arrays` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-Future breakage diagnostic:
-error: using an old version of `actix-web`
- --> $DIR/actix-web/src/extract.rs:5:34
- |
-LL | #[my_macro] struct Three($T);
- | ^^
- |
- ::: $DIR/group-compat-hack.rs:54:5
- |
-LL | tuple_from_req!(Foo);
- | -------------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the version of `actix-web` you are using might stop compiling in future versions of Rust; please update to the latest version of the `actix-web` crate to avoid breakage
- = note: this error originates in the macro `tuple_from_req` (in Nightly builds, run with -Z macro-backtrace for more info)
-
-Future breakage diagnostic:
-error: using an old version of `actix-web`
- --> $DIR/actix-web-2.0.0/src/extract.rs:5:34
- |
-LL | #[my_macro] struct Three($T);
- | ^^
- |
- ::: $DIR/group-compat-hack.rs:62:5
- |
-LL | tuple_from_req!(Foo);
- | -------------------- in this macro invocation
- |
- = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
- = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
- = note: the version of `actix-web` you are using might stop compiling in future versions of Rust; please update to the latest version of the `actix-web` crate to avoid breakage
- = note: this error originates in the macro `tuple_from_req` (in Nightly builds, run with -Z macro-backtrace for more info)
-
+++ /dev/null
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/time-macros-impl/src/lib.rs:5:21: 5:27 (#6) }, Ident { ident: "One", span: $DIR/time-macros-impl/src/lib.rs:5:28: 5:31 (#6) }, Group { delimiter: Parenthesis, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:26:18: 26:21 (#0) }], span: $DIR/time-macros-impl/src/lib.rs:5:31: 5:38 (#6) }, Punct { ch: ';', spacing: Alone, span: $DIR/time-macros-impl/src/lib.rs:5:38: 5:39 (#6) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/js-sys/src/lib.rs:5:21: 5:27 (#10) }, Ident { ident: "Two", span: $DIR/js-sys/src/lib.rs:5:28: 5:31 (#10) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:28:13: 28:16 (#0) }], span: $DIR/js-sys/src/lib.rs:5:32: 5:37 (#10) }], span: $DIR/js-sys/src/lib.rs:5:31: 5:38 (#10) }, Punct { ch: ';', spacing: Alone, span: $DIR/js-sys/src/lib.rs:5:38: 5:39 (#10) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/group-compat-hack.rs:21:25: 21:31 (#14) }, Ident { ident: "Three", span: $DIR/group-compat-hack.rs:21:32: 21:37 (#14) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:29:12: 29:15 (#0) }], span: $DIR/group-compat-hack.rs:21:38: 21:43 (#14) }], span: $DIR/group-compat-hack.rs:21:37: 21:44 (#14) }, Punct { ch: ';', spacing: Alone, span: $DIR/group-compat-hack.rs:21:44: 21:45 (#14) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/time-macros-impl-0.1.0/src/lib.rs:5:21: 5:27 (#20) }, Ident { ident: "One", span: $DIR/time-macros-impl-0.1.0/src/lib.rs:5:28: 5:31 (#20) }, Group { delimiter: Parenthesis, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:43:18: 43:21 (#0) }], span: $DIR/time-macros-impl-0.1.0/src/lib.rs:5:31: 5:38 (#20) }, Punct { ch: ';', spacing: Alone, span: $DIR/time-macros-impl-0.1.0/src/lib.rs:5:38: 5:39 (#20) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/js-sys-0.3.17/src/lib.rs:5:21: 5:27 (#24) }, Ident { ident: "Two", span: $DIR/js-sys-0.3.17/src/lib.rs:5:28: 5:31 (#24) }, Group { delimiter: Parenthesis, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:45:13: 45:16 (#0) }], span: $DIR/js-sys-0.3.17/src/lib.rs:5:31: 5:38 (#24) }, Punct { ch: ';', spacing: Alone, span: $DIR/js-sys-0.3.17/src/lib.rs:5:38: 5:39 (#24) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/group-compat-hack.rs:38:25: 38:31 (#28) }, Ident { ident: "Three", span: $DIR/group-compat-hack.rs:38:32: 38:37 (#28) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:47:12: 47:15 (#0) }], span: $DIR/group-compat-hack.rs:38:38: 38:43 (#28) }], span: $DIR/group-compat-hack.rs:38:37: 38:44 (#28) }, Punct { ch: ';', spacing: Alone, span: $DIR/group-compat-hack.rs:38:44: 38:45 (#28) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/actix-web/src/extract.rs:5:21: 5:27 (#33) }, Ident { ident: "Three", span: $DIR/actix-web/src/extract.rs:5:28: 5:33 (#33) }, Group { delimiter: Parenthesis, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:54:21: 54:24 (#0) }], span: $DIR/actix-web/src/extract.rs:5:33: 5:37 (#33) }, Punct { ch: ';', spacing: Alone, span: $DIR/actix-web/src/extract.rs:5:37: 5:38 (#33) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/actix-web-2.0.0/src/extract.rs:5:21: 5:27 (#38) }, Ident { ident: "Three", span: $DIR/actix-web-2.0.0/src/extract.rs:5:28: 5:33 (#38) }, Group { delimiter: Parenthesis, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:62:21: 62:24 (#0) }], span: $DIR/actix-web-2.0.0/src/extract.rs:5:33: 5:37 (#38) }, Punct { ch: ';', spacing: Alone, span: $DIR/actix-web-2.0.0/src/extract.rs:5:37: 5:38 (#38) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/actori-web/src/extract.rs:5:21: 5:27 (#43) }, Ident { ident: "Four", span: $DIR/actori-web/src/extract.rs:5:28: 5:32 (#43) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:70:21: 70:24 (#0) }], span: $DIR/actori-web/src/extract.rs:5:33: 5:35 (#43) }], span: $DIR/actori-web/src/extract.rs:5:32: 5:36 (#43) }, Punct { ch: ';', spacing: Alone, span: $DIR/actori-web/src/extract.rs:5:36: 5:37 (#43) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/actori-web-2.0.0/src/extract.rs:5:21: 5:27 (#48) }, Ident { ident: "Four", span: $DIR/actori-web-2.0.0/src/extract.rs:5:28: 5:32 (#48) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:77:21: 77:24 (#0) }], span: $DIR/actori-web-2.0.0/src/extract.rs:5:33: 5:35 (#48) }], span: $DIR/actori-web-2.0.0/src/extract.rs:5:32: 5:36 (#48) }, Punct { ch: ';', spacing: Alone, span: $DIR/actori-web-2.0.0/src/extract.rs:5:36: 5:37 (#48) }]
-Called proc_macro_hack with TokenStream [Ident { ident: "struct", span: $DIR/js-sys-0.3.40/src/lib.rs:5:21: 5:27 (#53) }, Ident { ident: "Two", span: $DIR/js-sys-0.3.40/src/lib.rs:5:28: 5:31 (#53) }, Group { delimiter: Parenthesis, stream: TokenStream [Group { delimiter: None, stream: TokenStream [Ident { ident: "Foo", span: $DIR/group-compat-hack.rs:83:13: 83:16 (#0) }], span: $DIR/js-sys-0.3.40/src/lib.rs:5:32: 5:37 (#53) }], span: $DIR/js-sys-0.3.40/src/lib.rs:5:31: 5:38 (#53) }, Punct { ch: ';', spacing: Alone, span: $DIR/js-sys-0.3.40/src/lib.rs:5:38: 5:39 (#53) }]
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! arrays {
- ($name:ident) => {
- #[my_macro] struct Two($name);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! arrays {
- ($name:ident) => {
- #[my_macro] struct Two($name);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! arrays {
- ($name:ident) => {
- #[my_macro] struct Two($name);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! impl_macros {
- ($name:ident) => {
- #[my_macro] struct One($name);
- }
-}
+++ /dev/null
-// ignore-test this is not a test
-
-macro_rules! impl_macros {
- ($name:ident) => {
- #[my_macro] struct One($name);
- }
-}
PRINT-BANG INPUT (DISPLAY): A
PRINT-BANG INPUT (DEBUG): TokenStream [
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "A",
- span: #0 bytes(503..504),
- },
- ],
- span: #4 bytes(370..372),
+ Ident {
+ ident: "A",
+ span: #0 bytes(503..504),
},
]
PRINT-ATTR INPUT (DISPLAY): const A : u8 = 0 ;
ident: "const",
span: #4 bytes(416..421),
},
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "A",
- span: #0 bytes(503..504),
- },
- ],
- span: #4 bytes(422..424),
+ Ident {
+ ident: "A",
+ span: #0 bytes(503..504),
},
Punct {
ch: ':',
ident: "struct",
span: #4 bytes(468..474),
},
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "A",
- span: #0 bytes(503..504),
- },
- ],
- span: #4 bytes(475..477),
+ Ident {
+ ident: "A",
+ span: #0 bytes(503..504),
},
Group {
delimiter: Brace,
LL | enum ProceduralMasqueradeDummyType {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
+ = note: `#[deny(proc_macro_back_compat)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
= note: The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. Versions of this crate below 0.1.7 will eventually stop compiling.
LL | enum ProceduralMasqueradeDummyType {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
+ = note: `#[deny(proc_macro_back_compat)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
= note: The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. Versions of this crate below 0.1.7 will eventually stop compiling.
LL | enum ProceduralMasqueradeDummyType {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
+ = note: `#[deny(proc_macro_back_compat)]` on by default
= warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
= note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
= note: The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. Versions of this crate below 0.1.7 will eventually stop compiling.
]
PRINT-BANG INPUT (DISPLAY): SecondStruct
PRINT-BANG INPUT (DEBUG): TokenStream [
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "SecondStruct",
- span: $DIR/nested-macro-rules.rs:21:38: 21:50 (#16),
- },
- ],
- span: $DIR/auxiliary/nested-macro-rules.rs:9:30: 9:35 (#15),
+ Ident {
+ ident: "SecondStruct",
+ span: $DIR/nested-macro-rules.rs:21:38: 21:50 (#16),
},
]
PRINT-ATTR INPUT (DISPLAY): struct SecondAttrStruct {}
ident: "struct",
span: $DIR/auxiliary/nested-macro-rules.rs:10:32: 10:38 (#15),
},
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "SecondAttrStruct",
- span: $DIR/nested-macro-rules.rs:21:52: 21:68 (#16),
- },
- ],
- span: $DIR/auxiliary/nested-macro-rules.rs:10:39: 10:56 (#15),
+ Ident {
+ ident: "SecondAttrStruct",
+ span: $DIR/nested-macro-rules.rs:21:52: 21:68 (#16),
},
Group {
delimiter: Brace,
First recollected: TokenStream [
- Group {
- delimiter: None,
- stream: TokenStream [
- Ident {
- ident: "pub",
- span: $DIR/nonterminal-recollect-attr.rs:20:11: 20:14 (#0),
- },
- ],
- span: $DIR/nonterminal-recollect-attr.rs:14:9: 14:11 (#4),
+ Ident {
+ ident: "pub",
+ span: $DIR/nonterminal-recollect-attr.rs:20:11: 20:14 (#0),
},
Ident {
ident: "struct",
--> $DIR/span-preservation.rs:39:5
|
LL | extern "C" fn bar() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> i32`
LL | 0
| ^ expected `()`, found integer
--> $DIR/span-preservation.rs:44:5
|
LL | extern "C" fn baz() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> i32`
LL | 0
| ^ expected `()`, found integer
--> $DIR/span-preservation.rs:49:5
|
LL | extern "Rust" fn rust_abi() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> i32`
LL | 0
| ^ expected `()`, found integer
--> $DIR/span-preservation.rs:54:5
|
LL | extern "\x43" fn c_abi_escaped() {
- | - possibly return type missing here?
+ | - help: try adding a return type: `-> i32`
LL | 0
| ^ expected `()`, found integer
// run-pass
// ignore-emscripten no processes
// ignore-sgx no processes
+// needs-unwind
fn check_for_no_backtrace(test: std::process::Output) {
assert!(!test.status.success());
//~| NOTE ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
//~| NOTE ...which requires computing layout of `core::option::Option<S>`...
//~| NOTE ...which again requires computing layout of `S`, completing the cycle
-//~| NOTE cycle used when computing layout of `core::option::Option<S>`
// build-fail
struct S(Option<<S as Mirror>::It>);
fn main() {
+ //~^ NOTE cycle used when elaborating drops for `main`
let _s = S(None);
}
= note: ...which requires computing layout of `core::option::Option<<S as Mirror>::It>`...
= note: ...which requires computing layout of `core::option::Option<S>`...
= note: ...which again requires computing layout of `S`, completing the cycle
- = note: cycle used when computing layout of `core::option::Option<S>`
+note: cycle used when elaborating drops for `main`
+ --> $DIR/issue-26548-recursion-via-normalize.rs:16:1
+ |
+LL | fn main() {
+ | ^^^^^^^^^
error: aborting due to previous error
+++ /dev/null
-// Various examples of structs whose fields are not well-formed.
-
-#![allow(dead_code)]
-
-trait Dummy<'a> {
- type Out;
-}
-impl<'a, T> Dummy<'a> for T
-where
- T: 'a,
-{
- type Out = ();
-}
-type RequireOutlives<'a, T> = <T as Dummy<'a>>::Out;
-
-enum Ref1<'a, T> {
- Ref1Variant1(RequireOutlives<'a, T>), //~ ERROR the parameter type `T` may not live long enough
-}
-
-enum Ref2<'a, T> {
- Ref2Variant1,
- Ref2Variant2(isize, RequireOutlives<'a, T>), //~ ERROR the parameter type `T` may not live long enough
-}
-
-enum RefOk<'a, T: 'a> {
- RefOkVariant1(&'a T),
-}
-
-// This is now well formed. RFC 2093
-enum RefIndirect<'a, T> {
- RefIndirectVariant1(isize, RefOk<'a, T>),
-}
-
-enum RefDouble<'a, 'b, T> {
- RefDoubleVariant1(&'a RequireOutlives<'b, T>),
- //~^ the parameter type `T` may not live long enough [E0309]
-}
-
-fn main() {}
+++ /dev/null
-error[E0309]: the parameter type `T` may not live long enough
- --> $DIR/regions-enum-not-wf.rs:17:18
- |
-LL | enum Ref1<'a, T> {
- | - help: consider adding an explicit lifetime bound...: `T: 'a`
-LL | Ref1Variant1(RequireOutlives<'a, T>),
- | ^^^^^^^^^^^^^^^^^^^^^^ ...so that the type `T` will meet its required lifetime bounds
-
-error[E0309]: the parameter type `T` may not live long enough
- --> $DIR/regions-enum-not-wf.rs:22:25
- |
-LL | enum Ref2<'a, T> {
- | - help: consider adding an explicit lifetime bound...: `T: 'a`
-LL | Ref2Variant1,
-LL | Ref2Variant2(isize, RequireOutlives<'a, T>),
- | ^^^^^^^^^^^^^^^^^^^^^^ ...so that the type `T` will meet its required lifetime bounds
-
-error[E0309]: the parameter type `T` may not live long enough
- --> $DIR/regions-enum-not-wf.rs:35:23
- |
-LL | enum RefDouble<'a, 'b, T> {
- | - help: consider adding an explicit lifetime bound...: `T: 'b`
-LL | RefDoubleVariant1(&'a RequireOutlives<'b, T>),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^ ...so that the type `T` will meet its required lifetime bounds
-
-error: aborting due to 3 previous errors
-
-For more information about this error, try `rustc --explain E0309`.
}
mod foo {
- // FIXME: UsePlacementFinder is broken because active attributes are
- // removed, and thus the `derive` attribute here is not in the AST.
- // An inert attribute should work, though.
- // #[derive(Debug)]
use std::path::Path;
-#[allow(warnings)]
+#[derive(Debug)]
pub struct Foo;
// test whether the use suggestion isn't
}
mod foo {
- // FIXME: UsePlacementFinder is broken because active attributes are
- // removed, and thus the `derive` attribute here is not in the AST.
- // An inert attribute should work, though.
- // #[derive(Debug)]
- #[allow(warnings)]
+ #[derive(Debug)]
pub struct Foo;
// test whether the use suggestion isn't
error[E0412]: cannot find type `Path` in this scope
- --> $DIR/use_suggestion_placement.rs:22:16
+ --> $DIR/use_suggestion_placement.rs:18:16
|
LL | type Bar = Path;
| ^^^^ not found in this scope
|
error[E0425]: cannot find value `A` in this scope
- --> $DIR/use_suggestion_placement.rs:27:13
+ --> $DIR/use_suggestion_placement.rs:23:13
|
LL | let _ = A;
| ^ not found in this scope
|
error[E0412]: cannot find type `HashMap` in this scope
- --> $DIR/use_suggestion_placement.rs:32:23
+ --> $DIR/use_suggestion_placement.rs:28:23
|
LL | type Dict<K, V> = HashMap<K, V>;
| ^^^^^^^ not found in this scope
error[E0308]: mismatched types
--> $DIR/return-type.rs:10:5
|
-LL | fn bar() {
- | - possibly return type missing here?
LL | foo(4 as usize)
- | ^^^^^^^^^^^^^^^- help: consider using a semicolon here: `;`
- | |
- | expected `()`, found struct `S`
+ | ^^^^^^^^^^^^^^^ expected `()`, found struct `S`
|
= note: expected unit type `()`
found struct `S<usize>`
+help: consider using a semicolon here
+ |
+LL | foo(4 as usize);
+ | +
+help: try adding a return type
+ |
+LL | fn bar() -> S<usize> {
+ | +++++++++++
error: aborting due to previous error
Self::Unstable
}
}
+
+#[derive(Default)]
+#[stable(feature = "stable_test_feature", since = "1.0.0")]
+#[non_exhaustive]
+pub struct UnstableStruct {
+ #[stable(feature = "stable_test_feature", since = "1.0.0")]
+ pub stable: bool,
+ #[stable(feature = "stable_test_feature", since = "1.0.0")]
+ pub stable2: usize,
+ #[unstable(feature = "unstable_test_feature", issue = "none")]
+ pub unstable: u8,
+}
+
+#[stable(feature = "stable_test_feature", since = "1.0.0")]
+#[non_exhaustive]
+pub struct OnlyUnstableStruct {
+ #[unstable(feature = "unstable_test_feature", issue = "none")]
+ pub unstable: u8,
+ #[unstable(feature = "unstable_test_feature", issue = "none")]
+ pub unstable2: bool,
+}
+
+impl OnlyUnstableStruct {
+ #[stable(feature = "stable_test_feature", since = "1.0.0")]
+ pub fn new() -> Self {
+ Self {
+ unstable: 0,
+ unstable2: false,
+ }
+ }
+}
EmptyNonExhaustiveEnum, NestedNonExhaustive, NonExhaustiveEnum, NonExhaustiveSingleVariant,
VariantNonExhaustive,
};
-use unstable::{UnstableEnum, OnlyUnstableEnum};
+use unstable::{UnstableEnum, OnlyUnstableEnum, UnstableStruct, OnlyUnstableStruct};
use structs::{FunctionalRecord, MixedVisFields, NestedStruct, NormalStruct};
#[non_exhaustive]
}
//~^^ some variants are not matched explicitly
+ // Ok: the feature is on and all variants are matched
#[deny(non_exhaustive_omitted_patterns)]
match UnstableEnum::Stable {
UnstableEnum::Stable => {}
_ => {}
}
//~^^ some variants are not matched explicitly
+
+ #[warn(non_exhaustive_omitted_patterns)]
+ let OnlyUnstableStruct { unstable, .. } = OnlyUnstableStruct::new();
+ //~^ some fields are not explicitly listed
+
+ // OK: both unstable fields are matched with feature on
+ #[warn(non_exhaustive_omitted_patterns)]
+ let OnlyUnstableStruct { unstable, unstable2, .. } = OnlyUnstableStruct::new();
+
+ #[warn(non_exhaustive_omitted_patterns)]
+ let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+ //~^ some fields are not explicitly listed
+
+ // OK: both unstable and stable fields are matched with feature on
+ #[warn(non_exhaustive_omitted_patterns)]
+ let UnstableStruct { stable, stable2, unstable, .. } = UnstableStruct::default();
}
= help: ensure that all fields are mentioned explicitly by adding the suggested fields
= note: the pattern is of type `NestedStruct` and the `non_exhaustive_omitted_patterns` attribute was found
+warning: some fields are not explicitly listed
+ --> $DIR/omitted-patterns.rs:173:9
+ |
+LL | let OnlyUnstableStruct { unstable, .. } = OnlyUnstableStruct::new();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ field `unstable2` not listed
+ |
+note: the lint level is defined here
+ --> $DIR/omitted-patterns.rs:172:12
+ |
+LL | #[warn(non_exhaustive_omitted_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: ensure that all fields are mentioned explicitly by adding the suggested fields
+ = note: the pattern is of type `OnlyUnstableStruct` and the `non_exhaustive_omitted_patterns` attribute was found
+
+warning: some fields are not explicitly listed
+ --> $DIR/omitted-patterns.rs:181:9
+ |
+LL | let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ field `unstable` not listed
+ |
+note: the lint level is defined here
+ --> $DIR/omitted-patterns.rs:180:12
+ |
+LL | #[warn(non_exhaustive_omitted_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: ensure that all fields are mentioned explicitly by adding the suggested fields
+ = note: the pattern is of type `UnstableStruct` and the `non_exhaustive_omitted_patterns` attribute was found
+
error: some variants are not matched explicitly
--> $DIR/omitted-patterns.rs:58:9
|
= note: the matched value is of type `UnstableEnum` and the `non_exhaustive_omitted_patterns` attribute was found
error: some variants are not matched explicitly
- --> $DIR/omitted-patterns.rs:167:9
+ --> $DIR/omitted-patterns.rs:168:9
|
LL | _ => {}
| ^ pattern `Unstable2` not covered
|
note: the lint level is defined here
- --> $DIR/omitted-patterns.rs:164:12
+ --> $DIR/omitted-patterns.rs:165:12
|
LL | #[deny(non_exhaustive_omitted_patterns)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= help: ensure that all variants are matched explicitly by adding the suggested match arms
= note: the matched value is of type `OnlyUnstableEnum` and the `non_exhaustive_omitted_patterns` attribute was found
-error: aborting due to 8 previous errors; 4 warnings emitted
+error: aborting due to 8 previous errors; 6 warnings emitted
// aux-build:unstable.rs
extern crate unstable;
-use unstable::{UnstableEnum, OnlyUnstableEnum};
+use unstable::{UnstableEnum, OnlyUnstableEnum, UnstableStruct, OnlyUnstableStruct};
fn main() {
// OK: this matches all the stable variants
match OnlyUnstableEnum::new() {
_ => {}
}
+
+ // Ok: Same as the above enum (no fields can be matched on)
+ #[warn(non_exhaustive_omitted_patterns)]
+ let OnlyUnstableStruct { .. } = OnlyUnstableStruct::new();
+
+ #[warn(non_exhaustive_omitted_patterns)]
+ let UnstableStruct { stable, .. } = UnstableStruct::default();
+ //~^ some fields are not explicitly listed
+
+ // OK: stable field is matched
+ #[warn(non_exhaustive_omitted_patterns)]
+ let UnstableStruct { stable, stable2, .. } = UnstableStruct::default();
}
+warning: some fields are not explicitly listed
+ --> $DIR/stable-omitted-patterns.rs:39:9
+ |
+LL | let UnstableStruct { stable, .. } = UnstableStruct::default();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ field `stable2` not listed
+ |
+note: the lint level is defined here
+ --> $DIR/stable-omitted-patterns.rs:38:12
+ |
+LL | #[warn(non_exhaustive_omitted_patterns)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: ensure that all fields are mentioned explicitly by adding the suggested fields
+ = note: the pattern is of type `UnstableStruct` and the `non_exhaustive_omitted_patterns` attribute was found
+
error: some variants are not matched explicitly
--> $DIR/stable-omitted-patterns.rs:23:9
|
= help: ensure that all variants are matched explicitly by adding the suggested match arms
= note: the matched value is of type `UnstableEnum` and the `non_exhaustive_omitted_patterns` attribute was found
-error: aborting due to previous error
+error: aborting due to previous error; 1 warning emitted
// run-pass
#![feature(let_chains)]
+#![allow(irrefutable_let_patterns)]
fn main() {
let first = Some(1);
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:47:13
|
LL | if true || let 0 = 0 {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:48:14
|
LL | if (true || let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:49:22
|
LL | if true && (true || let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:50:13
|
LL | if true || (true && let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:111:16
|
LL | while true || let 0 = 0 {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:112:17
|
LL | while (true || let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:113:25
|
LL | while true && (true || let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:114:16
|
LL | while true || (true && let 0 = 0) {}
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:184:10
|
LL | true || let 0 = 0;
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:185:11
|
LL | (true || let 0 = 0);
|
= note: only supported directly in conditions of `if` and `while` expressions
= note: as well as when nested within `&&` and parentheses in those conditions
-note: `||` operators are not allowed in let chain expressions
+note: `||` operators are not currently supported in let chain expressions
--> $DIR/disallowed-positions.rs:186:19
|
LL | true && (true || let 0 = 0);
--- /dev/null
+#![feature(let_chains, let_else)]
+
+fn main() {
+ let opt = Some(1i32);
+
+ let Some(n) = opt else {
+ return;
+ };
+ let Some(n) = opt && n == 1 else {
+ //~^ ERROR a `&&` expression cannot be directly assigned in `let...else`
+ //~| ERROR mismatched types
+ //~| ERROR mismatched types
+ return;
+ };
+ let Some(n) = opt && let another = n else {
+ //~^ ERROR a `&&` expression cannot be directly assigned in `let...else`
+ //~| ERROR `let` expressions are not supported here
+ //~| ERROR mismatched types
+ //~| ERROR mismatched types
+ return;
+ };
+
+ if let Some(n) = opt else {
+ //~^ ERROR missing condition for `if` expression
+ return;
+ };
+ if let Some(n) = opt && n == 1 else {
+ //~^ ERROR missing condition for `if` expression
+ return;
+ };
+ if let Some(n) = opt && let another = n else {
+ //~^ ERROR missing condition for `if` expression
+ return;
+ };
+
+ {
+ while let Some(n) = opt else {
+ //~^ ERROR expected `{`, found keyword `else`
+ return;
+ };
+ }
+ {
+ while let Some(n) = opt && n == 1 else {
+ //~^ ERROR expected `{`, found keyword `else`
+ return;
+ };
+ }
+ {
+ while let Some(n) = opt && let another = n else {
+ //~^ ERROR expected `{`, found keyword `else`
+ return;
+ };
+ }
+}
--- /dev/null
+error: a `&&` expression cannot be directly assigned in `let...else`
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:9:19
+ |
+LL | let Some(n) = opt && n == 1 else {
+ | ^^^^^^^^^^^^^
+ |
+help: wrap the expression in parentheses
+ |
+LL | let Some(n) = (opt && n == 1) else {
+ | + +
+
+error: a `&&` expression cannot be directly assigned in `let...else`
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:15:19
+ |
+LL | let Some(n) = opt && let another = n else {
+ | ^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: wrap the expression in parentheses
+ |
+LL | let Some(n) = (opt && let another = n) else {
+ | + +
+
+error: missing condition for `if` expression
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:23:7
+ |
+LL | if let Some(n) = opt else {
+ | ^ expected if condition here
+
+error: missing condition for `if` expression
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:27:7
+ |
+LL | if let Some(n) = opt && n == 1 else {
+ | ^ expected if condition here
+
+error: missing condition for `if` expression
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:31:7
+ |
+LL | if let Some(n) = opt && let another = n else {
+ | ^ expected if condition here
+
+error: expected `{`, found keyword `else`
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:37:33
+ |
+LL | while let Some(n) = opt else {
+ | ----- ----------------- ^^^^ expected `{`
+ | | |
+ | | this `while` condition successfully parsed
+ | while parsing the body of this `while` expression
+
+error: expected `{`, found keyword `else`
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:43:43
+ |
+LL | while let Some(n) = opt && n == 1 else {
+ | ----- --------------------------- ^^^^ expected `{`
+ | | |
+ | | this `while` condition successfully parsed
+ | while parsing the body of this `while` expression
+
+error: expected `{`, found keyword `else`
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:49:52
+ |
+LL | while let Some(n) = opt && let another = n else {
+ | ----- ------------------------------------ ^^^^ expected `{`
+ | | |
+ | | this `while` condition successfully parsed
+ | while parsing the body of this `while` expression
+
+error: `let` expressions are not supported here
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:15:26
+ |
+LL | let Some(n) = opt && let another = n else {
+ | ^^^^^^^^^^^^^^^
+ |
+ = note: only supported directly in conditions of `if` and `while` expressions
+ = note: as well as when nested within `&&` and parentheses in those conditions
+
+error[E0308]: mismatched types
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:9:19
+ |
+LL | let Some(n) = opt && n == 1 else {
+ | ^^^ expected `bool`, found enum `Option`
+ |
+ = note: expected type `bool`
+ found enum `Option<i32>`
+
+error[E0308]: mismatched types
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:9:9
+ |
+LL | let Some(n) = opt && n == 1 else {
+ | ^^^^^^^ ------------- this expression has type `bool`
+ | |
+ | expected `bool`, found enum `Option`
+ |
+ = note: expected type `bool`
+ found enum `Option<_>`
+
+error[E0308]: mismatched types
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:15:19
+ |
+LL | let Some(n) = opt && let another = n else {
+ | ^^^ expected `bool`, found enum `Option`
+ |
+ = note: expected type `bool`
+ found enum `Option<i32>`
+
+error[E0308]: mismatched types
+ --> $DIR/ensure-that-let-else-does-not-interact-with-let-chains.rs:15:9
+ |
+LL | let Some(n) = opt && let another = n else {
+ | ^^^^^^^ ---------------------- this expression has type `bool`
+ | |
+ | expected `bool`, found enum `Option`
+ |
+ = note: expected type `bool`
+ found enum `Option<_>`
+
+error: aborting due to 13 previous errors
+
+For more information about this error, try `rustc --explain E0308`.
--- /dev/null
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:13:8
+ |
+LL | if let first = &opt && let Some(ref second) = first && let None = second.start {}
+ | ^^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/irrefutable-lets.rs:6:30
+ |
+LL | #![cfg_attr(disallowed, deny(irrefutable_let_patterns))]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: irrefutable `if let` patterns
+ --> $DIR/irrefutable-lets.rs:19:8
+ |
+LL | if let first = &opt && let (a, b) = (1, 2) {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: these patterns will always match, so the `if let` is useless
+ = help: consider replacing the `if let` with a `let`
+
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:22:8
+ |
+LL | if let first = &opt && let Some(ref second) = first && let None = second.start && let v = 0 {}
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: trailing irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:22:87
+ |
+LL | if let first = &opt && let Some(ref second) = first && let None = second.start && let v = 0 {}
+ | ^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it into the body
+
+error: trailing irrefutable patterns in let chain
+ --> $DIR/irrefutable-lets.rs:26:37
+ |
+LL | if let Some(ref first) = opt && let second = first && let _third = second {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: these patterns will always match
+ = help: consider moving them into the body
+
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:29:8
+ |
+LL | if let Range { start: local_start, end: _ } = (None..Some(1)) && let None = local_start {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:32:8
+ |
+LL | if let (a, b, c) = (Some(1), Some(1), Some(1)) && let None = Some(1) {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:35:8
+ |
+LL | if let first = &opt && let None = Some(1) {}
+ | ^^^^^^^^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: irrefutable `let` patterns
+ --> $DIR/irrefutable-lets.rs:44:28
+ |
+LL | Some(ref first) if let second = first && let _third = second && let v = 4 + 4 => {},
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: these patterns will always match, so the `let` is useless
+ = help: consider removing `let`
+
+error: leading irrefutable pattern in let chain
+ --> $DIR/irrefutable-lets.rs:50:28
+ |
+LL | Some(ref first) if let Range { start: local_start, end: _ } = first
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: this pattern will always match
+ = help: consider moving it outside of the construct
+
+error: irrefutable `while let` patterns
+ --> $DIR/irrefutable-lets.rs:59:11
+ |
+LL | while let first = &opt && let (a, b) = (1, 2) {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: these patterns will always match, so the loop will never exit
+ = help: consider instead using a `loop { ... }` with a `let` inside it
+
+error: trailing irrefutable patterns in let chain
+ --> $DIR/irrefutable-lets.rs:62:40
+ |
+LL | while let Some(ref first) = opt && let second = first && let _third = second {}
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: these patterns will always match
+ = help: consider moving them into the body
+
+error: aborting due to 12 previous errors
+
-// check-pass
+// revisions: allowed disallowed
+//[allowed] check-pass
#![feature(if_let_guard, let_chains)]
+#![cfg_attr(allowed, allow(irrefutable_let_patterns))]
+#![cfg_attr(disallowed, deny(irrefutable_let_patterns))]
use std::ops::Range;
fn main() {
let opt = Some(None..Some(1));
- if let first = &opt && let Some(ref second) = first && let None = second.start {
- }
- if let Some(ref first) = opt && let second = first && let _third = second {
- }
+ if let first = &opt && let Some(ref second) = first && let None = second.start {}
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
+
+ // No lint as the irrefutable pattern is surrounded by other stuff
+ if 4 * 2 == 0 && let first = &opt && let Some(ref second) = first && let None = second.start {}
+
+ if let first = &opt && let (a, b) = (1, 2) {}
+ //[disallowed]~^ ERROR irrefutable `if let` patterns
+
+ if let first = &opt && let Some(ref second) = first && let None = second.start && let v = 0 {}
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
+ //[disallowed]~^^ ERROR trailing irrefutable pattern in let chain
+
+ if let Some(ref first) = opt && let second = first && let _third = second {}
+ //[disallowed]~^ ERROR trailing irrefutable patterns in let chain
+
+ if let Range { start: local_start, end: _ } = (None..Some(1)) && let None = local_start {}
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
+
+ if let (a, b, c) = (Some(1), Some(1), Some(1)) && let None = Some(1) {}
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
+
+ if let first = &opt && let None = Some(1) {}
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
+
if let Some(ref first) = opt
&& let Range { start: local_start, end: _ } = first
&& let None = local_start {
}
match opt {
- Some(ref first) if let second = first && let _third = second => {},
+ Some(ref first) if let second = first && let _third = second && let v = 4 + 4 => {},
+ //[disallowed]~^ ERROR irrefutable `let` patterns
_ => {}
}
+
match opt {
Some(ref first) if let Range { start: local_start, end: _ } = first
+ //[disallowed]~^ ERROR leading irrefutable pattern in let chain
&& let None = local_start => {},
_ => {}
}
- while let first = &opt && let Some(ref second) = first && let None = second.start {
- }
- while let Some(ref first) = opt && let second = first && let _third = second {
- }
+ // No error, despite the prefix being irrefutable
+ while let first = &opt && let Some(ref second) = first && let None = second.start {}
+
+ while let first = &opt && let (a, b) = (1, 2) {}
+ //[disallowed]~^ ERROR irrefutable `while let` patterns
+
+ while let Some(ref first) = opt && let second = first && let _third = second {}
+ //[disallowed]~^ ERROR trailing irrefutable patterns in let chain
+
while let Some(ref first) = opt
&& let Range { start: local_start, end: _ } = first
&& let None = local_start {
#![feature(const_trait_impl)]
#![feature(const_precise_live_drops)]
-const fn foo<T, E>(res: Result<T, E>) -> Option<T> where E: ~const Drop {
+use std::marker::Destruct;
+
+const fn foo<T, E>(res: Result<T, E>) -> Option<T> where E: ~const Destruct {
match res {
Ok(t) => Some(t),
Err(_e) => None,
pub struct Foo<T>(T);
-const fn baz<T: ~const Drop, E: ~const Drop>(res: Result<Foo<T>, Foo<E>>) -> Option<Foo<T>> {
+const fn baz<T, E>(res: Result<Foo<T>, Foo<E>>) -> Option<Foo<T>>
+where
+ T: ~const Destruct,
+ E: ~const Destruct,
+{
foo(res)
}
-error[E0277]: the trait bound `NonTrivialDrop: ~const Drop` is not satisfied
+error[E0277]: can't drop `NonTrivialDrop` in const contexts
--> $DIR/const-drop-fail.rs:43:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | NonTrivialDrop,
- | ^^^^^^^^^^^^^^ expected an implementor of trait `~const Drop`
+ | ^^^^^^^^^^^^^^ expected an implementor of trait `~const Destruct`
|
+ = note: the trait bound `NonTrivialDrop: ~const Destruct` is not satisfied
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
help: consider borrowing here
|
LL | &NonTrivialDrop,
LL | &mut NonTrivialDrop,
| ++++
-error[E0277]: the trait bound `NonTrivialDrop: ~const Drop` is not satisfied in `ConstImplWithDropGlue`
+error[E0277]: can't drop `NonTrivialDrop` in const contexts
--> $DIR/const-drop-fail.rs:45:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | ConstImplWithDropGlue(NonTrivialDrop),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `ConstImplWithDropGlue`, the trait `~const Drop` is not implemented for `NonTrivialDrop`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `ConstImplWithDropGlue`, the trait `~const Destruct` is not implemented for `NonTrivialDrop`
|
-note: the trait `Drop` is implemented for `NonTrivialDrop`, but that implementation is not `const`
+note: the trait `Destruct` is implemented for `NonTrivialDrop`, but that implementation is not `const`
--> $DIR/const-drop-fail.rs:45:5
|
LL | ConstImplWithDropGlue(NonTrivialDrop),
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
-error[E0277]: the trait bound `ConstDropImplWithBounds<NonTrivialDrop>: ~const Drop` is not satisfied
+error[E0277]: the trait bound `ConstDropImplWithBounds<NonTrivialDrop>: ~const Destruct` is not satisfied
--> $DIR/const-drop-fail.rs:47:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | ConstDropImplWithBounds::<NonTrivialDrop>(PhantomData),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected an implementor of trait `~const Drop`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected an implementor of trait `~const Destruct`
|
-note: required because of the requirements on the impl of `~const Drop` for `ConstDropImplWithBounds<NonTrivialDrop>`
+note: required because of the requirements on the impl of `~const Destruct` for `ConstDropImplWithBounds<NonTrivialDrop>`
--> $DIR/const-drop-fail.rs:28:25
|
LL | impl<T: ~const A> const Drop for ConstDropImplWithBounds<T> {
| ^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: 1 redundant requirement hidden
+ = note: required because of the requirements on the impl of `~const Destruct` for `ConstDropImplWithBounds<NonTrivialDrop>`
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
help: consider borrowing here
|
LL | &ConstDropImplWithBounds::<NonTrivialDrop>(PhantomData),
#![feature(const_mut_refs)]
#![cfg_attr(precise, feature(const_precise_live_drops))]
-use std::marker::PhantomData;
+use std::marker::{Destruct, PhantomData};
struct NonTrivialDrop;
}
}
-const fn check<T: ~const Drop>(_: T) {}
+const fn check<T: ~const Destruct>(_: T) {}
macro_rules! check_all {
($($exp:expr),*$(,)?) => {$(
check_all! {
NonTrivialDrop,
- //~^ ERROR the trait bound
+ //~^ ERROR can't drop
ConstImplWithDropGlue(NonTrivialDrop),
- //~^ ERROR the trait bound
+ //~^ ERROR can't drop
ConstDropImplWithBounds::<NonTrivialDrop>(PhantomData),
//~^ ERROR the trait bound
}
-error[E0277]: the trait bound `NonTrivialDrop: ~const Drop` is not satisfied
+error[E0277]: can't drop `NonTrivialDrop` in const contexts
--> $DIR/const-drop-fail.rs:43:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | NonTrivialDrop,
- | ^^^^^^^^^^^^^^ expected an implementor of trait `~const Drop`
+ | ^^^^^^^^^^^^^^ expected an implementor of trait `~const Destruct`
|
+ = note: the trait bound `NonTrivialDrop: ~const Destruct` is not satisfied
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
help: consider borrowing here
|
LL | &NonTrivialDrop,
LL | &mut NonTrivialDrop,
| ++++
-error[E0277]: the trait bound `NonTrivialDrop: ~const Drop` is not satisfied in `ConstImplWithDropGlue`
+error[E0277]: can't drop `NonTrivialDrop` in const contexts
--> $DIR/const-drop-fail.rs:45:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | ConstImplWithDropGlue(NonTrivialDrop),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `ConstImplWithDropGlue`, the trait `~const Drop` is not implemented for `NonTrivialDrop`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ within `ConstImplWithDropGlue`, the trait `~const Destruct` is not implemented for `NonTrivialDrop`
|
-note: the trait `Drop` is implemented for `NonTrivialDrop`, but that implementation is not `const`
+note: the trait `Destruct` is implemented for `NonTrivialDrop`, but that implementation is not `const`
--> $DIR/const-drop-fail.rs:45:5
|
LL | ConstImplWithDropGlue(NonTrivialDrop),
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
-error[E0277]: the trait bound `ConstDropImplWithBounds<NonTrivialDrop>: ~const Drop` is not satisfied
+error[E0277]: the trait bound `ConstDropImplWithBounds<NonTrivialDrop>: ~const Destruct` is not satisfied
--> $DIR/const-drop-fail.rs:47:5
|
LL | const _: () = check($exp);
| ----- required by a bound introduced by this call
...
LL | ConstDropImplWithBounds::<NonTrivialDrop>(PhantomData),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected an implementor of trait `~const Drop`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected an implementor of trait `~const Destruct`
|
-note: required because of the requirements on the impl of `~const Drop` for `ConstDropImplWithBounds<NonTrivialDrop>`
+note: required because of the requirements on the impl of `~const Destruct` for `ConstDropImplWithBounds<NonTrivialDrop>`
--> $DIR/const-drop-fail.rs:28:25
|
LL | impl<T: ~const A> const Drop for ConstDropImplWithBounds<T> {
| ^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: 1 redundant requirement hidden
+ = note: required because of the requirements on the impl of `~const Destruct` for `ConstDropImplWithBounds<NonTrivialDrop>`
note: required by a bound in `check`
--> $DIR/const-drop-fail.rs:34:19
|
-LL | const fn check<T: ~const Drop>(_: T) {}
- | ^^^^^^^^^^^ required by this bound in `check`
+LL | const fn check<T: ~const Destruct>(_: T) {}
+ | ^^^^^^^^^^^^^^^ required by this bound in `check`
help: consider borrowing here
|
LL | &ConstDropImplWithBounds::<NonTrivialDrop>(PhantomData),
#![feature(never_type)]
#![cfg_attr(precise, feature(const_precise_live_drops))]
+use std::marker::Destruct;
+
struct S<'a>(&'a mut u8);
impl<'a> const Drop for S<'a> {
}
}
-const fn a<T: ~const Drop>(_: T) {}
+const fn a<T: ~const Destruct>(_: T) {}
const fn b() -> u8 {
let mut c = 0;
// Regression test for #92111.
//
-// The issue was that we normalize trait bounds before caching
-// results of selection. Checking that `impl Tr for S` requires
-// checking `S: !Drop` because it cannot overlap with the blanket
-// impl. Then we save the (unsatisfied) result from checking `S: Drop`.
-// Then the call to `a` checks whether `S: ~const Drop` but we normalize
-// it to `S: Drop` which the cache claims to be unsatisfied.
-//
// check-pass
#![feature(const_trait_impl)]
+use std::marker::Destruct;
+
pub trait Tr {}
#[allow(drop_bounds)]
impl Tr for S {}
-const fn a<T: ~const Drop>(t: T) {}
+const fn a<T: ~const Destruct>(t: T) {}
fn main() {
a(S(0));
use crate::foo::{bar::{baz::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
use crate::foo::{bar::{XX, baz::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
use crate::foo::{bar::{baz::{}, baz1::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
fn main() {
}
use foo::{bar::{baz::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
use foo::{bar::{XX, baz::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
use foo::{bar::{baz::{}, baz1::{}}};
//~^ ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| WARN this is accepted in the current edition
fn main() {
}
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:17:5
- |
-LL | use foo::{bar::{baz::{}}};
- | ^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{baz::{}}}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:23:5
+ --> $DIR/edition-lint-nested-empty-paths.rs:21:5
|
LL | use foo::{bar::{XX, baz::{}}};
| ^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{XX, baz::{}}}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:23:5
+ --> $DIR/edition-lint-nested-empty-paths.rs:21:5
|
LL | use foo::{bar::{XX, baz::{}}};
| ^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{XX, baz::{}}}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:23:5
- |
-LL | use foo::{bar::{XX, baz::{}}};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{XX, baz::{}}}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:23:5
- |
-LL | use foo::{bar::{XX, baz::{}}};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{XX, baz::{}}}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:33:5
- |
-LL | use foo::{bar::{baz::{}, baz1::{}}};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{baz::{}, baz1::{}}}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:33:5
- |
-LL | use foo::{bar::{baz::{}, baz1::{}}};
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{baz::{}, baz1::{}}}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:33:5
+ --> $DIR/edition-lint-nested-empty-paths.rs:27:5
|
LL | use foo::{bar::{baz::{}, baz1::{}}};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{baz::{}, baz1::{}}}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-empty-paths.rs:33:5
+ --> $DIR/edition-lint-nested-empty-paths.rs:27:5
|
LL | use foo::{bar::{baz::{}, baz1::{}}};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{bar::{baz::{}, baz1::{}}}`
= warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-error: aborting due to 10 previous errors
+error: aborting due to 5 previous errors
//~| this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| this is accepted in the current edition
mod foo {
crate fn a() {}
//~| this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| this is accepted in the current edition
- //~| ERROR absolute paths must start with
- //~| this is accepted in the current edition
x::a();
c();
}
//~| this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| this is accepted in the current edition
-//~| ERROR absolute paths must start with
-//~| this is accepted in the current edition
mod foo {
crate fn a() {}
//~| this is accepted in the current edition
//~| ERROR absolute paths must start with
//~| this is accepted in the current edition
- //~| ERROR absolute paths must start with
- //~| this is accepted in the current edition
x::a();
c();
}
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-paths.rs:6:5
- |
-LL | use foo::{a, b};
- | ^^^^^^^^^^^ help: use `crate`: `crate::foo::{a, b}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-paths.rs:6:5
- |
-LL | use foo::{a, b};
- | ^^^^^^^^^^^ help: use `crate`: `crate::foo::{a, b}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-paths.rs:27:13
- |
-LL | use foo::{self as x, c};
- | ^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{self as x, c}`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-paths.rs:27:13
+ --> $DIR/edition-lint-nested-paths.rs:23:13
|
LL | use foo::{self as x, c};
| ^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{self as x, c}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-nested-paths.rs:27:13
+ --> $DIR/edition-lint-nested-paths.rs:23:13
|
LL | use foo::{self as x, c};
| ^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::{self as x, c}`
= warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-error: aborting due to 7 previous errors
+error: aborting due to 4 previous errors
use crate::bar::Bar;
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
- //~| ERROR absolute
- //~| WARN this is accepted in the current edition
use super::bar::Bar2;
use crate::bar::Bar3;
use crate::bar::Bar;
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
-//~| ERROR absolute
-//~| WARN this is accepted in the current edition
pub mod bar {
use edition_lint_paths as foo;
impl crate::foo::SomeTrait for u32 {}
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
-//~| ERROR absolute
-//~| WARN this is accepted in the current edition
fn main() {
let x = crate::bar::Bar;
use bar::Bar;
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
- //~| ERROR absolute
- //~| WARN this is accepted in the current edition
use super::bar::Bar2;
use crate::bar::Bar3;
use bar::Bar;
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
-//~| ERROR absolute
-//~| WARN this is accepted in the current edition
pub mod bar {
use edition_lint_paths as foo;
impl ::foo::SomeTrait for u32 {}
//~^ ERROR absolute
//~| WARN this is accepted in the current edition
-//~| ERROR absolute
-//~| WARN this is accepted in the current edition
fn main() {
let x = ::bar::Bar;
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:12:9
- |
-LL | use bar::Bar;
- | ^^^^^^^^ help: use `crate`: `crate::bar::Bar`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:21:9
+ --> $DIR/edition-lint-paths.rs:19:9
|
LL | use bar;
| ^^^ help: use `crate`: `crate::bar`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:27:9
+ --> $DIR/edition-lint-paths.rs:25:9
|
LL | use {main, Bar as SomethingElse};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::{main, Bar as SomethingElse}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:27:9
+ --> $DIR/edition-lint-paths.rs:25:9
|
LL | use {main, Bar as SomethingElse};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::{main, Bar as SomethingElse}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:27:9
+ --> $DIR/edition-lint-paths.rs:25:9
|
LL | use {main, Bar as SomethingElse};
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::{main, Bar as SomethingElse}`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:42:5
+ --> $DIR/edition-lint-paths.rs:40:5
|
LL | use bar::Bar;
| ^^^^^^^^ help: use `crate`: `crate::bar::Bar`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:42:5
- |
-LL | use bar::Bar;
- | ^^^^^^^^ help: use `crate`: `crate::bar::Bar`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:56:9
+ --> $DIR/edition-lint-paths.rs:52:9
|
LL | use *;
| ^ help: use `crate`: `crate::*`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:61:6
- |
-LL | impl ::foo::SomeTrait for u32 {}
- | ^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::SomeTrait`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:61:6
+ --> $DIR/edition-lint-paths.rs:57:6
|
LL | impl ::foo::SomeTrait for u32 {}
| ^^^^^^^^^^^^^^^^ help: use `crate`: `crate::foo::SomeTrait`
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/edition-lint-paths.rs:68:13
+ --> $DIR/edition-lint-paths.rs:62:13
|
LL | let x = ::bar::Bar;
| ^^^^^^^^^^ help: use `crate`: `crate::bar::Bar`
= warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-error: aborting due to 12 previous errors
+error: aborting due to 9 previous errors
use crate::my_crate::foo;
//~^ ERROR absolute paths must start
//~| WARNING this is accepted in the current edition
-//~| ERROR absolute paths must start
-//~| WARNING this is accepted in the current edition
fn main() {
foo();
use my_crate::foo;
//~^ ERROR absolute paths must start
//~| WARNING this is accepted in the current edition
-//~| ERROR absolute paths must start
-//~| WARNING this is accepted in the current edition
fn main() {
foo();
= warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/extern-crate-rename.rs:12:5
- |
-LL | use my_crate::foo;
- | ^^^^^^^^^^^^^ help: use `crate`: `crate::my_crate::foo`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
use crate::m::edition_lint_paths::foo;
//~^ ERROR absolute paths must start
//~| WARNING this is accepted in the current edition
-//~| ERROR absolute paths must start
-//~| WARNING this is accepted in the current edition
-
fn main() {
foo();
use m::edition_lint_paths::foo;
//~^ ERROR absolute paths must start
//~| WARNING this is accepted in the current edition
-//~| ERROR absolute paths must start
-//~| WARNING this is accepted in the current edition
-
fn main() {
foo();
= warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
= note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-error: absolute paths must start with `self`, `super`, `crate`, or an external crate name in the 2018 edition
- --> $DIR/extern-crate-submod.rs:19:5
- |
-LL | use m::edition_lint_paths::foo;
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `crate`: `crate::m::edition_lint_paths::foo`
- |
- = warning: this is accepted in the current edition (Rust 2015) but is a hard error in Rust 2018!
- = note: for more information, see issue #53130 <https://github.com/rust-lang/rust/issues/53130>
-
-error: aborting due to 2 previous errors
+error: aborting due to previous error
error[E0308]: mismatched types
--> $DIR/slightly-nice-generic-literal-messages.rs:7:9
|
+LL | match Foo(1.1, marker::PhantomData) {
+ | ----------------------------- this expression has type `Foo<{float}, _>`
LL | 1 => {}
| ^ expected struct `Foo`, found integer
|
+++ /dev/null
-fn main() {
- let f = |x| x * 3;
- let a = f(); //~ ERROR E0057
- let b = f(4);
- let c = f(2, 3); //~ ERROR E0057
-}
+++ /dev/null
-error[E0057]: this function takes 1 argument but 0 arguments were supplied
- --> $DIR/E0057.rs:3:13
- |
-LL | let a = f();
- | ^-- supplied 0 arguments
- | |
- | expected 1 argument
-
-error[E0057]: this function takes 1 argument but 2 arguments were supplied
- --> $DIR/E0057.rs:5:13
- |
-LL | let c = f(2, 3);
- | ^ - - supplied 2 arguments
- | |
- | expected 1 argument
-
-error: aborting due to 2 previous errors
-
-For more information about this error, try `rustc --explain E0057`.
LL | println!("{}", MyStruct.foo_one());
| ^^^^^^^ method cannot be called on `MyStruct` due to unsatisfied trait bounds
|
-note: the following trait bounds were not satisfied because of the requirements of the implementation of `Foo` for `_`:
- `MyStruct: Foo`
+note: trait bound `MyStruct: Foo` was not satisfied
--> $DIR/specialization-trait-not-implemented.rs:14:17
|
LL | default impl<T> Foo for T {
--- /dev/null
+// run-pass
+// aux-build:newtype_struct_xc.rs
+
+
+extern crate newtype_struct_xc;
+
+pub fn main() {
+ let x = newtype_struct_xc::Au(21);
+ match x {
+ newtype_struct_xc::Au(n) => assert_eq!(n, 21)
+ }
+}
--- /dev/null
+enum Foo {
+ Bar { bar: bool },
+ Other,
+}
+
+fn main() {
+ let foo = Some(Foo::Other);
+
+ if let Some(Foo::Bar {_}) = foo {}
+ //~^ ERROR expected identifier, found reserved identifier `_`
+ //~| ERROR pattern does not mention field `bar` [E0027]
+}
--- /dev/null
+error: expected identifier, found reserved identifier `_`
+ --> $DIR/struct-enum-ignoring-field-with-underscore.rs:9:27
+ |
+LL | if let Some(Foo::Bar {_}) = foo {}
+ | ^ expected identifier, found reserved identifier
+
+error[E0027]: pattern does not mention field `bar`
+ --> $DIR/struct-enum-ignoring-field-with-underscore.rs:9:17
+ |
+LL | if let Some(Foo::Bar {_}) = foo {}
+ | ^^^^^^^^^^^^ missing field `bar`
+ |
+help: include the missing field in the pattern
+ |
+LL | if let Some(Foo::Bar {_, bar }) = foo {}
+ | ~~~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | if let Some(Foo::Bar {_, .. }) = foo {}
+ | ~~~~~~
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0027`.
--- /dev/null
+// run-rustfix
+#[derive(Debug, Default, Eq, PartialEq)]
+struct A {
+ b: u32,
+ c: u64,
+ d: usize,
+}
+
+fn main() {
+ let q = A { c: 5, .. Default::default() };
+ //~^ ERROR mismatched types
+ //~| ERROR missing fields
+ //~| HELP separate the last named field with a comma
+ let r = A { c: 5, .. Default::default() };
+ assert_eq!(q, r);
+}
--- /dev/null
+// run-rustfix
+#[derive(Debug, Default, Eq, PartialEq)]
+struct A {
+ b: u32,
+ c: u64,
+ d: usize,
+}
+
+fn main() {
+ let q = A { c: 5 .. Default::default() };
+ //~^ ERROR mismatched types
+ //~| ERROR missing fields
+ //~| HELP separate the last named field with a comma
+ let r = A { c: 5, .. Default::default() };
+ assert_eq!(q, r);
+}
--- /dev/null
+error[E0308]: mismatched types
+ --> $DIR/struct-record-suggestion.rs:10:20
+ |
+LL | let q = A { c: 5 .. Default::default() };
+ | ^^^^^^^^^^^^^^^^^^^^^^^ expected `u64`, found struct `std::ops::Range`
+ |
+ = note: expected type `u64`
+ found struct `std::ops::Range<{integer}>`
+
+error[E0063]: missing fields `b` and `d` in initializer of `A`
+ --> $DIR/struct-record-suggestion.rs:10:13
+ |
+LL | let q = A { c: 5 .. Default::default() };
+ | ^ missing `b` and `d`
+ |
+help: to set the remaining fields from `Default::default()`, separate the last named field with a comma
+ |
+LL | let q = A { c: 5, .. Default::default() };
+ | +
+
+error: aborting due to 2 previous errors
+
+Some errors have detailed explanations: E0063, E0308.
+For more information about an error, try `rustc --explain E0063`.
error[E0308]: mismatched types
--> $DIR/structure-constructor-type-mismatch.rs:54:9
|
+LL | match (Point { x: 1, y: 2 }) {
+ | ---------------------- this expression has type `Point<{integer}>`
LL | PointF::<u32> { .. } => {}
| ^^^^^^^^^^^^^^^^^^^^ expected integer, found `f32`
|
error[E0308]: mismatched types
--> $DIR/structure-constructor-type-mismatch.rs:59:9
|
+LL | match (Point { x: 1, y: 2 }) {
+ | ---------------------- this expression has type `Point<{integer}>`
LL | PointF { .. } => {}
| ^^^^^^^^^^^^^ expected integer, found `f32`
|
error[E0308]: mismatched types
--> $DIR/structure-constructor-type-mismatch.rs:67:9
|
+LL | match (Pair { x: 1, y: 2 }) {
+ | --------------------- this expression has type `Pair<{integer}, {integer}>`
LL | PairF::<u32> { .. } => {}
| ^^^^^^^^^^^^^^^^^^^ expected integer, found `f32`
|
--- /dev/null
+// aux-build:struct_field_privacy.rs
+
+extern crate struct_field_privacy as xc;
+
+use xc::B;
+
+struct A {
+ pub a: u32,
+ b: u32,
+}
+
+fn main () {
+ // external crate struct
+ let k = B {
+ aa: 20,
+ //~^ ERROR struct `B` has no field named `aa`
+ bb: 20,
+ //~^ ERROR struct `B` has no field named `bb`
+ };
+ // local crate struct
+ let l = A {
+ aa: 20,
+ //~^ ERROR struct `A` has no field named `aa`
+ bb: 20,
+ //~^ ERROR struct `A` has no field named `bb`
+ };
+}
--- /dev/null
+error[E0560]: struct `B` has no field named `aa`
+ --> $DIR/suggest-private-fields.rs:15:9
+ |
+LL | aa: 20,
+ | ^^ help: a field with a similar name exists: `a`
+
+error[E0560]: struct `B` has no field named `bb`
+ --> $DIR/suggest-private-fields.rs:17:9
+ |
+LL | bb: 20,
+ | ^^ `B` does not have this field
+ |
+ = note: available fields are: `a`
+
+error[E0560]: struct `A` has no field named `aa`
+ --> $DIR/suggest-private-fields.rs:22:9
+ |
+LL | aa: 20,
+ | ^^ help: a field with a similar name exists: `a`
+
+error[E0560]: struct `A` has no field named `bb`
+ --> $DIR/suggest-private-fields.rs:24:9
+ |
+LL | bb: 20,
+ | ^^ help: a field with a similar name exists: `b`
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0560`.
--- /dev/null
+enum Foo {
+ Bar { a: u8, b: i8, c: u8 },
+ Baz { a: f32 },
+ None,
+}
+
+fn main() {
+ let foo = Foo::None;
+ match foo {
+ Foo::Bar { a, aa: 1, c } => (),
+ //~^ ERROR variant `Foo::Bar` does not have a field named `aa` [E0026]
+ //~| ERROR pattern does not mention field `b` [E0027]
+ Foo::Baz { bb: 1.0 } => (),
+ //~^ ERROR variant `Foo::Baz` does not have a field named `bb` [E0026]
+ //~| ERROR pattern does not mention field `a` [E0027]
+ _ => (),
+ }
+
+ match foo {
+ Foo::Bar { a, aa: "", c } => (),
+ //~^ ERROR variant `Foo::Bar` does not have a field named `aa` [E0026]
+ //~| ERROR pattern does not mention field `b` [E0027]
+ Foo::Baz { bb: "" } => (),
+ //~^ ERROR variant `Foo::Baz` does not have a field named `bb` [E0026]
+ //~| pattern does not mention field `a` [E0027]
+ _ => (),
+ }
+}
--- /dev/null
+error[E0026]: variant `Foo::Bar` does not have a field named `aa`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:10:23
+ |
+LL | Foo::Bar { a, aa: 1, c } => (),
+ | ^^
+ | |
+ | variant `Foo::Bar` does not have this field
+ | help: `Foo::Bar` has a field named `b`
+
+error[E0027]: pattern does not mention field `b`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:10:9
+ |
+LL | Foo::Bar { a, aa: 1, c } => (),
+ | ^^^^^^^^^^^^^^^^^^^^^^^^ missing field `b`
+ |
+help: include the missing field in the pattern
+ |
+LL | Foo::Bar { a, aa: 1, c, b } => (),
+ | ~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | Foo::Bar { a, aa: 1, c, .. } => (),
+ | ~~~~~~
+
+error[E0026]: variant `Foo::Baz` does not have a field named `bb`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:13:20
+ |
+LL | Foo::Baz { bb: 1.0 } => (),
+ | ^^
+ | |
+ | variant `Foo::Baz` does not have this field
+ | help: `Foo::Baz` has a field named `a`
+
+error[E0027]: pattern does not mention field `a`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:13:9
+ |
+LL | Foo::Baz { bb: 1.0 } => (),
+ | ^^^^^^^^^^^^^^^^^^^^ missing field `a`
+ |
+help: include the missing field in the pattern
+ |
+LL | Foo::Baz { bb: 1.0, a } => (),
+ | ~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | Foo::Baz { bb: 1.0, .. } => (),
+ | ~~~~~~
+
+error[E0026]: variant `Foo::Bar` does not have a field named `aa`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:20:23
+ |
+LL | Foo::Bar { a, aa: "", c } => (),
+ | ^^ variant `Foo::Bar` does not have this field
+
+error[E0027]: pattern does not mention field `b`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:20:9
+ |
+LL | Foo::Bar { a, aa: "", c } => (),
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^ missing field `b`
+ |
+help: include the missing field in the pattern
+ |
+LL | Foo::Bar { a, aa: "", c, b } => (),
+ | ~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | Foo::Bar { a, aa: "", c, .. } => (),
+ | ~~~~~~
+
+error[E0026]: variant `Foo::Baz` does not have a field named `bb`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:23:20
+ |
+LL | Foo::Baz { bb: "" } => (),
+ | ^^ variant `Foo::Baz` does not have this field
+
+error[E0027]: pattern does not mention field `a`
+ --> $DIR/suggest-replacing-field-when-specifying-same-type.rs:23:9
+ |
+LL | Foo::Baz { bb: "" } => (),
+ | ^^^^^^^^^^^^^^^^^^^ missing field `a`
+ |
+help: include the missing field in the pattern
+ |
+LL | Foo::Baz { bb: "", a } => (),
+ | ~~~~~
+help: if you don't care about this missing field, you can explicitly ignore it
+ |
+LL | Foo::Baz { bb: "", .. } => (),
+ | ~~~~~~
+
+error: aborting due to 8 previous errors
+
+Some errors have detailed explanations: E0026, E0027.
+For more information about an error, try `rustc --explain E0026`.
+++ /dev/null
-pub struct A {
- a: isize,
- pub b: isize,
-}
-
-pub struct B {
- pub a: isize,
- b: isize,
-}
--- /dev/null
+struct Bug<S>{ //~ ERROR parameter `S` is never used [E0392]
+ A: [(); {
+ let x: [u8; Self::W] = [0; Self::W]; //~ ERROR generic `Self` types are currently not permitted in anonymous constants
+ //~^ ERROR generic `Self` types are currently not permitted in anonymous constants
+ //~^^ ERROR the size for values of type `S` cannot be known at compilation time [E0277]
+ F //~ ERROR cannot find value `F` in this scope [E0425]
+ }
+} //~ ERROR mismatched closing delimiter: `}`
+//~^ ERROR mismatched closing delimiter: `}`
+
+fn main() {}
--- /dev/null
+error: mismatched closing delimiter: `}`
+ --> $DIR/constrain-suggest-ice.rs:2:8
+ |
+LL | struct Bug<S>{
+ | - closing delimiter possibly meant for this
+LL | A: [(); {
+ | ^ unclosed delimiter
+...
+LL | }
+ | ^ mismatched closing delimiter
+
+error: mismatched closing delimiter: `}`
+ --> $DIR/constrain-suggest-ice.rs:2:8
+ |
+LL | struct Bug<S>{
+ | - closing delimiter possibly meant for this
+LL | A: [(); {
+ | ^ unclosed delimiter
+...
+LL | }
+ | ^ mismatched closing delimiter
+
+error[E0425]: cannot find value `F` in this scope
+ --> $DIR/constrain-suggest-ice.rs:6:9
+ |
+LL | F
+ | ^
+ |
+help: a local variable with a similar name exists
+ |
+LL | x
+ | ~
+help: you might be missing a type parameter
+ |
+LL | struct Bug<S, F>{
+ | +++
+
+error: generic `Self` types are currently not permitted in anonymous constants
+ --> $DIR/constrain-suggest-ice.rs:3:21
+ |
+LL | let x: [u8; Self::W] = [0; Self::W];
+ | ^^^^
+
+error: generic `Self` types are currently not permitted in anonymous constants
+ --> $DIR/constrain-suggest-ice.rs:3:36
+ |
+LL | let x: [u8; Self::W] = [0; Self::W];
+ | ^^^^
+
+error[E0277]: the size for values of type `S` cannot be known at compilation time
+ --> $DIR/constrain-suggest-ice.rs:3:36
+ |
+LL | struct Bug<S>{
+ | - this type parameter needs to be `std::marker::Sized`
+LL | A: [(); {
+LL | let x: [u8; Self::W] = [0; Self::W];
+ | ^^^^^^^ doesn't have a size known at compile-time
+ |
+note: required by a bound in `Bug`
+ --> $DIR/constrain-suggest-ice.rs:1:12
+ |
+LL | struct Bug<S>{
+ | ^ required by this bound in `Bug`
+help: consider relaxing the implicit `Sized` restriction
+ |
+LL | struct Bug<S: ?Sized>{
+ | ++++++++
+
+error[E0392]: parameter `S` is never used
+ --> $DIR/constrain-suggest-ice.rs:1:12
+ |
+LL | struct Bug<S>{
+ | ^ unused parameter
+ |
+ = help: consider removing `S`, referring to it in a field, or using a marker such as `PhantomData`
+ = help: if you intended `S` to be a const parameter, use `const S: usize` instead
+
+error: aborting due to 7 previous errors
+
+Some errors have detailed explanations: E0277, E0392, E0425.
+For more information about an error, try `rustc --explain E0277`.
| ------------------------------- the found opaque type
|
= note: expected struct `Pin<Box<(dyn Future<Output = i32> + Send + 'static)>>`
- found opaque type `impl Future<Output = [async output]>`
+ found opaque type `impl Future`
help: you need to pin and box this expression
|
LL ~ Box::pin(async {
--- /dev/null
+fn foo(a: &Option<String>, b: &Option<String>) {
+ match (a, b) {
+ //~^ ERROR cannot move out of a shared reference
+ (None, &c) => &c.unwrap(),
+ (&Some(ref c), _) => c,
+ };
+}
+
+fn main() {}
--- /dev/null
+error[E0507]: cannot move out of a shared reference
+ --> $DIR/option-content-move-from-tuple-match.rs:2:11
+ |
+LL | match (a, b) {
+ | ^^^^^^
+LL |
+LL | (None, &c) => &c.unwrap(),
+ | -
+ | |
+ | data moved here
+ | move occurs because `c` has type `Option<String>`, which does not implement the `Copy` trait
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0507`.
--- /dev/null
+mod foo {
+ struct A;
+ mod bar {
+ struct B;
+ }
+}
+
+struct Foo {
+ a: foo:A,
+ //~^ ERROR found single colon in a struct field type path
+ //~| expected `,`, or `}`, found `:`
+}
+
+struct Bar {
+ b: foo::bar:B,
+ //~^ ERROR found single colon in a struct field type path
+ //~| expected `,`, or `}`, found `:`
+}
+
+fn main() {}
--- /dev/null
+error: found single colon in a struct field type path
+ --> $DIR/struct-field-type-including-single-colon.rs:9:11
+ |
+LL | a: foo:A,
+ | ^
+ |
+help: write a path separator here
+ |
+LL | a: foo::A,
+ | ~~
+
+error: expected `,`, or `}`, found `:`
+ --> $DIR/struct-field-type-including-single-colon.rs:9:11
+ |
+LL | a: foo:A,
+ | ^
+
+error: found single colon in a struct field type path
+ --> $DIR/struct-field-type-including-single-colon.rs:15:16
+ |
+LL | b: foo::bar:B,
+ | ^
+ |
+help: write a path separator here
+ |
+LL | b: foo::bar::B,
+ | ~~
+
+error: expected `,`, or `}`, found `:`
+ --> $DIR/struct-field-type-including-single-colon.rs:15:16
+ |
+LL | b: foo::bar:B,
+ | ^
+
+error: aborting due to 4 previous errors
+
+++ /dev/null
-// aux-build:struct_field_privacy.rs
-
-extern crate struct_field_privacy as xc;
-
-use xc::B;
-
-struct A {
- pub a: u32,
- b: u32,
-}
-
-fn main () {
- // external crate struct
- let k = B {
- aa: 20,
- //~^ ERROR struct `B` has no field named `aa`
- bb: 20,
- //~^ ERROR struct `B` has no field named `bb`
- };
- // local crate struct
- let l = A {
- aa: 20,
- //~^ ERROR struct `A` has no field named `aa`
- bb: 20,
- //~^ ERROR struct `A` has no field named `bb`
- };
-}
+++ /dev/null
-error[E0560]: struct `B` has no field named `aa`
- --> $DIR/suggest-private-fields.rs:15:9
- |
-LL | aa: 20,
- | ^^ help: a field with a similar name exists: `a`
-
-error[E0560]: struct `B` has no field named `bb`
- --> $DIR/suggest-private-fields.rs:17:9
- |
-LL | bb: 20,
- | ^^ `B` does not have this field
- |
- = note: available fields are: `a`
-
-error[E0560]: struct `A` has no field named `aa`
- --> $DIR/suggest-private-fields.rs:22:9
- |
-LL | aa: 20,
- | ^^ help: a field with a similar name exists: `a`
-
-error[E0560]: struct `A` has no field named `bb`
- --> $DIR/suggest-private-fields.rs:24:9
- |
-LL | bb: 20,
- | ^^ help: a field with a similar name exists: `b`
-
-error: aborting due to 4 previous errors
-
-For more information about this error, try `rustc --explain E0560`.
--- /dev/null
+// only-aarch64
+// run-pass
+#![allow(dead_code)]
+use std::arch::*;
+use std::arch::aarch64::*;
+
+// Smoke test to verify aarch64 code that enables NEON compiles.
+fn main() {
+ let _zero = if is_aarch64_feature_detected!("neon") {
+ unsafe {
+ let zeros = zero_vector();
+ vgetq_lane_u8::<1>(zeros)
+ }
+ } else {
+ 0
+ };
+}
+
+
+#[target_feature(enable = "neon")]
+unsafe fn zero_vector() -> uint8x16_t {
+ vmovq_n_u8(0)
+}
--- /dev/null
+// revisions: aarch64-neon aarch64-sve2
+// [aarch64-neon] compile-flags: -Ctarget-feature=+neon --target=aarch64-unknown-linux-gnu
+// [aarch64-neon] needs-llvm-components: aarch64
+// [aarch64-sve2] compile-flags: -Ctarget-feature=-neon,+sve2 --target=aarch64-unknown-linux-gnu
+// [aarch64-sve2] needs-llvm-components: aarch64
+// build-pass
+#![no_core]
+#![crate_type = "rlib"]
+#![feature(intrinsics, rustc_attrs, no_core, lang_items, staged_api)]
+#![stable(feature = "test", since = "1.0.0")]
+
+// Tests vetting "feature hierarchies" in the cases where we impose them.
+
+// Supporting minimal rust core code
+#[lang = "sized"]
+trait Sized {}
+#[lang = "copy"]
+trait Copy {}
+impl Copy for bool {}
+
+extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "test", since = "1.0.0")]
+ fn unreachable() -> !;
+}
+
+#[rustc_builtin_macro]
+macro_rules! cfg {
+ ($($cfg:tt)*) => {};
+}
+
+// Test code
+const fn do_or_die(cond: bool) {
+ if cond {
+ } else {
+ unsafe { unreachable() }
+ }
+}
+
+macro_rules! assert {
+ ($x:expr $(,)?) => {
+ const _: () = do_or_die($x);
+ };
+}
+
+
+#[cfg(aarch64_neon)]
+fn check_neon_not_sve2() {
+ // This checks that a normal aarch64 target doesn't suddenly jump up the feature hierarchy.
+ assert!(cfg!(target_feature = "neon"));
+ assert!(cfg!(not(target_feature = "sve2")));
+}
+
+#[cfg(aarch64_sve2)]
+fn check_sve2_includes_neon() {
+ // This checks that aarch64's sve2 includes neon
+ assert!(cfg!(target_feature = "neon"));
+ assert!(cfg!(target_feature = "sve2"));
+}
// gate-test-avx512_target_feature
// gate-test-tbm_target_feature
// gate-test-arm_target_feature
-// gate-test-aarch64_target_feature
// gate-test-hexagon_target_feature
// gate-test-mips_target_feature
// gate-test-wasm_target_feature
// gate-test-riscv_target_feature
// gate-test-ermsb_target_feature
// gate-test-bpf_target_feature
+// gate-test-aarch64_ver_target_feature
#[target_feature(enable = "avx512bw")]
//~^ ERROR: currently unstable
--- /dev/null
+// revisions: aarch64 x86-64
+// [aarch64] compile-flags: -Ctarget-feature=+neon,+fp16,+fhm --target=aarch64-unknown-linux-gnu
+// [aarch64] needs-llvm-components: aarch64
+// [x86-64] compile-flags: -Ctarget-feature=+sse4.2,+rdrand --target=x86_64-unknown-linux-gnu
+// [x86-64] needs-llvm-components: x86
+// build-pass
+#![no_core]
+#![crate_type = "rlib"]
+#![feature(intrinsics, rustc_attrs, no_core, lang_items, staged_api)]
+#![stable(feature = "test", since = "1.0.0")]
+
+// Supporting minimal rust core code
+#[lang = "sized"]
+trait Sized {}
+#[lang = "copy"]
+trait Copy {}
+impl Copy for bool {}
+
+extern "rust-intrinsic" {
+ #[rustc_const_stable(feature = "test", since = "1.0.0")]
+ fn unreachable() -> !;
+}
+
+#[rustc_builtin_macro]
+macro_rules! cfg {
+ ($($cfg:tt)*) => {};
+}
+
+// Test code
+const fn do_or_die(cond: bool) {
+ if cond {
+ } else {
+ unsafe { unreachable() }
+ }
+}
+
+macro_rules! assert {
+ ($x:expr $(,)?) => {
+ const _: () = do_or_die($x);
+ };
+}
+
+
+#[cfg(target_arch = "aarch64")]
+fn check_aarch64() {
+ // This checks that the rustc feature name is used, not the LLVM feature.
+ assert!(cfg!(target_feature = "neon"));
+ assert!(cfg!(not(target_feature = "fp-armv8")));
+ assert!(cfg!(target_feature = "fhm"));
+ assert!(cfg!(not(target_feature = "fp16fml")));
+ assert!(cfg!(target_feature = "fp16"));
+ assert!(cfg!(not(target_feature = "fullfp16")));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn check_x86_64() {
+ // This checks that the rustc feature name is used, not the LLVM feature.
+ assert!(cfg!(target_feature = "rdrand"));
+ assert!(cfg!(not(target_feature = "rdrnd")));
+
+ // Likewise: We enable LLVM's crc32 feature with SSE4.2, but Rust says it's just SSE4.2
+ assert!(cfg!(target_feature = "sse4.2"));
+ assert!(cfg!(not(target_feature = "crc32")));
+}
// build-fail
// compile-flags: --crate-type=rlib --target=aarch64-unknown-linux-gnu
// needs-llvm-components: aarch64
-#![feature(aarch64_target_feature, target_feature_11)]
+#![cfg_attr(bootstrap, feature(aarch64_target_feature))]
#![feature(no_core, lang_items)]
#![no_core]
// compile-flags:--test
// run-pass
+// needs-unwind
// ignore-emscripten no subprocess support
use std::fmt;
| ^^^^^^^ `Rc<u32>` cannot be sent between threads safely
|
= help: the trait `Send` is not implemented for `Rc<u32>`
+ = note: required because of the requirements on the impl of `SendSync` for `Rc<u32>`
note: required by a bound in `use_alias`
--> $DIR/cross-crate.rs:10:17
|
| ^^^^^^^ `Rc<u32>` cannot be shared between threads safely
|
= help: the trait `Sync` is not implemented for `Rc<u32>`
+ = note: required because of the requirements on the impl of `SendSync` for `Rc<u32>`
note: required by a bound in `use_alias`
--> $DIR/cross-crate.rs:10:17
|
fn qux<'a, T: Bar>(_: &'a T) where <&'a T as Bar>::Baz: String { //~ ERROR expected trait, found
}
+fn issue_95327() where <u8 as Unresolved>::Assoc: String {}
+//~^ ERROR expected trait, found struct
+//~| ERROR use of undeclared type `Unresolved`
+
fn main() {}
+error[E0433]: failed to resolve: use of undeclared type `Unresolved`
+ --> $DIR/assoc_type_bound_with_struct.rs:19:31
+ |
+LL | fn issue_95327() where <u8 as Unresolved>::Assoc: String {}
+ | ^^^^^^^^^^ use of undeclared type `Unresolved`
+
error[E0404]: expected trait, found struct `String`
--> $DIR/assoc_type_bound_with_struct.rs:5:46
|
LL | fn qux<'a, T: Bar>(_: &'a T) where <&'a T as Bar>::Baz: ToString {
| ~~~~~~~~
-error: aborting due to 4 previous errors
+error[E0404]: expected trait, found struct `String`
+ --> $DIR/assoc_type_bound_with_struct.rs:19:51
+ |
+LL | fn issue_95327() where <u8 as Unresolved>::Assoc: String {}
+ | ^^^^^^ help: a trait with a similar name exists: `ToString`
+ |
+ ::: $SRC_DIR/alloc/src/string.rs:LL:COL
+ |
+LL | pub trait ToString {
+ | ------------------ similarly named trait `ToString` defined here
+
+error: aborting due to 6 previous errors
-For more information about this error, try `rustc --explain E0404`.
+Some errors have detailed explanations: E0404, E0433.
+For more information about an error, try `rustc --explain E0404`.
| |
| required by a bound introduced by this call
|
- = note: required because of the requirements on the impl of `Magic` for `NoClone`
+note: required because of the requirements on the impl of `Magic` for `NoClone`
+ --> $DIR/supertrait-auto-trait.rs:8:12
+ |
+LL | auto trait Magic: Copy {}
+ | ^^^^^
note: required by a bound in `copy`
--> $DIR/supertrait-auto-trait.rs:10:12
|
--- /dev/null
+trait SuperTrait {
+ type A;
+ type B;
+}
+
+trait Trait: SuperTrait<A = <Self as SuperTrait>::B> {}
+
+fn transmute<A, B>(x: A) -> B {
+ foo::<A, B, dyn Trait<A = A, B = B>>(x)
+ //~^ ERROR type mismatch resolving `<dyn Trait<A = A, B = B> as SuperTrait>::A == B`
+}
+
+fn foo<A, B, T: ?Sized>(x: T::A) -> B
+where
+ T: Trait<B = B>,
+{
+ x
+}
+
+static X: u8 = 0;
+fn main() {
+ let x = transmute::<&u8, &[u8; 1_000_000]>(&X);
+ println!("{:?}", x[100_000]);
+}
--- /dev/null
+error[E0271]: type mismatch resolving `<dyn Trait<A = A, B = B> as SuperTrait>::A == B`
+ --> $DIR/enforce-supertrait-projection.rs:9:5
+ |
+LL | fn transmute<A, B>(x: A) -> B {
+ | - - expected type parameter
+ | |
+ | found type parameter
+LL | foo::<A, B, dyn Trait<A = A, B = B>>(x)
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected type parameter `B`, found type parameter `A`
+ |
+ = note: expected type parameter `B`
+ found type parameter `A`
+ = note: a type parameter was expected, but a different one was found; you might be missing a type parameter or trait bound
+ = note: for more information, visit https://doc.rust-lang.org/book/ch10-02-traits.html#traits-as-parameters
+note: required by a bound in `foo`
+ --> $DIR/enforce-supertrait-projection.rs:15:8
+ |
+LL | fn foo<A, B, T: ?Sized>(x: T::A) -> B
+ | --- required by a bound in this
+LL | where
+LL | T: Trait<B = B>,
+ | ^^^^^^^^^^^^ required by this bound in `foo`
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0271`.
--- /dev/null
+error: lifetime may not live long enough
+ --> $DIR/supertrait-lifetime-bound.rs:10:5
+ |
+LL | fn test2<'a>() {
+ | -- lifetime `'a` defined here
+...
+LL | test1::<dyn Bar<&'a u32>, _>();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ requires that `'a` must outlive `'static`
+
+error: aborting due to previous error
+
-// check-pass
+trait Foo: 'static { }
-use std::any::Any;
+trait Bar<T>: Foo { }
-trait A<T>: Any {
- fn m(&self) {}
-}
-
-impl<S, T: 'static> A<S> for T {}
+fn test1<T: ?Sized + Bar<S>, S>() { }
-fn call_obj<'a>() {
- let obj: &dyn A<&'a ()> = &();
- obj.m();
+fn test2<'a>() {
+ // Here: the type `dyn Bar<&'a u32>` references `'a`,
+ // and so it does not outlive `'static`.
+ test1::<dyn Bar<&'a u32>, _>();
+ //~^ ERROR the type `(dyn Bar<&'a u32> + 'static)` does not fulfill the required lifetime
}
-fn main() {}
+fn main() { }
--- /dev/null
+error[E0477]: the type `(dyn Bar<&'a u32> + 'static)` does not fulfill the required lifetime
+ --> $DIR/supertrait-lifetime-bound.rs:10:5
+ |
+LL | test1::<dyn Bar<&'a u32>, _>();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: type must satisfy the static lifetime as required by this binding
+ --> $DIR/supertrait-lifetime-bound.rs:5:22
+ |
+LL | fn test1<T: ?Sized + Bar<S>, S>() { }
+ | ^^^^^^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0477`.
--- /dev/null
+// edition:2018
+
+#![feature(ptr_metadata)]
+#![feature(type_alias_impl_trait)]
+
+type Opaque = impl std::fmt::Debug + ?Sized;
+
+fn opaque() -> &'static Opaque {
+ &[1] as &[i32]
+}
+
+fn a<T: ?Sized>() {
+ is_thin::<T>();
+ //~^ ERROR type mismatch resolving `<T as Pointee>::Metadata == ()`
+
+ is_thin::<Opaque>();
+ //~^ ERROR type mismatch resolving `<impl Debug + ?Sized as Pointee>::Metadata == ()`
+}
+
+fn is_thin<T: std::ptr::Pointee<Metadata = ()> + ?Sized>() {}
+
+fn main() {}
--- /dev/null
+error[E0271]: type mismatch resolving `<T as Pointee>::Metadata == ()`
+ --> $DIR/pointee-tail-is-generic-errors.rs:13:5
+ |
+LL | is_thin::<T>();
+ | ^^^^^^^^^^^^ expected `()`, found associated type
+ |
+ = note: expected unit type `()`
+ found associated type `<T as Pointee>::Metadata`
+ = help: consider constraining the associated type `<T as Pointee>::Metadata` to `()`
+ = note: for more information, visit https://doc.rust-lang.org/book/ch19-03-advanced-traits.html
+note: required by a bound in `is_thin`
+ --> $DIR/pointee-tail-is-generic-errors.rs:20:33
+ |
+LL | fn is_thin<T: std::ptr::Pointee<Metadata = ()> + ?Sized>() {}
+ | ^^^^^^^^^^^^^ required by this bound in `is_thin`
+
+error[E0271]: type mismatch resolving `<impl Debug + ?Sized as Pointee>::Metadata == ()`
+ --> $DIR/pointee-tail-is-generic-errors.rs:16:5
+ |
+LL | type Opaque = impl std::fmt::Debug + ?Sized;
+ | ----------------------------- the found opaque type
+...
+LL | is_thin::<Opaque>();
+ | ^^^^^^^^^^^^^^^^^ expected `()`, found associated type
+ |
+ = note: expected unit type `()`
+ found associated type `<impl Debug + ?Sized as Pointee>::Metadata`
+note: required by a bound in `is_thin`
+ --> $DIR/pointee-tail-is-generic-errors.rs:20:33
+ |
+LL | fn is_thin<T: std::ptr::Pointee<Metadata = ()> + ?Sized>() {}
+ | ^^^^^^^^^^^^^ required by this bound in `is_thin`
+help: consider constraining the associated type `<impl Debug + ?Sized as Pointee>::Metadata` to `()`
+ |
+LL | type Opaque = impl std::fmt::Debug<Metadata = ()> + ?Sized;
+ | +++++++++++++++
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0271`.
--- /dev/null
+// check-pass
+// edition:2018
+
+#![feature(ptr_metadata)]
+#![feature(type_alias_impl_trait)]
+
+type Opaque = impl std::future::Future;
+
+fn opaque() -> Opaque {
+ async {}
+}
+
+fn a<T>() {
+ // type parameter T is known to be sized
+ is_thin::<T>();
+ // tail of ADT (which is a type param) is known to be sized
+ is_thin::<std::cell::Cell<T>>();
+ // opaque type is known to be sized
+ is_thin::<Opaque>();
+}
+
+fn a2<T: Iterator>() {
+ // associated type is known to be sized
+ is_thin::<T::Item>();
+}
+
+fn is_thin<T: std::ptr::Pointee<Metadata = ()>>() {}
+
+fn main() {}
fn bar<T>(x: T) {
x += x; //~ ERROR binary assignment operation `+=` cannot be applied to type `T`
}
+
+fn baz<T>(x: T) {
+ let y = -x; //~ ERROR cannot apply unary operator `-` to type `T`
+ let y = !x; //~ ERROR cannot apply unary operator `!` to type `T`
+ let y = *x; //~ ERROR type `T` cannot be dereferenced
+}
LL | fn bar<T: std::ops::AddAssign>(x: T) {
| +++++++++++++++++++++
-error: aborting due to 2 previous errors
+error[E0600]: cannot apply unary operator `-` to type `T`
+ --> $DIR/missing_trait_impl.rs:13:13
+ |
+LL | let y = -x;
+ | ^^ cannot apply unary operator `-`
+ |
+help: consider restricting type parameter `T`
+ |
+LL | fn baz<T: std::ops::Neg<Output = T>>(x: T) {
+ | +++++++++++++++++++++++++++
+
+error[E0600]: cannot apply unary operator `!` to type `T`
+ --> $DIR/missing_trait_impl.rs:14:13
+ |
+LL | let y = !x;
+ | ^^ cannot apply unary operator `!`
+ |
+help: consider restricting type parameter `T`
+ |
+LL | fn baz<T: std::ops::Not<Output = T>>(x: T) {
+ | +++++++++++++++++++++++++++
+
+error[E0614]: type `T` cannot be dereferenced
+ --> $DIR/missing_trait_impl.rs:15:13
+ |
+LL | let y = *x;
+ | ^^
+
+error: aborting due to 5 previous errors
-Some errors have detailed explanations: E0368, E0369.
+Some errors have detailed explanations: E0368, E0369, E0600, E0614.
For more information about an error, try `rustc --explain E0368`.
--- /dev/null
+struct Wrapper<T>(T);
+
+trait Trait {
+ fn method(&self) {}
+}
+
+impl<'a, T> Trait for Wrapper<&'a T> where Wrapper<T>: Trait {}
+
+fn get<T>() -> T {
+ unimplemented!()
+}
+
+fn main() {
+ let thing = get::<Thing>();//~ERROR cannot find type `Thing` in this scope [E0412]
+ let wrapper = Wrapper(thing);
+ Trait::method(&wrapper);
+}
--- /dev/null
+error[E0412]: cannot find type `Thing` in this scope
+ --> $DIR/issue-90319.rs:14:23
+ |
+LL | let thing = get::<Thing>();
+ | ^^^^^ not found in this scope
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0412`.
LL | fn f(){||yield(((){),
| -^^^^^^^^^^^^^^^ expected `()`, found generator
| |
- | possibly return type missing here?
+ | help: a return type might be missing here: `-> _`
|
= note: expected unit type `()`
found generator `[generator@$DIR/issue-91334.rs:10:8: 10:23]`
#[allow(unused)]
-fn foo() {
- //~^ NOTE possibly return type missing here?
+fn foo() { //~ HELP a return type might be missing here
vec!['a'].iter().map(|c| c)
//~^ ERROR mismatched types [E0308]
//~| NOTE expected `()`, found struct `Map`
//~| NOTE expected unit type `()`
+ //~| HELP consider using a semicolon here
}
fn main() {}
error[E0308]: mismatched types
- --> $DIR/return_type_containing_closure.rs:4:5
+ --> $DIR/return_type_containing_closure.rs:3:5
|
-LL | fn foo() {
- | - possibly return type missing here?
-LL |
LL | vec!['a'].iter().map(|c| c)
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^- help: consider using a semicolon here: `;`
- | |
- | expected `()`, found struct `Map`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `()`, found struct `Map`
|
= note: expected unit type `()`
- found struct `Map<std::slice::Iter<'_, char>, [closure@$DIR/return_type_containing_closure.rs:4:26: 4:31]>`
+ found struct `Map<std::slice::Iter<'_, char>, [closure@$DIR/return_type_containing_closure.rs:3:26: 3:31]>`
+help: consider using a semicolon here
+ |
+LL | vec!['a'].iter().map(|c| c);
+ | +
+help: a return type might be missing here
+ |
+LL | fn foo() -> _ {
+ | ++++
error: aborting due to previous error
...
LL | impl Copy for W {}
| ^^^^
+ |
+note: the `Copy` impl for `ManuallyDrop<String>` requires that `String: Copy`
+ --> $DIR/union-copy.rs:8:5
+ |
+LL | a: std::mem::ManuallyDrop<String>
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: aborting due to previous error
LL | let w = u.clone();
| ^^^^^ method cannot be called on `U5<CloneNoCopy>` due to unsatisfied trait bounds
|
+note: trait bound `CloneNoCopy: Copy` was not satisfied
+ --> $DIR/union-derive-clone.rs:28:10
+ |
+LL | #[derive(Clone, Copy)]
+ | ^^^^^ unsatisfied trait bound introduced in this `derive` macro
= note: the following trait bounds were not satisfied:
`CloneNoCopy: Copy`
which is required by `U5<CloneNoCopy>: Clone`
LL | let w = u.clone();
| ^^^^^ method cannot be called on `U5<CloneNoCopy>` due to unsatisfied trait bounds
|
+note: trait bound `CloneNoCopy: Copy` was not satisfied
+ --> $DIR/union-derive-clone.rs:28:10
+ |
+LL | #[derive(Clone, Copy)]
+ | ^^^^^ unsatisfied trait bound introduced in this `derive` macro
= note: the following trait bounds were not satisfied:
`CloneNoCopy: Copy`
which is required by `U5<CloneNoCopy>: Clone`
--- /dev/null
+// run-pass
+// revisions: mir thir
+// [thir]compile-flags: -Z thir-unsafeck
+
+#![feature(untagged_unions)]
+
+union Foo {
+ bar: i8,
+ _blah: isize,
+ _zst: (),
+}
+
+struct FooHolder {
+ inner_foo: Foo
+}
+
+fn do_nothing(_x: &mut Foo) {}
+
+pub fn main() {
+ let mut foo = Foo { bar: 5 };
+ do_nothing(&mut foo);
+ foo.bar = 6;
+ unsafe { foo.bar += 1; }
+ assert_eq!(unsafe { foo.bar }, 7);
+ unsafe {
+ let Foo { bar: inner } = foo;
+ assert_eq!(inner, 7);
+ }
+
+ let foo = Foo { bar: 5 };
+ let foo = if let 3 = if let true = true { 3 } else { 4 } { foo } else { foo };
+
+ let (_foo2, _random) = (foo, 42);
+
+ let mut foo_holder = FooHolder { inner_foo: Foo { bar: 5 } };
+ foo_holder.inner_foo.bar = 4;
+ assert_eq!(unsafe { foo_holder.inner_foo.bar }, 4);
+ drop(foo_holder);
+}
-warning: unnecessary `unsafe` block
- --> $DIR/union.rs:61:5
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:30:20
|
-LL | unsafe {
- | ^^^^^^ unnecessary `unsafe` block
+LL | Foo { bar: _a } => {},
+ | ^^ access to union field
|
- = note: `#[warn(unused_unsafe)]` on by default
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
-warning: unnecessary `unsafe` block
- --> $DIR/union.rs:66:5
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:32:11
|
-LL | unsafe {
- | ^^^^^^ unnecessary `unsafe` block
+LL | match foo {
+ | ^^^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
-warning: 2 warnings emitted
+error: aborting due to 2 previous errors
+For more information about this error, try `rustc --explain E0133`.
-// run-pass
// revisions: mir thir
// [thir]compile-flags: -Z thir-unsafeck
Pineapple,
}
-struct FooHolder {
- inner_foo: Foo
-}
-
fn do_nothing(_x: &mut Foo) {}
pub fn main() {
let mut foo = Foo { bar: 5 };
do_nothing(&mut foo);
- foo.bar = 6;
- unsafe { foo.bar += 1; }
- assert_eq!(unsafe { foo.bar }, 7);
- unsafe {
- let Foo { bar: inner } = foo;
- assert_eq!(inner, 7);
+
+ // This is UB, so this test isn't run
+ match foo {
+ Foo { bar: _a } => {}, //~ ERROR access to union field is unsafe
+ }
+ match foo { //[mir]~ ERROR access to union field is unsafe
+ Foo {
+ pizza: Pizza { //[thir]~ ERROR access to union field is unsafe
+ topping: Some(PizzaTopping::Cheese) | Some(PizzaTopping::Pineapple) | None
+ }
+ } => {},
}
- let foo = if let true = true { foo } else { foo };
- unsafe {
- match foo {
- Foo { bar: _a } => {},
- }
+ // MIR unsafeck incorrectly thinks that no unsafe block is needed to do these
+ match foo {
+ Foo { zst: () } => {}, //[thir]~ ERROR access to union field is unsafe
}
- unsafe {
- match foo {
- Foo {
- pizza: Pizza {
- topping: Some(PizzaTopping::Cheese) | Some(PizzaTopping::Pineapple) | None
- }
- } => {},
- }
+ match foo {
+ Foo { pizza: Pizza { .. } } => {}, //[thir]~ ERROR access to union field is unsafe
}
+
// binding to wildcard is okay
match foo {
Foo { bar: _ } => {},
}
let Foo { bar: _ } = foo;
- // MIR unsafeck incorrectly thinks that it is safe to do these
- unsafe { //[mir]~ WARNING
- match foo {
- Foo { zst: () } => {},
- }
- }
- unsafe { //[mir]~ WARNING
- match foo {
- Foo { pizza: Pizza { .. } } => {},
- }
- }
- let foo = Foo { bar: 5 };
- let foo = if let 3 = if let true = true { 3 } else { 4 } { foo } else { foo };
-
- let (_foo2, _random) = (foo, 42);
-
- let mut foo_holder = FooHolder { inner_foo: Foo { bar: 5 } };
- foo_holder.inner_foo.bar = 4;
- assert_eq!(unsafe { foo_holder.inner_foo.bar }, 4);
- drop(foo_holder);
}
--- /dev/null
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:30:20
+ |
+LL | Foo { bar: _a } => {},
+ | ^^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
+
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:34:20
+ |
+LL | pizza: Pizza {
+ | ____________________^
+LL | | topping: Some(PizzaTopping::Cheese) | Some(PizzaTopping::Pineapple) | None
+LL | | }
+ | |_____________^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
+
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:42:20
+ |
+LL | Foo { zst: () } => {},
+ | ^^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
+
+error[E0133]: access to union field is unsafe and requires unsafe function or block
+ --> $DIR/union.rs:45:22
+ |
+LL | Foo { pizza: Pizza { .. } } => {},
+ | ^^^^^^^^^^^^ access to union field
+ |
+ = note: the field may not be properly initialized: using uninitialized data will cause undefined behavior
+
+error: aborting due to 4 previous errors
+
+For more information about this error, try `rustc --explain E0133`.
+++ /dev/null
-// run-pass
-// aux-build:weak-lang-items.rs
-
-// ignore-emscripten no threads support
-// pretty-expanded FIXME #23616
-
-extern crate weak_lang_items as other;
-
-use std::thread;
-
-fn main() {
- let _ = thread::spawn(move|| {
- other::foo()
- });
-}
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `&T` is not an iterator
|
= help: the trait `Iterator` is not implemented for `&T`
-note: required because of the requirements on the impl of `IntoIterator` for `&T`
- --> $DIR/hir-wf-check-erase-regions.rs:6:29
- |
-LL | impl<'a, T, const N: usize> IntoIterator for &'a Table<T, N> {
- | ^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
+ = note: required because of the requirements on the impl of `IntoIterator` for `&T`
note: required by a bound in `Flatten`
--> $SRC_DIR/core/src/iter/adapters/flatten.rs:LL:COL
|
| ^^^^^^^^^^^^^^ `&T` is not an iterator
|
= help: the trait `Iterator` is not implemented for `&T`
-note: required because of the requirements on the impl of `IntoIterator` for `&T`
- --> $DIR/hir-wf-check-erase-regions.rs:6:29
- |
-LL | impl<'a, T, const N: usize> IntoIterator for &'a Table<T, N> {
- | ^^^^^^^^^^^^ ^^^^^^^^^^^^^^^
+ = note: required because of the requirements on the impl of `IntoIterator` for `&T`
note: required by a bound in `Flatten`
--> $SRC_DIR/core/src/iter/adapters/flatten.rs:LL:COL
|
--- /dev/null
+// run-pass
+// aux-build:xcrate_unit_struct.rs
+// pretty-expanded FIXME #23616
+#![allow(non_upper_case_globals)]
+
+extern crate xcrate_unit_struct;
+
+const s1: xcrate_unit_struct::Struct = xcrate_unit_struct::Struct;
+static s2: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::UnitVariant;
+static s3: xcrate_unit_struct::Unit =
+ xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct);
+static s4: xcrate_unit_struct::Unit = xcrate_unit_struct::Unit::Argument(s1);
+static s5: xcrate_unit_struct::TupleStruct = xcrate_unit_struct::TupleStruct(20, "foo");
+
+fn f1(_: xcrate_unit_struct::Struct) {}
+fn f2(_: xcrate_unit_struct::Unit) {}
+fn f3(_: xcrate_unit_struct::TupleStruct) {}
+
+pub fn main() {
+ f1(xcrate_unit_struct::Struct);
+ f2(xcrate_unit_struct::Unit::UnitVariant);
+ f2(xcrate_unit_struct::Unit::Argument(xcrate_unit_struct::Struct));
+ f3(xcrate_unit_struct::TupleStruct(10, "bar"));
+
+ f1(s1);
+ f2(s2);
+ f2(s3);
+ f2(s4);
+ f3(s5);
+}
"powerpc64-unknown-linux-gnu",
"powerpc64le-unknown-linux-gnu",
"riscv32i-unknown-none-elf",
+ "riscv32im-unknown-none-elf",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",
"riscv32gc-unknown-linux-gnu",
-Subproject commit 65c82664263feddc5fe2d424be0993c28d46377a
+Subproject commit 109bfbd055325ef87a6e7f63d67da7e838f8300b
Test {
name: "ripgrep",
repo: "https://github.com/BurntSushi/ripgrep",
- sha: "3de31f752729525d85a3d1575ac1978733b3f7e7",
+ sha: "ced5b92aa93eb47e892bd2fd26ab454008721730",
lock: None,
packages: &[],
features: None,
sha: "91493fe47175076f330ce5fc518f0196c0476f56",
lock: None,
packages: &[],
- // Test the embeded sqlite variant of diesel
+ // Test the embedded sqlite variant of diesel
// This does not require any dependency to be present,
// sqlite will be compiled as part of the build process
features: Some(&["sqlite", "libsqlite3-sys/bundled"]),
id: reproducer
attributes:
label: Reproducer
- description: Please provide the code and steps to repoduce the bug
+ description: Please provide the code and steps to reproduce the bug
value: |
I tried this code:
attributes:
label: Summary
description: |
- If possible, try to provide a minimal verifiable example. You can read ["Rust Bug Minimization Patterns"][mve] for how to create smaller examples. Otherwise, provide the crate where the ICE occured.
+ If possible, try to provide a minimal verifiable example. You can read ["Rust Bug Minimization Patterns"][mve] for how to create smaller examples. Otherwise, provide the crate where the ICE occurred.
[mve]: http://blog.pnkfx.org/blog/2019/11/18/rust-bug-minimization-patterns/
validations:
run: bash .github/driver.sh
env:
OS: ${{ runner.os }}
-
- - name: Test cargo dev new lint
- run: |
- cargo dev new_lint --name new_early_pass --pass early
- cargo dev new_lint --name new_late_pass --pass late
- cargo check
- git reset --hard HEAD
env:
OS: ${{ runner.os }}
- - name: Test cargo dev new lint
- run: |
- cargo dev new_lint --name new_early_pass --pass early
- cargo dev new_lint --name new_late_pass --pass late
- cargo check
- git reset --hard HEAD
-
integration_build:
needs: changelog
runs-on: ubuntu-latest
- name: Test fmt
run: cargo dev fmt --check
+ - name: Test cargo dev new lint
+ run: |
+ cargo dev new_lint --name new_early_pass --pass early
+ cargo dev new_lint --name new_late_pass --pass late
+ cargo check
+ git reset --hard HEAD
+
# These jobs doesn't actually test anything, but they're only used to tell
# bors the build completed, as there is no practical way to detect when a
# workflow is successful listening to webhooks only.
<!-- lint disable no-unused-definitions -->
<!-- begin autogenerated links to lint list -->
[`absurd_extreme_comparisons`]: https://rust-lang.github.io/rust-clippy/master/index.html#absurd_extreme_comparisons
+[`allow_attributes_without_reason`]: https://rust-lang.github.io/rust-clippy/master/index.html#allow_attributes_without_reason
[`almost_swapped`]: https://rust-lang.github.io/rust-clippy/master/index.html#almost_swapped
[`approx_constant`]: https://rust-lang.github.io/rust-clippy/master/index.html#approx_constant
[`as_conversions`]: https://rust-lang.github.io/rust-clippy/master/index.html#as_conversions
[`bytes_nth`]: https://rust-lang.github.io/rust-clippy/master/index.html#bytes_nth
[`cargo_common_metadata`]: https://rust-lang.github.io/rust-clippy/master/index.html#cargo_common_metadata
[`case_sensitive_file_extension_comparisons`]: https://rust-lang.github.io/rust-clippy/master/index.html#case_sensitive_file_extension_comparisons
+[`cast_enum_constructor`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_enum_constructor
[`cast_enum_truncation`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_enum_truncation
[`cast_lossless`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_lossless
[`cast_possible_truncation`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_possible_truncation
[`cast_ptr_alignment`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_ptr_alignment
[`cast_ref_to_mut`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_ref_to_mut
[`cast_sign_loss`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_sign_loss
+[`cast_slice_different_sizes`]: https://rust-lang.github.io/rust-clippy/master/index.html#cast_slice_different_sizes
[`char_lit_as_u8`]: https://rust-lang.github.io/rust-clippy/master/index.html#char_lit_as_u8
[`chars_last_cmp`]: https://rust-lang.github.io/rust-clippy/master/index.html#chars_last_cmp
[`chars_next_cmp`]: https://rust-lang.github.io/rust-clippy/master/index.html#chars_next_cmp
[`iter_nth_zero`]: https://rust-lang.github.io/rust-clippy/master/index.html#iter_nth_zero
[`iter_overeager_cloned`]: https://rust-lang.github.io/rust-clippy/master/index.html#iter_overeager_cloned
[`iter_skip_next`]: https://rust-lang.github.io/rust-clippy/master/index.html#iter_skip_next
+[`iter_with_drain`]: https://rust-lang.github.io/rust-clippy/master/index.html#iter_with_drain
[`iterator_step_by_zero`]: https://rust-lang.github.io/rust-clippy/master/index.html#iterator_step_by_zero
[`just_underscores_and_digits`]: https://rust-lang.github.io/rust-clippy/master/index.html#just_underscores_and_digits
[`large_const_arrays`]: https://rust-lang.github.io/rust-clippy/master/index.html#large_const_arrays
[`missing_inline_in_public_items`]: https://rust-lang.github.io/rust-clippy/master/index.html#missing_inline_in_public_items
[`missing_panics_doc`]: https://rust-lang.github.io/rust-clippy/master/index.html#missing_panics_doc
[`missing_safety_doc`]: https://rust-lang.github.io/rust-clippy/master/index.html#missing_safety_doc
+[`missing_spin_loop`]: https://rust-lang.github.io/rust-clippy/master/index.html#missing_spin_loop
[`mistyped_literal_suffixes`]: https://rust-lang.github.io/rust-clippy/master/index.html#mistyped_literal_suffixes
[`mixed_case_hex_literals`]: https://rust-lang.github.io/rust-clippy/master/index.html#mixed_case_hex_literals
[`mod_module_files`]: https://rust-lang.github.io/rust-clippy/master/index.html#mod_module_files
[`needless_for_each`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_for_each
[`needless_late_init`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_late_init
[`needless_lifetimes`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_lifetimes
+[`needless_match`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_match
[`needless_option_as_deref`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_option_as_deref
[`needless_pass_by_value`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_pass_by_value
[`needless_question_mark`]: https://rust-lang.github.io/rust-clippy/master/index.html#needless_question_mark
[`not_unsafe_ptr_arg_deref`]: https://rust-lang.github.io/rust-clippy/master/index.html#not_unsafe_ptr_arg_deref
[`octal_escapes`]: https://rust-lang.github.io/rust-clippy/master/index.html#octal_escapes
[`ok_expect`]: https://rust-lang.github.io/rust-clippy/master/index.html#ok_expect
+[`only_used_in_recursion`]: https://rust-lang.github.io/rust-clippy/master/index.html#only_used_in_recursion
[`op_ref`]: https://rust-lang.github.io/rust-clippy/master/index.html#op_ref
[`option_as_ref_deref`]: https://rust-lang.github.io/rust-clippy/master/index.html#option_as_ref_deref
[`option_env_unwrap`]: https://rust-lang.github.io/rust-clippy/master/index.html#option_env_unwrap
[`option_map_unit_fn`]: https://rust-lang.github.io/rust-clippy/master/index.html#option_map_unit_fn
[`option_option`]: https://rust-lang.github.io/rust-clippy/master/index.html#option_option
[`or_fun_call`]: https://rust-lang.github.io/rust-clippy/master/index.html#or_fun_call
+[`or_then_unwrap`]: https://rust-lang.github.io/rust-clippy/master/index.html#or_then_unwrap
[`out_of_bounds_indexing`]: https://rust-lang.github.io/rust-clippy/master/index.html#out_of_bounds_indexing
[`overflow_check_conditional`]: https://rust-lang.github.io/rust-clippy/master/index.html#overflow_check_conditional
[`panic`]: https://rust-lang.github.io/rust-clippy/master/index.html#panic
[`unit_return_expecting_ord`]: https://rust-lang.github.io/rust-clippy/master/index.html#unit_return_expecting_ord
[`unnecessary_cast`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_cast
[`unnecessary_filter_map`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_filter_map
+[`unnecessary_find_map`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_find_map
[`unnecessary_fold`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_fold
+[`unnecessary_join`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_join
[`unnecessary_lazy_evaluations`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_lazy_evaluations
[`unnecessary_mut_passed`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_mut_passed
[`unnecessary_operation`]: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_operation
futures = "0.3"
parking_lot = "0.11.2"
tokio = { version = "1", features = ["io-util"] }
-num_cpus = "1.13"
+rustc-semver = "1.1"
[build-dependencies]
rustc_tools_util = { version = "0.2", path = "rustc_tools_util" }
"usage of `cfg(operating_system)` instead of `cfg(target_os = \"operating_system\")`"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for attributes that allow lints without a reason.
+ ///
+ /// (This requires the `lint_reasons` feature)
+ ///
+ /// ### Why is this bad?
+ /// Allowing a lint should always have a reason. This reason should be documented to
+ /// ensure that others understand the reasoning
+ ///
+ /// ### Example
+ /// Bad:
+ /// ```rust
+ /// #![feature(lint_reasons)]
+ ///
+ /// #![allow(clippy::some_lint)]
+ /// ```
+ ///
+ /// Good:
+ /// ```rust
+ /// #![feature(lint_reasons)]
+ ///
+ /// #![allow(clippy::some_lint, reason = "False positive rust-lang/rust-clippy#1002020")]
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub ALLOW_ATTRIBUTES_WITHOUT_REASON,
+ restriction,
+ "ensures that all `allow` and `expect` attributes have a reason"
+}
+
declare_lint_pass!(Attributes => [
+ ALLOW_ATTRIBUTES_WITHOUT_REASON,
INLINE_ALWAYS,
DEPRECATED_SEMVER,
USELESS_ATTRIBUTE,
if is_lint_level(ident.name) {
check_clippy_lint_names(cx, ident.name, items);
}
+ if matches!(ident.name, sym::allow | sym::expect) {
+ check_lint_reason(cx, ident.name, items, attr);
+ }
if items.is_empty() || !attr.has_name(sym::deprecated) {
return;
}
}
}
+fn check_lint_reason(cx: &LateContext<'_>, name: Symbol, items: &[NestedMetaItem], attr: &'_ Attribute) {
+ // Check for the feature
+ if !cx.tcx.sess.features_untracked().lint_reasons {
+ return;
+ }
+
+ // Check if the reason is present
+ if let Some(item) = items.last().and_then(NestedMetaItem::meta_item)
+ && let MetaItemKind::NameValue(_) = &item.kind
+ && item.path == sym::reason
+ {
+ return;
+ }
+
+ span_lint_and_help(
+ cx,
+ ALLOW_ATTRIBUTES_WITHOUT_REASON,
+ attr.span,
+ &format!("`{}` attribute without specifying a reason", name.as_str()),
+ None,
+ "try adding a reason at the end with `, reason = \"..\"`",
+ );
+}
+
fn is_relevant_item(cx: &LateContext<'_>, item: &Item<'_>) -> bool {
if let ItemKind::Fn(_, _, eid) = item.kind {
is_relevant_expr(cx, cx.tcx.typeck_body(eid), &cx.tcx.hir().body(eid).value)
}
fn is_lint_level(symbol: Symbol) -> bool {
- matches!(symbol, sym::allow | sym::warn | sym::deny | sym::forbid)
+ matches!(symbol, sym::allow | sym::expect | sym::warn | sym::deny | sym::forbid)
}
fn check_interior_types(cx: &LateContext<'_>, ty_causes: &[GeneratorInteriorTypeCause<'_>], span: Span) {
for ty_cause in ty_causes {
if let rustc_middle::ty::Adt(adt, _) = ty_cause.ty.kind() {
- if is_mutex_guard(cx, adt.did) {
+ if is_mutex_guard(cx, adt.did()) {
span_lint_and_then(
cx,
AWAIT_HOLDING_LOCK,
},
);
}
- if is_refcell_ref(cx, adt.did) {
+ if is_refcell_ref(cx, adt.did()) {
span_lint_and_then(
cx,
AWAIT_HOLDING_REFCELL_REF,
use clippy_utils::diagnostics::span_lint_and_help;
use if_chain::if_chain;
use rustc_ast::ast::LitKind;
+use rustc_data_structures::intern::Interned;
use rustc_hir::{Expr, ExprKind, PathSegment};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty;
ty::Str => {
return Some(span);
},
- ty::Adt(&ty::AdtDef { did, .. }, _) => {
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did, .. }, _)), _) => {
if ctx.tcx.is_diagnostic_item(sym::String, did) {
return Some(span);
}
--- /dev/null
+use clippy_utils::diagnostics::span_lint;
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
+use rustc_hir::{Expr, ExprKind};
+use rustc_lint::LateContext;
+use rustc_middle::ty::{self, Ty};
+
+use super::CAST_ENUM_CONSTRUCTOR;
+
+pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, cast_expr: &Expr<'_>, cast_from: Ty<'_>) {
+ if matches!(cast_from.kind(), ty::FnDef(..))
+ && let ExprKind::Path(path) = &cast_expr.kind
+ && let Res::Def(DefKind::Ctor(CtorOf::Variant, CtorKind::Fn), _) = cx.qpath_res(path, cast_expr.hir_id)
+ {
+ span_lint(
+ cx,
+ CAST_ENUM_CONSTRUCTOR,
+ expr.span,
+ "cast of an enum tuple constructor to an integer",
+ );
+ }
+}
&& let Res::Def(DefKind::Ctor(..), id) = cx.qpath_res(p, cast_expr.hir_id)
{
let i = def.variant_index_with_ctor_id(id);
- let variant = &def.variants[i];
- let nbits = utils::enum_value_nbits(get_discriminant_value(cx.tcx, def, i));
+ let variant = def.variant(i);
+ let nbits = utils::enum_value_nbits(get_discriminant_value(cx.tcx, *def, i));
(nbits, Some(variant))
} else {
- (utils::enum_ty_to_nbits(def, cx.tcx), None)
+ (utils::enum_ty_to_nbits(*def, cx.tcx), None)
};
let to_nbits = utils::int_ty_to_nbits(cast_to, cx.tcx);
- let cast_from_ptr_size = def.repr.int.map_or(true, |ty| {
+ let cast_from_ptr_size = def.repr().int.map_or(true, |ty| {
matches!(
ty,
IntType::SignedInt(ast::IntTy::Isize) | IntType::UnsignedInt(ast::UintTy::Usize)
--- /dev/null
+use clippy_utils::{diagnostics::span_lint_and_then, meets_msrv, msrvs, source::snippet_opt};
+use if_chain::if_chain;
+use rustc_ast::Mutability;
+use rustc_hir::{Expr, ExprKind, Node};
+use rustc_lint::LateContext;
+use rustc_middle::ty::{self, layout::LayoutOf, Ty, TypeAndMut};
+use rustc_semver::RustcVersion;
+
+use super::CAST_SLICE_DIFFERENT_SIZES;
+
+fn is_child_of_cast(cx: &LateContext<'_>, expr: &Expr<'_>) -> bool {
+ let map = cx.tcx.hir();
+ if_chain! {
+ if let Some(parent_id) = map.find_parent_node(expr.hir_id);
+ if let Some(parent) = map.find(parent_id);
+ then {
+ let expr = match parent {
+ Node::Block(block) => {
+ if let Some(parent_expr) = block.expr {
+ parent_expr
+ } else {
+ return false;
+ }
+ },
+ Node::Expr(expr) => expr,
+ _ => return false,
+ };
+
+ matches!(expr.kind, ExprKind::Cast(..))
+ } else {
+ false
+ }
+ }
+}
+
+pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, msrv: &Option<RustcVersion>) {
+ // suggestion is invalid if `ptr::slice_from_raw_parts` does not exist
+ if !meets_msrv(msrv.as_ref(), &msrvs::PTR_SLICE_RAW_PARTS) {
+ return;
+ }
+
+ // if this cast is the child of another cast expression then don't emit something for it, the full
+ // chain will be analyzed
+ if is_child_of_cast(cx, expr) {
+ return;
+ }
+
+ if let Some((from_slice_ty, to_slice_ty)) = expr_cast_chain_tys(cx, expr) {
+ if let (Ok(from_layout), Ok(to_layout)) = (cx.layout_of(from_slice_ty.ty), cx.layout_of(to_slice_ty.ty)) {
+ let from_size = from_layout.size.bytes();
+ let to_size = to_layout.size.bytes();
+ if from_size != to_size && from_size != 0 && to_size != 0 {
+ span_lint_and_then(
+ cx,
+ CAST_SLICE_DIFFERENT_SIZES,
+ expr.span,
+ &format!(
+ "casting between raw pointers to `[{}]` (element size {}) and `[{}]` (element size {}) does not adjust the count",
+ from_slice_ty, from_size, to_slice_ty, to_size,
+ ),
+ |diag| {
+ let cast_expr = match expr.kind {
+ ExprKind::Cast(cast_expr, ..) => cast_expr,
+ _ => unreachable!("expr should be a cast as checked by expr_cast_chain_tys"),
+ };
+ let ptr_snippet = snippet_opt(cx, cast_expr.span).unwrap();
+
+ let (mutbl_fn_str, mutbl_ptr_str) = match to_slice_ty.mutbl {
+ Mutability::Mut => ("_mut", "mut"),
+ Mutability::Not => ("", "const"),
+ };
+ let sugg = format!(
+ "core::ptr::slice_from_raw_parts{mutbl_fn_str}({ptr_snippet} as *{mutbl_ptr_str} {to_slice_ty}, ..)"
+ );
+
+ diag.span_suggestion(
+ expr.span,
+ &format!("replace with `ptr::slice_from_raw_parts{mutbl_fn_str}`"),
+ sugg,
+ rustc_errors::Applicability::HasPlaceholders,
+ );
+ },
+ );
+ }
+ }
+ }
+}
+
+/// Returns the type T of the pointed to *const [T] or *mut [T] and the mutability of the slice if
+/// the type is one of those slices
+fn get_raw_slice_ty_mut(ty: Ty<'_>) -> Option<TypeAndMut<'_>> {
+ match ty.kind() {
+ ty::RawPtr(TypeAndMut { ty: slice_ty, mutbl }) => match slice_ty.kind() {
+ ty::Slice(ty) => Some(TypeAndMut { ty: *ty, mutbl: *mutbl }),
+ _ => None,
+ },
+ _ => None,
+ }
+}
+
+/// Returns the pair (original ptr T, final ptr U) if the expression is composed of casts
+/// Returns None if the expr is not a Cast
+fn expr_cast_chain_tys<'tcx>(cx: &LateContext<'tcx>, expr: &Expr<'_>) -> Option<(TypeAndMut<'tcx>, TypeAndMut<'tcx>)> {
+ if let ExprKind::Cast(cast_expr, _cast_to_hir_ty) = expr.peel_blocks().kind {
+ let cast_to = cx.typeck_results().expr_ty(expr);
+ let to_slice_ty = get_raw_slice_ty_mut(cast_to)?;
+ if let Some((inner_from_ty, _inner_to_ty)) = expr_cast_chain_tys(cx, cast_expr) {
+ Some((inner_from_ty, to_slice_ty))
+ } else {
+ let cast_from = cx.typeck_results().expr_ty(cast_expr);
+ let from_slice_ty = get_raw_slice_ty_mut(cast_from)?;
+ Some((from_slice_ty, to_slice_ty))
+ }
+ } else {
+ None
+ }
+}
+mod cast_enum_constructor;
mod cast_lossless;
mod cast_possible_truncation;
mod cast_possible_wrap;
mod cast_ptr_alignment;
mod cast_ref_to_mut;
mod cast_sign_loss;
+mod cast_slice_different_sizes;
mod char_lit_as_u8;
mod fn_to_numeric_cast;
mod fn_to_numeric_cast_any;
"casts from an enum type to an integral type which will truncate the value"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for `as` casts between raw pointers to slices with differently sized elements.
+ ///
+ /// ### Why is this bad?
+ /// The produced raw pointer to a slice does not update its length metadata. The produced
+ /// pointer will point to a different number of bytes than the original pointer because the
+ /// length metadata of a raw slice pointer is in elements rather than bytes.
+ /// Producing a slice reference from the raw pointer will either create a slice with
+ /// less data (which can be surprising) or create a slice with more data and cause Undefined Behavior.
+ ///
+ /// ### Example
+ /// // Missing data
+ /// ```rust
+ /// let a = [1_i32, 2, 3, 4];
+ /// let p = &a as *const [i32] as *const [u8];
+ /// unsafe {
+ /// println!("{:?}", &*p);
+ /// }
+ /// ```
+ /// // Undefined Behavior (note: also potential alignment issues)
+ /// ```rust
+ /// let a = [1_u8, 2, 3, 4];
+ /// let p = &a as *const [u8] as *const [u32];
+ /// unsafe {
+ /// println!("{:?}", &*p);
+ /// }
+ /// ```
+ /// Instead use `ptr::slice_from_raw_parts` to construct a slice from a data pointer and the correct length
+ /// ```rust
+ /// let a = [1_i32, 2, 3, 4];
+ /// let old_ptr = &a as *const [i32];
+ /// // The data pointer is cast to a pointer to the target `u8` not `[u8]`
+ /// // The length comes from the known length of 4 i32s times the 4 bytes per i32
+ /// let new_ptr = core::ptr::slice_from_raw_parts(old_ptr as *const u8, 16);
+ /// unsafe {
+ /// println!("{:?}", &*new_ptr);
+ /// }
+ /// ```
+ #[clippy::version = "1.60.0"]
+ pub CAST_SLICE_DIFFERENT_SIZES,
+ correctness,
+ "casting using `as` between raw pointers to slices of types with different sizes"
+}
+
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for casts from an enum tuple constructor to an integer.
+ ///
+ /// ### Why is this bad?
+ /// The cast is easily confused with casting a c-like enum value to an integer.
+ ///
+ /// ### Example
+ /// ```rust
+ /// enum E { X(i32) };
+ /// let _ = E::X as usize;
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub CAST_ENUM_CONSTRUCTOR,
+ suspicious,
+ "casts from an enum tuple constructor to an integer"
+}
+
pub struct Casts {
msrv: Option<RustcVersion>,
}
CAST_LOSSLESS,
CAST_REF_TO_MUT,
CAST_PTR_ALIGNMENT,
+ CAST_SLICE_DIFFERENT_SIZES,
UNNECESSARY_CAST,
FN_TO_NUMERIC_CAST_ANY,
FN_TO_NUMERIC_CAST,
CHAR_LIT_AS_U8,
PTR_AS_PTR,
CAST_ENUM_TRUNCATION,
+ CAST_ENUM_CONSTRUCTOR
]);
impl<'tcx> LateLintPass<'tcx> for Casts {
cast_sign_loss::check(cx, expr, cast_expr, cast_from, cast_to);
}
cast_lossless::check(cx, expr, cast_expr, cast_from, cast_to, &self.msrv);
+ cast_enum_constructor::check(cx, expr, cast_expr, cast_from);
}
}
cast_ref_to_mut::check(cx, expr);
cast_ptr_alignment::check(cx, expr);
char_lit_as_u8::check(cx, expr);
+ ptr_as_ptr::check(cx, expr, &self.msrv);
+ cast_slice_different_sizes::check(cx, expr, &self.msrv);
}
extract_msrv_attr!(LateContext);
.into()
}
-pub(super) fn enum_ty_to_nbits(adt: &AdtDef, tcx: TyCtxt<'_>) -> u64 {
+pub(super) fn enum_ty_to_nbits(adt: AdtDef<'_>, tcx: TyCtxt<'_>) -> u64 {
let mut explicit = 0i128;
let (start, end) = adt
- .variants
+ .variants()
.iter()
.fold((0, i128::MIN), |(start, end), variant| match variant.discr {
VariantDiscr::Relative(x) => match explicit.checked_add(i128::from(x)) {
///
/// Should be written:
///
- /// ```rust.ignore
+ /// ```rust,ignore
/// if x && y {
/// …
/// }
///
/// Should be written:
///
- /// ```rust.ignore
+ /// ```rust,ignore
/// if x {
/// …
/// } else if y {
/// Duplicate code is less maintainable.
///
/// ### Known problems
- /// * The lint doesn't check if the moved expressions modify values that are beeing used in
+ /// * The lint doesn't check if the moved expressions modify values that are being used in
/// the if condition. The suggestion can in that case modify the behavior of the program.
/// See [rust-clippy#7452](https://github.com/rust-lang/rust-clippy/issues/7452)
///
use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::is_in_test_function;
use clippy_utils::macros::root_macro_call_first_node;
use clippy_utils::source::snippet_with_applicability;
use rustc_errors::Applicability;
fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
let Some(macro_call) = root_macro_call_first_node(cx, expr) else { return };
if cx.tcx.is_diagnostic_item(sym::dbg_macro, macro_call.def_id) {
+ // we make an exception for test code
+ if is_in_test_function(cx.tcx, expr.hir_id) {
+ return;
+ }
let mut applicability = Applicability::MachineApplicable;
let suggestion = match expr.peel_drop_temps().kind {
// dbg!()
then {
// TODO: Work out a way to put "whatever the imported way of referencing
// this type in this file" rather than a fully-qualified type.
- let replacement = format!("{}::default()", cx.tcx.def_path_str(def.did));
+ let replacement = format!("{}::default()", cx.tcx.def_path_str(def.did()));
span_lint_and_sugg(
cx,
DEFAULT_TRAIT_ACCESS,
if let Some(adt) = binding_type.ty_adt_def();
if adt.is_struct();
let variant = adt.non_enum_variant();
- if adt.did.is_local() || !variant.is_field_list_non_exhaustive();
+ if adt.did().is_local() || !variant.is_field_list_non_exhaustive();
let module_did = cx.tcx.parent_module(stmt.hir_id).to_def_id();
if variant
.fields
if let ty::Adt(adt_def, substs) = binding_type.kind();
if !substs.is_empty();
then {
- let adt_def_ty_name = cx.tcx.item_name(adt_def.did);
+ let adt_def_ty_name = cx.tcx.item_name(adt_def.did());
let generic_args = substs.iter().collect::<Vec<_>>();
let tys_str = generic_args
.iter()
if_chain! {
if let Some(adt_def) = ty.ty_adt_def();
if adt_def.is_struct();
- if let Some(variant) = adt_def.variants.iter().next();
+ if let Some(variant) = adt_def.variants().iter().next();
then {
let fields_def = &variant.fields;
_ => false,
};
if should_emit {
- let path_string = cx.tcx.def_path_str(adt_def.did);
+ let path_string = cx.tcx.def_path_str(adt_def.did());
span_lint_and_help(
cx,
DERIVABLE_IMPLS,
let has_copy_impl = cx.tcx.all_local_trait_impls(()).get(©_id).map_or(false, |impls| {
impls
.iter()
- .any(|&id| matches!(cx.tcx.type_of(id).kind(), ty::Adt(adt, _) if ty_adt.did == adt.did))
+ .any(|&id| matches!(cx.tcx.type_of(id).kind(), ty::Adt(adt, _) if ty_adt.did() == adt.did()))
});
if !has_copy_impl {
return;
if let Some(trait_def_id) = trait_ref.trait_def_id();
if match_def_path(cx, trait_def_id, &paths::SERDE_DESERIALIZE);
if let ty::Adt(def, _) = ty.kind();
- if let Some(local_def_id) = def.did.as_local();
+ if let Some(local_def_id) = def.did().as_local();
let adt_hir_id = cx.tcx.hir().local_def_id_to_hir_id(local_def_id);
if !is_lint_allowed(cx, UNSAFE_DERIVE_DESERIALIZE, adt_hir_id);
- if cx.tcx.inherent_impls(def.did)
+ if cx.tcx.inherent_impls(def.did())
.iter()
.map(|imp_did| cx.tcx.hir().expect_item(imp_did.expect_local()))
.any(|imp| has_unsafe(cx, imp));
loop {
match parser.parse_item(ForceCollect::No) {
Ok(Some(item)) => match &item.kind {
- // Tests with one of these items are ignored
- ItemKind::Static(..)
- | ItemKind::Const(..)
- | ItemKind::ExternCrate(..)
- | ItemKind::ForeignMod(..) => return false,
- // We found a main function ...
ItemKind::Fn(box Fn {
sig, body: Some(block), ..
}) if item.ident.name == sym::main => {
return false;
}
},
- // Another function was found; this case is ignored too
- ItemKind::Fn(..) => return false,
+ // Tests with one of these items are ignored
+ ItemKind::Static(..)
+ | ItemKind::Const(..)
+ | ItemKind::ExternCrate(..)
+ | ItemKind::ForeignMod(..)
+ // Another function was found; this case is ignored
+ | ItemKind::Fn(..) => return false,
_ => {},
},
Ok(None) => break,
if let ItemKind::Enum(..) = item.kind {
let ty = cx.tcx.type_of(item.def_id);
let adt = ty.ty_adt_def().expect("already checked whether this is an enum");
- if adt.variants.is_empty() {
+ if adt.variants().is_empty() {
span_lint_and_help(
cx,
EMPTY_ENUM,
if let Some(Constant::Int(val)) = constant.and_then(miri_to_const) {
if let ty::Adt(adt, _) = ty.kind() {
if adt.is_enum() {
- ty = adt.repr.discr_type().to_ty(cx.tcx);
+ ty = adt.repr().discr_type().to_ty(cx.tcx);
}
}
match ty.kind() {
fn are_equal<'tcx>(cx: &LateContext<'tcx>, middle_ty: Ty<'_>, hir_ty: &rustc_hir::Ty<'_>) -> bool {
if_chain! {
if let ty::Adt(adt_def, _) = middle_ty.kind();
- if let Some(local_did) = adt_def.did.as_local();
+ if let Some(local_did) = adt_def.did().as_local();
let item = cx.tcx.hir().expect_item(local_did);
let middle_ty_id = item.def_id.to_def_id();
if let TyKind::Path(QPath::Resolved(_, path)) = hir_ty.kind;
ty::ImplContainer(def_id) => {
let ty = cx.tcx.type_of(def_id);
match ty.kind() {
- ty::Adt(adt, _) => cx.tcx.def_path_str(adt.did),
+ ty::Adt(adt, _) => cx.tcx.def_path_str(adt.did()),
_ => ty.to_string(),
}
},
// check for `unwrap`
if let Some(arglists) = method_chain_args(expr, &["unwrap"]) {
- let reciever_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
- if is_type_diagnostic_item(self.lcx, reciever_ty, sym::Option)
- || is_type_diagnostic_item(self.lcx, reciever_ty, sym::Result)
+ let receiver_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
+ if is_type_diagnostic_item(self.lcx, receiver_ty, sym::Option)
+ || is_type_diagnostic_item(self.lcx, receiver_ty, sym::Result)
{
self.result.push(expr.span);
}
///
/// ### Known problems
/// If the user can ensure that b is larger than a, the `.abs()` is
- /// technically unneccessary. However, it will make the code more robust and doesn't have any
+ /// technically unnecessary. However, it will make the code more robust and doesn't have any
/// large performance implications. If the abs call was deliberately left out for performance
/// reasons, it is probably better to state this explicitly in the code, which then can be done
/// with an allow.
if_chain! {
- // left hand side is a substraction
+ // left hand side is a subtraction
if let ExprKind::Binary(
Spanned {
node: BinOpKind::Sub,
if let Res::Def(DefKind::AssocConst, def_id) = cx.qpath_res(epsilon_path, rhs.hir_id);
if match_def_path(cx, def_id, &paths::F32_EPSILON) || match_def_path(cx, def_id, &paths::F64_EPSILON);
- // values of the substractions on the left hand side are of the type float
+ // values of the subtractions on the left hand side are of the type float
let t_val_l = cx.typeck_results().expr_ty(val_l);
let t_val_r = cx.typeck_results().expr_ty(val_r);
if let ty::Float(_) = t_val_l.kind();
if_chain! {
if format_args.format_string_parts == [kw::Empty];
if match cx.typeck_results().expr_ty(value).peel_refs().kind() {
- ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(sym::String, adt.did),
+ ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(sym::String, adt.did()),
ty::Str => true,
_ => false,
};
// primitive types are never mutable
ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => false,
ty::Adt(adt, substs) => {
- tys.insert(adt.did) && !ty.is_freeze(cx.tcx.at(span), cx.param_env)
- || KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did, path))
+ tys.insert(adt.did()) && !ty.is_freeze(cx.tcx.at(span), cx.param_env)
+ || KNOWN_WRAPPER_TYS.iter().any(|path| match_def_path(cx, adt.did(), path))
&& substs.types().any(|ty| is_mutable_ty(cx, ty, span, tys))
},
ty::Tuple(substs) => substs.iter().any(|ty| is_mutable_ty(cx, ty, span, tys)),
let ty = cx.typeck_results().expr_ty(expr);
if let Some(adt_def) = ty.ty_adt_def();
if adt_def.is_struct();
- if let Some(variant) = adt_def.variants.iter().next();
+ if let Some(variant) = adt_def.variants().iter().next();
if fields.iter().all(|f| f.is_shorthand);
then {
let mut def_order_map = FxHashMap::default();
if let ItemKind::Enum(ref def, _) = item.kind {
let ty = cx.tcx.type_of(item.def_id);
let adt = ty.ty_adt_def().expect("already checked whether this is an enum");
- if adt.variants.len() <= 1 {
+ if adt.variants().len() <= 1 {
return;
}
let mut variants_size: Vec<VariantInfo> = Vec::new();
- for (i, variant) in adt.variants.iter().enumerate() {
+ for (i, variant) in adt.variants().iter().enumerate() {
let mut fields_size = Vec::new();
for (i, f) in variant.fields.iter().enumerate() {
let ty = cx.tcx.type_of(f.did);
fn parse_len_output<'tcx>(cx: &LateContext<'_>, sig: FnSig<'tcx>) -> Option<LenOutput<'tcx>> {
match *sig.output().kind() {
ty::Int(_) | ty::Uint(_) => Some(LenOutput::Integral),
- ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Option, adt.did) => {
- subs.type_at(0).is_integral().then(|| LenOutput::Option(adt.did))
+ ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Option, adt.did()) => {
+ subs.type_at(0).is_integral().then(|| LenOutput::Option(adt.did()))
},
- ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Result, adt.did) => subs
+ ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Result, adt.did()) => subs
.type_at(0)
.is_integral()
- .then(|| LenOutput::Result(adt.did, subs.type_at(1))),
+ .then(|| LenOutput::Result(adt.did(), subs.type_at(1))),
_ => None,
}
}
fn matches_is_empty_output(self, ty: Ty<'_>) -> bool {
match (self, ty.kind()) {
(_, &ty::Bool) => true,
- (Self::Option(id), &ty::Adt(adt, subs)) if id == adt.did => subs.type_at(0).is_bool(),
- (Self::Result(id, err_ty), &ty::Adt(adt, subs)) if id == adt.did => {
+ (Self::Option(id), &ty::Adt(adt, subs)) if id == adt.did() => subs.type_at(0).is_bool(),
+ (Self::Result(id, err_ty), &ty::Adt(adt, subs)) if id == adt.did() => {
subs.type_at(0).is_bool() && subs.type_at(1) == err_ty
},
_ => false,
.any(|item| is_is_empty(cx, item))
}),
ty::Projection(ref proj) => has_is_empty_impl(cx, proj.item_def_id),
- ty::Adt(id, _) => has_is_empty_impl(cx, id.did),
+ ty::Adt(id, _) => has_is_empty_impl(cx, id.did()),
ty::Array(..) | ty::Slice(..) | ty::Str => true,
_ => false,
}
LintId::of(bool_assert_comparison::BOOL_ASSERT_COMPARISON),
LintId::of(booleans::LOGIC_BUG),
LintId::of(booleans::NONMINIMAL_BOOL),
+ LintId::of(casts::CAST_ENUM_CONSTRUCTOR),
LintId::of(casts::CAST_ENUM_TRUNCATION),
LintId::of(casts::CAST_REF_TO_MUT),
+ LintId::of(casts::CAST_SLICE_DIFFERENT_SIZES),
LintId::of(casts::CHAR_LIT_AS_U8),
LintId::of(casts::FN_TO_NUMERIC_CAST),
LintId::of(casts::FN_TO_NUMERIC_CAST_WITH_TRUNCATION),
LintId::of(loops::ITER_NEXT_LOOP),
LintId::of(loops::MANUAL_FLATTEN),
LintId::of(loops::MANUAL_MEMCPY),
+ LintId::of(loops::MISSING_SPIN_LOOP),
LintId::of(loops::MUT_RANGE_BOUND),
LintId::of(loops::NEEDLESS_COLLECT),
LintId::of(loops::NEEDLESS_RANGE_LOOP),
LintId::of(matches::MATCH_OVERLAPPING_ARM),
LintId::of(matches::MATCH_REF_PATS),
LintId::of(matches::MATCH_SINGLE_BINDING),
+ LintId::of(matches::NEEDLESS_MATCH),
LintId::of(matches::REDUNDANT_PATTERN_MATCHING),
LintId::of(matches::SINGLE_MATCH),
LintId::of(matches::WILDCARD_IN_OR_PATTERNS),
LintId::of(methods::OPTION_FILTER_MAP),
LintId::of(methods::OPTION_MAP_OR_NONE),
LintId::of(methods::OR_FUN_CALL),
+ LintId::of(methods::OR_THEN_UNWRAP),
LintId::of(methods::RESULT_MAP_OR_INTO_OPTION),
LintId::of(methods::SEARCH_IS_SOME),
LintId::of(methods::SHOULD_IMPLEMENT_TRAIT),
LintId::of(methods::SUSPICIOUS_SPLITN),
LintId::of(methods::UNINIT_ASSUMED_INIT),
LintId::of(methods::UNNECESSARY_FILTER_MAP),
+ LintId::of(methods::UNNECESSARY_FIND_MAP),
LintId::of(methods::UNNECESSARY_FOLD),
LintId::of(methods::UNNECESSARY_LAZY_EVALUATIONS),
LintId::of(methods::UNNECESSARY_TO_OWNED),
LintId::of(non_expressive_names::JUST_UNDERSCORES_AND_DIGITS),
LintId::of(non_octal_unix_permissions::NON_OCTAL_UNIX_PERMISSIONS),
LintId::of(octal_escapes::OCTAL_ESCAPES),
+ LintId::of(only_used_in_recursion::ONLY_USED_IN_RECURSION),
LintId::of(open_options::NONSENSICAL_OPEN_OPTIONS),
LintId::of(option_env_unwrap::OPTION_ENV_UNWRAP),
LintId::of(overflow_check_conditional::OVERFLOW_CHECK_CONDITIONAL),
LintId::of(transmute::UNSOUND_COLLECTION_TRANSMUTE),
LintId::of(transmute::WRONG_TRANSMUTE),
LintId::of(transmuting_null::TRANSMUTING_NULL),
- LintId::of(try_err::TRY_ERR),
LintId::of(types::BORROWED_BOX),
LintId::of(types::BOX_COLLECTION),
LintId::of(types::REDUNDANT_ALLOCATION),
LintId::of(map_unit_fn::RESULT_MAP_UNIT_FN),
LintId::of(matches::MATCH_AS_REF),
LintId::of(matches::MATCH_SINGLE_BINDING),
+ LintId::of(matches::NEEDLESS_MATCH),
LintId::of(matches::WILDCARD_IN_OR_PATTERNS),
LintId::of(methods::BIND_INSTEAD_OF_MAP),
LintId::of(methods::CLONE_ON_COPY),
LintId::of(methods::NEEDLESS_SPLITN),
LintId::of(methods::OPTION_AS_REF_DEREF),
LintId::of(methods::OPTION_FILTER_MAP),
+ LintId::of(methods::OR_THEN_UNWRAP),
LintId::of(methods::SEARCH_IS_SOME),
LintId::of(methods::SKIP_WHILE_NEXT),
LintId::of(methods::UNNECESSARY_FILTER_MAP),
+ LintId::of(methods::UNNECESSARY_FIND_MAP),
LintId::of(methods::USELESS_ASREF),
LintId::of(misc::SHORT_CIRCUIT_STATEMENT),
LintId::of(misc_early::UNNEEDED_WILDCARD_PATTERN),
LintId::of(neg_cmp_op_on_partial_ord::NEG_CMP_OP_ON_PARTIAL_ORD),
LintId::of(no_effect::NO_EFFECT),
LintId::of(no_effect::UNNECESSARY_OPERATION),
+ LintId::of(only_used_in_recursion::ONLY_USED_IN_RECURSION),
LintId::of(overflow_check_conditional::OVERFLOW_CHECK_CONDITIONAL),
LintId::of(partialeq_ne_impl::PARTIALEQ_NE_IMPL),
LintId::of(precedence::PRECEDENCE),
LintId::of(bit_mask::INEFFECTIVE_BIT_MASK),
LintId::of(booleans::LOGIC_BUG),
LintId::of(casts::CAST_REF_TO_MUT),
+ LintId::of(casts::CAST_SLICE_DIFFERENT_SIZES),
LintId::of(copies::IFS_SAME_COND),
LintId::of(copies::IF_SAME_THEN_ELSE),
LintId::of(derive::DERIVE_HASH_XOR_EQ),
LintId::of(utils::internal_lints::LINT_WITHOUT_LINT_PASS),
LintId::of(utils::internal_lints::MATCH_TYPE_ON_DIAGNOSTIC_ITEM),
LintId::of(utils::internal_lints::MISSING_CLIPPY_VERSION_ATTRIBUTE),
+ LintId::of(utils::internal_lints::MISSING_MSRV_ATTR_IMPL),
LintId::of(utils::internal_lints::OUTER_EXPN_EXPN_DATA),
LintId::of(utils::internal_lints::PRODUCE_ICE),
LintId::of(utils::internal_lints::UNNECESSARY_SYMBOL_STR),
#[cfg(feature = "internal")]
utils::internal_lints::MISSING_CLIPPY_VERSION_ATTRIBUTE,
#[cfg(feature = "internal")]
+ utils::internal_lints::MISSING_MSRV_ATTR_IMPL,
+ #[cfg(feature = "internal")]
utils::internal_lints::OUTER_EXPN_EXPN_DATA,
#[cfg(feature = "internal")]
utils::internal_lints::PRODUCE_ICE,
assign_ops::ASSIGN_OP_PATTERN,
assign_ops::MISREFACTORED_ASSIGN_OP,
async_yields_async::ASYNC_YIELDS_ASYNC,
+ attrs::ALLOW_ATTRIBUTES_WITHOUT_REASON,
attrs::BLANKET_CLIPPY_RESTRICTION_LINTS,
attrs::DEPRECATED_CFG_ATTR,
attrs::DEPRECATED_SEMVER,
cargo::REDUNDANT_FEATURE_NAMES,
cargo::WILDCARD_DEPENDENCIES,
case_sensitive_file_extension_comparisons::CASE_SENSITIVE_FILE_EXTENSION_COMPARISONS,
+ casts::CAST_ENUM_CONSTRUCTOR,
casts::CAST_ENUM_TRUNCATION,
casts::CAST_LOSSLESS,
casts::CAST_POSSIBLE_TRUNCATION,
casts::CAST_PTR_ALIGNMENT,
casts::CAST_REF_TO_MUT,
casts::CAST_SIGN_LOSS,
+ casts::CAST_SLICE_DIFFERENT_SIZES,
casts::CHAR_LIT_AS_U8,
casts::FN_TO_NUMERIC_CAST,
casts::FN_TO_NUMERIC_CAST_ANY,
loops::ITER_NEXT_LOOP,
loops::MANUAL_FLATTEN,
loops::MANUAL_MEMCPY,
+ loops::MISSING_SPIN_LOOP,
loops::MUT_RANGE_BOUND,
loops::NEEDLESS_COLLECT,
loops::NEEDLESS_RANGE_LOOP,
matches::MATCH_SINGLE_BINDING,
matches::MATCH_WILDCARD_FOR_SINGLE_VARIANTS,
matches::MATCH_WILD_ERR_ARM,
+ matches::NEEDLESS_MATCH,
matches::REDUNDANT_PATTERN_MATCHING,
matches::REST_PAT_IN_FULLY_BOUND_STRUCTS,
matches::SINGLE_MATCH,
methods::ITER_NTH_ZERO,
methods::ITER_OVEREAGER_CLONED,
methods::ITER_SKIP_NEXT,
+ methods::ITER_WITH_DRAIN,
methods::MANUAL_FILTER_MAP,
methods::MANUAL_FIND_MAP,
methods::MANUAL_SATURATING_ARITHMETIC,
methods::OPTION_FILTER_MAP,
methods::OPTION_MAP_OR_NONE,
methods::OR_FUN_CALL,
+ methods::OR_THEN_UNWRAP,
methods::RESULT_MAP_OR_INTO_OPTION,
methods::SEARCH_IS_SOME,
methods::SHOULD_IMPLEMENT_TRAIT,
methods::SUSPICIOUS_SPLITN,
methods::UNINIT_ASSUMED_INIT,
methods::UNNECESSARY_FILTER_MAP,
+ methods::UNNECESSARY_FIND_MAP,
methods::UNNECESSARY_FOLD,
+ methods::UNNECESSARY_JOIN,
methods::UNNECESSARY_LAZY_EVALUATIONS,
methods::UNNECESSARY_TO_OWNED,
methods::UNWRAP_OR_ELSE_DEFAULT,
non_send_fields_in_send_ty::NON_SEND_FIELDS_IN_SEND_TY,
nonstandard_macro_braces::NONSTANDARD_MACRO_BRACES,
octal_escapes::OCTAL_ESCAPES,
+ only_used_in_recursion::ONLY_USED_IN_RECURSION,
open_options::NONSENSICAL_OPEN_OPTIONS,
option_env_unwrap::OPTION_ENV_UNWRAP,
option_if_let_else::OPTION_IF_LET_ELSE,
LintId::of(future_not_send::FUTURE_NOT_SEND),
LintId::of(index_refutable_slice::INDEX_REFUTABLE_SLICE),
LintId::of(let_if_seq::USELESS_LET_IF_SEQ),
+ LintId::of(methods::ITER_WITH_DRAIN),
LintId::of(missing_const_for_fn::MISSING_CONST_FOR_FN),
LintId::of(mutable_debug_assertion::DEBUG_ASSERT_WITH_MUT_CALL),
LintId::of(mutex_atomic::MUTEX_ATOMIC),
LintId::of(methods::IMPLICIT_CLONE),
LintId::of(methods::INEFFICIENT_TO_STRING),
LintId::of(methods::MAP_UNWRAP_OR),
+ LintId::of(methods::UNNECESSARY_JOIN),
LintId::of(misc::FLOAT_CMP),
LintId::of(misc::USED_UNDERSCORE_BINDING),
LintId::of(mut_mut::MUT_MUT),
LintId::of(large_const_arrays::LARGE_CONST_ARRAYS),
LintId::of(large_enum_variant::LARGE_ENUM_VARIANT),
LintId::of(loops::MANUAL_MEMCPY),
+ LintId::of(loops::MISSING_SPIN_LOOP),
LintId::of(loops::NEEDLESS_COLLECT),
LintId::of(methods::EXPECT_FUN_CALL),
LintId::of(methods::EXTEND_WITH_DRAIN),
LintId::of(as_conversions::AS_CONVERSIONS),
LintId::of(asm_syntax::INLINE_ASM_X86_ATT_SYNTAX),
LintId::of(asm_syntax::INLINE_ASM_X86_INTEL_SYNTAX),
+ LintId::of(attrs::ALLOW_ATTRIBUTES_WITHOUT_REASON),
LintId::of(casts::FN_TO_NUMERIC_CAST_ANY),
LintId::of(create_dir::CREATE_DIR),
LintId::of(dbg_macro::DBG_MACRO),
LintId::of(strings::STRING_SLICE),
LintId::of(strings::STRING_TO_STRING),
LintId::of(strings::STR_TO_STRING),
+ LintId::of(try_err::TRY_ERR),
LintId::of(types::RC_BUFFER),
LintId::of(types::RC_MUTEX),
LintId::of(undocumented_unsafe_blocks::UNDOCUMENTED_UNSAFE_BLOCKS),
LintId::of(single_component_path_imports::SINGLE_COMPONENT_PATH_IMPORTS),
LintId::of(tabs_in_doc_comments::TABS_IN_DOC_COMMENTS),
LintId::of(to_digit_is_some::TO_DIGIT_IS_SOME),
- LintId::of(try_err::TRY_ERR),
LintId::of(unsafe_removed_from_name::UNSAFE_REMOVED_FROM_NAME),
LintId::of(unused_unit::UNUSED_UNIT),
LintId::of(upper_case_acronyms::UPPER_CASE_ACRONYMS),
LintId::of(attrs::BLANKET_CLIPPY_RESTRICTION_LINTS),
LintId::of(await_holding_invalid::AWAIT_HOLDING_LOCK),
LintId::of(await_holding_invalid::AWAIT_HOLDING_REFCELL_REF),
+ LintId::of(casts::CAST_ENUM_CONSTRUCTOR),
LintId::of(casts::CAST_ENUM_TRUNCATION),
LintId::of(eval_order_dependence::EVAL_ORDER_DEPENDENCE),
LintId::of(float_equality_without_abs::FLOAT_EQUALITY_WITHOUT_ABS),
// FIXME: switch to something more ergonomic here, once available.
// (Currently there is no way to opt into sysroot crates without `extern crate`.)
+extern crate rustc_arena;
extern crate rustc_ast;
extern crate rustc_ast_pretty;
extern crate rustc_attr;
mod non_send_fields_in_send_ty;
mod nonstandard_macro_braces;
mod octal_escapes;
+mod only_used_in_recursion;
mod open_options;
mod option_env_unwrap;
mod option_if_let_else;
store.register_late_pass(|| Box::new(utils::internal_lints::LintWithoutLintPass::default()));
store.register_late_pass(|| Box::new(utils::internal_lints::MatchTypeOnDiagItem));
store.register_late_pass(|| Box::new(utils::internal_lints::OuterExpnDataPass));
+ store.register_late_pass(|| Box::new(utils::internal_lints::MsrvAttrImpl));
}
store.register_late_pass(|| Box::new(utils::author::Author));
store.register_late_pass(move || Box::new(borrow_as_ptr::BorrowAsPtr::new(msrv)));
store.register_late_pass(move || Box::new(manual_bits::ManualBits::new(msrv)));
store.register_late_pass(|| Box::new(default_union_representation::DefaultUnionRepresentation));
+ store.register_late_pass(|| Box::new(only_used_in_recursion::OnlyUsedInRecursion));
store.register_late_pass(|| Box::new(dbg_macro::DbgMacro));
let cargo_ignore_publish = conf.cargo_ignore_publish;
store.register_late_pass(move || {
fn get_slice_like_element_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
match ty.kind() {
- ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Vec, adt.did) => Some(subs.type_at(0)),
+ ty::Adt(adt, subs) if cx.tcx.is_diagnostic_item(sym::Vec, adt.did()) => Some(subs.type_at(0)),
ty::Ref(_, subty, _) => get_slice_like_element_ty(cx, *subty),
ty::Slice(ty) | ty::Array(ty, _) => Some(*ty),
_ => None,
--- /dev/null
+use super::MISSING_SPIN_LOOP;
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::is_no_std_crate;
+use rustc_errors::Applicability;
+use rustc_hir::{Block, Expr, ExprKind};
+use rustc_lint::LateContext;
+use rustc_middle::ty;
+use rustc_span::sym;
+
+fn unpack_cond<'tcx>(cond: &'tcx Expr<'tcx>) -> &'tcx Expr<'tcx> {
+ match &cond.kind {
+ ExprKind::Block(
+ Block {
+ stmts: [],
+ expr: Some(e),
+ ..
+ },
+ _,
+ )
+ | ExprKind::Unary(_, e) => unpack_cond(e),
+ ExprKind::Binary(_, l, r) => {
+ let l = unpack_cond(l);
+ if let ExprKind::MethodCall(..) = l.kind {
+ l
+ } else {
+ unpack_cond(r)
+ }
+ },
+ _ => cond,
+ }
+}
+
+pub(super) fn check<'tcx>(cx: &LateContext<'tcx>, cond: &'tcx Expr<'_>, body: &'tcx Expr<'_>) {
+ if_chain! {
+ if let ExprKind::Block(Block { stmts: [], expr: None, ..}, _) = body.kind;
+ if let ExprKind::MethodCall(method, [callee, ..], _) = unpack_cond(cond).kind;
+ if [sym::load, sym::compare_exchange, sym::compare_exchange_weak].contains(&method.ident.name);
+ if let ty::Adt(def, _substs) = cx.typeck_results().expr_ty(callee).kind();
+ if cx.tcx.is_diagnostic_item(sym::AtomicBool, def.did());
+ then {
+ span_lint_and_sugg(
+ cx,
+ MISSING_SPIN_LOOP,
+ body.span,
+ "busy-waiting loop should at least have a spin loop hint",
+ "try this",
+ (if is_no_std_crate(cx) {
+ "{ core::hint::spin_loop() }"
+ } else {
+ "{ std::hint::spin_loop() }"
+ }).into(),
+ Applicability::MachineApplicable
+ );
+ }
+ }
+}
mod iter_next_loop;
mod manual_flatten;
mod manual_memcpy;
+mod missing_spin_loop;
mod mut_range_bound;
mod needless_collect;
mod needless_range_loop;
"for loops over `Option`s or `Result`s with a single expression can be simplified"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Check for empty spin loops
+ ///
+ /// ### Why is this bad?
+ /// The loop body should have something like `thread::park()` or at least
+ /// `std::hint::spin_loop()` to avoid needlessly burning cycles and conserve
+ /// energy. Perhaps even better use an actual lock, if possible.
+ ///
+ /// ### Known problems
+ /// This lint doesn't currently trigger on `while let` or
+ /// `loop { match .. { .. } }` loops, which would be considered idiomatic in
+ /// combination with e.g. `AtomicBool::compare_exchange_weak`.
+ ///
+ /// ### Example
+ ///
+ /// ```ignore
+ /// use core::sync::atomic::{AtomicBool, Ordering};
+ /// let b = AtomicBool::new(true);
+ /// // give a ref to `b` to another thread, wait for it to become false
+ /// while b.load(Ordering::Acquire) {};
+ /// ```
+ /// Use instead:
+ /// ```rust,no_run
+ ///# use core::sync::atomic::{AtomicBool, Ordering};
+ ///# let b = AtomicBool::new(true);
+ /// while b.load(Ordering::Acquire) {
+ /// std::hint::spin_loop()
+ /// }
+ /// ```
+ #[clippy::version = "1.59.0"]
+ pub MISSING_SPIN_LOOP,
+ perf,
+ "An empty busy waiting loop"
+}
+
declare_lint_pass!(Loops => [
MANUAL_MEMCPY,
MANUAL_FLATTEN,
WHILE_IMMUTABLE_CONDITION,
SAME_ITEM_PUSH,
SINGLE_ELEMENT_LOOP,
+ MISSING_SPIN_LOOP,
]);
impl<'tcx> LateLintPass<'tcx> for Loops {
if let Some(higher::While { condition, body }) = higher::While::hir(expr) {
while_immutable_condition::check(cx, condition, body);
+ missing_spin_loop::check(cx, condition, body);
}
needless_collect::check(expr, cx);
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet;
use clippy_utils::{path_to_local, search_same, SpanlessEq, SpanlessHash};
-use rustc_hir::{Arm, Expr, HirId, HirIdMap, HirIdSet, Pat, PatKind};
+use core::cmp::Ordering;
+use core::iter;
+use core::slice;
+use rustc_arena::DroplessArena;
+use rustc_ast::ast::LitKind;
+use rustc_errors::Applicability;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{Arm, Expr, ExprKind, HirId, HirIdMap, HirIdSet, Pat, PatKind, RangeEnd};
use rustc_lint::LateContext;
+use rustc_middle::ty;
+use rustc_span::Symbol;
use std::collections::hash_map::Entry;
use super::MATCH_SAME_ARMS;
-pub(crate) fn check<'tcx>(cx: &LateContext<'tcx>, arms: &'tcx [Arm<'_>]) {
+#[allow(clippy::too_many_lines)]
+pub(super) fn check<'tcx>(cx: &LateContext<'tcx>, arms: &'tcx [Arm<'_>]) {
let hash = |&(_, arm): &(usize, &Arm<'_>)| -> u64 {
let mut h = SpanlessHash::new(cx);
h.hash_expr(arm.body);
h.finish()
};
+ let arena = DroplessArena::default();
+ let normalized_pats: Vec<_> = arms
+ .iter()
+ .map(|a| NormalizedPat::from_pat(cx, &arena, a.pat))
+ .collect();
+
+ // The furthest forwards a pattern can move without semantic changes
+ let forwards_blocking_idxs: Vec<_> = normalized_pats
+ .iter()
+ .enumerate()
+ .map(|(i, pat)| {
+ normalized_pats[i + 1..]
+ .iter()
+ .enumerate()
+ .find_map(|(j, other)| pat.has_overlapping_values(other).then(|| i + 1 + j))
+ .unwrap_or(normalized_pats.len())
+ })
+ .collect();
+
+ // The furthest backwards a pattern can move without semantic changes
+ let backwards_blocking_idxs: Vec<_> = normalized_pats
+ .iter()
+ .enumerate()
+ .map(|(i, pat)| {
+ normalized_pats[..i]
+ .iter()
+ .enumerate()
+ .rev()
+ .zip(forwards_blocking_idxs[..i].iter().copied().rev())
+ .skip_while(|&(_, forward_block)| forward_block > i)
+ .find_map(|((j, other), forward_block)| {
+ (forward_block == i || pat.has_overlapping_values(other)).then(|| j)
+ })
+ .unwrap_or(0)
+ })
+ .collect();
+
let eq = |&(lindex, lhs): &(usize, &Arm<'_>), &(rindex, rhs): &(usize, &Arm<'_>)| -> bool {
let min_index = usize::min(lindex, rindex);
let max_index = usize::max(lindex, rindex);
}
};
// Arms with a guard are ignored, those can’t always be merged together
- // This is also the case for arms in-between each there is an arm with a guard
- (min_index..=max_index).all(|index| arms[index].guard.is_none())
- && SpanlessEq::new(cx)
- .expr_fallback(eq_fallback)
- .eq_expr(lhs.body, rhs.body)
- // these checks could be removed to allow unused bindings
- && bindings_eq(lhs.pat, local_map.keys().copied().collect())
- && bindings_eq(rhs.pat, local_map.values().copied().collect())
+ // If both arms overlap with an arm in between then these can't be merged either.
+ !(backwards_blocking_idxs[max_index] > min_index && forwards_blocking_idxs[min_index] < max_index)
+ && lhs.guard.is_none()
+ && rhs.guard.is_none()
+ && SpanlessEq::new(cx)
+ .expr_fallback(eq_fallback)
+ .eq_expr(lhs.body, rhs.body)
+ // these checks could be removed to allow unused bindings
+ && bindings_eq(lhs.pat, local_map.keys().copied().collect())
+ && bindings_eq(rhs.pat, local_map.values().copied().collect())
};
let indexed_arms: Vec<(usize, &Arm<'_>)> = arms.iter().enumerate().collect();
- for (&(_, i), &(_, j)) in search_same(&indexed_arms, hash, eq) {
- span_lint_and_then(
- cx,
- MATCH_SAME_ARMS,
- j.body.span,
- "this `match` has identical arm bodies",
- |diag| {
- diag.span_note(i.body.span, "same as this");
-
- // Note: this does not use `span_suggestion` on purpose:
- // there is no clean way
- // to remove the other arm. Building a span and suggest to replace it to ""
- // makes an even more confusing error message. Also in order not to make up a
- // span for the whole pattern, the suggestion is only shown when there is only
- // one pattern. The user should know about `|` if they are already using it…
-
- let lhs = snippet(cx, i.pat.span, "<pat1>");
- let rhs = snippet(cx, j.pat.span, "<pat2>");
-
- if let PatKind::Wild = j.pat.kind {
- // if the last arm is _, then i could be integrated into _
- // note that i.pat cannot be _, because that would mean that we're
- // hiding all the subsequent arms, and rust won't compile
- diag.span_note(
- i.body.span,
- &format!(
- "`{}` has the same arm body as the `_` wildcard, consider removing it",
- lhs
- ),
- );
+ for (&(i, arm1), &(j, arm2)) in search_same(&indexed_arms, hash, eq) {
+ if matches!(arm2.pat.kind, PatKind::Wild) {
+ span_lint_and_then(
+ cx,
+ MATCH_SAME_ARMS,
+ arm1.span,
+ "this match arm has an identical body to the `_` wildcard arm",
+ |diag| {
+ diag.span_suggestion(
+ arm1.span,
+ "try removing the arm",
+ String::new(),
+ Applicability::MaybeIncorrect,
+ )
+ .help("or try changing either arm body")
+ .span_note(arm2.span, "`_` wildcard arm here");
+ },
+ );
+ } else {
+ let back_block = backwards_blocking_idxs[j];
+ let (keep_arm, move_arm) = if back_block < i || (back_block == 0 && forwards_blocking_idxs[i] <= j) {
+ (arm1, arm2)
+ } else {
+ (arm2, arm1)
+ };
+
+ span_lint_and_then(
+ cx,
+ MATCH_SAME_ARMS,
+ keep_arm.span,
+ "this match arm has an identical body to another arm",
+ |diag| {
+ let move_pat_snip = snippet(cx, move_arm.pat.span, "<pat2>");
+ let keep_pat_snip = snippet(cx, keep_arm.pat.span, "<pat1>");
+
+ diag.span_suggestion(
+ keep_arm.pat.span,
+ "try merging the arm patterns",
+ format!("{} | {}", keep_pat_snip, move_pat_snip),
+ Applicability::MaybeIncorrect,
+ )
+ .help("or try changing either arm body")
+ .span_note(move_arm.span, "other arm here");
+ },
+ );
+ }
+ }
+}
+
+#[derive(Clone, Copy)]
+enum NormalizedPat<'a> {
+ Wild,
+ Struct(Option<DefId>, &'a [(Symbol, Self)]),
+ Tuple(Option<DefId>, &'a [Self]),
+ Or(&'a [Self]),
+ Path(Option<DefId>),
+ LitStr(Symbol),
+ LitBytes(&'a [u8]),
+ LitInt(u128),
+ LitBool(bool),
+ Range(PatRange),
+ /// A slice pattern. If the second value is `None`, then this matches an exact size. Otherwise
+ /// the first value contains everything before the `..` wildcard pattern, and the second value
+ /// contains everything afterwards. Note that either side, or both sides, may contain zero
+ /// patterns.
+ Slice(&'a [Self], Option<&'a [Self]>),
+}
+
+#[derive(Clone, Copy)]
+struct PatRange {
+ start: u128,
+ end: u128,
+ bounds: RangeEnd,
+}
+impl PatRange {
+ fn contains(&self, x: u128) -> bool {
+ x >= self.start
+ && match self.bounds {
+ RangeEnd::Included => x <= self.end,
+ RangeEnd::Excluded => x < self.end,
+ }
+ }
+
+ fn overlaps(&self, other: &Self) -> bool {
+ // Note: Empty ranges are impossible, so this is correct even though it would return true if an
+ // empty exclusive range were to reside within an inclusive range.
+ (match self.bounds {
+ RangeEnd::Included => self.end >= other.start,
+ RangeEnd::Excluded => self.end > other.start,
+ } && match other.bounds {
+ RangeEnd::Included => self.start <= other.end,
+ RangeEnd::Excluded => self.start < other.end,
+ })
+ }
+}
+
+/// Iterates over the pairs of fields with matching names.
+fn iter_matching_struct_fields<'a>(
+ left: &'a [(Symbol, NormalizedPat<'a>)],
+ right: &'a [(Symbol, NormalizedPat<'a>)],
+) -> impl Iterator<Item = (&'a NormalizedPat<'a>, &'a NormalizedPat<'a>)> + 'a {
+ struct Iter<'a>(
+ slice::Iter<'a, (Symbol, NormalizedPat<'a>)>,
+ slice::Iter<'a, (Symbol, NormalizedPat<'a>)>,
+ );
+ impl<'a> Iterator for Iter<'a> {
+ type Item = (&'a NormalizedPat<'a>, &'a NormalizedPat<'a>);
+ fn next(&mut self) -> Option<Self::Item> {
+ // Note: all the fields in each slice are sorted by symbol value.
+ let mut left = self.0.next()?;
+ let mut right = self.1.next()?;
+ loop {
+ match left.0.cmp(&right.0) {
+ Ordering::Equal => return Some((&left.1, &right.1)),
+ Ordering::Less => left = self.0.next()?,
+ Ordering::Greater => right = self.1.next()?,
+ }
+ }
+ }
+ }
+ Iter(left.iter(), right.iter())
+}
+
+#[allow(clippy::similar_names)]
+impl<'a> NormalizedPat<'a> {
+ #[allow(clippy::too_many_lines)]
+ fn from_pat(cx: &LateContext<'_>, arena: &'a DroplessArena, pat: &'a Pat<'_>) -> Self {
+ match pat.kind {
+ PatKind::Wild | PatKind::Binding(.., None) => Self::Wild,
+ PatKind::Binding(.., Some(pat)) | PatKind::Box(pat) | PatKind::Ref(pat, _) => {
+ Self::from_pat(cx, arena, pat)
+ },
+ PatKind::Struct(ref path, fields, _) => {
+ let fields =
+ arena.alloc_from_iter(fields.iter().map(|f| (f.ident.name, Self::from_pat(cx, arena, f.pat))));
+ fields.sort_by_key(|&(name, _)| name);
+ Self::Struct(cx.qpath_res(path, pat.hir_id).opt_def_id(), fields)
+ },
+ PatKind::TupleStruct(ref path, pats, wild_idx) => {
+ let adt = match cx.typeck_results().pat_ty(pat).ty_adt_def() {
+ Some(x) => x,
+ None => return Self::Wild,
+ };
+ let (var_id, variant) = if adt.is_enum() {
+ match cx.qpath_res(path, pat.hir_id).opt_def_id() {
+ Some(x) => (Some(x), adt.variant_with_ctor_id(x)),
+ None => return Self::Wild,
+ }
} else {
- diag.span_help(i.pat.span, &format!("consider refactoring into `{} | {}`", lhs, rhs,))
- .help("...or consider changing the match arm bodies");
+ (None, adt.non_enum_variant())
+ };
+ let (front, back) = match wild_idx {
+ Some(i) => pats.split_at(i),
+ None => (pats, [].as_slice()),
+ };
+ let pats = arena.alloc_from_iter(
+ front
+ .iter()
+ .map(|pat| Self::from_pat(cx, arena, pat))
+ .chain(iter::repeat_with(|| Self::Wild).take(variant.fields.len() - pats.len()))
+ .chain(back.iter().map(|pat| Self::from_pat(cx, arena, pat))),
+ );
+ Self::Tuple(var_id, pats)
+ },
+ PatKind::Or(pats) => Self::Or(arena.alloc_from_iter(pats.iter().map(|pat| Self::from_pat(cx, arena, pat)))),
+ PatKind::Path(ref path) => Self::Path(cx.qpath_res(path, pat.hir_id).opt_def_id()),
+ PatKind::Tuple(pats, wild_idx) => {
+ let field_count = match cx.typeck_results().pat_ty(pat).kind() {
+ ty::Tuple(subs) => subs.len(),
+ _ => return Self::Wild,
+ };
+ let (front, back) = match wild_idx {
+ Some(i) => pats.split_at(i),
+ None => (pats, [].as_slice()),
+ };
+ let pats = arena.alloc_from_iter(
+ front
+ .iter()
+ .map(|pat| Self::from_pat(cx, arena, pat))
+ .chain(iter::repeat_with(|| Self::Wild).take(field_count - pats.len()))
+ .chain(back.iter().map(|pat| Self::from_pat(cx, arena, pat))),
+ );
+ Self::Tuple(None, pats)
+ },
+ PatKind::Lit(e) => match &e.kind {
+ // TODO: Handle negative integers. They're currently treated as a wild match.
+ ExprKind::Lit(lit) => match lit.node {
+ LitKind::Str(sym, _) => Self::LitStr(sym),
+ LitKind::ByteStr(ref bytes) => Self::LitBytes(&**bytes),
+ LitKind::Byte(val) => Self::LitInt(val.into()),
+ LitKind::Char(val) => Self::LitInt(val.into()),
+ LitKind::Int(val, _) => Self::LitInt(val),
+ LitKind::Bool(val) => Self::LitBool(val),
+ LitKind::Float(..) | LitKind::Err(_) => Self::Wild,
+ },
+ _ => Self::Wild,
+ },
+ PatKind::Range(start, end, bounds) => {
+ // TODO: Handle negative integers. They're currently treated as a wild match.
+ let start = match start {
+ None => 0,
+ Some(e) => match &e.kind {
+ ExprKind::Lit(lit) => match lit.node {
+ LitKind::Int(val, _) => val,
+ LitKind::Char(val) => val.into(),
+ LitKind::Byte(val) => val.into(),
+ _ => return Self::Wild,
+ },
+ _ => return Self::Wild,
+ },
+ };
+ let (end, bounds) = match end {
+ None => (u128::MAX, RangeEnd::Included),
+ Some(e) => match &e.kind {
+ ExprKind::Lit(lit) => match lit.node {
+ LitKind::Int(val, _) => (val, bounds),
+ LitKind::Char(val) => (val.into(), bounds),
+ LitKind::Byte(val) => (val.into(), bounds),
+ _ => return Self::Wild,
+ },
+ _ => return Self::Wild,
+ },
+ };
+ Self::Range(PatRange { start, end, bounds })
+ },
+ PatKind::Slice(front, wild_pat, back) => Self::Slice(
+ arena.alloc_from_iter(front.iter().map(|pat| Self::from_pat(cx, arena, pat))),
+ wild_pat.map(|_| &*arena.alloc_from_iter(back.iter().map(|pat| Self::from_pat(cx, arena, pat)))),
+ ),
+ }
+ }
+
+ /// Checks if two patterns overlap in the values they can match assuming they are for the same
+ /// type.
+ fn has_overlapping_values(&self, other: &Self) -> bool {
+ match (*self, *other) {
+ (Self::Wild, _) | (_, Self::Wild) => true,
+ (Self::Or(pats), ref other) | (ref other, Self::Or(pats)) => {
+ pats.iter().any(|pat| pat.has_overlapping_values(other))
+ },
+ (Self::Struct(lpath, lfields), Self::Struct(rpath, rfields)) => {
+ if lpath != rpath {
+ return false;
+ }
+ iter_matching_struct_fields(lfields, rfields).all(|(lpat, rpat)| lpat.has_overlapping_values(rpat))
+ },
+ (Self::Tuple(lpath, lpats), Self::Tuple(rpath, rpats)) => {
+ if lpath != rpath {
+ return false;
}
+ lpats
+ .iter()
+ .zip(rpats.iter())
+ .all(|(lpat, rpat)| lpat.has_overlapping_values(rpat))
+ },
+ (Self::Path(x), Self::Path(y)) => x == y,
+ (Self::LitStr(x), Self::LitStr(y)) => x == y,
+ (Self::LitBytes(x), Self::LitBytes(y)) => x == y,
+ (Self::LitInt(x), Self::LitInt(y)) => x == y,
+ (Self::LitBool(x), Self::LitBool(y)) => x == y,
+ (Self::Range(ref x), Self::Range(ref y)) => x.overlaps(y),
+ (Self::Range(ref range), Self::LitInt(x)) | (Self::LitInt(x), Self::Range(ref range)) => range.contains(x),
+ (Self::Slice(lpats, None), Self::Slice(rpats, None)) => {
+ lpats.len() == rpats.len() && lpats.iter().zip(rpats.iter()).all(|(x, y)| x.has_overlapping_values(y))
},
- );
+ (Self::Slice(pats, None), Self::Slice(front, Some(back)))
+ | (Self::Slice(front, Some(back)), Self::Slice(pats, None)) => {
+ // Here `pats` is an exact size match. If the combined lengths of `front` and `back` are greater
+ // then the minimum length required will be greater than the length of `pats`.
+ if pats.len() < front.len() + back.len() {
+ return false;
+ }
+ pats[..front.len()]
+ .iter()
+ .zip(front.iter())
+ .chain(pats[pats.len() - back.len()..].iter().zip(back.iter()))
+ .all(|(x, y)| x.has_overlapping_values(y))
+ },
+ (Self::Slice(lfront, Some(lback)), Self::Slice(rfront, Some(rback))) => lfront
+ .iter()
+ .zip(rfront.iter())
+ .chain(lback.iter().rev().zip(rback.iter().rev()))
+ .all(|(x, y)| x.has_overlapping_values(y)),
+
+ // Enums can mix unit variants with tuple/struct variants. These can never overlap.
+ (Self::Path(_), Self::Tuple(..) | Self::Struct(..))
+ | (Self::Tuple(..) | Self::Struct(..), Self::Path(_)) => false,
+
+ // Tuples can be matched like a struct.
+ (Self::Tuple(x, _), Self::Struct(y, _)) | (Self::Struct(x, _), Self::Tuple(y, _)) => {
+ // TODO: check fields here.
+ x == y
+ },
+
+ // TODO: Lit* with Path, Range with Path, LitBytes with Slice
+ _ => true,
+ }
}
}
// Accumulate the variants which should be put in place of the wildcard because they're not
// already covered.
- let has_hidden = adt_def.variants.iter().any(|x| is_hidden(cx, x));
- let mut missing_variants: Vec<_> = adt_def.variants.iter().filter(|x| !is_hidden(cx, x)).collect();
+ let has_hidden = adt_def.variants().iter().any(|x| is_hidden(cx, x));
+ let mut missing_variants: Vec<_> = adt_def.variants().iter().filter(|x| !is_hidden(cx, x)).collect();
let mut path_prefix = CommonPrefixSearcher::None;
for arm in arms {
}
s
} else {
- let mut s = cx.tcx.def_path_str(adt_def.did);
+ let mut s = cx.tcx.def_path_str(adt_def.did());
s.push_str("::");
s
},
mod match_single_binding;
mod match_wild_enum;
mod match_wild_err_arm;
+mod needless_match;
mod overlapping_arms;
mod redundant_pattern_match;
mod rest_pat_in_fully_bound_struct;
"`match` with identical arm bodies"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for unnecessary `match` or match-like `if let` returns for `Option` and `Result`
+ /// when function signatures are the same.
+ ///
+ /// ### Why is this bad?
+ /// This `match` block does nothing and might not be what the coder intended.
+ ///
+ /// ### Example
+ /// ```rust,ignore
+ /// fn foo() -> Result<(), i32> {
+ /// match result {
+ /// Ok(val) => Ok(val),
+ /// Err(err) => Err(err),
+ /// }
+ /// }
+ ///
+ /// fn bar() -> Option<i32> {
+ /// if let Some(val) = option {
+ /// Some(val)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// ```
+ ///
+ /// Could be replaced as
+ ///
+ /// ```rust,ignore
+ /// fn foo() -> Result<(), i32> {
+ /// result
+ /// }
+ ///
+ /// fn bar() -> Option<i32> {
+ /// option
+ /// }
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub NEEDLESS_MATCH,
+ complexity,
+ "`match` or match-like `if let` that are unnecessary"
+}
+
#[derive(Default)]
pub struct Matches {
msrv: Option<RustcVersion>,
REDUNDANT_PATTERN_MATCHING,
MATCH_LIKE_MATCHES_MACRO,
MATCH_SAME_ARMS,
+ NEEDLESS_MATCH,
]);
impl<'tcx> LateLintPass<'tcx> for Matches {
overlapping_arms::check(cx, ex, arms);
match_wild_enum::check(cx, ex, arms);
match_as_ref::check(cx, ex, arms, expr);
+ needless_match::check_match(cx, ex, arms);
if self.infallible_destructuring_match_linted {
self.infallible_destructuring_match_linted = false;
match_like_matches::check(cx, expr);
}
redundant_pattern_match::check(cx, expr);
+ needless_match::check(cx, expr);
}
}
--- /dev/null
+use super::NEEDLESS_MATCH;
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::source::snippet_with_applicability;
+use clippy_utils::ty::is_type_diagnostic_item;
+use clippy_utils::{eq_expr_value, get_parent_expr, higher, is_else_clause, is_lang_ctor, peel_blocks_with_stmt};
+use rustc_errors::Applicability;
+use rustc_hir::LangItem::OptionNone;
+use rustc_hir::{Arm, BindingAnnotation, Expr, ExprKind, Pat, PatKind, Path, PathSegment, QPath, UnOp};
+use rustc_lint::LateContext;
+use rustc_span::sym;
+
+pub(crate) fn check_match(cx: &LateContext<'_>, ex: &Expr<'_>, arms: &[Arm<'_>]) {
+ // This is for avoiding collision with `match_single_binding`.
+ if arms.len() < 2 {
+ return;
+ }
+
+ for arm in arms {
+ if let PatKind::Wild = arm.pat.kind {
+ let ret_expr = strip_return(arm.body);
+ if !eq_expr_value(cx, ex, ret_expr) {
+ return;
+ }
+ } else if !pat_same_as_expr(arm.pat, arm.body) {
+ return;
+ }
+ }
+
+ if let Some(match_expr) = get_parent_expr(cx, ex) {
+ let mut applicability = Applicability::MachineApplicable;
+ span_lint_and_sugg(
+ cx,
+ NEEDLESS_MATCH,
+ match_expr.span,
+ "this match expression is unnecessary",
+ "replace it with",
+ snippet_with_applicability(cx, ex.span, "..", &mut applicability).to_string(),
+ applicability,
+ );
+ }
+}
+
+/// Check for a no-op `if let` expression that acts as an unnecessary match
+///
+/// ```rust,ignore
+/// if let Some(a) = option {
+/// Some(a)
+/// } else {
+/// None
+/// }
+/// ```
+/// OR
+/// ```rust,ignore
+/// if let SomeEnum::A = some_enum {
+/// SomeEnum::A
+/// } else if let SomeEnum::B = some_enum {
+/// SomeEnum::B
+/// } else {
+/// some_enum
+/// }
+/// ```
+pub(crate) fn check(cx: &LateContext<'_>, ex: &Expr<'_>) {
+ if_chain! {
+ if let Some(ref if_let) = higher::IfLet::hir(cx, ex);
+ if !is_else_clause(cx.tcx, ex);
+ if check_if_let(cx, if_let);
+ then {
+ let mut applicability = Applicability::MachineApplicable;
+ span_lint_and_sugg(
+ cx,
+ NEEDLESS_MATCH,
+ ex.span,
+ "this if-let expression is unnecessary",
+ "replace it with",
+ snippet_with_applicability(cx, if_let.let_expr.span, "..", &mut applicability).to_string(),
+ applicability,
+ );
+ }
+ }
+}
+
+fn check_if_let(cx: &LateContext<'_>, if_let: &higher::IfLet<'_>) -> bool {
+ if let Some(if_else) = if_let.if_else {
+ if !pat_same_as_expr(if_let.let_pat, peel_blocks_with_stmt(if_let.if_then)) {
+ return false;
+ }
+
+ // Recursively check each `else if let` phrase,
+ if let Some(ref nested_if_let) = higher::IfLet::hir(cx, if_else) {
+ return check_if_let(cx, nested_if_let);
+ }
+
+ if matches!(if_else.kind, ExprKind::Block(..)) {
+ let else_expr = peel_blocks_with_stmt(if_else);
+ let ret = strip_return(else_expr);
+ let let_expr_ty = cx.typeck_results().expr_ty(if_let.let_expr);
+ if is_type_diagnostic_item(cx, let_expr_ty, sym::Option) {
+ if let ExprKind::Path(ref qpath) = ret.kind {
+ return is_lang_ctor(cx, qpath, OptionNone) || eq_expr_value(cx, if_let.let_expr, ret);
+ }
+ } else {
+ return eq_expr_value(cx, if_let.let_expr, ret);
+ }
+ return true;
+ }
+ }
+ false
+}
+
+/// Strip `return` keyword if the expression type is `ExprKind::Ret`.
+fn strip_return<'hir>(expr: &'hir Expr<'hir>) -> &'hir Expr<'hir> {
+ if let ExprKind::Ret(Some(ret)) = expr.kind {
+ ret
+ } else {
+ expr
+ }
+}
+
+fn pat_same_as_expr(pat: &Pat<'_>, expr: &Expr<'_>) -> bool {
+ let expr = strip_return(expr);
+ match (&pat.kind, &expr.kind) {
+ // Example: `Some(val) => Some(val)`
+ (
+ PatKind::TupleStruct(QPath::Resolved(_, path), [first_pat, ..], _),
+ ExprKind::Call(call_expr, [first_param, ..]),
+ ) => {
+ if let ExprKind::Path(QPath::Resolved(_, call_path)) = call_expr.kind {
+ if has_identical_segments(path.segments, call_path.segments)
+ && has_same_non_ref_symbol(first_pat, first_param)
+ {
+ return true;
+ }
+ }
+ },
+ // Example: `val => val`, or `ref val => *val`
+ (PatKind::Binding(annot, _, pat_ident, _), _) => {
+ let new_expr = if let (
+ BindingAnnotation::Ref | BindingAnnotation::RefMut,
+ ExprKind::Unary(UnOp::Deref, operand_expr),
+ ) = (annot, &expr.kind)
+ {
+ operand_expr
+ } else {
+ expr
+ };
+
+ if let ExprKind::Path(QPath::Resolved(
+ _,
+ Path {
+ segments: [first_seg, ..],
+ ..
+ },
+ )) = new_expr.kind
+ {
+ return pat_ident.name == first_seg.ident.name;
+ }
+ },
+ // Example: `Custom::TypeA => Custom::TypeB`, or `None => None`
+ (PatKind::Path(QPath::Resolved(_, p_path)), ExprKind::Path(QPath::Resolved(_, e_path))) => {
+ return has_identical_segments(p_path.segments, e_path.segments);
+ },
+ // Example: `5 => 5`
+ (PatKind::Lit(pat_lit_expr), ExprKind::Lit(expr_spanned)) => {
+ if let ExprKind::Lit(pat_spanned) = &pat_lit_expr.kind {
+ return pat_spanned.node == expr_spanned.node;
+ }
+ },
+ _ => {},
+ }
+
+ false
+}
+
+fn has_identical_segments(left_segs: &[PathSegment<'_>], right_segs: &[PathSegment<'_>]) -> bool {
+ if left_segs.len() != right_segs.len() {
+ return false;
+ }
+ for i in 0..left_segs.len() {
+ if left_segs[i].ident.name != right_segs[i].ident.name {
+ return false;
+ }
+ }
+ true
+}
+
+fn has_same_non_ref_symbol(pat: &Pat<'_>, expr: &Expr<'_>) -> bool {
+ if_chain! {
+ if let PatKind::Binding(annot, _, pat_ident, _) = pat.kind;
+ if !matches!(annot, BindingAnnotation::Ref | BindingAnnotation::RefMut);
+ if let ExprKind::Path(QPath::Resolved(_, Path {segments: [first_seg, ..], .. })) = expr.kind;
+ then {
+ return pat_ident.name == first_seg.ident.name;
+ }
+ }
+
+ false
+}
if_chain! {
if let Some(adt) = cx.typeck_results().expr_ty(recv).ty_adt_def();
if let Ok(vid) = cx.tcx.lang_items().require(Self::VARIANT_LANG_ITEM);
- if Some(adt.did) == cx.tcx.parent(vid);
+ if Some(adt.did()) == cx.tcx.parent(vid);
then {} else { return false; }
}
let inner_ty = match recv_ty.kind() {
// `Option<T>` -> `T`
ty::Adt(adt, subst)
- if cx.tcx.is_diagnostic_item(sym::Option, adt.did) && meets_msrv(msrv, &msrvs::OPTION_COPIED) =>
+ if cx.tcx.is_diagnostic_item(sym::Option, adt.did()) && meets_msrv(msrv, &msrvs::OPTION_COPIED) =>
{
subst.type_at(0)
},
if let PatKind::Binding(_, filter_param_id, _, None) = filter_pat.kind;
if let ExprKind::MethodCall(path, [filter_arg], _) = filter_body.value.kind;
if let Some(opt_ty) = cx.typeck_results().expr_ty(filter_arg).ty_adt_def();
- if let Some(is_result) = if cx.tcx.is_diagnostic_item(sym::Option, opt_ty.did) {
+ if let Some(is_result) = if cx.tcx.is_diagnostic_item(sym::Option, opt_ty.did()) {
Some(false)
- } else if cx.tcx.is_diagnostic_item(sym::Result, opt_ty.did) {
+ } else if cx.tcx.is_diagnostic_item(sym::Result, opt_ty.did()) {
Some(true)
} else {
None
let return_type = cx.typeck_results().expr_ty(expr);
let input_type = cx.typeck_results().expr_ty(recv);
let (input_type, ref_count) = peel_mid_ty_refs(input_type);
- if let Some(ty_name) = input_type.ty_adt_def().map(|adt_def| cx.tcx.item_name(adt_def.did));
+ if let Some(ty_name) = input_type.ty_adt_def().map(|adt_def| cx.tcx.item_name(adt_def.did()));
if return_type == input_type;
then {
let mut app = Applicability::MachineApplicable;
}
if let ty::Adt(adt, substs) = ty.kind() {
- match_def_path(cx, adt.did, &paths::COW) && substs.type_at(1).is_str()
+ match_def_path(cx, adt.did(), &paths::COW) && substs.type_at(1).is_str()
} else {
false
}
--- /dev/null
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::is_integer_const;
+use clippy_utils::ty::is_type_diagnostic_item;
+use clippy_utils::{
+ higher::{self, Range},
+ SpanlessEq,
+};
+use rustc_ast::ast::RangeLimits;
+use rustc_errors::Applicability;
+use rustc_hir::{Expr, ExprKind, QPath};
+use rustc_lint::LateContext;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+
+use super::ITER_WITH_DRAIN;
+
+const DRAIN_TYPES: &[Symbol] = &[sym::Vec, sym::VecDeque];
+
+pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, recv: &Expr<'_>, span: Span, arg: &Expr<'_>) {
+ let ty = cx.typeck_results().expr_ty(recv).peel_refs();
+ if let Some(drained_type) = DRAIN_TYPES.iter().find(|&&sym| is_type_diagnostic_item(cx, ty, sym)) {
+ // Refuse to emit `into_iter` suggestion on draining struct fields due
+ // to the strong possibility of processing an unmovable field.
+ if let ExprKind::Field(..) = recv.kind {
+ return;
+ }
+
+ if let Some(range) = higher::Range::hir(arg) {
+ let left_full = match range {
+ Range { start: Some(start), .. } if is_integer_const(cx, start, 0) => true,
+ Range { start: None, .. } => true,
+ _ => false,
+ };
+ let full = left_full
+ && match range {
+ Range {
+ end: Some(end),
+ limits: RangeLimits::HalfOpen,
+ ..
+ } => {
+ // `x.drain(..x.len())` call
+ if_chain! {
+ if let ExprKind::MethodCall(len_path, len_args, _) = end.kind;
+ if len_path.ident.name == sym::len && len_args.len() == 1;
+ if let ExprKind::Path(QPath::Resolved(_, drain_path)) = recv.kind;
+ if let ExprKind::Path(QPath::Resolved(_, len_path)) = len_args[0].kind;
+ if SpanlessEq::new(cx).eq_path(drain_path, len_path);
+ then { true }
+ else { false }
+ }
+ },
+ Range {
+ end: None,
+ limits: RangeLimits::HalfOpen,
+ ..
+ } => true,
+ _ => false,
+ };
+ if full {
+ span_lint_and_sugg(
+ cx,
+ ITER_WITH_DRAIN,
+ span.with_hi(expr.span.hi()),
+ &format!("`drain(..)` used on a `{}`", drained_type),
+ "try this",
+ "into_iter()".to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+}
-use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::diagnostics::span_lint_and_sugg_for_edges;
use clippy_utils::is_trait_method;
-use clippy_utils::source::snippet;
+use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::is_type_diagnostic_item;
use rustc_errors::Applicability;
-use rustc_hir as hir;
+use rustc_hir::Expr;
use rustc_lint::LateContext;
use rustc_middle::ty;
-use rustc_span::symbol::sym;
+use rustc_span::{symbol::sym, Span};
use super::MAP_FLATTEN;
/// lint use of `map().flatten()` for `Iterators` and 'Options'
-pub(super) fn check<'tcx>(
- cx: &LateContext<'tcx>,
- expr: &'tcx hir::Expr<'_>,
- recv: &'tcx hir::Expr<'_>,
- map_arg: &'tcx hir::Expr<'_>,
-) {
- // lint if caller of `.map().flatten()` is an Iterator
- if is_trait_method(cx, expr, sym::Iterator) {
- let map_closure_ty = cx.typeck_results().expr_ty(map_arg);
- let is_map_to_option = match map_closure_ty.kind() {
- ty::Closure(_, _) | ty::FnDef(_, _) | ty::FnPtr(_) => {
- let map_closure_sig = match map_closure_ty.kind() {
- ty::Closure(_, substs) => substs.as_closure().sig(),
- _ => map_closure_ty.fn_sig(cx.tcx),
- };
- let map_closure_return_ty = cx.tcx.erase_late_bound_regions(map_closure_sig.output());
- is_type_diagnostic_item(cx, map_closure_return_ty, sym::Option)
- },
- _ => false,
- };
-
- let method_to_use = if is_map_to_option {
- // `(...).map(...)` has type `impl Iterator<Item=Option<...>>
- "filter_map"
- } else {
- // `(...).map(...)` has type `impl Iterator<Item=impl Iterator<...>>
- "flat_map"
- };
- let func_snippet = snippet(cx, map_arg.span, "..");
- let hint = format!(".{0}({1})", method_to_use, func_snippet);
- span_lint_and_sugg(
+pub(super) fn check(cx: &LateContext<'_>, expr: &Expr<'_>, recv: &Expr<'_>, map_arg: &Expr<'_>, map_span: Span) {
+ if let Some((caller_ty_name, method_to_use)) = try_get_caller_ty_name_and_method_name(cx, expr, recv, map_arg) {
+ let mut applicability = Applicability::MachineApplicable;
+ let help_msgs = [
+ &format!("try replacing `map` with `{}`", method_to_use),
+ "and remove the `.flatten()`",
+ ];
+ let closure_snippet = snippet_with_applicability(cx, map_arg.span, "..", &mut applicability);
+ span_lint_and_sugg_for_edges(
cx,
MAP_FLATTEN,
- expr.span.with_lo(recv.span.hi()),
- "called `map(..).flatten()` on an `Iterator`",
- &format!("try using `{}` instead", method_to_use),
- hint,
- Applicability::MachineApplicable,
+ expr.span.with_lo(map_span.lo()),
+ &format!("called `map(..).flatten()` on `{}`", caller_ty_name),
+ &help_msgs,
+ format!("{}({})", method_to_use, closure_snippet),
+ applicability,
);
}
+}
- // lint if caller of `.map().flatten()` is an Option or Result
- let caller_type = match cx.typeck_results().expr_ty(recv).kind() {
- ty::Adt(adt, _) => {
- if cx.tcx.is_diagnostic_item(sym::Option, adt.did) {
- "Option"
- } else if cx.tcx.is_diagnostic_item(sym::Result, adt.did) {
- "Result"
- } else {
- return;
+fn try_get_caller_ty_name_and_method_name(
+ cx: &LateContext<'_>,
+ expr: &Expr<'_>,
+ caller_expr: &Expr<'_>,
+ map_arg: &Expr<'_>,
+) -> Option<(&'static str, &'static str)> {
+ if is_trait_method(cx, expr, sym::Iterator) {
+ if is_map_to_option(cx, map_arg) {
+ // `(...).map(...)` has type `impl Iterator<Item=Option<...>>
+ Some(("Iterator", "filter_map"))
+ } else {
+ // `(...).map(...)` has type `impl Iterator<Item=impl Iterator<...>>
+ Some(("Iterator", "flat_map"))
+ }
+ } else {
+ if let ty::Adt(adt, _) = cx.typeck_results().expr_ty(caller_expr).kind() {
+ if cx.tcx.is_diagnostic_item(sym::Option, adt.did()) {
+ return Some(("Option", "and_then"));
+ } else if cx.tcx.is_diagnostic_item(sym::Result, adt.did()) {
+ return Some(("Result", "and_then"));
}
- },
- _ => {
- return;
- },
- };
+ }
+ None
+ }
+}
- let func_snippet = snippet(cx, map_arg.span, "..");
- let hint = format!(".and_then({})", func_snippet);
- let lint_info = format!("called `map(..).flatten()` on an `{}`", caller_type);
- span_lint_and_sugg(
- cx,
- MAP_FLATTEN,
- expr.span.with_lo(recv.span.hi()),
- &lint_info,
- "try using `and_then` instead",
- hint,
- Applicability::MachineApplicable,
- );
+/// Returns `true` if the callable passed to `map` (closure, fn item, or fn pointer)
+/// returns an `Option`, in which case the `map(..).flatten()` chain can use `filter_map`.
+fn is_map_to_option(cx: &LateContext<'_>, map_arg: &Expr<'_>) -> bool {
+ let map_closure_ty = cx.typeck_results().expr_ty(map_arg);
+ match map_closure_ty.kind() {
+ ty::Closure(_, _) | ty::FnDef(_, _) | ty::FnPtr(_) => {
+ let map_closure_sig = match map_closure_ty.kind() {
+ ty::Closure(_, substs) => substs.as_closure().sig(),
+ _ => map_closure_ty.fn_sig(cx.tcx),
+ };
+ // Strip late-bound regions so the return type can be compared directly.
+ let map_closure_return_ty = cx.tcx.erase_late_bound_regions(map_closure_sig.output());
+ is_type_diagnostic_item(cx, map_closure_return_ty, sym::Option)
+ },
+ _ => false,
+ }
}
mod iter_nth_zero;
mod iter_overeager_cloned;
mod iter_skip_next;
+mod iter_with_drain;
mod iterator_step_by_zero;
mod manual_saturating_arithmetic;
mod manual_str_repeat;
mod option_map_or_none;
mod option_map_unwrap_or;
mod or_fun_call;
+mod or_then_unwrap;
mod search_is_some;
mod single_char_add_str;
mod single_char_insert_string;
mod unnecessary_filter_map;
mod unnecessary_fold;
mod unnecessary_iter_cloned;
+mod unnecessary_join;
mod unnecessary_lazy_eval;
mod unnecessary_to_owned;
mod unwrap_or_else_default;
"using any `*or` method with a function call, which suggests `*or_else`"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for `.or(…).unwrap()` calls to Options and Results.
+ ///
+ /// ### Why is this bad?
+ /// You should use `.unwrap_or(…)` instead for clarity.
+ ///
+ /// ### Example
+ /// ```rust
+ /// # let fallback = "fallback";
+ /// // Result
+ /// # type Error = &'static str;
+ /// # let result: Result<&str, Error> = Err("error");
+ /// let value = result.or::<Error>(Ok(fallback)).unwrap();
+ ///
+ /// // Option
+ /// # let option: Option<&str> = None;
+ /// let value = option.or(Some(fallback)).unwrap();
+ /// ```
+ /// Use instead:
+ /// ```rust
+ /// # let fallback = "fallback";
+ /// // Result
+ /// # let result: Result<&str, &str> = Err("error");
+ /// let value = result.unwrap_or(fallback);
+ ///
+ /// // Option
+ /// # let option: Option<&str> = None;
+ /// let value = option.unwrap_or(fallback);
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub OR_THEN_UNWRAP,
+ complexity,
+ "checks for `.or(…).unwrap()` calls to Options and Results."
+}
+
declare_clippy_lint! {
/// ### What it does
/// Checks for calls to `.expect(&format!(...))`, `.expect(foo(..))`,
"using `.skip(x).next()` on an iterator"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for use of `.drain(..)` on `Vec` and `VecDeque` for iteration.
+ ///
+ /// ### Why is this bad?
+ /// `.into_iter()` is simpler with better performance.
+ ///
+ /// ### Example
+ /// ```rust
+ /// # use std::collections::HashSet;
+ /// let mut foo = vec![0, 1, 2, 3];
+ /// let bar: HashSet<usize> = foo.drain(..).collect();
+ /// ```
+ /// Use instead:
+ /// ```rust
+ /// # use std::collections::HashSet;
+ /// let foo = vec![0, 1, 2, 3];
+ /// let bar: HashSet<usize> = foo.into_iter().collect();
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub ITER_WITH_DRAIN,
+ nursery,
+ "replace `.drain(..)` with `.into_iter()`"
+}
+
declare_clippy_lint! {
/// ### What it does
/// Checks for use of `.get().unwrap()` (or
declare_clippy_lint! {
/// ### What it does
- /// Checks for `filter_map` calls which could be replaced by `filter` or `map`.
+ /// Checks for `filter_map` calls that could be replaced by `filter` or `map`.
/// More specifically it checks if the closure provided is only performing one of the
/// filter or map operations and suggests the appropriate option.
///
"using `filter_map` when a more succinct alternative exists"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for `find_map` calls that could be replaced by `find` or `map`. More
+ /// specifically it checks if the closure provided is only performing one of the
+ /// find or map operations and suggests the appropriate option.
+ ///
+ /// ### Why is this bad?
+ /// Complexity. The intent is also clearer if only a single
+ /// operation is being performed.
+ ///
+ /// ### Example
+ /// ```rust
+ /// let _ = (0..3).find_map(|x| if x > 2 { Some(x) } else { None });
+ ///
+ /// // As there is no transformation of the argument this could be written as:
+ /// let _ = (0..3).find(|&x| x > 2);
+ /// ```
+ ///
+ /// ```rust
+ /// let _ = (0..4).find_map(|x| Some(x + 1));
+ ///
+ /// // As there is no conditional check on the argument this could be written as:
+ /// let _ = (0..4).map(|x| x + 1).next();
+ /// ```
+ #[clippy::version = "1.61.0"]
+ pub UNNECESSARY_FIND_MAP,
+ complexity,
+ "using `find_map` when a more succinct alternative exists"
+}
+
declare_clippy_lint! {
/// ### What it does
/// Checks for `into_iter` calls on references which should be replaced by `iter`
"unnecessary calls to `to_owned`-like functions"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for use of `.collect::<Vec<String>>().join("")` on iterators.
+ ///
+ /// ### Why is this bad?
+ /// `.collect::<String>()` is more concise and usually more performant
+ ///
+ /// ### Example
+ /// ```rust
+ /// let vector = vec!["hello", "world"];
+ /// let output = vector.iter().map(|item| item.to_uppercase()).collect::<Vec<String>>().join("");
+ /// println!("{}", output);
+ /// ```
+ /// The correct use would be:
+ /// ```rust
+ /// let vector = vec!["hello", "world"];
+ /// let output = vector.iter().map(|item| item.to_uppercase()).collect::<String>();
+ /// println!("{}", output);
+ /// ```
+ /// ### Known problems
+ /// While `.collect::<String>()` is more performant in most cases, there are cases where
+ /// using `.collect::<String>()` over `.collect::<Vec<String>>().join("")`
+ /// will prevent loop unrolling and will result in a negative performance impact.
+ #[clippy::version = "1.61.0"]
+ pub UNNECESSARY_JOIN,
+ pedantic,
+ "using `.collect::<Vec<String>>().join(\"\")` on an iterator"
+}
+
pub struct Methods {
avoid_breaking_exported_api: bool,
msrv: Option<RustcVersion>,
OPTION_MAP_OR_NONE,
BIND_INSTEAD_OF_MAP,
OR_FUN_CALL,
+ OR_THEN_UNWRAP,
EXPECT_FUN_CALL,
CHARS_NEXT_CMP,
CHARS_LAST_CMP,
GET_UNWRAP,
STRING_EXTEND_CHARS,
ITER_CLONED_COLLECT,
+ ITER_WITH_DRAIN,
USELESS_ASREF,
UNNECESSARY_FOLD,
UNNECESSARY_FILTER_MAP,
+ UNNECESSARY_FIND_MAP,
INTO_ITER_ON_REF,
SUSPICIOUS_MAP,
UNINIT_ASSUMED_INIT,
MANUAL_SPLIT_ONCE,
NEEDLESS_SPLITN,
UNNECESSARY_TO_OWNED,
+ UNNECESSARY_JOIN,
]);
/// Extracts a method call name, args, and `Span` of the method name.
Some(("map", [_, arg], _)) => suspicious_map::check(cx, expr, recv, arg),
_ => {},
},
+ ("drain", [arg]) => {
+ iter_with_drain::check(cx, expr, recv, span, arg);
+ },
("expect", [_]) => match method_call(recv) {
Some(("ok", [recv], _)) => ok_expect::check(cx, expr, recv),
_ => expect_used::check(cx, expr, recv),
extend_with_drain::check(cx, expr, recv, arg);
},
("filter_map", [arg]) => {
- unnecessary_filter_map::check(cx, expr, arg);
+ unnecessary_filter_map::check(cx, expr, arg, name);
filter_map_identity::check(cx, expr, arg, span);
},
+ ("find_map", [arg]) => {
+ unnecessary_filter_map::check(cx, expr, arg, name);
+ },
("flat_map", [arg]) => {
flat_map_identity::check(cx, expr, arg, span);
flat_map_option::check(cx, expr, arg, span);
},
(name @ "flatten", args @ []) => match method_call(recv) {
- Some(("map", [recv, map_arg], _)) => map_flatten::check(cx, expr, recv, map_arg),
+ Some(("map", [recv, map_arg], map_span)) => map_flatten::check(cx, expr, recv, map_arg, map_span),
Some(("cloned", [recv2], _)) => iter_overeager_cloned::check(cx, expr, recv2, name, args),
_ => {},
},
("is_file", []) => filetype_is_file::check(cx, expr, recv),
("is_none", []) => check_is_some_is_none(cx, expr, recv, false),
("is_some", []) => check_is_some_is_none(cx, expr, recv, true),
+ ("join", [join_arg]) => {
+ if let Some(("collect", _, span)) = method_call(recv) {
+ unnecessary_join::check(cx, expr, recv, join_arg, span);
+ }
+ },
("last", args @ []) | ("skip", args @ [_]) => {
if let Some((name2, [recv2, args2 @ ..], _span2)) = method_call(recv) {
if let ("cloned", []) = (name2, args2) {
Some(("get_mut", [recv, get_arg], _)) => {
get_unwrap::check(cx, expr, recv, get_arg, true);
},
+ Some(("or", [recv, or_arg], or_span)) => {
+ or_then_unwrap::check(cx, expr, recv, or_arg, or_span);
+ },
_ => {},
}
unwrap_used::check(cx, expr, recv);
// The expression inside a closure may or may not have surrounding braces
// which causes problems when generating a suggestion.
-fn reduce_unit_expression<'a>(
- cx: &LateContext<'_>,
- expr: &'a hir::Expr<'_>,
-) -> Option<(&'a hir::Expr<'a>, &'a [hir::Expr<'a>])> {
+fn reduce_unit_expression<'a>(expr: &'a hir::Expr<'_>) -> Option<(&'a hir::Expr<'a>, &'a [hir::Expr<'a>])> {
match expr.kind {
hir::ExprKind::Call(func, arg_char) => Some((func, arg_char)),
hir::ExprKind::Block(block, _) => {
(&[], Some(inner_expr)) => {
// If block only contains an expression,
// reduce `|x| { x + 1 }` to `|x| x + 1`
- reduce_unit_expression(cx, inner_expr)
+ reduce_unit_expression(inner_expr)
},
_ => None,
}
if let hir::ExprKind::Closure(_, _, id, span, _) = map_arg.kind;
let arg_snippet = snippet(cx, span, "..");
let body = cx.tcx.hir().body(id);
- if let Some((func, [arg_char])) = reduce_unit_expression(cx, &body.value);
+ if let Some((func, [arg_char])) = reduce_unit_expression(&body.value);
if let Some(id) = path_def_id(cx, func).and_then(|ctor_id| cx.tcx.parent(ctor_id));
if Some(id) == cx.tcx.lang_items().option_some_variant();
then {
--- /dev/null
+use clippy_utils::source::snippet_with_applicability;
+use clippy_utils::ty::is_type_diagnostic_item;
+use clippy_utils::{diagnostics::span_lint_and_sugg, is_lang_ctor};
+use rustc_errors::Applicability;
+use rustc_hir::{lang_items::LangItem, Expr, ExprKind};
+use rustc_lint::LateContext;
+use rustc_span::{sym, Span};
+
+use super::OR_THEN_UNWRAP;
+
+/// Lints `recv.or(Some(…)).unwrap()` / `recv.or(Ok(…)).unwrap()` chains on `Option`s
+/// and `Result`s, suggesting `recv.unwrap_or(…)` with the constructor's argument.
+pub(super) fn check<'tcx>(
+ cx: &LateContext<'tcx>,
+ unwrap_expr: &Expr<'_>,
+ recv: &'tcx Expr<'tcx>,
+ or_arg: &'tcx Expr<'_>,
+ or_span: Span,
+) {
+ let ty = cx.typeck_results().expr_ty(recv); // get type of x (we later check if it's Option or Result)
+ let title;
+ // Span of the value inside the `Some(…)`/`Ok(…)` passed to `or`.
+ let or_arg_content: Span;
+
+ if is_type_diagnostic_item(cx, ty, sym::Option) {
+ title = "found `.or(Some(…)).unwrap()`";
+ if let Some(content) = get_content_if_ctor_matches(cx, or_arg, LangItem::OptionSome) {
+ or_arg_content = content;
+ } else {
+ return;
+ }
+ } else if is_type_diagnostic_item(cx, ty, sym::Result) {
+ title = "found `.or(Ok(…)).unwrap()`";
+ if let Some(content) = get_content_if_ctor_matches(cx, or_arg, LangItem::ResultOk) {
+ or_arg_content = content;
+ } else {
+ return;
+ }
+ } else {
+ // Someone has implemented a struct with .or(...).unwrap() chaining,
+ // but it's not an Option or a Result, so bail
+ return;
+ }
+
+ let mut applicability = Applicability::MachineApplicable;
+ let suggestion = format!(
+ "unwrap_or({})",
+ snippet_with_applicability(cx, or_arg_content, "..", &mut applicability)
+ );
+
+ // Replace everything from the `or` call onwards with the `unwrap_or(..)` form.
+ span_lint_and_sugg(
+ cx,
+ OR_THEN_UNWRAP,
+ unwrap_expr.span.with_lo(or_span.lo()),
+ title,
+ "try this",
+ suggestion,
+ applicability,
+ );
+}
+
+/// If `expr` is a call to the given constructor lang item (`Option::Some` or `Result::Ok`)
+/// with exactly one argument, returns that argument's span; otherwise returns `None`.
+fn get_content_if_ctor_matches(cx: &LateContext<'_>, expr: &Expr<'_>, item: LangItem) -> Option<Span> {
+ if let ExprKind::Call(some_expr, [arg]) = expr.kind
+ && let ExprKind::Path(qpath) = &some_expr.kind
+ && is_lang_ctor(cx, qpath, item)
+ {
+ Some(arg.span)
+ } else {
+ None
+ }
+}
return if_chain! {
if match_def_path(cx, did, &paths::ITERTOOLS_NEXT_TUPLE);
if let ty::Adt(adt_def, subs) = cx.typeck_results().expr_ty(e).kind();
- if cx.tcx.is_diagnostic_item(sym::Option, adt_def.did);
+ if cx.tcx.is_diagnostic_item(sym::Option, adt_def.did());
if let ty::Tuple(subs) = subs.type_at(0).kind();
if subs.len() == 2;
then {
+use super::utils::clone_or_copy_needed;
use clippy_utils::diagnostics::span_lint;
+use clippy_utils::ty::is_copy;
use clippy_utils::usage::mutated_variables;
use clippy_utils::{is_lang_ctor, is_trait_method, path_to_local_id};
use rustc_hir as hir;
use rustc_span::sym;
use super::UNNECESSARY_FILTER_MAP;
+use super::UNNECESSARY_FIND_MAP;
-pub(super) fn check(cx: &LateContext<'_>, expr: &hir::Expr<'_>, arg: &hir::Expr<'_>) {
+pub(super) fn check(cx: &LateContext<'_>, expr: &hir::Expr<'_>, arg: &hir::Expr<'_>, name: &str) {
if !is_trait_method(cx, expr, sym::Iterator) {
return;
}
let arg_id = body.params[0].pat.hir_id;
let mutates_arg =
mutated_variables(&body.value, cx).map_or(true, |used_mutably| used_mutably.contains(&arg_id));
+ let (clone_or_copy_needed, _) = clone_or_copy_needed(cx, body.params[0].pat, &body.value);
let (mut found_mapping, mut found_filtering) = check_expression(cx, arg_id, &body.value);
found_mapping |= return_visitor.found_mapping;
found_filtering |= return_visitor.found_filtering;
+ let in_ty = cx.typeck_results().node_type(body.params[0].hir_id);
let sugg = if !found_filtering {
- "map"
- } else if !found_mapping && !mutates_arg {
- let in_ty = cx.typeck_results().node_type(body.params[0].hir_id);
+ if name == "filter_map" { "map" } else { "map(..).next()" }
+ } else if !found_mapping && !mutates_arg && (!clone_or_copy_needed || is_copy(cx, in_ty)) {
match cx.typeck_results().expr_ty(&body.value).kind() {
- ty::Adt(adt, subst) if cx.tcx.is_diagnostic_item(sym::Option, adt.did) && in_ty == subst.type_at(0) => {
- "filter"
+ ty::Adt(adt, subst)
+ if cx.tcx.is_diagnostic_item(sym::Option, adt.did()) && in_ty == subst.type_at(0) =>
+ {
+ if name == "filter_map" { "filter" } else { "find" }
},
_ => return,
}
};
span_lint(
cx,
- UNNECESSARY_FILTER_MAP,
+ if name == "filter_map" {
+ UNNECESSARY_FILTER_MAP
+ } else {
+ UNNECESSARY_FIND_MAP
+ },
expr.span,
- &format!("this `.filter_map` can be written more simply using `.{}`", sugg),
+ &format!("this `.{}` can be written more simply using `.{}`", name, sugg),
);
}
}
+use super::utils::clone_or_copy_needed;
use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::higher::ForLoop;
use clippy_utils::source::snippet_opt;
use clippy_utils::ty::{get_associated_type, get_iterator_item_ty, implements_trait};
-use clippy_utils::{fn_def_id, get_parent_expr, path_to_local_id, usage};
+use clippy_utils::{fn_def_id, get_parent_expr};
use rustc_errors::Applicability;
-use rustc_hir::intravisit::{walk_expr, Visitor};
-use rustc_hir::{def_id::DefId, BorrowKind, Expr, ExprKind, HirId, LangItem, Mutability, Pat};
+use rustc_hir::{def_id::DefId, Expr, ExprKind, LangItem};
use rustc_lint::LateContext;
-use rustc_middle::hir::nested_filter;
-use rustc_middle::ty;
use rustc_span::{sym, Symbol};
use super::UNNECESSARY_TO_OWNED;
false
}
-/// The core logic of `check_for_loop_iter` above, this function wraps a use of
-/// `CloneOrCopyVisitor`.
-fn clone_or_copy_needed<'tcx>(
- cx: &LateContext<'tcx>,
- pat: &Pat<'tcx>,
- body: &'tcx Expr<'tcx>,
-) -> (bool, Vec<&'tcx Expr<'tcx>>) {
- let mut visitor = CloneOrCopyVisitor {
- cx,
- binding_hir_ids: pat_bindings(pat),
- clone_or_copy_needed: false,
- addr_of_exprs: Vec::new(),
- };
- visitor.visit_expr(body);
- (visitor.clone_or_copy_needed, visitor.addr_of_exprs)
-}
-
-/// Returns a vector of all `HirId`s bound by the pattern.
-fn pat_bindings(pat: &Pat<'_>) -> Vec<HirId> {
- let mut collector = usage::ParamBindingIdCollector {
- binding_hir_ids: Vec::new(),
- };
- collector.visit_pat(pat);
- collector.binding_hir_ids
-}
-
-/// `clone_or_copy_needed` will be false when `CloneOrCopyVisitor` is done visiting if the only
-/// operations performed on `binding_hir_ids` are:
-/// * to take non-mutable references to them
-/// * to use them as non-mutable `&self` in method calls
-/// If any of `binding_hir_ids` is used in any other way, then `clone_or_copy_needed` will be true
-/// when `CloneOrCopyVisitor` is done visiting.
-struct CloneOrCopyVisitor<'cx, 'tcx> {
- cx: &'cx LateContext<'tcx>,
- binding_hir_ids: Vec<HirId>,
- clone_or_copy_needed: bool,
- addr_of_exprs: Vec<&'tcx Expr<'tcx>>,
-}
-
-impl<'cx, 'tcx> Visitor<'tcx> for CloneOrCopyVisitor<'cx, 'tcx> {
- type NestedFilter = nested_filter::OnlyBodies;
-
- fn nested_visit_map(&mut self) -> Self::Map {
- self.cx.tcx.hir()
- }
-
- fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
- walk_expr(self, expr);
- if self.is_binding(expr) {
- if let Some(parent) = get_parent_expr(self.cx, expr) {
- match parent.kind {
- ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, _) => {
- self.addr_of_exprs.push(parent);
- return;
- },
- ExprKind::MethodCall(_, args, _) => {
- if_chain! {
- if args.iter().skip(1).all(|arg| !self.is_binding(arg));
- if let Some(method_def_id) = self.cx.typeck_results().type_dependent_def_id(parent.hir_id);
- let method_ty = self.cx.tcx.type_of(method_def_id);
- let self_ty = method_ty.fn_sig(self.cx.tcx).input(0).skip_binder();
- if matches!(self_ty.kind(), ty::Ref(_, _, Mutability::Not));
- then {
- return;
- }
- }
- },
- _ => {},
- }
- }
- self.clone_or_copy_needed = true;
- }
- }
-}
-
-impl<'cx, 'tcx> CloneOrCopyVisitor<'cx, 'tcx> {
- fn is_binding(&self, expr: &Expr<'tcx>) -> bool {
- self.binding_hir_ids
- .iter()
- .any(|hir_id| path_to_local_id(expr, *hir_id))
- }
-}
-
/// Returns true if the named method is `IntoIterator::into_iter`.
pub fn is_into_iter(cx: &LateContext<'_>, callee_def_id: DefId) -> bool {
cx.tcx.lang_items().require(LangItem::IntoIterIntoIter) == Ok(callee_def_id)
--- /dev/null
+use clippy_utils::{diagnostics::span_lint_and_sugg, ty::is_type_diagnostic_item};
+use rustc_ast::ast::LitKind;
+use rustc_errors::Applicability;
+use rustc_hir::{Expr, ExprKind};
+use rustc_lint::LateContext;
+use rustc_middle::ty::{Ref, Slice};
+use rustc_span::{sym, Span};
+
+use super::UNNECESSARY_JOIN;
+
+/// Lints `iter.collect::<Vec<String>>().join("")`, suggesting the simpler
+/// `iter.collect::<String>()`. Only fires when the `join` separator is the empty
+/// string literal, since any other separator changes the resulting string.
+pub(super) fn check<'tcx>(
+ cx: &LateContext<'tcx>,
+ expr: &'tcx Expr<'tcx>,
+ join_self_arg: &'tcx Expr<'tcx>,
+ join_arg: &'tcx Expr<'tcx>,
+ span: Span,
+) {
+ let applicability = Applicability::MachineApplicable;
+ // Adjusted type of the `join` receiver, i.e. the `collect` output after autoderef.
+ let collect_output_adjusted_type = cx.typeck_results().expr_ty_adjusted(join_self_arg);
+ if_chain! {
+ // the turbofish for collect is ::<Vec<String>>
+ if let Ref(_, ref_type, _) = collect_output_adjusted_type.kind();
+ if let Slice(slice) = ref_type.kind();
+ if is_type_diagnostic_item(cx, *slice, sym::String);
+ // the argument for join is ""
+ if let ExprKind::Lit(spanned) = &join_arg.kind;
+ if let LitKind::Str(symbol, _) = spanned.node;
+ if symbol.is_empty();
+ then {
+ span_lint_and_sugg(
+ cx,
+ UNNECESSARY_JOIN,
+ span.with_hi(expr.span.hi()),
+ // Include the `::` turbofish so the message shows valid Rust syntax.
+ r#"called `.collect::<Vec<String>>().join("")` on an iterator"#,
+ "try using",
+ "collect::<String>()".to_owned(),
+ applicability,
+ );
+ }
+ }
+}
-use clippy_utils::diagnostics::span_lint_and_sugg;
+use clippy_utils::diagnostics::span_lint_and_then;
use clippy_utils::source::snippet;
use clippy_utils::ty::is_type_diagnostic_item;
use clippy_utils::{eager_or_lazy, usage};
Applicability::MaybeIncorrect
};
- span_lint_and_sugg(
- cx,
- UNNECESSARY_LAZY_EVALUATIONS,
- expr.span,
- msg,
- &format!("use `{}` instead", simplify_using),
- format!(
- "{0}.{1}({2})",
- snippet(cx, recv.span, ".."),
- simplify_using,
- snippet(cx, body_expr.span, ".."),
- ),
- applicability,
- );
+ // This is a duplicate of what's happening in clippy_lints::methods::method_call,
+ // which isn't ideal, We want to get the method call span,
+ // but prefer to avoid changing the signature of the function itself.
+ if let hir::ExprKind::MethodCall(_, _, span) = expr.kind {
+ span_lint_and_then(cx, UNNECESSARY_LAZY_EVALUATIONS, expr.span, msg, |diag| {
+ diag.span_suggestion(
+ span,
+ &format!("use `{}(..)` instead", simplify_using),
+ format!("{}({})", simplify_using, snippet(cx, body_expr.span, "..")),
+ applicability,
+ );
+ });
+ }
}
}
}
use super::unnecessary_iter_cloned::{self, is_into_iter};
use clippy_utils::diagnostics::span_lint_and_sugg;
use clippy_utils::source::snippet_opt;
-use clippy_utils::ty::{get_associated_type, get_iterator_item_ty, implements_trait, is_copy, peel_mid_ty_refs};
+use clippy_utils::ty::{
+ contains_ty, get_associated_type, get_iterator_item_ty, implements_trait, is_copy, peel_mid_ty_refs,
+};
use clippy_utils::{fn_def_id, get_parent_expr, is_diag_item_method, is_diag_trait_item};
use rustc_errors::Applicability;
use rustc_hir::{def_id::DefId, BorrowKind, Expr, ExprKind};
parent.span,
&format!("unnecessary use of `{}`", method_name),
"use",
- format!("{:&>width$}{}", "", receiver_snippet, width = n_target_refs - n_receiver_refs),
+ format!(
+ "{:&>width$}{}",
+ "",
+ receiver_snippet,
+ width = n_target_refs - n_receiver_refs
+ ),
Applicability::MachineApplicable,
);
return true;
if let Some(item_ty) = get_iterator_item_ty(cx, parent_ty);
if let Some(receiver_snippet) = snippet_opt(cx, receiver.span);
then {
- if unnecessary_iter_cloned::check_for_loop_iter(
- cx,
- parent,
- method_name,
- receiver,
- true,
- ) {
+ if unnecessary_iter_cloned::check_for_loop_iter(cx, parent, method_name, receiver, true) {
return true;
}
- let cloned_or_copied = if is_copy(cx, item_ty) {
- "copied"
- } else {
- "cloned"
- };
+ let cloned_or_copied = if is_copy(cx, item_ty) { "copied" } else { "cloned" };
// The next suggestion may be incorrect because the removal of the `to_owned`-like
// function could cause the iterator to hold a reference to a resource that is used
// mutably. See https://github.com/rust-lang/rust-clippy/issues/8148.
if if trait_predicate.def_id() == deref_trait_id {
if let [projection_predicate] = projection_predicates[..] {
let normalized_ty =
- cx.tcx.subst_and_normalize_erasing_regions(call_substs, cx.param_env, projection_predicate.term);
+ cx.tcx
+ .subst_and_normalize_erasing_regions(call_substs, cx.param_env, projection_predicate.term);
implements_trait(cx, receiver_ty, deref_trait_id, &[])
- && get_associated_type(cx, receiver_ty, deref_trait_id,
- "Target").map_or(false, |ty| ty::Term::Ty(ty) == normalized_ty)
+ && get_associated_type(cx, receiver_ty, deref_trait_id, "Target")
+ .map_or(false, |ty| ty::Term::Ty(ty) == normalized_ty)
} else {
false
}
let composed_substs = compose_substs(
cx,
&trait_predicate.trait_ref.substs.iter().skip(1).collect::<Vec<_>>()[..],
- call_substs
+ call_substs,
);
implements_trait(cx, receiver_ty, as_ref_trait_id, &composed_substs)
} else {
// `Target = T`.
if n_refs > 0 || is_copy(cx, receiver_ty) || trait_predicate.def_id() != deref_trait_id;
let n_refs = max(n_refs, if is_copy(cx, receiver_ty) { 0 } else { 1 });
+ // If the trait is `AsRef` and the input type variable `T` occurs in the output type, then
+ // `T` must not be instantiated with a reference
+ // (https://github.com/rust-lang/rust-clippy/issues/8507).
+ if (n_refs == 0 && !receiver_ty.is_ref())
+ || trait_predicate.def_id() != as_ref_trait_id
+ || !contains_ty(fn_sig.output(), input);
if let Some(receiver_snippet) = snippet_opt(cx, receiver.span);
then {
span_lint_and_sugg(
if let Some(arg) = substs.iter().next();
if let GenericArgKind::Type(arg_ty) = arg.unpack();
if arg_ty == input;
- then {
- true
- } else {
- false
- }
+ then { true } else { false }
}
};
match predicate.kind().skip_binder() {
use clippy_utils::source::snippet_with_applicability;
use clippy_utils::ty::is_type_diagnostic_item;
+use clippy_utils::{get_parent_expr, path_to_local_id, usage};
use if_chain::if_chain;
use rustc_ast::ast;
use rustc_errors::Applicability;
use rustc_hir as hir;
+use rustc_hir::intravisit::{walk_expr, Visitor};
+use rustc_hir::{BorrowKind, Expr, ExprKind, HirId, Mutability, Pat};
use rustc_lint::LateContext;
+use rustc_middle::hir::nested_filter;
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::sym;
}
}
}
+
+/// The core logic of `check_for_loop_iter` in `unnecessary_iter_cloned.rs`, this function wraps a
+/// use of `CloneOrCopyVisitor`.
+///
+/// Returns whether any binding introduced by `pat` is used in `body` in a way that requires a
+/// clone or copy, together with the `&`-of-binding expressions the visitor collected.
+pub(super) fn clone_or_copy_needed<'tcx>(
+ cx: &LateContext<'tcx>,
+ pat: &Pat<'tcx>,
+ body: &'tcx Expr<'tcx>,
+) -> (bool, Vec<&'tcx Expr<'tcx>>) {
+ let mut visitor = CloneOrCopyVisitor {
+ cx,
+ binding_hir_ids: pat_bindings(pat),
+ clone_or_copy_needed: false,
+ addr_of_exprs: Vec::new(),
+ };
+ visitor.visit_expr(body);
+ (visitor.clone_or_copy_needed, visitor.addr_of_exprs)
+}
+
+/// Returns a vector of all `HirId`s bound by the pattern, collected via
+/// `usage::ParamBindingIdCollector`.
+fn pat_bindings(pat: &Pat<'_>) -> Vec<HirId> {
+ let mut collector = usage::ParamBindingIdCollector {
+ binding_hir_ids: Vec::new(),
+ };
+ collector.visit_pat(pat);
+ collector.binding_hir_ids
+}
+
+/// `clone_or_copy_needed` will be false when `CloneOrCopyVisitor` is done visiting if the only
+/// operations performed on `binding_hir_ids` are:
+/// * to take non-mutable references to them
+/// * to use them as non-mutable `&self` in method calls
+/// If any of `binding_hir_ids` is used in any other way, then `clone_or_copy_needed` will be true
+/// when `CloneOrCopyVisitor` is done visiting.
+struct CloneOrCopyVisitor<'cx, 'tcx> {
+ cx: &'cx LateContext<'tcx>,
+ // `HirId`s of the pattern bindings being tracked.
+ binding_hir_ids: Vec<HirId>,
+ // Set to `true` once a disqualifying use of a binding is seen.
+ clone_or_copy_needed: bool,
+ // `&binding` expressions encountered, collected for the caller.
+ addr_of_exprs: Vec<&'tcx Expr<'tcx>>,
+}
+
+impl<'cx, 'tcx> Visitor<'tcx> for CloneOrCopyVisitor<'cx, 'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.cx.tcx.hir()
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ // Visit children first, then classify how this use of a binding appears
+ // in its parent expression.
+ walk_expr(self, expr);
+ if self.is_binding(expr) {
+ if let Some(parent) = get_parent_expr(self.cx, expr) {
+ match parent.kind {
+ // `&binding`: a shared borrow is allowed; record it for the caller.
+ ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, _) => {
+ self.addr_of_exprs.push(parent);
+ return;
+ },
+ // A method call is allowed only when the binding is solely the
+ // receiver and the method takes `self` by shared reference.
+ ExprKind::MethodCall(_, args, _) => {
+ if_chain! {
+ if args.iter().skip(1).all(|arg| !self.is_binding(arg));
+ if let Some(method_def_id) = self.cx.typeck_results().type_dependent_def_id(parent.hir_id);
+ let method_ty = self.cx.tcx.type_of(method_def_id);
+ let self_ty = method_ty.fn_sig(self.cx.tcx).input(0).skip_binder();
+ if matches!(self_ty.kind(), ty::Ref(_, _, Mutability::Not));
+ then {
+ return;
+ }
+ }
+ },
+ _ => {},
+ }
+ }
+ // Any other use of the binding requires its value (clone or copy).
+ self.clone_or_copy_needed = true;
+ }
+ }
+}
+
+impl<'cx, 'tcx> CloneOrCopyVisitor<'cx, 'tcx> {
+ /// Returns `true` if `expr` is a path resolving to one of the tracked bindings.
+ fn is_binding(&self, expr: &Expr<'tcx>) -> bool {
+ self.binding_hir_ids
+ .iter()
+ .any(|hir_id| path_to_local_id(expr, *hir_id))
+ }
+}
if let Adt(def, substs) = ty.kind() {
let is_keyed_type = [sym::HashMap, sym::BTreeMap, sym::HashSet, sym::BTreeSet]
.iter()
- .any(|diag_item| cx.tcx.is_diagnostic_item(*diag_item, def.did));
+ .any(|diag_item| cx.tcx.is_diagnostic_item(*diag_item, def.did()));
if is_keyed_type && is_interior_mutable_type(cx, substs.type_at(0), span) {
span_lint(cx, MUTABLE_KEY_TYPE, span, "mutable key type");
}
sym::Arc,
]
.iter()
- .any(|diag_item| cx.tcx.is_diagnostic_item(*diag_item, def.did));
- let is_box = Some(def.did) == cx.tcx.lang_items().owned_box();
+ .any(|diag_item| cx.tcx.is_diagnostic_item(*diag_item, def.did()));
+ let is_box = Some(def.did()) == cx.tcx.lang_items().owned_box();
if is_std_collection || is_box {
// The type is mutable if any of its type parameters are
substs.types().any(|ty| is_interior_mutable_type(cx, ty, span))
// Dereference suggestion
let sugg = |diag: &mut Diagnostic| {
if let ty::Adt(def, ..) = ty.kind() {
- if let Some(span) = cx.tcx.hir().span_if_local(def.did) {
- if can_type_implement_copy(cx.tcx, cx.param_env, ty, traits::ObligationCause::dummy_with_span(span)).is_ok() {
+ if let Some(span) = cx.tcx.hir().span_if_local(def.did()) {
+ if can_type_implement_copy(
+ cx.tcx,
+ cx.param_env,
+ ty,
+ traits::ObligationCause::dummy_with_span(span),
+ ).is_ok() {
diag.span_help(span, "consider marking this type as `Copy`");
}
}
let ty = cx.typeck_results().expr_ty(expr);
if let ty::Adt(def, _) = ty.kind() {
if fields.len() == def.non_enum_variant().fields.len()
- && !def.variants[0_usize.into()].is_field_list_non_exhaustive()
+ && !def.variant(0_usize.into()).is_field_list_non_exhaustive()
{
span_lint(
cx,
let mut impls = HirIdSet::default();
cx.tcx.for_each_impl(default_trait_id, |d| {
if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() {
- if let Some(local_def_id) = ty_def.did.as_local() {
+ if let Some(local_def_id) = ty_def.did().as_local() {
impls.insert(cx.tcx.hir().local_def_id_to_hir_id(local_def_id));
}
}
if_chain! {
if let Some(ref impling_types) = self.impling_types;
if let Some(self_def) = cx.tcx.type_of(self_def_id).ty_adt_def();
- if let Some(self_local_did) = self_def.did.as_local();
+ if let Some(self_local_did) = self_def.did().as_local();
let self_id = cx.tcx.hir().local_def_id_to_hir_id(self_local_did);
if impling_types.contains(&self_id);
then {
match val.ty().kind() {
// the fact that we have to dig into every structs to search enums
// leads us to the point checking `UnsafeCell` directly is the only option.
- ty::Adt(ty_def, ..) if Some(ty_def.did) == cx.tcx.lang_items().unsafe_cell_type() => true,
+ ty::Adt(ty_def, ..) if Some(ty_def.did()) == cx.tcx.lang_items().unsafe_cell_type() => true,
ty::Array(..) | ty::Adt(..) | ty::Tuple(..) => {
let val = cx.tcx.destructure_const(cx.param_env.and(val));
val.fields.iter().any(|field| inner(cx, *field))
let mut non_send_fields = Vec::new();
let hir_map = cx.tcx.hir();
- for variant in &adt_def.variants {
+ for variant in adt_def.variants() {
for field in &variant.fields {
if_chain! {
if let Some(field_hir_id) = field
return true;
},
ty::Adt(adt_def, _) => {
- if match_def_path(cx, adt_def.did, &paths::PTR_NON_NULL) {
+ if match_def_path(cx, adt_def.did(), &paths::PTR_NON_NULL) {
return true;
}
},
--- /dev/null
+use std::collections::VecDeque;
+
+use clippy_utils::diagnostics::span_lint_and_sugg;
+use itertools::{izip, Itertools};
+use rustc_ast::{walk_list, Label, Mutability};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::Applicability;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_hir::intravisit::{walk_expr, FnKind, Visitor};
+use rustc_hir::{
+ Arm, Block, Body, Expr, ExprKind, Guard, HirId, ImplicitSelfKind, Let, Local, Pat, PatKind, Path, PathSegment,
+ QPath, Stmt, StmtKind, TyKind, UnOp,
+};
+use rustc_lint::{LateContext, LateLintPass};
+use rustc_middle::ty;
+use rustc_middle::ty::{Ty, TyCtxt, TypeckResults};
+use rustc_session::{declare_lint_pass, declare_tool_lint};
+use rustc_span::symbol::kw;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks for arguments that are only used in recursion with no side-effects.
+ ///
+ /// ### Why is this bad?
+ /// It could contain a useless calculation, and removing it can make the function simpler.
+ ///
+ /// The arguments can be involved in calculations and assignments but as long as
+ /// the calculations have no side-effects (function calls or mutating dereference)
+ /// and the assigned variables are also only in recursion, it is useless.
+ ///
+ /// ### Known problems
+ /// In some cases, this would not catch all useless arguments.
+ ///
+ /// ```rust
+ /// fn foo(a: usize, b: usize) -> usize {
+ /// let f = |x| x + 1;
+ ///
+ /// if a == 0 {
+ /// 1
+ /// } else {
+ /// foo(a - 1, f(b))
+ /// }
+ /// }
+ /// ```
+ ///
+ /// For example, the argument `b` is only used in recursion, but the lint would not catch it.
+ ///
+ /// List of some examples that can not be caught:
+ /// - binary operation of non-primitive types
+ /// - closure usage
+ /// - some `break` relative operations
+ /// - struct pattern binding
+ ///
+ /// Also, when you recurse the function name with path segments, it is not possible to detect.
+ ///
+ /// ### Example
+ /// ```rust
+ /// fn f(a: usize, b: usize) -> usize {
+ /// if a == 0 {
+ /// 1
+ /// } else {
+ /// f(a - 1, b + 1)
+ /// }
+ /// }
+ /// # fn main() {
+ /// # print!("{}", f(1, 1));
+ /// # }
+ /// ```
+ /// Use instead:
+ /// ```rust
+ /// fn f(a: usize) -> usize {
+ /// if a == 0 {
+ /// 1
+ /// } else {
+ /// f(a - 1)
+ /// }
+ /// }
+ /// # fn main() {
+ /// # print!("{}", f(1));
+ /// # }
+ /// ```
+ #[clippy::version = "1.60.0"]
+ pub ONLY_USED_IN_RECURSION,
+ complexity,
+ "arguments that is only used in recursion can be removed"
+}
+declare_lint_pass!(OnlyUsedInRecursion => [ONLY_USED_IN_RECURSION]);
+
+impl<'tcx> LateLintPass<'tcx> for OnlyUsedInRecursion {
+ fn check_fn(
+ &mut self,
+ cx: &LateContext<'tcx>,
+ kind: FnKind<'tcx>,
+ decl: &'tcx rustc_hir::FnDecl<'tcx>,
+ body: &'tcx Body<'tcx>,
+ _: Span,
+ id: HirId,
+ ) {
+ if let FnKind::ItemFn(ident, ..) | FnKind::Method(ident, ..) = kind {
+ let def_id = id.owner.to_def_id();
+ let data = cx.tcx.def_path(def_id).data;
+
+ if data.len() > 1 {
+ match data.get(data.len() - 2) {
+ Some(DisambiguatedDefPathData {
+ data: DefPathData::Impl,
+ disambiguator,
+ }) if *disambiguator != 0 => return,
+ _ => {},
+ }
+ }
+
+ let has_self = !matches!(decl.implicit_self, ImplicitSelfKind::None);
+
+ let ty_res = cx.typeck_results();
+ let param_span = body
+ .params
+ .iter()
+ .flat_map(|param| {
+ let mut v = Vec::new();
+ param.pat.each_binding(|_, hir_id, span, ident| {
+ v.push((hir_id, span, ident));
+ });
+ v
+ })
+ .skip(if has_self { 1 } else { 0 })
+ .filter(|(_, _, ident)| !ident.name.as_str().starts_with('_'))
+ .collect_vec();
+
+ let params = body.params.iter().map(|param| param.pat).collect();
+
+ let mut visitor = SideEffectVisit {
+ graph: FxHashMap::default(),
+ has_side_effect: FxHashSet::default(),
+ ret_vars: Vec::new(),
+ contains_side_effect: false,
+ break_vars: FxHashMap::default(),
+ params,
+ fn_ident: ident,
+ fn_def_id: def_id,
+ is_method: matches!(kind, FnKind::Method(..)),
+ has_self,
+ ty_res,
+ ty_ctx: cx.tcx,
+ };
+
+ visitor.visit_expr(&body.value);
+ let vars = std::mem::take(&mut visitor.ret_vars);
+ // this would set the return variables to side effect
+ visitor.add_side_effect(vars);
+
+ let mut queue = visitor.has_side_effect.iter().copied().collect::<VecDeque<_>>();
+
+ // a simple BFS to check all the variables that have side effect
+ while let Some(id) = queue.pop_front() {
+ if let Some(next) = visitor.graph.get(&id) {
+ for i in next {
+ if !visitor.has_side_effect.contains(i) {
+ visitor.has_side_effect.insert(*i);
+ queue.push_back(*i);
+ }
+ }
+ }
+ }
+
+ for (id, span, ident) in param_span {
+ // if the variable is not used in recursion, it would be marked as unused
+ if !visitor.has_side_effect.contains(&id) {
+ let mut queue = VecDeque::new();
+ let mut visited = FxHashSet::default();
+
+ queue.push_back(id);
+
+ // a simple BFS to check the graph can reach to itself
+ // if it can't, it means the variable is never used in recursion
+ while let Some(id) = queue.pop_front() {
+ if let Some(next) = visitor.graph.get(&id) {
+ for i in next {
+ if !visited.contains(i) {
+ visited.insert(id);
+ queue.push_back(*i);
+ }
+ }
+ }
+ }
+
+ if visited.contains(&id) {
+ span_lint_and_sugg(
+ cx,
+ ONLY_USED_IN_RECURSION,
+ span,
+ "parameter is only used in recursion",
+ "if this is intentional, prefix with an underscore",
+ format!("_{}", ident.name.as_str()),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+}
+
+pub fn is_primitive(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str => true,
+ ty::Ref(_, t, _) => is_primitive(*t),
+ _ => false,
+ }
+}
+
+pub fn is_array(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Array(..) | ty::Slice(..) => true,
+ ty::Ref(_, t, _) => is_array(*t),
+ _ => false,
+ }
+}
+
+/// This builds the graph of side effect.
+/// The edge `a -> b` means if `a` has side effect, `b` will have side effect.
+///
+/// There are some examples in the following code:
+/// ```rust, ignore
+/// let b = 1;
+/// let a = b; // a -> b
+/// let (c, d) = (a, b); // c -> b, d -> b
+///
+/// let e = if a == 0 { // e -> a
+/// c // e -> c
+/// } else {
+/// d // e -> d
+/// };
+/// ```
+pub struct SideEffectVisit<'tcx> {
+ graph: FxHashMap<HirId, FxHashSet<HirId>>,
+ has_side_effect: FxHashSet<HirId>,
+ // the bool marks whether the variable was dereferenced through a mutable reference
+ ret_vars: Vec<(HirId, bool)>,
+ contains_side_effect: bool,
+ // break label
+ break_vars: FxHashMap<Ident, Vec<(HirId, bool)>>,
+ params: Vec<&'tcx Pat<'tcx>>,
+ fn_ident: Ident,
+ fn_def_id: DefId,
+ is_method: bool,
+ has_self: bool,
+ ty_res: &'tcx TypeckResults<'tcx>,
+ ty_ctx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> Visitor<'tcx> for SideEffectVisit<'tcx> {
+ fn visit_block(&mut self, b: &'tcx Block<'tcx>) {
+ b.stmts.iter().for_each(|stmt| {
+ self.visit_stmt(stmt);
+ self.ret_vars.clear();
+ });
+ walk_list!(self, visit_expr, b.expr);
+ }
+
+ fn visit_stmt(&mut self, s: &'tcx Stmt<'tcx>) {
+ match s.kind {
+ StmtKind::Local(Local {
+ pat, init: Some(init), ..
+ }) => {
+ self.visit_pat_expr(pat, init, false);
+ self.ret_vars.clear();
+ },
+ StmtKind::Item(i) => {
+ let item = self.ty_ctx.hir().item(i);
+ self.visit_item(item);
+ self.ret_vars.clear();
+ },
+ StmtKind::Expr(e) | StmtKind::Semi(e) => {
+ self.visit_expr(e);
+ self.ret_vars.clear();
+ },
+ StmtKind::Local(_) => {},
+ }
+ }
+
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ match ex.kind {
+ ExprKind::Array(exprs) | ExprKind::Tup(exprs) => {
+ self.ret_vars = exprs
+ .iter()
+ .flat_map(|expr| {
+ self.visit_expr(expr);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect();
+ },
+ ExprKind::Call(callee, args) => self.visit_fn(callee, args),
+ ExprKind::MethodCall(path, args, _) => self.visit_method_call(path, args),
+ ExprKind::Binary(_, lhs, rhs) => {
+ self.visit_bin_op(lhs, rhs);
+ },
+ ExprKind::Unary(op, expr) => self.visit_un_op(op, expr),
+ ExprKind::Let(Let { pat, init, .. }) => self.visit_pat_expr(pat, init, false),
+ ExprKind::If(bind, then_expr, else_expr) => {
+ self.visit_if(bind, then_expr, else_expr);
+ },
+ ExprKind::Match(expr, arms, _) => self.visit_match(expr, arms),
+ // since analysing the closure is not easy, just set all variables in it to side-effect
+ ExprKind::Closure(_, _, body_id, _, _) => {
+ let body = self.ty_ctx.hir().body(body_id);
+ self.visit_body(body);
+ let vars = std::mem::take(&mut self.ret_vars);
+ self.add_side_effect(vars);
+ },
+ ExprKind::Loop(block, label, _, _) | ExprKind::Block(block, label) => {
+ self.visit_block_label(block, label);
+ },
+ ExprKind::Assign(bind, expr, _) => {
+ self.visit_assign(bind, expr);
+ },
+ ExprKind::AssignOp(_, bind, expr) => {
+ self.visit_assign(bind, expr);
+ self.visit_bin_op(bind, expr);
+ },
+ ExprKind::Field(expr, _) => {
+ self.visit_expr(expr);
+ if matches!(self.ty_res.expr_ty(expr).kind(), ty::Ref(_, _, Mutability::Mut)) {
+ self.ret_vars.iter_mut().for_each(|(_, b)| *b = true);
+ }
+ },
+ ExprKind::Index(expr, index) => {
+ self.visit_expr(expr);
+ let mut vars = std::mem::take(&mut self.ret_vars);
+ self.visit_expr(index);
+ self.ret_vars.append(&mut vars);
+
+ if !is_array(self.ty_res.expr_ty(expr)) {
+ self.add_side_effect(self.ret_vars.clone());
+ } else if matches!(self.ty_res.expr_ty(expr).kind(), ty::Ref(_, _, Mutability::Mut)) {
+ self.ret_vars.iter_mut().for_each(|(_, b)| *b = true);
+ }
+ },
+ ExprKind::Break(dest, Some(expr)) => {
+ self.visit_expr(expr);
+ if let Some(label) = dest.label {
+ self.break_vars
+ .entry(label.ident)
+ .or_insert(Vec::new())
+ .append(&mut self.ret_vars);
+ }
+ self.contains_side_effect = true;
+ },
+ ExprKind::Ret(Some(expr)) => {
+ self.visit_expr(expr);
+ let vars = std::mem::take(&mut self.ret_vars);
+ self.add_side_effect(vars);
+ self.contains_side_effect = true;
+ },
+ ExprKind::Break(_, None) | ExprKind::Continue(_) | ExprKind::Ret(None) => {
+ self.contains_side_effect = true;
+ },
+ ExprKind::Struct(_, exprs, expr) => {
+ let mut ret_vars = exprs
+ .iter()
+ .flat_map(|field| {
+ self.visit_expr(field.expr);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect();
+
+ walk_list!(self, visit_expr, expr);
+ self.ret_vars.append(&mut ret_vars);
+ },
+ _ => walk_expr(self, ex),
+ }
+ }
+
+ fn visit_path(&mut self, path: &'tcx Path<'tcx>, _id: HirId) {
+ if let Res::Local(id) = path.res {
+ self.ret_vars.push((id, false));
+ }
+ }
+}
+
+impl<'tcx> SideEffectVisit<'tcx> {
+ fn visit_assign(&mut self, lhs: &'tcx Expr<'tcx>, rhs: &'tcx Expr<'tcx>) {
+ // Just support array and tuple unwrapping for now.
+ //
+ // ex) `(a, b) = (c, d);`
+ // The graph would look like this:
+ // a -> c
+ // b -> d
+ //
+ // This would minimize the connection of the side-effect graph.
+ match (&lhs.kind, &rhs.kind) {
+ (ExprKind::Array(lhs), ExprKind::Array(rhs)) | (ExprKind::Tup(lhs), ExprKind::Tup(rhs)) => {
+ // if not, it is a compile error
+ debug_assert!(lhs.len() == rhs.len());
+ izip!(*lhs, *rhs).for_each(|(lhs, rhs)| self.visit_assign(lhs, rhs));
+ },
+ // in other kinds of assignment, we have to connect them all to each other
+ // because they can be connected somehow
+ _ => {
+ self.visit_expr(lhs);
+ let lhs_vars = std::mem::take(&mut self.ret_vars);
+ self.visit_expr(rhs);
+ let rhs_vars = std::mem::take(&mut self.ret_vars);
+ self.connect_assign(&lhs_vars, &rhs_vars, false);
+ },
+ }
+ }
+
+ fn visit_block_label(&mut self, block: &'tcx Block<'tcx>, label: Option<Label>) {
+ self.visit_block(block);
+ let _ = label.and_then(|label| {
+ self.break_vars
+ .remove(&label.ident)
+ .map(|mut break_vars| self.ret_vars.append(&mut break_vars))
+ });
+ }
+
+ fn visit_bin_op(&mut self, lhs: &'tcx Expr<'tcx>, rhs: &'tcx Expr<'tcx>) {
+ self.visit_expr(lhs);
+ let mut ret_vars = std::mem::take(&mut self.ret_vars);
+ self.visit_expr(rhs);
+ self.ret_vars.append(&mut ret_vars);
+
+ // the binary operation between non primitive values are overloaded operators
+ // so they can have side-effects
+ if !is_primitive(self.ty_res.expr_ty(lhs)) || !is_primitive(self.ty_res.expr_ty(rhs)) {
+ self.ret_vars.iter().for_each(|id| {
+ self.has_side_effect.insert(id.0);
+ });
+ self.contains_side_effect = true;
+ }
+ }
+
+ fn visit_un_op(&mut self, op: UnOp, expr: &'tcx Expr<'tcx>) {
+ self.visit_expr(expr);
+ let ty = self.ty_res.expr_ty(expr);
+ // dereferencing a reference has no side-effect
+ if !is_primitive(ty) && !matches!((op, ty.kind()), (UnOp::Deref, ty::Ref(..))) {
+ self.add_side_effect(self.ret_vars.clone());
+ }
+
+ if matches!((op, ty.kind()), (UnOp::Deref, ty::Ref(_, _, Mutability::Mut))) {
+ self.ret_vars.iter_mut().for_each(|(_, b)| *b = true);
+ }
+ }
+
+ fn visit_pat_expr(&mut self, pat: &'tcx Pat<'tcx>, expr: &'tcx Expr<'tcx>, connect_self: bool) {
+ match (&pat.kind, &expr.kind) {
+ (PatKind::Tuple(pats, _), ExprKind::Tup(exprs)) => {
+ self.ret_vars = izip!(*pats, *exprs)
+ .flat_map(|(pat, expr)| {
+ self.visit_pat_expr(pat, expr, connect_self);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect();
+ },
+ (PatKind::Slice(front_exprs, _, back_exprs), ExprKind::Array(exprs)) => {
+ let mut vars = izip!(*front_exprs, *exprs)
+ .flat_map(|(pat, expr)| {
+ self.visit_pat_expr(pat, expr, connect_self);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect();
+ self.ret_vars = izip!(back_exprs.iter().rev(), exprs.iter().rev())
+ .flat_map(|(pat, expr)| {
+ self.visit_pat_expr(pat, expr, connect_self);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect();
+ self.ret_vars.append(&mut vars);
+ },
+ _ => {
+ let mut lhs_vars = Vec::new();
+ pat.each_binding(|_, id, _, _| lhs_vars.push((id, false)));
+ self.visit_expr(expr);
+ let rhs_vars = std::mem::take(&mut self.ret_vars);
+ self.connect_assign(&lhs_vars, &rhs_vars, connect_self);
+ self.ret_vars = rhs_vars;
+ },
+ }
+ }
+
+ fn visit_fn(&mut self, callee: &'tcx Expr<'tcx>, args: &'tcx [Expr<'tcx>]) {
+ self.visit_expr(callee);
+ let mut ret_vars = std::mem::take(&mut self.ret_vars);
+ self.add_side_effect(ret_vars.clone());
+
+ let mut is_recursive = false;
+
+ if_chain! {
+ if !self.has_self;
+ if let ExprKind::Path(QPath::Resolved(_, path)) = callee.kind;
+ if let Res::Def(DefKind::Fn, def_id) = path.res;
+ if self.fn_def_id == def_id;
+ then {
+ is_recursive = true;
+ }
+ }
+
+ if_chain! {
+ if !self.has_self && self.is_method;
+ if let ExprKind::Path(QPath::TypeRelative(ty, segment)) = callee.kind;
+ if segment.ident == self.fn_ident;
+ if let TyKind::Path(QPath::Resolved(_, path)) = ty.kind;
+ if let Res::SelfTy{ .. } = path.res;
+ then {
+ is_recursive = true;
+ }
+ }
+
+ if is_recursive {
+ izip!(self.params.clone(), args).for_each(|(pat, expr)| {
+ self.visit_pat_expr(pat, expr, true);
+ self.ret_vars.clear();
+ });
+ } else {
+ // This would mark arguments used in a closure that has no side-effect.
+ // Whether the closure itself has a side-effect can be detected, but the
+ // value of the variable holding the closure can change.
+ // So, we just conservatively check the variables.
+ self.ret_vars = args
+ .iter()
+ .flat_map(|expr| {
+ self.visit_expr(expr);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect_vec()
+ .into_iter()
+ .map(|id| {
+ self.has_side_effect.insert(id.0);
+ id
+ })
+ .collect();
+ self.contains_side_effect = true;
+ }
+
+ self.ret_vars.append(&mut ret_vars);
+ }
+
+ fn visit_method_call(&mut self, path: &'tcx PathSegment<'tcx>, args: &'tcx [Expr<'tcx>]) {
+ if_chain! {
+ if self.is_method;
+ if path.ident == self.fn_ident;
+ if let ExprKind::Path(QPath::Resolved(_, path)) = args.first().unwrap().kind;
+ if let Res::Local(..) = path.res;
+ let ident = path.segments.last().unwrap().ident;
+ if ident.name == kw::SelfLower;
+ then {
+ izip!(self.params.clone(), args.iter())
+ .for_each(|(pat, expr)| {
+ self.visit_pat_expr(pat, expr, true);
+ self.ret_vars.clear();
+ });
+ } else {
+ self.ret_vars = args
+ .iter()
+ .flat_map(|expr| {
+ self.visit_expr(expr);
+ std::mem::take(&mut self.ret_vars)
+ })
+ .collect_vec()
+ .into_iter()
+ .map(|a| {
+ self.has_side_effect.insert(a.0);
+ a
+ })
+ .collect();
+ self.contains_side_effect = true;
+ }
+ }
+ }
+
+ fn visit_if(&mut self, bind: &'tcx Expr<'tcx>, then_expr: &'tcx Expr<'tcx>, else_expr: Option<&'tcx Expr<'tcx>>) {
+ let contains_side_effect = self.contains_side_effect;
+ self.contains_side_effect = false;
+ self.visit_expr(bind);
+ let mut vars = std::mem::take(&mut self.ret_vars);
+ self.visit_expr(then_expr);
+ let mut then_vars = std::mem::take(&mut self.ret_vars);
+ walk_list!(self, visit_expr, else_expr);
+ if self.contains_side_effect {
+ self.add_side_effect(vars.clone());
+ }
+ self.contains_side_effect |= contains_side_effect;
+ self.ret_vars.append(&mut vars);
+ self.ret_vars.append(&mut then_vars);
+ }
+
+ fn visit_match(&mut self, expr: &'tcx Expr<'tcx>, arms: &'tcx [Arm<'tcx>]) {
+ self.visit_expr(expr);
+ let mut expr_vars = std::mem::take(&mut self.ret_vars);
+ self.ret_vars = arms
+ .iter()
+ .flat_map(|arm| {
+ let contains_side_effect = self.contains_side_effect;
+ self.contains_side_effect = false;
+ // this would visit `expr` multiple times
+ // but couldn't think of a better way
+ self.visit_pat_expr(arm.pat, expr, false);
+ let mut vars = std::mem::take(&mut self.ret_vars);
+ let _ = arm.guard.as_ref().map(|guard| {
+ self.visit_expr(match guard {
+ Guard::If(expr) | Guard::IfLet(_, expr) => expr,
+ });
+ vars.append(&mut self.ret_vars);
+ });
+ self.visit_expr(arm.body);
+ if self.contains_side_effect {
+ self.add_side_effect(vars.clone());
+ self.add_side_effect(expr_vars.clone());
+ }
+ self.contains_side_effect |= contains_side_effect;
+ vars.append(&mut self.ret_vars);
+ vars
+ })
+ .collect();
+ self.ret_vars.append(&mut expr_vars);
+ }
+
+ fn connect_assign(&mut self, lhs: &[(HirId, bool)], rhs: &[(HirId, bool)], connect_self: bool) {
+ // if mutable dereference is on assignment it can have side-effect
+ // (this can lead to parameter mutable dereference and change the original value)
+ // too hard to detect whether this value is from parameter, so this would all
+ // check mutable dereference assignment to side effect
+ lhs.iter().filter(|(_, b)| *b).for_each(|(id, _)| {
+ self.has_side_effect.insert(*id);
+ self.contains_side_effect = true;
+ });
+
+ // there is no connection
+ if lhs.is_empty() || rhs.is_empty() {
+ return;
+ }
+
+ // by connecting the rhs in a cycle, the number of connections decreases
+ // from `n * m` to `n + m`
+ // where `n` and `m` are length of `lhs` and `rhs`.
+
+ // unwrap is possible since rhs is not empty
+ let rhs_first = rhs.first().unwrap();
+ for (id, _) in lhs.iter() {
+ if connect_self || *id != rhs_first.0 {
+ self.graph
+ .entry(*id)
+ .or_insert_with(FxHashSet::default)
+ .insert(rhs_first.0);
+ }
+ }
+
+ let rhs = rhs.iter();
+ izip!(rhs.clone().cycle().skip(1), rhs).for_each(|(from, to)| {
+ if connect_self || from.0 != to.0 {
+ self.graph.entry(from.0).or_insert_with(FxHashSet::default).insert(to.0);
+ }
+ });
+ }
+
+ fn add_side_effect(&mut self, v: Vec<(HirId, bool)>) {
+ for (id, _) in v {
+ self.has_side_effect.insert(id);
+ self.contains_side_effect = true;
+ }
+ }
+}
// Check that the name as typed matches the actual name of the type.
// e.g. `fn foo(_: &Foo)` shouldn't trigger the lint when `Foo` is an alias for `Vec`
if let [.., name] = path.segments;
- if cx.tcx.item_name(adt.did) == name.ident.name;
+ if cx.tcx.item_name(adt.did()) == name.ident.name;
if !is_lint_allowed(cx, PTR_ARG, hir_ty.hir_id);
if params.get(i).map_or(true, |p| !is_lint_allowed(cx, PTR_ARG, p.hir_id));
then {
- let (method_renames, deref_ty, deref_impl_id) = match cx.tcx.get_diagnostic_name(adt.did) {
+ let (method_renames, deref_ty, deref_impl_id) = match cx.tcx.get_diagnostic_name(adt.did()) {
Some(sym::Vec) => (
[("clone", ".to_owned()")].as_slice(),
DerefTy::Slice(
DerefTy::Path,
None,
),
- Some(sym::Cow) => {
+ Some(sym::Cow) if mutability == Mutability::Not => {
let ty_name = name.args
.and_then(|args| {
args.args.iter().find_map(|a| match a {
return Some(PtrArg {
idx: i,
span: hir_ty.span,
- ty_did: adt.did,
+ ty_did: adt.did(),
ty_name: name.ident.name,
method_renames,
ref_prefix: RefPrefix {
.map(|sig| sig.input(i).skip_binder().peel_refs())
.map_or(true, |ty| match *ty.kind() {
ty::Param(_) => true,
- ty::Adt(def, _) => def.did == args.ty_did,
+ ty::Adt(def, _) => def.did() == args.ty_did,
_ => false,
})
{
// If the types match check for methods which exist on both types. e.g. `Vec::len` and
// `slice::len`
ty::Adt(def, _)
- if def.did == args.ty_did
+ if def.did() == args.ty_did
&& (i != 0
|| self.cx.tcx.trait_of_item(id).is_some()
|| !args.deref_assoc_items.map_or(false, |(id, items)| {
}
fn result_check_and_early_return(cx: &LateContext<'_>, expr: &Expr<'_>, nested_expr: &Expr<'_>) -> bool {
- Self::is_result(cx, expr) && Self::expression_returns_unmodified_err(cx, nested_expr, expr)
+ Self::is_result(cx, expr) && Self::expression_returns_unmodified_err(nested_expr, expr)
}
fn option_check_and_early_return(cx: &LateContext<'_>, expr: &Expr<'_>, nested_expr: &Expr<'_>) -> bool {
}
}
- fn expression_returns_unmodified_err(cx: &LateContext<'_>, expr: &Expr<'_>, cond_expr: &Expr<'_>) -> bool {
+ fn expression_returns_unmodified_err(expr: &Expr<'_>, cond_expr: &Expr<'_>) -> bool {
match peel_blocks_with_stmt(expr).kind {
- ExprKind::Ret(Some(ret_expr)) => Self::expression_returns_unmodified_err(cx, ret_expr, cond_expr),
+ ExprKind::Ret(Some(ret_expr)) => Self::expression_returns_unmodified_err(ret_expr, cond_expr),
ExprKind::Path(_) => path_to_local(expr).is_some() && path_to_local(expr) == path_to_local(cond_expr),
_ => false,
}
use clippy_utils::ty::{has_drop, is_copy, is_type_diagnostic_item, walk_ptrs_ty_depth};
use clippy_utils::{fn_has_unsatisfiable_preds, match_def_path, paths};
use if_chain::if_chain;
-use rustc_data_structures::{fx::FxHashMap, transitive_relation::TransitiveRelation};
+use rustc_data_structures::fx::FxHashMap;
use rustc_errors::Applicability;
use rustc_hir::intravisit::FnKind;
use rustc_hir::{def_id, Body, FnDecl, HirId};
/// For example, `b = &a; c = &a;` will make `b` and (transitively) `c`
/// possible borrowers of `a`.
struct PossibleBorrowerVisitor<'a, 'tcx> {
- possible_borrower: TransitiveRelation<mir::Local>,
+ possible_borrower: TransitiveRelation,
body: &'a mir::Body<'tcx>,
cx: &'a LateContext<'tcx>,
possible_origin: FxHashMap<mir::Local, HybridBitSet<mir::Local>>,
continue;
}
- let borrowers = self.possible_borrower.reachable_from(row);
+ let mut borrowers = self.possible_borrower.reachable_from(row, self.body.local_decls.len());
+ borrowers.remove(mir::Local::from_usize(0));
if !borrowers.is_empty() {
- let mut bs = HybridBitSet::new_empty(self.body.local_decls.len());
- for c in borrowers {
- if c != mir::Local::from_usize(0) {
- bs.insert(c);
- }
- }
-
- if !bs.is_empty() {
- map.insert(row, bs);
- }
+ map.insert(row, borrowers);
}
}
/// For exampel, `_1 = &mut _2` generate _1: {_2,...}
/// Known Problems: not sure all borrowed are tracked
struct PossibleOriginVisitor<'a, 'tcx> {
- possible_origin: TransitiveRelation<mir::Local>,
+ possible_origin: TransitiveRelation,
body: &'a mir::Body<'tcx>,
}
continue;
}
- let borrowers = self.possible_origin.reachable_from(row);
+ let mut borrowers = self.possible_origin.reachable_from(row, self.body.local_decls.len());
+ borrowers.remove(mir::Local::from_usize(0));
if !borrowers.is_empty() {
- let mut bs = HybridBitSet::new_empty(self.body.local_decls.len());
- for c in borrowers {
- if c != mir::Local::from_usize(0) {
- bs.insert(c);
- }
- }
-
- if !bs.is_empty() {
- map.insert(row, bs);
- }
+ map.insert(row, borrowers);
}
}
map
self.maybe_live.contains(local)
}
}
+
+#[derive(Default)]
+struct TransitiveRelation {
+ relations: FxHashMap<mir::Local, Vec<mir::Local>>,
+}
+impl TransitiveRelation {
+ fn add(&mut self, a: mir::Local, b: mir::Local) {
+ self.relations.entry(a).or_default().push(b);
+ }
+
+ fn reachable_from(&self, a: mir::Local, domain_size: usize) -> HybridBitSet<mir::Local> {
+ let mut seen = HybridBitSet::new_empty(domain_size);
+ let mut stack = vec![a];
+ while let Some(u) = stack.pop() {
+ if let Some(edges) = self.relations.get(&u) {
+ for &v in edges {
+ if seen.insert(v) {
+ stack.push(v);
+ }
+ }
+ }
+ }
+ seen
+ }
+}
if_chain! {
if let Some(self_def) = self_ty.ty_adt_def();
- if let Some(self_local_did) = self_def.did.as_local();
+ if let Some(self_local_did) = self_def.did().as_local();
let self_id = cx.tcx.hir().local_def_id_to_hir_id(self_local_did);
if let Some(Node::Item(x)) = cx.tcx.hir().find(self_id);
let type_name = x.ident.name.as_str().to_lowercase();
use clippy_utils::diagnostics::{span_lint_and_help, span_lint_and_sugg};
-use rustc_ast::{ptr::P, Crate, Item, ItemKind, MacroDef, ModKind, UseTreeKind, VisibilityKind};
+use rustc_ast::{ptr::P, Crate, Item, ItemKind, MacroDef, ModKind, UseTreeKind};
use rustc_errors::Applicability;
use rustc_lint::{EarlyContext, EarlyLintPass, LintContext};
use rustc_session::{declare_lint_pass, declare_tool_lint};
);
}
- for single_use in &single_use_usages {
- if !imports_reused_with_self.contains(&single_use.0) {
- let can_suggest = single_use.2;
+ for (name, span, can_suggest) in single_use_usages {
+ if !imports_reused_with_self.contains(&name) {
if can_suggest {
span_lint_and_sugg(
cx,
SINGLE_COMPONENT_PATH_IMPORTS,
- single_use.1,
+ span,
"this import is redundant",
"remove it entirely",
String::new(),
span_lint_and_help(
cx,
SINGLE_COMPONENT_PATH_IMPORTS,
- single_use.1,
+ span,
"this import is redundant",
None,
"remove this import",
ItemKind::Use(use_tree) => {
let segments = &use_tree.prefix.segments;
- let should_report =
- |name: &Symbol| !macros.contains(name) || matches!(item.vis.kind, VisibilityKind::Inherited);
-
// keep track of `use some_module;` usages
if segments.len() == 1 {
if let UseTreeKind::Simple(None, _, _) = use_tree.kind {
let name = segments[0].ident.name;
- if should_report(&name) {
+ if !macros.contains(&name) {
single_use_usages.push((name, item.span, true));
}
}
if segments.len() == 1 {
if let UseTreeKind::Simple(None, _, _) = tree.0.kind {
let name = segments[0].ident.name;
- if should_report(&name) {
+ if !macros.contains(&name) {
single_use_usages.push((name, tree.0.span, false));
}
}
// used instead, in these cases.
*applicability = Applicability::MaybeIncorrect;
- // We arbitraily choose one side to suggest changing,
+ // We arbitrarily choose one side to suggest changing,
// since we don't have a better guess. If the user
// ends up duplicating a clause, the `logic_bug` lint
// should catch it.
}
fn extract_related_binops(kind: &ExprKind) -> Option<Vec<BinaryOp<'_>>> {
- append_opt_vecs(chained_binops(kind), if_statment_binops(kind))
+ append_opt_vecs(chained_binops(kind), if_statement_binops(kind))
}
-fn if_statment_binops(kind: &ExprKind) -> Option<Vec<BinaryOp<'_>>> {
+fn if_statement_binops(kind: &ExprKind) -> Option<Vec<BinaryOp<'_>>> {
match kind {
ExprKind::If(ref condition, _, _) => chained_binops(&condition.kind),
- ExprKind::Paren(ref e) => if_statment_binops(&e.kind),
+ ExprKind::Paren(ref e) => if_statement_binops(&e.kind),
ExprKind::Block(ref block, _) => {
let mut output = None;
for stmt in &block.stmts {
match stmt.kind {
StmtKind::Expr(ref e) | StmtKind::Semi(ref e) => {
- output = append_opt_vecs(output, if_statment_binops(&e.kind));
+ output = append_opt_vecs(output, if_statement_binops(&e.kind));
},
_ => {},
}
fn append_opt_vecs<A>(target_opt: Option<Vec<A>>, source_opt: Option<Vec<A>>) -> Option<Vec<A>> {
match (target_opt, source_opt) {
- (Some(mut target), Some(mut source)) => {
+ (Some(mut target), Some(source)) => {
target.reserve(source.len());
- for op in source.drain(..) {
+ for op in source {
target.push(op);
}
Some(target)
chained_binops_helper(left_left, left_right),
chained_binops_helper(right_left, right_right),
) {
- (Some(mut left_ops), Some(mut right_ops)) => {
+ (Some(mut left_ops), Some(right_ops)) => {
left_ops.reserve(right_ops.len());
- for op in right_ops.drain(..) {
+ for op in right_ops {
left_ops.push(op);
}
Some(left_ops)
/// Displays a warning when a struct with a trailing zero-sized array is declared without a `repr` attribute.
///
/// ### Why is this bad?
- /// Zero-sized arrays aren't very useful in Rust itself, so such a struct is likely being created to pass to C code or in some other situation where control over memory layout matters (for example, in conjuction with manual allocation to make it easy to compute the offset of the array). Either way, `#[repr(C)]` (or another `repr` attribute) is needed.
+ /// Zero-sized arrays aren't very useful in Rust itself, so such a struct is likely being created to pass to C code or in some other situation where control over memory layout matters (for example, in conjunction with manual allocation to make it easy to compute the offset of the array). Either way, `#[repr(C)]` (or another `repr` attribute) is needed.
///
/// ### Example
/// ```rust
///
/// ### Why is this bad?
/// Duplicate bounds makes the code
- /// less readable than specifing them only once.
+ /// less readable than specifying them only once.
///
/// ### Example
/// ```rust
/// ### Why is this bad?
/// The results of such a transmute are not defined.
///
+ /// ### Known problems
+ /// This lint has had multiple problems in the past and was moved to `nursery`. See issue
+ /// [#8496](https://github.com/rust-lang/rust-clippy/issues/8496) for more details.
+ ///
/// ### Example
/// ```rust
/// struct Foo<T>(u32, T);
// And see https://github.com/rust-lang/rust/issues/51911 for dereferencing raw pointers.
let const_context = in_constant(cx, e.hir_id);
- let from_ty = cx.typeck_results().expr_ty(arg);
+ let from_ty = cx.typeck_results().expr_ty_adjusted(arg);
+ // Adjustments for `to_ty` happen after the call to `transmute`, so don't use them.
let to_ty = cx.typeck_results().expr_ty(e);
// If useless_transmute is triggered, the other lints can be skipped.
use clippy_utils::ty::is_c_void;
use rustc_hir::Expr;
use rustc_lint::LateContext;
-use rustc_middle::ty::subst::Subst;
-use rustc_middle::ty::{self, Ty, TypeAndMut};
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::{self, IntTy, Ty, TypeAndMut, UintTy};
use rustc_span::Span;
#[allow(clippy::too_many_lines)]
unsized_ty,
to_ty: to_sub_ty,
} => match reduce_ty(cx, to_sub_ty) {
- ReducedTy::IntArray | ReducedTy::TypeErasure => break,
+ ReducedTy::TypeErasure => break,
+ ReducedTy::UnorderedFields(ty) if is_size_pair(ty) => break,
ReducedTy::Ref(to_sub_ty) => {
from_ty = unsized_ty;
to_ty = to_sub_ty;
unsized_ty,
from_ty: from_sub_ty,
} => match reduce_ty(cx, from_sub_ty) {
- ReducedTy::IntArray | ReducedTy::TypeErasure => break,
+ ReducedTy::TypeErasure => break,
+ ReducedTy::UnorderedFields(ty) if is_size_pair(ty) => break,
ReducedTy::Ref(from_sub_ty) => {
from_ty = from_sub_ty;
to_ty = unsized_ty;
from_ty: from_sub_ty,
to_ty: to_sub_ty,
} => match (reduce_ty(cx, from_sub_ty), reduce_ty(cx, to_sub_ty)) {
- (ReducedTy::IntArray | ReducedTy::TypeErasure, _)
- | (_, ReducedTy::IntArray | ReducedTy::TypeErasure) => return false,
+ (ReducedTy::TypeErasure, _) | (_, ReducedTy::TypeErasure) => return false,
(ReducedTy::UnorderedFields(from_ty), ReducedTy::UnorderedFields(to_ty)) if from_ty != to_ty => {
+ let same_adt_did = if let (ty::Adt(from_def, from_subs), ty::Adt(to_def, to_subs))
+ = (from_ty.kind(), to_ty.kind())
+ && from_def == to_def
+ {
+ if same_except_params(from_subs, to_subs) {
+ return false;
+ }
+ Some(from_def.did())
+ } else {
+ None
+ };
span_lint_and_then(
cx,
TRANSMUTE_UNDEFINED_REPR,
from_ty_orig, to_ty_orig
),
|diag| {
- if_chain! {
- if let (Some(from_def), Some(to_def)) = (from_ty.ty_adt_def(), to_ty.ty_adt_def());
- if from_def == to_def;
- then {
- diag.note(&format!(
- "two instances of the same generic type (`{}`) may have different layouts",
- cx.tcx.item_name(from_def.did)
- ));
- } else {
- if from_ty_orig.peel_refs() != from_ty {
- diag.note(&format!("the contained type `{}` has an undefined layout", from_ty));
- }
- if to_ty_orig.peel_refs() != to_ty {
- diag.note(&format!("the contained type `{}` has an undefined layout", to_ty));
- }
+ if let Some(same_adt_did) = same_adt_did {
+ diag.note(&format!(
+ "two instances of the same generic type (`{}`) may have different layouts",
+ cx.tcx.item_name(same_adt_did)
+ ));
+ } else {
+ if from_ty_orig.peel_refs() != from_ty {
+ diag.note(&format!("the contained type `{}` has an undefined layout", from_ty));
+ }
+ if to_ty_orig.peel_refs() != to_ty {
+ diag.note(&format!("the contained type `{}` has an undefined layout", to_ty));
}
}
},
continue;
},
(
- ReducedTy::OrderedFields(_) | ReducedTy::Ref(_) | ReducedTy::Other(_),
- ReducedTy::OrderedFields(_) | ReducedTy::Ref(_) | ReducedTy::Other(_),
+ ReducedTy::OrderedFields(_) | ReducedTy::Ref(_) | ReducedTy::Other(_) | ReducedTy::Param,
+ ReducedTy::OrderedFields(_) | ReducedTy::Ref(_) | ReducedTy::Other(_) | ReducedTy::Param,
)
- | (ReducedTy::UnorderedFields(_), ReducedTy::UnorderedFields(_)) => break,
+ | (
+ ReducedTy::UnorderedFields(_) | ReducedTy::Param,
+ ReducedTy::UnorderedFields(_) | ReducedTy::Param,
+ ) => break,
},
}
}
UnorderedFields(Ty<'tcx>),
/// The type is a reference to the contained type.
Ref(Ty<'tcx>),
- /// The type is an array of a primitive integer type. These can be used as storage for a value
- /// of another type.
- IntArray,
+ /// The type is a generic parameter.
+ Param,
/// Any other type.
Other(Ty<'tcx>),
}
loop {
ty = cx.tcx.try_normalize_erasing_regions(cx.param_env, ty).unwrap_or(ty);
return match *ty.kind() {
- ty::Array(sub_ty, _) if matches!(sub_ty.kind(), ty::Int(_) | ty::Uint(_)) => ReducedTy::IntArray,
+ ty::Array(sub_ty, _) if matches!(sub_ty.kind(), ty::Int(_) | ty::Uint(_)) => ReducedTy::TypeErasure,
ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
ty = sub_ty;
continue;
},
ty::Tuple(args) if args.is_empty() => ReducedTy::TypeErasure,
ty::Tuple(args) => {
- let Some(sized_ty) = args.iter().find(|&ty| !is_zero_sized_ty(cx, ty)) else {
+ let mut iter = args.iter();
+ let Some(sized_ty) = iter.find(|&ty| !is_zero_sized_ty(cx, ty)) else {
return ReducedTy::OrderedFields(ty);
};
- if args.iter().all(|ty| is_zero_sized_ty(cx, ty)) {
+ if iter.all(|ty| is_zero_sized_ty(cx, ty)) {
ty = sized_ty;
continue;
}
ty = sized_ty;
continue;
}
- if def.repr.inhibit_struct_field_reordering_opt() {
+ if def.repr().inhibit_struct_field_reordering_opt() {
ReducedTy::OrderedFields(ty)
} else {
ReducedTy::UnorderedFields(ty)
}
},
- ty::Adt(def, _) if def.is_enum() && (def.variants.is_empty() || is_c_void(cx, ty)) => {
+ ty::Adt(def, _) if def.is_enum() && (def.variants().is_empty() || is_c_void(cx, ty)) => {
ReducedTy::TypeErasure
},
+ // TODO: Check if the conversion to or from at least one of a union's fields is valid.
+ ty::Adt(def, _) if def.is_union() => ReducedTy::TypeErasure,
ty::Foreign(_) => ReducedTy::TypeErasure,
ty::Ref(_, ty, _) => ReducedTy::Ref(ty),
ty::RawPtr(ty) => ReducedTy::Ref(ty.ty),
+ ty::Param(_) => ReducedTy::Param,
_ => ReducedTy::Other(ty),
};
}
}
}
}
+
+fn is_size_pair(ty: Ty<'_>) -> bool {
+ if let ty::Tuple(tys) = *ty.kind()
+ && let [ty1, ty2] = &**tys
+ {
+ matches!(ty1.kind(), ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize))
+ && matches!(ty2.kind(), ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize))
+ } else {
+ false
+ }
+}
+
+fn same_except_params(subs1: SubstsRef<'_>, subs2: SubstsRef<'_>) -> bool {
+ // TODO: check const parameters as well. Currently this will consider `Array<5>` the same as
+ // `Array<6>`
+ for (ty1, ty2) in subs1.types().zip(subs2.types()).filter(|(ty1, ty2)| ty1 != ty2) {
+ match (ty1.kind(), ty2.kind()) {
+ (ty::Param(_), _) | (_, ty::Param(_)) => (),
+ (ty::Adt(adt1, subs1), ty::Adt(adt2, subs2)) if adt1 == adt2 && same_except_params(subs1, subs2) => (),
+ _ => return false,
+ }
+ }
+ true
+}
pub(super) fn check<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'_>, from_ty: Ty<'tcx>, to_ty: Ty<'tcx>) -> bool {
match (&from_ty.kind(), &to_ty.kind()) {
(ty::Adt(from_adt, from_substs), ty::Adt(to_adt, to_substs)) => {
- if from_adt.did != to_adt.did {
+ if from_adt.did() != to_adt.did() {
return false;
}
if !matches!(
- cx.tcx.get_diagnostic_name(to_adt.did),
+ cx.tcx.get_diagnostic_name(to_adt.did()),
Some(
sym::BTreeMap
| sym::BTreeSet
/// ```
#[clippy::version = "1.38.0"]
pub TRY_ERR,
- style,
+ restriction,
"return errors explicitly rather than hiding them behind a `?`"
}
fn poll_result_error_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
if_chain! {
if let ty::Adt(def, subst) = ty.kind();
- if match_def_path(cx, def.did, &paths::POLL);
+ if match_def_path(cx, def.did(), &paths::POLL);
let ready_ty = subst.type_at(0);
if let ty::Adt(ready_def, ready_subst) = ready_ty.kind();
- if cx.tcx.is_diagnostic_item(sym::Result, ready_def.did);
+ if cx.tcx.is_diagnostic_item(sym::Result, ready_def.did());
then {
Some(ready_subst.type_at(1))
} else {
fn poll_option_result_error_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
if_chain! {
if let ty::Adt(def, subst) = ty.kind();
- if match_def_path(cx, def.did, &paths::POLL);
+ if match_def_path(cx, def.did(), &paths::POLL);
let ready_ty = subst.type_at(0);
if let ty::Adt(ready_def, ready_subst) = ready_ty.kind();
- if cx.tcx.is_diagnostic_item(sym::Option, ready_def.did);
+ if cx.tcx.is_diagnostic_item(sym::Option, ready_def.did());
let some_ty = ready_subst.type_at(0);
if let ty::Adt(some_def, some_subst) = some_ty.kind();
- if cx.tcx.is_diagnostic_item(sym::Result, some_def.did);
+ if cx.tcx.is_diagnostic_item(sym::Result, some_def.did());
then {
Some(some_subst.type_at(1))
} else {
/// Detect if the two expressions are mirrored (identical, except one
/// contains a and the other replaces it with b)
-fn mirrored_exprs(
- cx: &LateContext<'_>,
- a_expr: &Expr<'_>,
- a_ident: &Ident,
- b_expr: &Expr<'_>,
- b_ident: &Ident,
-) -> bool {
+fn mirrored_exprs(a_expr: &Expr<'_>, a_ident: &Ident, b_expr: &Expr<'_>, b_ident: &Ident) -> bool {
match (&a_expr.kind, &b_expr.kind) {
// Two boxes with mirrored contents
(ExprKind::Box(left_expr), ExprKind::Box(right_expr)) => {
- mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
+ mirrored_exprs(left_expr, a_ident, right_expr, b_ident)
},
// Two arrays with mirrored contents
(ExprKind::Array(left_exprs), ExprKind::Array(right_exprs)) => {
- iter::zip(*left_exprs, *right_exprs).all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
+ iter::zip(*left_exprs, *right_exprs).all(|(left, right)| mirrored_exprs(left, a_ident, right, b_ident))
},
// The two exprs are function calls.
// Check to see that the function itself and its arguments are mirrored
(ExprKind::Call(left_expr, left_args), ExprKind::Call(right_expr, right_args)) => {
- mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
- && iter::zip(*left_args, *right_args)
- .all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
+ mirrored_exprs(left_expr, a_ident, right_expr, b_ident)
+ && iter::zip(*left_args, *right_args).all(|(left, right)| mirrored_exprs(left, a_ident, right, b_ident))
},
// The two exprs are method calls.
// Check to see that the function is the same and the arguments are mirrored
// This is enough because the receiver of the method is listed in the arguments
(ExprKind::MethodCall(left_segment, left_args, _), ExprKind::MethodCall(right_segment, right_args, _)) => {
left_segment.ident == right_segment.ident
- && iter::zip(*left_args, *right_args)
- .all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
+ && iter::zip(*left_args, *right_args).all(|(left, right)| mirrored_exprs(left, a_ident, right, b_ident))
},
// Two tuples with mirrored contents
(ExprKind::Tup(left_exprs), ExprKind::Tup(right_exprs)) => {
- iter::zip(*left_exprs, *right_exprs).all(|(left, right)| mirrored_exprs(cx, left, a_ident, right, b_ident))
+ iter::zip(*left_exprs, *right_exprs).all(|(left, right)| mirrored_exprs(left, a_ident, right, b_ident))
},
// Two binary ops, which are the same operation and which have mirrored arguments
(ExprKind::Binary(left_op, left_left, left_right), ExprKind::Binary(right_op, right_left, right_right)) => {
left_op.node == right_op.node
- && mirrored_exprs(cx, left_left, a_ident, right_left, b_ident)
- && mirrored_exprs(cx, left_right, a_ident, right_right, b_ident)
+ && mirrored_exprs(left_left, a_ident, right_left, b_ident)
+ && mirrored_exprs(left_right, a_ident, right_right, b_ident)
},
// Two unary ops, which are the same operation and which have the same argument
(ExprKind::Unary(left_op, left_expr), ExprKind::Unary(right_op, right_expr)) => {
- left_op == right_op && mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident)
+ left_op == right_op && mirrored_exprs(left_expr, a_ident, right_expr, b_ident)
},
// The two exprs are literals of some kind
(ExprKind::Lit(left_lit), ExprKind::Lit(right_lit)) => left_lit.node == right_lit.node,
- (ExprKind::Cast(left, _), ExprKind::Cast(right, _)) => mirrored_exprs(cx, left, a_ident, right, b_ident),
+ (ExprKind::Cast(left, _), ExprKind::Cast(right, _)) => mirrored_exprs(left, a_ident, right, b_ident),
(ExprKind::DropTemps(left_block), ExprKind::DropTemps(right_block)) => {
- mirrored_exprs(cx, left_block, a_ident, right_block, b_ident)
+ mirrored_exprs(left_block, a_ident, right_block, b_ident)
},
(ExprKind::Field(left_expr, left_ident), ExprKind::Field(right_expr, right_ident)) => {
- left_ident.name == right_ident.name && mirrored_exprs(cx, left_expr, a_ident, right_expr, right_ident)
+ left_ident.name == right_ident.name && mirrored_exprs(left_expr, a_ident, right_expr, right_ident)
},
// Two paths: either one is a and the other is b, or they're identical to each other
(
(
ExprKind::AddrOf(left_kind, Mutability::Not, left_expr),
ExprKind::AddrOf(right_kind, Mutability::Not, right_expr),
- ) => left_kind == right_kind && mirrored_exprs(cx, left_expr, a_ident, right_expr, b_ident),
- (_, ExprKind::AddrOf(_, Mutability::Not, right_expr)) => {
- mirrored_exprs(cx, a_expr, a_ident, right_expr, b_ident)
- },
- (ExprKind::AddrOf(_, Mutability::Not, left_expr), _) => mirrored_exprs(cx, left_expr, a_ident, b_expr, b_ident),
+ ) => left_kind == right_kind && mirrored_exprs(left_expr, a_ident, right_expr, b_ident),
+ (_, ExprKind::AddrOf(_, Mutability::Not, right_expr)) => mirrored_exprs(a_expr, a_ident, right_expr, b_ident),
+ (ExprKind::AddrOf(_, Mutability::Not, left_expr), _) => mirrored_exprs(left_expr, a_ident, b_expr, b_ident),
_ => false,
}
}
if method_path.ident.name == sym::cmp;
then {
let (closure_body, closure_arg, reverse) = if mirrored_exprs(
- cx,
left_expr,
left_ident,
right_expr,
right_ident
) {
(Sugg::hir(cx, left_expr, "..").to_string(), left_ident.name.to_string(), false)
- } else if mirrored_exprs(cx, left_expr, right_ident, right_expr, left_ident) {
+ } else if mirrored_exprs(left_expr, right_ident, right_expr, left_ident) {
(Sugg::hir(cx, left_expr, "..").to_string(), right_ident.name.to_string(), true)
} else {
return None;
if trigger.unstable { "_unstable" } else { "" },
trigger.closure_arg,
if trigger.reverse {
- format!("Reverse({})", trigger.closure_body)
+ format!("std::cmp::Reverse({})", trigger.closure_body)
} else {
trigger.closure_body.to_string()
},
// Get the wrapper and inner types, if can't, abort.
let (return_type_label, lang_item, inner_type) = if let ty::Adt(adt_def, subst) = return_ty(cx, hir_id).kind() {
- if cx.tcx.is_diagnostic_item(sym::Option, adt_def.did) {
+ if cx.tcx.is_diagnostic_item(sym::Option, adt_def.did()) {
("Option", OptionSome, subst.type_at(0))
- } else if cx.tcx.is_diagnostic_item(sym::Result, adt_def.did) {
+ } else if cx.tcx.is_diagnostic_item(sym::Result, adt_def.did()) {
("Result", ResultOk, subst.type_at(0))
} else {
return;
fn visit_expr(&mut self, expr: &'tcx Expr<'_>) {
// check for `expect`
if let Some(arglists) = method_chain_args(expr, &["expect"]) {
- let reciever_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
- if is_type_diagnostic_item(self.lcx, reciever_ty, sym::Option)
- || is_type_diagnostic_item(self.lcx, reciever_ty, sym::Result)
+ let receiver_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
+ if is_type_diagnostic_item(self.lcx, receiver_ty, sym::Option)
+ || is_type_diagnostic_item(self.lcx, receiver_ty, sym::Result)
{
self.result.push(expr.span);
}
// check for `unwrap`
if let Some(arglists) = method_chain_args(expr, &["unwrap"]) {
- let reciever_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
- if is_type_diagnostic_item(self.lcx, reciever_ty, sym::Option)
- || is_type_diagnostic_item(self.lcx, reciever_ty, sym::Result)
+ let receiver_ty = self.typeck_results.expr_ty(&arglists[0][0]).peel_refs();
+ if is_type_diagnostic_item(self.lcx, receiver_ty, sym::Option)
+ || is_type_diagnostic_item(self.lcx, receiver_ty, sym::Result)
{
self.result.push(expr.span);
}
check_ident(cx, &it.ident, self.upper_case_acronyms_aggressive);
},
ItemKind::Enum(ref enumdef, _) => {
- // check enum variants seperately because again we only want to lint on private enums and
+ // check enum variants separately because again we only want to lint on private enums and
// the fn check_variant does not know about the vis of the enum of its variants
enumdef
.variants
def::{CtorOf, DefKind, Res},
def_id::LocalDefId,
intravisit::{walk_inf, walk_ty, Visitor},
- Expr, ExprKind, FnRetTy, FnSig, GenericArg, HirId, Impl, ImplItemKind, Item, ItemKind, Path, QPath, TyKind,
+ Expr, ExprKind, FnRetTy, FnSig, GenericArg, HirId, Impl, ImplItemKind, Item, ItemKind, Pat, PatKind, Path, QPath,
+ TyKind,
};
use rustc_lint::{LateContext, LateLintPass};
use rustc_semver::RustcVersion;
}
}
+ fn check_pat(&mut self, cx: &LateContext<'_>, pat: &Pat<'_>) {
+ if_chain! {
+ if !pat.span.from_expansion();
+ if meets_msrv(self.msrv.as_ref(), &msrvs::TYPE_ALIAS_ENUM_VARIANTS);
+ if let Some(&StackItem::Check { impl_id, .. }) = self.stack.last();
+ if let PatKind::Path(QPath::Resolved(_, path)) = pat.kind;
+ if !matches!(path.res, Res::SelfTy { .. } | Res::Def(DefKind::TyParam, _));
+ if cx.typeck_results().pat_ty(pat) == cx.tcx.type_of(impl_id);
+ if let [first, ..] = path.segments;
+ if let Some(hir_id) = first.hir_id;
+ then {
+ span_lint(cx, cx.tcx.hir().span(hir_id));
+ }
+ }
+ }
+
extract_msrv_attr!(LateContext);
}
use rustc_lint::{EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext};
use rustc_middle::hir::nested_filter;
use rustc_middle::mir::interpret::ConstValue;
-use rustc_middle::ty;
+use rustc_middle::ty::{self, subst::GenericArgKind};
use rustc_semver::RustcVersion;
use rustc_session::{declare_lint_pass, declare_tool_lint, impl_lint_pass};
use rustc_span::source_map::Spanned;
"found clippy lint without `clippy::version` attribute"
}
+declare_clippy_lint! {
+ /// ### What it does
+ /// Checks that the `extract_msrv_attr!` macro is used when a lint has an MSRV.
+ ///
+ pub MISSING_MSRV_ATTR_IMPL,
+ internal,
+ "checking if all necessary steps were taken when adding a MSRV to a lint"
+}
+
declare_lint_pass!(ClippyLintsInternal => [CLIPPY_LINTS_INTERNAL]);
impl EarlyLintPass for ClippyLintsInternal {
span.parent(),
)
}
+
+declare_lint_pass!(MsrvAttrImpl => [MISSING_MSRV_ATTR_IMPL]);
+
+impl LateLintPass<'_> for MsrvAttrImpl {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
+ if_chain! {
+ if let hir::ItemKind::Impl(hir::Impl {
+ of_trait: Some(lint_pass_trait_ref),
+ self_ty,
+ items,
+ ..
+ }) = &item.kind;
+ if let Some(lint_pass_trait_def_id) = lint_pass_trait_ref.trait_def_id();
+ let is_late_pass = match_def_path(cx, lint_pass_trait_def_id, &paths::LATE_LINT_PASS);
+ if is_late_pass || match_def_path(cx, lint_pass_trait_def_id, &paths::EARLY_LINT_PASS);
+ let self_ty = hir_ty_to_ty(cx.tcx, self_ty);
+ if let ty::Adt(self_ty_def, _) = self_ty.kind();
+ if self_ty_def.is_struct();
+ if self_ty_def.all_fields().any(|f| {
+ cx.tcx
+ .type_of(f.did)
+ .walk()
+ .filter(|t| matches!(t.unpack(), GenericArgKind::Type(_)))
+ .any(|t| match_type(cx, t.expect_ty(), &paths::RUSTC_VERSION))
+ });
+ if !items.iter().any(|item| item.ident.name == sym!(enter_lint_attrs));
+ then {
+ let context = if is_late_pass { "LateContext" } else { "EarlyContext" };
+ let lint_pass = if is_late_pass { "LateLintPass" } else { "EarlyLintPass" };
+ let span = cx.sess().source_map().span_through_char(item.span, '{');
+ span_lint_and_sugg(
+ cx,
+ MISSING_MSRV_ATTR_IMPL,
+ span,
+ &format!("`extract_msrv_attr!` macro missing from `{lint_pass}` implementation"),
+ &format!("add `extract_msrv_attr!({context})` to the `{lint_pass}` implementation"),
+ format!("{}\n extract_msrv_attr!({context});", snippet(cx, span, "..")),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+}
};
}
-const LINT_EMISSION_FUNCTIONS: [&[&str]; 7] = [
+const LINT_EMISSION_FUNCTIONS: [&[&str]; 8] = [
&["clippy_utils", "diagnostics", "span_lint"],
&["clippy_utils", "diagnostics", "span_lint_and_help"],
&["clippy_utils", "diagnostics", "span_lint_and_note"],
&["clippy_utils", "diagnostics", "span_lint_and_sugg"],
&["clippy_utils", "diagnostics", "span_lint_and_then"],
&["clippy_utils", "diagnostics", "span_lint_hir_and_then"],
+ &["clippy_utils", "diagnostics", "span_lint_and_sugg_for_edges"],
];
const SUGGESTION_DIAGNOSTIC_BUILDER_METHODS: [(&str, bool); 9] = [
("span_suggestion", false),
/// ```
fn check_expr(&mut self, cx: &LateContext<'hir>, expr: &'hir hir::Expr<'_>) {
if let Some(args) = match_lint_emission(cx, expr) {
- let mut emission_info = extract_emission_info(cx, args);
+ let emission_info = extract_emission_info(cx, args);
if emission_info.is_empty() {
// See:
// - src/misc.rs:734:9
return;
}
- for (lint_name, applicability, is_multi_part) in emission_info.drain(..) {
+ for (lint_name, applicability, is_multi_part) in emission_info {
let app_info = self.applicability_info.entry(lint_name).or_default();
app_info.applicability = applicability;
app_info.is_multi_part_suggestion = is_multi_part;
}
lints
- .drain(..)
+ .into_iter()
.map(|lint_name| (lint_name, applicability, multi_part))
.collect()
}
use rustc_ast::ast::{Expr, ExprKind, Impl, Item, ItemKind, MacCall, Path, StrLit, StrStyle};
use rustc_ast::token::{self, LitKind};
use rustc_ast::tokenstream::TokenStream;
-use rustc_errors::Applicability;
+use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_lexer::unescape::{self, EscapeError};
use rustc_lint::{EarlyContext, EarlyLintPass, LintContext};
use rustc_parse::parser;
match parser
.parse_expr()
.map(rustc_ast::ptr::P::into_inner)
- .map_err(|e| e.cancel())
+ .map_err(DiagnosticBuilder::cancel)
{
// write!(e, ...)
Ok(p) if parser.eat(&token::Comma) => Some(p),
}
let comma_span = parser.prev_token.span;
- let token_expr = if let Ok(expr) = parser.parse_expr().map_err(|err| err.cancel()) {
+ let token_expr = if let Ok(expr) = parser.parse_expr().map_err(DiagnosticBuilder::cancel) {
expr
} else {
return (Some(fmtstr), None);
};
let replacement: String = match lit.token.kind {
- LitKind::Integer | LitKind::Float | LitKind::Err => continue,
LitKind::StrRaw(_) | LitKind::ByteStrRaw(_) if matches!(fmtstr.style, StrStyle::Raw(_)) => {
lit.token.symbol.as_str().replace('{', "{{").replace('}', "}}")
},
LitKind::Str | LitKind::ByteStr if matches!(fmtstr.style, StrStyle::Cooked) => {
lit.token.symbol.as_str().replace('{', "{{").replace('}', "}}")
},
- LitKind::StrRaw(_) | LitKind::Str | LitKind::ByteStrRaw(_) | LitKind::ByteStr => continue,
+ LitKind::StrRaw(_)
+ | LitKind::Str
+ | LitKind::ByteStrRaw(_)
+ | LitKind::ByteStr
+ | LitKind::Integer
+ | LitKind::Float
+ | LitKind::Err => continue,
LitKind::Byte | LitKind::Char => match lit.token.symbol.as_str() {
"\"" if matches!(fmtstr.style, StrStyle::Cooked) => "\\\"",
"\"" if matches!(fmtstr.style, StrStyle::Raw(0)) => continue,
(ForeignMod(l), ForeignMod(r)) => {
both(&l.abi, &r.abi, eq_str_lit) && over(&l.items, &r.items, |l, r| eq_item(l, r, eq_foreign_item_kind))
},
- (TyAlias(box ast::TyAlias { defaultness: ld, generics: lg, bounds: lb, ty: lt, .. }),
- TyAlias(box ast::TyAlias { defaultness: rd, generics: rg, bounds: rb, ty: rt, .. })) => {
+ (
+ TyAlias(box ast::TyAlias {
+ defaultness: ld,
+ generics: lg,
+ bounds: lb,
+ ty: lt,
+ ..
+ }),
+ TyAlias(box ast::TyAlias {
+ defaultness: rd,
+ generics: rg,
+ bounds: rb,
+ ty: rt,
+ ..
+ }),
+ ) => {
eq_defaultness(*ld, *rd)
&& eq_generics(lg, rg)
&& over(lb, rb, eq_generic_bound)
) => {
eq_defaultness(*ld, *rd) && eq_fn_sig(lf, rf) && eq_generics(lg, rg) && both(lb, rb, |l, r| eq_block(l, r))
},
- (TyAlias(box ast::TyAlias { defaultness: ld, generics: lg, bounds: lb, ty: lt, .. }),
- TyAlias(box ast::TyAlias { defaultness: rd, generics: rg, bounds: rb, ty: rt, .. })) => {
+ (
+ TyAlias(box ast::TyAlias {
+ defaultness: ld,
+ generics: lg,
+ bounds: lb,
+ ty: lt,
+ ..
+ }),
+ TyAlias(box ast::TyAlias {
+ defaultness: rd,
+ generics: rg,
+ bounds: rb,
+ ty: rt,
+ ..
+ }),
+ ) => {
eq_defaultness(*ld, *rd)
&& eq_generics(lg, rg)
&& over(lb, rb, eq_generic_bound)
) => {
eq_defaultness(*ld, *rd) && eq_fn_sig(lf, rf) && eq_generics(lg, rg) && both(lb, rb, |l, r| eq_block(l, r))
},
- (TyAlias(box ast::TyAlias { defaultness: ld, generics: lg, bounds: lb, ty: lt, .. }),
- TyAlias(box ast::TyAlias { defaultness: rd, generics: rg, bounds: rb, ty: rt, .. })) => {
+ (
+ TyAlias(box ast::TyAlias {
+ defaultness: ld,
+ generics: lg,
+ bounds: lb,
+ ty: lt,
+ ..
+ }),
+ TyAlias(box ast::TyAlias {
+ defaultness: rd,
+ generics: rg,
+ bounds: rb,
+ ty: rt,
+ ..
+ }),
+ ) => {
eq_defaultness(*ld, *rd)
&& eq_generics(lg, rg)
&& over(lb, rb, eq_generic_bound)
ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => match result.ty().kind() {
ty::Ref(_, tam, _) => match tam.kind() {
ty::Str => String::from_utf8(
- data.inner().inspect_with_uninit_and_ptr_outside_interpreter(start..end)
+ data.inner()
+ .inspect_with_uninit_and_ptr_outside_interpreter(start..end)
.to_owned(),
)
.ok()
ty::ConstKind::Value(ConstValue::ByRef { alloc, offset: _ }) => match result.ty().kind() {
ty::Array(sub_type, len) => match sub_type.kind() {
ty::Float(FloatTy::F32) => match miri_to_const(*len) {
- Some(Constant::Int(len)) => alloc.inner()
+ Some(Constant::Int(len)) => alloc
+ .inner()
.inspect_with_uninit_and_ptr_outside_interpreter(0..(4 * len as usize))
.to_owned()
.chunks(4)
_ => None,
},
ty::Float(FloatTy::F64) => match miri_to_const(*len) {
- Some(Constant::Int(len)) => alloc.inner()
+ Some(Constant::Int(len)) => alloc
+ .inner()
.inspect_with_uninit_and_ptr_outside_interpreter(0..(8 * len as usize))
.to_owned()
.chunks(8)
//! Thank you!
//! ~The `INTERNAL_METADATA_COLLECTOR` lint
-use rustc_errors::{Applicability, Diagnostic};
+use rustc_errors::{emitter::MAX_SUGGESTION_HIGHLIGHT_LINES, Applicability, Diagnostic};
use rustc_hir::HirId;
use rustc_lint::{LateContext, Lint, LintContext};
use rustc_span::source_map::{MultiSpan, Span};
});
}
+/// Like [`span_lint_and_sugg`] with a focus on the edges. The output will either
+/// emit a single-span or a multi-span suggestion depending on the number of its lines.
+///
+/// If the given suggestion string has more lines than the maximum display length defined by
+/// [`MAX_SUGGESTION_HIGHLIGHT_LINES`][`rustc_errors::emitter::MAX_SUGGESTION_HIGHLIGHT_LINES`],
+/// this function will split the suggestion and span to showcase the change for the top and
+/// bottom edge of the code. For normal suggestions, in one display window, the help message
+/// will be combined with a colon.
+///
+/// Multipart suggestions like the one being created here currently cannot be
+/// applied by rustfix (See [rustfix#141](https://github.com/rust-lang/rustfix/issues/141)).
+/// Testing rustfix with this lint emission function might require a file with
+/// suggestions that can be fixed and those that can't. See
+/// [clippy#8520](https://github.com/rust-lang/rust-clippy/pull/8520/files) for
+/// an example of this.
+///
+/// # Example for a long suggestion
+///
+/// ```text
+/// error: called `map(..).flatten()` on `Option`
+/// --> $DIR/map_flatten.rs:8:10
+/// |
+/// LL | .map(|x| {
+/// | __________^
+/// LL | | if x <= 5 {
+/// LL | | Some(x)
+/// LL | | } else {
+/// ... |
+/// LL | | })
+/// LL | | .flatten();
+/// | |__________________^
+/// |
+/// = note: `-D clippy::map-flatten` implied by `-D warnings`
+/// help: try replacing `map` with `and_then`
+/// |
+/// LL ~ .and_then(|x| {
+/// LL + if x <= 5 {
+/// LL + Some(x)
+/// |
+/// help: and remove the `.flatten()`
+/// |
+/// LL + None
+/// LL + }
+/// LL ~ });
+/// |
+/// ```
+pub fn span_lint_and_sugg_for_edges(
+ cx: &LateContext<'_>,
+ lint: &'static Lint,
+ sp: Span,
+ msg: &str,
+ helps: &[&str; 2],
+ sugg: String,
+ applicability: Applicability,
+) {
+ span_lint_and_then(cx, lint, sp, msg, |diag| {
+ let sugg_lines_count = sugg.lines().count();
+ if sugg_lines_count > MAX_SUGGESTION_HIGHLIGHT_LINES {
+ let sm = cx.sess().source_map();
+ if let (Ok(line_upper), Ok(line_bottom)) = (sm.lookup_line(sp.lo()), sm.lookup_line(sp.hi())) {
+ let split_idx = MAX_SUGGESTION_HIGHLIGHT_LINES / 2;
+ let span_upper = sm.span_until_char(sp.with_hi(line_upper.sf.lines[line_upper.line + split_idx]), '\n');
+ let span_bottom = sp.with_lo(line_bottom.sf.lines[line_bottom.line - split_idx]);
+
+ let sugg_lines_vec = sugg.lines().collect::<Vec<&str>>();
+ let sugg_upper = sugg_lines_vec[..split_idx].join("\n");
+ let sugg_bottom = sugg_lines_vec[sugg_lines_count - split_idx..].join("\n");
+
+ diag.span_suggestion(span_upper, helps[0], sugg_upper, applicability);
+ diag.span_suggestion(span_bottom, helps[1], sugg_bottom, applicability);
+
+ return;
+ }
+ }
+ diag.span_suggestion_with_style(
+ sp,
+ &helps.join(", "),
+ sugg,
+ applicability,
+ rustc_errors::SuggestionStyle::ShowAlways,
+ );
+ });
+}
+
/// Create a suggestion made from several `span → replacement`.
///
/// Note: in the JSON format (used by `compiletest_rs`), the help message will
// than marker traits.
// Due to the limited operations on these types functions should be fairly cheap.
if def
- .variants
+ .variants()
.iter()
.flat_map(|v| v.fields.iter())
.any(|x| matches!(cx.tcx.type_of(x.did).peel_refs().kind(), ty::Param(_)))
self.inter_expr().eq_expr(left, right)
}
+ pub fn eq_path(&mut self, left: &Path<'_>, right: &Path<'_>) -> bool {
+ self.inter_expr().eq_path(left, right)
+ }
+
pub fn eq_path_segment(&mut self, left: &PathSegment<'_>, right: &PathSegment<'_>) -> bool {
self.inter_expr().eq_path_segment(left, right)
}
}
}
- fn eq_path(&mut self, left: &Path<'_>, right: &Path<'_>) -> bool {
+ pub fn eq_path(&mut self, left: &Path<'_>, right: &Path<'_>) -> bool {
match (left.res, right.res) {
(Res::Local(l), Res::Local(r)) => l == r || self.locals.get(&l) == Some(&r),
(Res::Local(_), _) | (_, Res::Local(_)) => false,
pub fn is_diag_item_method(cx: &LateContext<'_>, def_id: DefId, diag_item: Symbol) -> bool {
if let Some(impl_did) = cx.tcx.impl_of_method(def_id) {
if let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def() {
- return cx.tcx.is_diagnostic_item(diag_item, adt.did);
+ return cx.tcx.is_diagnostic_item(diag_item, adt.did());
}
}
false
if let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def() {
return std_types_symbols
.iter()
- .any(|&symbol| cx.tcx.is_diagnostic_item(symbol, adt.did));
+ .any(|&symbol| cx.tcx.is_diagnostic_item(symbol, adt.did()));
}
}
}
1,46,0 { CONST_IF_MATCH }
1,45,0 { STR_STRIP_PREFIX }
1,43,0 { LOG2_10, LOG10_2 }
- 1,42,0 { MATCHES_MACRO, SLICE_PATTERNS }
+ 1,42,0 { MATCHES_MACRO, SLICE_PATTERNS, PTR_SLICE_RAW_PARTS }
1,41,0 { RE_REBALANCING_COHERENCE, RESULT_MAP_OR_ELSE }
1,40,0 { MEM_TAKE, NON_EXHAUSTIVE, OPTION_AS_DEREF }
1,38,0 { POINTER_CAST }
pub const DISPLAY_TRAIT: [&str; 3] = ["core", "fmt", "Display"];
#[cfg(feature = "internal")]
pub const EARLY_CONTEXT: [&str; 2] = ["rustc_lint", "EarlyContext"];
+#[cfg(feature = "internal")]
+pub const EARLY_LINT_PASS: [&str; 3] = ["rustc_lint", "passes", "EarlyLintPass"];
pub const EXIT: [&str; 3] = ["std", "process", "exit"];
pub const F32_EPSILON: [&str; 4] = ["core", "f32", "<impl f32>", "EPSILON"];
pub const F64_EPSILON: [&str; 4] = ["core", "f64", "<impl f64>", "EPSILON"];
#[cfg(feature = "internal")]
pub const LATE_CONTEXT: [&str; 2] = ["rustc_lint", "LateContext"];
#[cfg(feature = "internal")]
+pub const LATE_LINT_PASS: [&str; 3] = ["rustc_lint", "passes", "LateLintPass"];
+#[cfg(feature = "internal")]
pub const LINT: [&str; 2] = ["rustc_lint_defs", "Lint"];
pub const MUTEX_GUARD: [&str; 4] = ["std", "sync", "mutex", "MutexGuard"];
pub const OPEN_OPTIONS: [&str; 3] = ["std", "fs", "OpenOptions"];
pub const RESULT: [&str; 3] = ["core", "result", "Result"];
pub const RESULT_ERR: [&str; 4] = ["core", "result", "Result", "Err"];
pub const RESULT_OK: [&str; 4] = ["core", "result", "Result", "Ok"];
+#[cfg(feature = "internal")]
+pub const RUSTC_VERSION: [&str; 2] = ["rustc_semver", "RustcVersion"];
pub const RWLOCK_READ_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockReadGuard"];
pub const RWLOCK_WRITE_GUARD: [&str; 4] = ["std", "sync", "rwlock", "RwLockWriteGuard"];
pub const SERDE_DESERIALIZE: [&str; 3] = ["serde", "de", "Deserialize"];
closure_arg_is_type_annotated_double_ref,
next_pos: closure.span.lo(),
suggestion_start: String::new(),
- applicability: Applicability::MaybeIncorrect,
+ applicability: Applicability::MachineApplicable,
};
let fn_def_id = cx.tcx.hir().local_def_id(closure.hir_id);
/// Walks into `ty` and returns `true` if any inner type is an instance of the given adt
/// constructor.
-pub fn contains_adt_constructor(ty: Ty<'_>, adt: &AdtDef) -> bool {
+pub fn contains_adt_constructor(ty: Ty<'_>, adt: AdtDef<'_>) -> bool {
ty.walk().any(|inner| match inner.unpack() {
GenericArgKind::Type(inner_ty) => inner_ty.ty_adt_def() == Some(adt),
GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => false,
let def_id = match ty_to_check.kind() {
ty::Array(..) => return Some(sym::array),
ty::Slice(..) => return Some(sym::slice),
- ty::Adt(adt, _) => adt.did,
+ ty::Adt(adt, _) => adt.did(),
_ => return None,
};
// Returns whether the type has #[must_use] attribute
pub fn is_must_use_ty<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> bool {
match ty.kind() {
- ty::Adt(adt, _) => must_use_attr(cx.tcx.get_attrs(adt.did)).is_some(),
+ ty::Adt(adt, _) => must_use_attr(cx.tcx.get_attrs(adt.did())).is_some(),
ty::Foreign(ref did) => must_use_attr(cx.tcx.get_attrs(*did)).is_some(),
ty::Slice(ty) | ty::Array(ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
// for the Array case we don't need to care for the len == 0 case
let cause = rustc_middle::traits::ObligationCause::dummy();
if infcx.at(&cause, param_env).normalize(ty).is_ok() {
match ty.kind() {
- ty::Adt(def, substs) => def.variants.iter().all(|variant| {
+ ty::Adt(def, substs) => def.variants().iter().all(|variant| {
variant
.fields
.iter()
pub fn is_type_ref_to_diagnostic_item(cx: &LateContext<'_>, ty: Ty<'_>, diag_item: Symbol) -> bool {
match ty.kind() {
ty::Ref(_, ref_ty, _) => match ref_ty.kind() {
- ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(diag_item, adt.did),
+ ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(diag_item, adt.did()),
_ => false,
},
_ => false,
/// [Diagnostic Items]: https://rustc-dev-guide.rust-lang.org/diagnostics/diagnostic-items.html
pub fn is_type_diagnostic_item(cx: &LateContext<'_>, ty: Ty<'_>, diag_item: Symbol) -> bool {
match ty.kind() {
- ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(diag_item, adt.did),
+ ty::Adt(adt, _) => cx.tcx.is_diagnostic_item(diag_item, adt.did()),
_ => false,
}
}
/// Returns `false` if the `LangItem` is not defined.
pub fn is_type_lang_item(cx: &LateContext<'_>, ty: Ty<'_>, lang_item: hir::LangItem) -> bool {
match ty.kind() {
- ty::Adt(adt, _) => cx.tcx.lang_items().require(lang_item).map_or(false, |li| li == adt.did),
+ ty::Adt(adt, _) => cx
+ .tcx
+ .lang_items()
+ .require(lang_item)
+ .map_or(false, |li| li == adt.did()),
_ => false,
}
}
/// If you change the signature, remember to update the internal lint `MatchTypeOnDiagItem`
pub fn match_type(cx: &LateContext<'_>, ty: Ty<'_>, path: &[&str]) -> bool {
match ty.kind() {
- ty::Adt(adt, _) => match_def_path(cx, adt.did, path),
+ ty::Adt(adt, _) => match_def_path(cx, adt.did(), path),
_ => false,
}
}
match *ty.kind() {
ty::Array(component, _) => is_uninit_value_valid_for_ty(cx, component),
ty::Tuple(types) => types.iter().all(|ty| is_uninit_value_valid_for_ty(cx, ty)),
- ty::Adt(adt, _) => cx.tcx.lang_items().maybe_uninit() == Some(adt.did),
+ ty::Adt(adt, _) => cx.tcx.lang_items().maybe_uninit() == Some(adt.did()),
_ => false,
}
}
}
/// Gets the value of the given variant.
-pub fn get_discriminant_value(tcx: TyCtxt<'_>, adt: &'_ AdtDef, i: VariantIdx) -> EnumValue {
- let variant = &adt.variants[i];
+pub fn get_discriminant_value(tcx: TyCtxt<'_>, adt: AdtDef<'_>, i: VariantIdx) -> EnumValue {
+ let variant = &adt.variant(i);
match variant.discr {
VariantDiscr::Explicit(id) => read_explicit_enum_value(tcx, id).unwrap(),
- VariantDiscr::Relative(x) => match adt.variants[(i.as_usize() - x as usize).into()].discr {
+ VariantDiscr::Relative(x) => match adt.variant((i.as_usize() - x as usize).into()).discr {
VariantDiscr::Explicit(id) => read_explicit_enum_value(tcx, id).unwrap() + x,
VariantDiscr::Relative(_) => EnumValue::Unsigned(x.into()),
},
/// platform specific `libc::<platform>::c_void` types in libc.
pub fn is_c_void(cx: &LateContext<'_>, ty: Ty<'_>) -> bool {
if let ty::Adt(adt, _) = ty.kind()
- && let &[krate, .., name] = &*cx.get_def_path(adt.did)
+ && let &[krate, .., name] = &*cx.get_def_path(adt.did())
&& let sym::libc | sym::core | sym::std = krate
&& name.as_str() == "c_void"
{
[toolchain]
-channel = "nightly-2022-02-24"
+channel = "nightly-2022-03-24"
components = ["cargo", "llvm-tools-preview", "rust-src", "rust-std", "rustc", "rustc-dev", "rustfmt"]
comment_width = 100
match_block_trailing_comma = true
wrap_comments = true
-edition = "2018"
+edition = "2021"
error_on_line_overflow = true
version = "Two"
// a .span_bug or .bug call has already printed what
// it wants to print.
if !info.payload().is::<rustc_errors::ExplicitBug>() {
- let d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
- handler.emit_diagnostic(&d);
+ let mut d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
+ handler.emit_diagnostic(&mut d);
}
let version_info = rustc_tools_util::get_version_info!();
"syn",
"tokio",
"parking_lot",
+ "rustc_semver",
];
// Test dependencies may need an `extern crate` here to ensure that they show up
#[allow(unused_extern_crates)]
extern crate quote;
#[allow(unused_extern_crates)]
+extern crate rustc_semver;
+#[allow(unused_extern_crates)]
extern crate syn;
#[allow(unused_extern_crates)]
extern crate tokio;
let _threads = VarGuard::set(
"RUST_TEST_THREADS",
// if RUST_TEST_THREADS is set, adhere to it, otherwise override it
- env::var("RUST_TEST_THREADS").unwrap_or_else(|_| num_cpus::get().to_string()),
+ env::var("RUST_TEST_THREADS").unwrap_or_else(|_| {
+ std::thread::available_parallelism()
+ .map_or(1, std::num::NonZeroUsize::get)
+ .to_string()
+ }),
);
compiletest::run_tests(&config);
}
fn new(path: PathBuf) -> Self {
let content: String = std::fs::read_to_string(&path).unwrap();
// we don't want the first letter after "error: ", "help: " ... to be capitalized
- // also no puncutation (except for "?" ?) at the end of a line
+ // also no punctuation (except for "?" ?) at the end of a line
let regex_set: RegexSet = RegexSet::new(&[
r"error: [A-Z]",
r"help: [A-Z]",
--- /dev/null
+// run-rustfix
+
+#![deny(clippy::internal)]
+#![allow(clippy::missing_clippy_version_attribute)]
+#![feature(rustc_private)]
+
+extern crate rustc_ast;
+extern crate rustc_hir;
+extern crate rustc_lint;
+extern crate rustc_middle;
+#[macro_use]
+extern crate rustc_session;
+use clippy_utils::extract_msrv_attr;
+use rustc_hir::Expr;
+use rustc_lint::{EarlyContext, EarlyLintPass, LateContext, LateLintPass};
+use rustc_semver::RustcVersion;
+
+declare_lint! {
+ pub TEST_LINT,
+ Warn,
+ ""
+}
+
+struct Pass {
+ msrv: Option<RustcVersion>,
+}
+
+impl_lint_pass!(Pass => [TEST_LINT]);
+
+impl LateLintPass<'_> for Pass {
+ extract_msrv_attr!(LateContext);
+ fn check_expr(&mut self, _: &LateContext<'_>, _: &Expr<'_>) {}
+}
+
+impl EarlyLintPass for Pass {
+ extract_msrv_attr!(EarlyContext);
+ fn check_expr(&mut self, _: &EarlyContext<'_>, _: &rustc_ast::Expr) {}
+}
+
+fn main() {}
--- /dev/null
+// run-rustfix
+
+#![deny(clippy::internal)]
+#![allow(clippy::missing_clippy_version_attribute)]
+#![feature(rustc_private)]
+
+extern crate rustc_ast;
+extern crate rustc_hir;
+extern crate rustc_lint;
+extern crate rustc_middle;
+#[macro_use]
+extern crate rustc_session;
+use clippy_utils::extract_msrv_attr;
+use rustc_hir::Expr;
+use rustc_lint::{EarlyContext, EarlyLintPass, LateContext, LateLintPass};
+use rustc_semver::RustcVersion;
+
+declare_lint! {
+ pub TEST_LINT,
+ Warn,
+ ""
+}
+
+struct Pass {
+ msrv: Option<RustcVersion>,
+}
+
+impl_lint_pass!(Pass => [TEST_LINT]);
+
+impl LateLintPass<'_> for Pass {
+ fn check_expr(&mut self, _: &LateContext<'_>, _: &Expr<'_>) {}
+}
+
+impl EarlyLintPass for Pass {
+ fn check_expr(&mut self, _: &EarlyContext<'_>, _: &rustc_ast::Expr) {}
+}
+
+fn main() {}
--- /dev/null
+error: `extract_msrv_attr!` macro missing from `LateLintPass` implementation
+ --> $DIR/invalid_msrv_attr_impl.rs:30:1
+ |
+LL | impl LateLintPass<'_> for Pass {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/invalid_msrv_attr_impl.rs:3:9
+ |
+LL | #![deny(clippy::internal)]
+ | ^^^^^^^^^^^^^^^^
+ = note: `#[deny(clippy::missing_msrv_attr_impl)]` implied by `#[deny(clippy::internal)]`
+help: add `extract_msrv_attr!(LateContext)` to the `LateLintPass` implementation
+ |
+LL + impl LateLintPass<'_> for Pass {
+LL + extract_msrv_attr!(LateContext);
+ |
+
+error: `extract_msrv_attr!` macro missing from `EarlyLintPass` implementation
+ --> $DIR/invalid_msrv_attr_impl.rs:34:1
+ |
+LL | impl EarlyLintPass for Pass {
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: add `extract_msrv_attr!(EarlyContext)` to the `EarlyLintPass` implementation
+ |
+LL + impl EarlyLintPass for Pass {
+LL + extract_msrv_attr!(EarlyContext);
+ |
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+#![feature(lint_reasons)]
+#![deny(clippy::allow_attributes_without_reason)]
+
+// These should trigger the lint
+#[allow(dead_code)]
+#[allow(dead_code, deprecated)]
+// These should be fine
+#[allow(dead_code, reason = "This should be allowed")]
+#[warn(dyn_drop, reason = "Warnings can also have reasons")]
+#[warn(deref_nullptr)]
+#[deny(deref_nullptr)]
+#[forbid(deref_nullptr)]
+
+fn main() {}
--- /dev/null
+error: `allow` attribute without specifying a reason
+ --> $DIR/allow_attributes_without_reason.rs:5:1
+ |
+LL | #[allow(dead_code)]
+ | ^^^^^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/allow_attributes_without_reason.rs:2:9
+ |
+LL | #![deny(clippy::allow_attributes_without_reason)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: try adding a reason at the end with `, reason = ".."`
+
+error: `allow` attribute without specifying a reason
+ --> $DIR/allow_attributes_without_reason.rs:6:1
+ |
+LL | #[allow(dead_code, deprecated)]
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: try adding a reason at the end with `, reason = ".."`
+
+error: aborting due to 2 previous errors
+
fn main() {
let s = String::from("String");
- s.as_bytes().get(3);
+ let _ = s.as_bytes().get(3);
let _ = &s.as_bytes().get(3);
- s[..].as_bytes().get(3);
+ let _ = s[..].as_bytes().get(3);
}
fn main() {
let s = String::from("String");
- s.bytes().nth(3);
+ let _ = s.bytes().nth(3);
let _ = &s.bytes().nth(3);
- s[..].bytes().nth(3);
+ let _ = s[..].bytes().nth(3);
}
error: called `.byte().nth()` on a `String`
- --> $DIR/bytes_nth.rs:8:5
+ --> $DIR/bytes_nth.rs:8:13
|
-LL | s.bytes().nth(3);
- | ^^^^^^^^^^^^^^^^ help: try: `s.as_bytes().get(3)`
+LL | let _ = s.bytes().nth(3);
+ | ^^^^^^^^^^^^^^^^ help: try: `s.as_bytes().get(3)`
|
= note: `-D clippy::bytes-nth` implied by `-D warnings`
| ^^^^^^^^^^^^^^^^ help: try: `s.as_bytes().get(3)`
error: called `.byte().nth()` on a `str`
- --> $DIR/bytes_nth.rs:10:5
+ --> $DIR/bytes_nth.rs:10:13
|
-LL | s[..].bytes().nth(3);
- | ^^^^^^^^^^^^^^^^^^^^ help: try: `s[..].as_bytes().get(3)`
+LL | let _ = s[..].bytes().nth(3);
+ | ^^^^^^^^^^^^^^^^^^^^ help: try: `s[..].as_bytes().get(3)`
error: aborting due to 3 previous errors
--- /dev/null
+#![warn(clippy::cast_enum_constructor)]
+#![allow(clippy::fn_to_numeric_cast)]
+
+fn main() {
+ enum Foo {
+ Y(u32),
+ }
+
+ enum Bar {
+ X,
+ }
+
+ let _ = Foo::Y as usize;
+ let _ = Foo::Y as isize;
+ let _ = Foo::Y as fn(u32) -> Foo;
+ let _ = Bar::X as usize;
+}
--- /dev/null
+error: cast of an enum tuple constructor to an integer
+ --> $DIR/cast_enum_constructor.rs:13:13
+ |
+LL | let _ = Foo::Y as usize;
+ | ^^^^^^^^^^^^^^^
+ |
+ = note: `-D clippy::cast-enum-constructor` implied by `-D warnings`
+
+error: cast of an enum tuple constructor to an integer
+ --> $DIR/cast_enum_constructor.rs:14:13
+ |
+LL | let _ = Foo::Y as isize;
+ | ^^^^^^^^^^^^^^^
+
+error: aborting due to 2 previous errors
+
--- /dev/null
+fn main() {
+ let x: [i32; 3] = [1_i32, 2, 3];
+ let r_x = &x;
+ // Check casting through multiple bindings
+ // Because it's separate, it does not check the cast back to something of the same size
+ let a = r_x as *const [i32];
+ let b = a as *const [u8];
+ let c = b as *const [u32];
+
+ // loses data
+ let loss = r_x as *const [i32] as *const [u8];
+
+ // Cast back to same size but different type loses no data, just type conversion
+ // This is weird code but there's no reason for this lint specifically to fire *twice* on it
+ let restore = r_x as *const [i32] as *const [u8] as *const [u32];
+
+ // Check casting through blocks is detected
+ let loss_block_1 = { r_x as *const [i32] } as *const [u8];
+ let loss_block_2 = {
+ let _ = ();
+ r_x as *const [i32]
+ } as *const [u8];
+
+    // Check that restores of the same size are detected through blocks
+ let restore_block_1 = { r_x as *const [i32] } as *const [u8] as *const [u32];
+ let restore_block_2 = { ({ r_x as *const [i32] }) as *const [u8] } as *const [u32];
+ let restore_block_3 = {
+ let _ = ();
+ ({
+ let _ = ();
+ r_x as *const [i32]
+ }) as *const [u8]
+ } as *const [u32];
+
+ // Check that the result of a long chain of casts is detected
+ let long_chain_loss = r_x as *const [i32] as *const [u32] as *const [u16] as *const [i8] as *const [u8];
+ let long_chain_restore =
+ r_x as *const [i32] as *const [u32] as *const [u16] as *const [i8] as *const [u8] as *const [u32];
+}
--- /dev/null
+error: casting between raw pointers to `[i32]` (element size 4) and `[u8]` (element size 1) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:7:13
+ |
+LL | let b = a as *const [u8];
+ | ^^^^^^^^^^^^^^^^ help: replace with `ptr::slice_from_raw_parts`: `core::ptr::slice_from_raw_parts(a as *const u8, ..)`
+ |
+ = note: `#[deny(clippy::cast_slice_different_sizes)]` on by default
+
+error: casting between raw pointers to `[u8]` (element size 1) and `[u32]` (element size 4) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:8:13
+ |
+LL | let c = b as *const [u32];
+ | ^^^^^^^^^^^^^^^^^ help: replace with `ptr::slice_from_raw_parts`: `core::ptr::slice_from_raw_parts(b as *const u32, ..)`
+
+error: casting between raw pointers to `[i32]` (element size 4) and `[u8]` (element size 1) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:11:16
+ |
+LL | let loss = r_x as *const [i32] as *const [u8];
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace with `ptr::slice_from_raw_parts`: `core::ptr::slice_from_raw_parts(r_x as *const [i32] as *const u8, ..)`
+
+error: casting between raw pointers to `[i32]` (element size 4) and `[u8]` (element size 1) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:18:24
+ |
+LL | let loss_block_1 = { r_x as *const [i32] } as *const [u8];
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace with `ptr::slice_from_raw_parts`: `core::ptr::slice_from_raw_parts({ r_x as *const [i32] } as *const u8, ..)`
+
+error: casting between raw pointers to `[i32]` (element size 4) and `[u8]` (element size 1) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:19:24
+ |
+LL | let loss_block_2 = {
+ | ________________________^
+LL | | let _ = ();
+LL | | r_x as *const [i32]
+LL | | } as *const [u8];
+ | |____________________^
+ |
+help: replace with `ptr::slice_from_raw_parts`
+ |
+LL ~ let loss_block_2 = core::ptr::slice_from_raw_parts({
+LL + let _ = ();
+LL + r_x as *const [i32]
+LL ~ } as *const u8, ..);
+ |
+
+error: casting between raw pointers to `[i32]` (element size 4) and `[u8]` (element size 1) does not adjust the count
+ --> $DIR/cast_slice_different_sizes.rs:36:27
+ |
+LL | let long_chain_loss = r_x as *const [i32] as *const [u32] as *const [u16] as *const [i8] as *const [u8];
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace with `ptr::slice_from_raw_parts`: `core::ptr::slice_from_raw_parts(r_x as *const [i32] as *const [u32] as *const [u16] as *const [i8] as *const u8, ..)`
+
+error: aborting due to 6 previous errors
+
--- /dev/null
+error: this operation will panic at runtime
+ --> $DIR/ice-5497.rs:9:22
+ |
+LL | const OOB: i32 = [1][1] + T::OOB;
+ | ^^^^^^ index out of bounds: the length is 1 but the index is 1
+ |
+ = note: `#[deny(unconditional_panic)]` on by default
+
+error: aborting due to previous error
+
#[warn(clippy::main_recursion)]
#[start]
-fn main(argc: isize, argv: *const *const u8) -> isize {
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
let x = N.load(Ordering::Relaxed);
N.store(x + 1, Ordering::Relaxed);
if x < 3 {
- main(argc, argv);
+ main(_argc, _argv);
}
0
+// compile-flags: --test
#![warn(clippy::dbg_macro)]
fn foo(n: u32) -> u32 {
dbg!(2);
});
}
+
+#[test]
+pub fn issue8481() {
+ dbg!(1);
+}
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:4:22
+ --> $DIR/dbg_macro.rs:5:22
|
LL | if let Some(n) = dbg!(n.checked_sub(4)) { n } else { n }
| ^^^^^^^^^^^^^^^^^^^^^^
| ~~~~~~~~~~~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:8:8
+ --> $DIR/dbg_macro.rs:9:8
|
LL | if dbg!(n <= 1) {
| ^^^^^^^^^^^^
| ~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:9:9
+ --> $DIR/dbg_macro.rs:10:9
|
LL | dbg!(1)
| ^^^^^^^
|
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:11:9
+ --> $DIR/dbg_macro.rs:12:9
|
LL | dbg!(n * factorial(n - 1))
| ^^^^^^^^^^^^^^^^^^^^^^^^^^
|
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:16:5
+ --> $DIR/dbg_macro.rs:17:5
|
LL | dbg!(42);
| ^^^^^^^^
| ~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:17:5
+ --> $DIR/dbg_macro.rs:18:5
|
LL | dbg!(dbg!(dbg!(42)));
| ^^^^^^^^^^^^^^^^^^^^
| ~~~~~~~~~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:18:14
+ --> $DIR/dbg_macro.rs:19:14
|
LL | foo(3) + dbg!(factorial(4));
| ^^^^^^^^^^^^^^^^^^
| ~~~~~~~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:19:5
+ --> $DIR/dbg_macro.rs:20:5
|
LL | dbg!(1, 2, dbg!(3, 4));
| ^^^^^^^^^^^^^^^^^^^^^^
| ~~~~~~~~~~~~~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:20:5
+ --> $DIR/dbg_macro.rs:21:5
|
LL | dbg!(1, 2, 3, 4, 5);
| ^^^^^^^^^^^^^^^^^^^
| ~~~~~~~~~~~~~~~
error: `dbg!` macro is intended as a debugging tool
- --> $DIR/dbg_macro.rs:40:9
+ --> $DIR/dbg_macro.rs:41:9
|
LL | dbg!(2);
| ^^^^^^^
// run-rustfix
#![warn(clippy::extend_with_drain)]
+#![allow(clippy::iter_with_drain)]
use std::collections::BinaryHeap;
fn main() {
//gets linted
let mut vec1 = vec![0u8; 1024];
let mut vec2: std::vec::Vec<u8> = Vec::new();
-
vec2.append(&mut vec1);
let mut vec3 = vec![0u8; 1024];
vec11.append(&mut return_vector());
- //won't get linted it dosen't move the entire content of a vec into another
+ //won't get linted it doesn't move the entire content of a vec into another
let mut test1 = vec![0u8, 10];
let mut test2: std::vec::Vec<u8> = Vec::new();
// run-rustfix
#![warn(clippy::extend_with_drain)]
+#![allow(clippy::iter_with_drain)]
use std::collections::BinaryHeap;
fn main() {
//gets linted
let mut vec1 = vec![0u8; 1024];
let mut vec2: std::vec::Vec<u8> = Vec::new();
-
vec2.extend(vec1.drain(..));
let mut vec3 = vec![0u8; 1024];
vec11.extend(return_vector().drain(..));
- //won't get linted it dosen't move the entire content of a vec into another
+ //won't get linted it doesn't move the entire content of a vec into another
let mut test1 = vec![0u8, 10];
let mut test2: std::vec::Vec<u8> = Vec::new();
let s = [1, 2, 3];
let v = vec![1, 2, 3];
- s.get(0);
+ let _ = s.get(0);
// Should be replaced by s.get(0)
- s.get(2);
+ let _ = s.get(2);
// Should be replaced by s.get(2)
- v.get(5);
+ let _ = v.get(5);
// Should be replaced by v.get(5)
- v.get(0);
+ let _ = v.get(0);
// Should be replaced by v.get(0)
let o = Some(5);
let s = [1, 2, 3];
let v = vec![1, 2, 3];
- s.iter().next();
+ let _ = s.iter().next();
// Should be replaced by s.get(0)
- s[2..].iter().next();
+ let _ = s[2..].iter().next();
// Should be replaced by s.get(2)
- v[5..].iter().next();
+ let _ = v[5..].iter().next();
// Should be replaced by v.get(5)
- v.iter().next();
+ let _ = v.iter().next();
// Should be replaced by v.get(0)
let o = Some(5);
error: using `.iter().next()` on an array
- --> $DIR/iter_next_slice.rs:9:5
+ --> $DIR/iter_next_slice.rs:9:13
|
-LL | s.iter().next();
- | ^^^^^^^^^^^^^^^ help: try calling: `s.get(0)`
+LL | let _ = s.iter().next();
+ | ^^^^^^^^^^^^^^^ help: try calling: `s.get(0)`
|
= note: `-D clippy::iter-next-slice` implied by `-D warnings`
error: using `.iter().next()` on a Slice without end index
- --> $DIR/iter_next_slice.rs:12:5
+ --> $DIR/iter_next_slice.rs:12:13
|
-LL | s[2..].iter().next();
- | ^^^^^^^^^^^^^^^^^^^^ help: try calling: `s.get(2)`
+LL | let _ = s[2..].iter().next();
+ | ^^^^^^^^^^^^^^^^^^^^ help: try calling: `s.get(2)`
error: using `.iter().next()` on a Slice without end index
- --> $DIR/iter_next_slice.rs:15:5
+ --> $DIR/iter_next_slice.rs:15:13
|
-LL | v[5..].iter().next();
- | ^^^^^^^^^^^^^^^^^^^^ help: try calling: `v.get(5)`
+LL | let _ = v[5..].iter().next();
+ | ^^^^^^^^^^^^^^^^^^^^ help: try calling: `v.get(5)`
error: using `.iter().next()` on an array
- --> $DIR/iter_next_slice.rs:18:5
+ --> $DIR/iter_next_slice.rs:18:13
|
-LL | v.iter().next();
- | ^^^^^^^^^^^^^^^ help: try calling: `v.get(0)`
+LL | let _ = v.iter().next();
+ | ^^^^^^^^^^^^^^^ help: try calling: `v.get(0)`
error: aborting due to 4 previous errors
--- /dev/null
+// run-rustfix
+// will emits unused mut warnings after fixing
+#![allow(unused_mut)]
+// will emits needless collect warnings after fixing
+#![allow(clippy::needless_collect)]
+#![warn(clippy::iter_with_drain)]
+use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
+
+fn full() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.into_iter().collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.into_iter().collect();
+ let mut a: HashMap<_, _> = a.into_iter().map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+}
+
+fn closed() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.into_iter().collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.into_iter().collect();
+ let mut a: HashMap<_, _> = a.into_iter().map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+}
+
+fn should_not_help() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.drain(1..).collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.drain(..a.len() - 1).collect();
+ let mut a: HashMap<_, _> = a.drain(1..a.len() - 1).map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+
+ let mut b = vec!["aaa".to_string(), "bbb".to_string()];
+ let _: Vec<_> = b.drain(0..a.len()).collect();
+}
+
+#[derive(Default)]
+struct Bomb {
+ fire: Vec<u8>,
+}
+
+fn should_not_help_0(bomb: &mut Bomb) {
+ let _: Vec<u8> = bomb.fire.drain(..).collect();
+}
+
+fn main() {
+ full();
+ closed();
+ should_not_help();
+ should_not_help_0(&mut Bomb::default());
+}
--- /dev/null
+// run-rustfix
+// will emits unused mut warnings after fixing
+#![allow(unused_mut)]
+// will emits needless collect warnings after fixing
+#![allow(clippy::needless_collect)]
+#![warn(clippy::iter_with_drain)]
+use std::collections::{BinaryHeap, HashMap, HashSet, VecDeque};
+
+fn full() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.drain(..).collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.drain(..).collect();
+ let mut a: HashMap<_, _> = a.drain(..).map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+}
+
+fn closed() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.drain(0..).collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.drain(..a.len()).collect();
+ let mut a: HashMap<_, _> = a.drain(0..a.len()).map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+}
+
+fn should_not_help() {
+ let mut a = vec!["aaa".to_string(), "bbb".to_string()];
+ let mut a: BinaryHeap<_> = a.drain(1..).collect();
+ let mut a: HashSet<_> = a.drain().collect();
+ let mut a: VecDeque<_> = a.drain().collect();
+ let mut a: Vec<_> = a.drain(..a.len() - 1).collect();
+ let mut a: HashMap<_, _> = a.drain(1..a.len() - 1).map(|x| (x.clone(), x)).collect();
+ let _: Vec<(String, String)> = a.drain().collect();
+
+ let mut b = vec!["aaa".to_string(), "bbb".to_string()];
+ let _: Vec<_> = b.drain(0..a.len()).collect();
+}
+
+#[derive(Default)]
+struct Bomb {
+ fire: Vec<u8>,
+}
+
+fn should_not_help_0(bomb: &mut Bomb) {
+ let _: Vec<u8> = bomb.fire.drain(..).collect();
+}
+
+fn main() {
+ full();
+ closed();
+ should_not_help();
+ should_not_help_0(&mut Bomb::default());
+}
--- /dev/null
+error: `drain(..)` used on a `Vec`
+ --> $DIR/iter_with_drain.rs:11:34
+ |
+LL | let mut a: BinaryHeap<_> = a.drain(..).collect();
+ | ^^^^^^^^^ help: try this: `into_iter()`
+ |
+ = note: `-D clippy::iter-with-drain` implied by `-D warnings`
+
+error: `drain(..)` used on a `VecDeque`
+ --> $DIR/iter_with_drain.rs:14:27
+ |
+LL | let mut a: Vec<_> = a.drain(..).collect();
+ | ^^^^^^^^^ help: try this: `into_iter()`
+
+error: `drain(..)` used on a `Vec`
+ --> $DIR/iter_with_drain.rs:15:34
+ |
+LL | let mut a: HashMap<_, _> = a.drain(..).map(|x| (x.clone(), x)).collect();
+ | ^^^^^^^^^ help: try this: `into_iter()`
+
+error: `drain(..)` used on a `Vec`
+ --> $DIR/iter_with_drain.rs:21:34
+ |
+LL | let mut a: BinaryHeap<_> = a.drain(0..).collect();
+ | ^^^^^^^^^^ help: try this: `into_iter()`
+
+error: `drain(..)` used on a `VecDeque`
+ --> $DIR/iter_with_drain.rs:24:27
+ |
+LL | let mut a: Vec<_> = a.drain(..a.len()).collect();
+ | ^^^^^^^^^^^^^^^^ help: try this: `into_iter()`
+
+error: `drain(..)` used on a `Vec`
+ --> $DIR/iter_with_drain.rs:25:34
+ |
+LL | let mut a: HashMap<_, _> = a.drain(0..a.len()).map(|x| (x.clone(), x)).collect();
+ | ^^^^^^^^^^^^^^^^^ help: try this: `into_iter()`
+
+error: aborting due to 6 previous errors
+
// #7077
let s = &String::new();
+ #[allow(clippy::needless_match)]
let _: Option<&str> = match Some(s) {
Some(s) => Some(s),
None => None,
// #7077
let s = &String::new();
+ #[allow(clippy::needless_match)]
let _: Option<&str> = match Some(s) {
Some(s) => Some(s),
None => None,
}
// make sure parentheses are added properly to bitwise operators, which have lower precedence than
- // arithmetric ones
+ // arithmetic ones
let mut count = 0 << 1;
for i in 0..1 << 1 {
dst[count] = src[i + 2];
+++ /dev/null
-// run-rustfix
-
-#![warn(clippy::all, clippy::pedantic)]
-#![allow(clippy::let_underscore_drop)]
-#![allow(clippy::missing_docs_in_private_items)]
-#![allow(clippy::map_identity)]
-#![allow(clippy::redundant_closure)]
-#![allow(clippy::unnecessary_wraps)]
-#![feature(result_flattening)]
-
-fn main() {
- // mapping to Option on Iterator
- fn option_id(x: i8) -> Option<i8> {
- Some(x)
- }
- let option_id_ref: fn(i8) -> Option<i8> = option_id;
- let option_id_closure = |x| Some(x);
- let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id).collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_ref).collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_closure).collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(|x| x.checked_add(1)).collect();
-
- // mapping to Iterator on Iterator
- let _: Vec<_> = vec![5_i8; 6].into_iter().flat_map(|x| 0..x).collect();
-
- // mapping to Option on Option
- let _: Option<_> = (Some(Some(1))).and_then(|x| x);
-
- // mapping to Result on Result
- let _: Result<_, &str> = (Ok(Ok(1))).and_then(|x| x);
-}
-// run-rustfix
-
-#![warn(clippy::all, clippy::pedantic)]
-#![allow(clippy::let_underscore_drop)]
-#![allow(clippy::missing_docs_in_private_items)]
-#![allow(clippy::map_identity)]
-#![allow(clippy::redundant_closure)]
-#![allow(clippy::unnecessary_wraps)]
+#![warn(clippy::map_flatten)]
#![feature(result_flattening)]
-fn main() {
- // mapping to Option on Iterator
- fn option_id(x: i8) -> Option<i8> {
- Some(x)
- }
- let option_id_ref: fn(i8) -> Option<i8> = option_id;
- let option_id_closure = |x| Some(x);
- let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_ref).flatten().collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_closure).flatten().collect();
- let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| x.checked_add(1)).flatten().collect();
+// issue #8506, multi-line
+#[rustfmt::skip]
+fn long_span() {
+ let _: Option<i32> = Some(1)
+ .map(|x| {
+ if x <= 5 {
+ Some(x)
+ } else {
+ None
+ }
+ })
+ .flatten();
- // mapping to Iterator on Iterator
- let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect();
+ let _: Result<i32, i32> = Ok(1)
+ .map(|x| {
+ if x == 1 {
+ Ok(x)
+ } else {
+ Err(0)
+ }
+ })
+ .flatten();
- // mapping to Option on Option
- let _: Option<_> = (Some(Some(1))).map(|x| x).flatten();
+ let result: Result<i32, i32> = Ok(2);
+ fn do_something() { }
+ let _: Result<i32, i32> = result
+ .map(|res| {
+ if res > 0 {
+ do_something();
+ Ok(res)
+ } else {
+ Err(0)
+ }
+ })
+ .flatten();
+
+ let _: Vec<_> = vec![5_i8; 6]
+ .into_iter()
+ .map(|some_value| {
+ if some_value > 3 {
+ Some(some_value)
+ } else {
+ None
+ }
+ })
+ .flatten()
+ .collect();
+}
- // mapping to Result on Result
- let _: Result<_, &str> = (Ok(Ok(1))).map(|x| x).flatten();
+fn main() {
+ long_span();
}
-error: called `map(..).flatten()` on an `Iterator`
- --> $DIR/map_flatten.rs:18:46
+error: called `map(..).flatten()` on `Option`
+ --> $DIR/map_flatten.rs:8:10
|
-LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().collect();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ help: try using `filter_map` instead: `.filter_map(option_id)`
+LL | .map(|x| {
+ | __________^
+LL | | if x <= 5 {
+LL | | Some(x)
+LL | | } else {
+... |
+LL | | })
+LL | | .flatten();
+ | |__________________^
|
= note: `-D clippy::map-flatten` implied by `-D warnings`
-
-error: called `map(..).flatten()` on an `Iterator`
- --> $DIR/map_flatten.rs:19:46
+help: try replacing `map` with `and_then`
|
-LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_ref).flatten().collect();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try using `filter_map` instead: `.filter_map(option_id_ref)`
-
-error: called `map(..).flatten()` on an `Iterator`
- --> $DIR/map_flatten.rs:20:46
+LL ~ .and_then(|x| {
+LL + if x <= 5 {
+LL + Some(x)
|
-LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_closure).flatten().collect();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try using `filter_map` instead: `.filter_map(option_id_closure)`
-
-error: called `map(..).flatten()` on an `Iterator`
- --> $DIR/map_flatten.rs:21:46
+help: and remove the `.flatten()`
+ |
+LL + None
+LL + }
+LL ~ });
|
-LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| x.checked_add(1)).flatten().collect();
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try using `filter_map` instead: `.filter_map(|x| x.checked_add(1))`
-error: called `map(..).flatten()` on an `Iterator`
- --> $DIR/map_flatten.rs:24:46
+error: called `map(..).flatten()` on `Result`
+ --> $DIR/map_flatten.rs:18:10
+ |
+LL | .map(|x| {
+ | __________^
+LL | | if x == 1 {
+LL | | Ok(x)
+LL | | } else {
+... |
+LL | | })
+LL | | .flatten();
+ | |__________________^
+ |
+help: try replacing `map` with `and_then`
+ |
+LL ~ .and_then(|x| {
+LL + if x == 1 {
+LL + Ok(x)
+ |
+help: and remove the `.flatten()`
+ |
+LL + Err(0)
+LL + }
+LL ~ });
|
-LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect();
- | ^^^^^^^^^^^^^^^^^^^^^^^^ help: try using `flat_map` instead: `.flat_map(|x| 0..x)`
-error: called `map(..).flatten()` on an `Option`
- --> $DIR/map_flatten.rs:27:39
+error: called `map(..).flatten()` on `Result`
+ --> $DIR/map_flatten.rs:30:10
+ |
+LL | .map(|res| {
+ | __________^
+LL | | if res > 0 {
+LL | | do_something();
+LL | | Ok(res)
+... |
+LL | | })
+LL | | .flatten();
+ | |__________________^
+ |
+help: try replacing `map` with `and_then`
+ |
+LL ~ .and_then(|res| {
+LL + if res > 0 {
+LL + do_something();
+ |
+help: and remove the `.flatten()`
+ |
+LL + Err(0)
+LL + }
+LL ~ });
|
-LL | let _: Option<_> = (Some(Some(1))).map(|x| x).flatten();
- | ^^^^^^^^^^^^^^^^^^^^^ help: try using `and_then` instead: `.and_then(|x| x)`
-error: called `map(..).flatten()` on an `Result`
- --> $DIR/map_flatten.rs:30:41
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten.rs:42:10
+ |
+LL | .map(|some_value| {
+ | __________^
+LL | | if some_value > 3 {
+LL | | Some(some_value)
+LL | | } else {
+... |
+LL | | })
+LL | | .flatten()
+ | |__________________^
+ |
+help: try replacing `map` with `filter_map`
+ |
+LL ~ .filter_map(|some_value| {
+LL + if some_value > 3 {
+LL + Some(some_value)
+ |
+help: and remove the `.flatten()`
+ |
+LL + None
+LL + }
+LL + })
|
-LL | let _: Result<_, &str> = (Ok(Ok(1))).map(|x| x).flatten();
- | ^^^^^^^^^^^^^^^^^^^^^ help: try using `and_then` instead: `.and_then(|x| x)`
-error: aborting due to 7 previous errors
+error: aborting due to 4 previous errors
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::all, clippy::pedantic)]
+#![allow(clippy::let_underscore_drop)]
+#![allow(clippy::missing_docs_in_private_items)]
+#![allow(clippy::map_identity)]
+#![allow(clippy::redundant_closure)]
+#![allow(clippy::unnecessary_wraps)]
+#![feature(result_flattening)]
+
+fn main() {
+ // mapping to Option on Iterator
+ fn option_id(x: i8) -> Option<i8> {
+ Some(x)
+ }
+ let option_id_ref: fn(i8) -> Option<i8> = option_id;
+ let option_id_closure = |x| Some(x);
+ let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id).collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_ref).collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_closure).collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(|x| x.checked_add(1)).collect();
+
+ // mapping to Iterator on Iterator
+ let _: Vec<_> = vec![5_i8; 6].into_iter().flat_map(|x| 0..x).collect();
+
+ // mapping to Option on Option
+ let _: Option<_> = (Some(Some(1))).and_then(|x| x);
+
+ // mapping to Result on Result
+ let _: Result<_, &str> = (Ok(Ok(1))).and_then(|x| x);
+}
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::all, clippy::pedantic)]
+#![allow(clippy::let_underscore_drop)]
+#![allow(clippy::missing_docs_in_private_items)]
+#![allow(clippy::map_identity)]
+#![allow(clippy::redundant_closure)]
+#![allow(clippy::unnecessary_wraps)]
+#![feature(result_flattening)]
+
+fn main() {
+ // mapping to Option on Iterator
+ fn option_id(x: i8) -> Option<i8> {
+ Some(x)
+ }
+ let option_id_ref: fn(i8) -> Option<i8> = option_id;
+ let option_id_closure = |x| Some(x);
+ let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_ref).flatten().collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_closure).flatten().collect();
+ let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| x.checked_add(1)).flatten().collect();
+
+ // mapping to Iterator on Iterator
+ let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect();
+
+ // mapping to Option on Option
+ let _: Option<_> = (Some(Some(1))).map(|x| x).flatten();
+
+ // mapping to Result on Result
+ let _: Result<_, &str> = (Ok(Ok(1))).map(|x| x).flatten();
+}
--- /dev/null
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten_fixable.rs:18:47
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().collect();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-D clippy::map-flatten` implied by `-D warnings`
+help: try replacing `map` with `filter_map`, and remove the `.flatten()`
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id).collect();
+ | ~~~~~~~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten_fixable.rs:19:47
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_ref).flatten().collect();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `filter_map`, and remove the `.flatten()`
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_ref).collect();
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten_fixable.rs:20:47
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_closure).flatten().collect();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `filter_map`, and remove the `.flatten()`
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(option_id_closure).collect();
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten_fixable.rs:21:47
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| x.checked_add(1)).flatten().collect();
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `filter_map`, and remove the `.flatten()`
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().filter_map(|x| x.checked_add(1)).collect();
+ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Iterator`
+ --> $DIR/map_flatten_fixable.rs:24:47
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect();
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `flat_map`, and remove the `.flatten()`
+ |
+LL | let _: Vec<_> = vec![5_i8; 6].into_iter().flat_map(|x| 0..x).collect();
+ | ~~~~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Option`
+ --> $DIR/map_flatten_fixable.rs:27:40
+ |
+LL | let _: Option<_> = (Some(Some(1))).map(|x| x).flatten();
+ | ^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `and_then`, and remove the `.flatten()`
+ |
+LL | let _: Option<_> = (Some(Some(1))).and_then(|x| x);
+ | ~~~~~~~~~~~~~~~
+
+error: called `map(..).flatten()` on `Result`
+ --> $DIR/map_flatten_fixable.rs:30:42
+ |
+LL | let _: Result<_, &str> = (Ok(Ok(1))).map(|x| x).flatten();
+ | ^^^^^^^^^^^^^^^^^^^^
+ |
+help: try replacing `map` with `and_then`, and remove the `.flatten()`
+ |
+LL | let _: Result<_, &str> = (Ok(Ok(1))).and_then(|x| x);
+ | ~~~~~~~~~~~~~~~
+
+error: aborting due to 7 previous errors
+
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:13:14
+error: this match arm has an identical body to the `_` wildcard arm
+ --> $DIR/match_same_arms.rs:11:9
|
-LL | _ => 0, //~ ERROR match arms have same body
- | ^
+LL | Abc::A => 0,
+ | ^^^^^^^^^^^ help: try removing the arm
|
= note: `-D clippy::match-same-arms` implied by `-D warnings`
-note: same as this
- --> $DIR/match_same_arms.rs:11:19
+ = help: or try changing either arm body
+note: `_` wildcard arm here
+ --> $DIR/match_same_arms.rs:13:9
|
-LL | Abc::A => 0,
- | ^
-note: `Abc::A` has the same arm body as the `_` wildcard, consider removing it
- --> $DIR/match_same_arms.rs:11:19
- |
-LL | Abc::A => 0,
- | ^
+LL | _ => 0, //~ ERROR match arms have same body
+ | ^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:18:20
- |
-LL | (.., 3) => 42, //~ ERROR match arms have same body
- | ^^
- |
-note: same as this
- --> $DIR/match_same_arms.rs:17:23
- |
-LL | (1, .., 3) => 42,
- | ^^
-help: consider refactoring into `(1, .., 3) | (.., 3)`
+error: this match arm has an identical body to another arm
--> $DIR/match_same_arms.rs:17:9
|
LL | (1, .., 3) => 42,
- | ^^^^^^^^^^
- = help: ...or consider changing the match arm bodies
+ | ----------^^^^^^
+ | |
+ | help: try merging the arm patterns: `(1, .., 3) | (.., 3)`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms.rs:18:9
+ |
+LL | (.., 3) => 42, //~ ERROR match arms have same body
+ | ^^^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:24:15
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms.rs:24:9
|
LL | 51 => 1, //~ ERROR match arms have same body
- | ^
+ | --^^^^^
+ | |
+ | help: try merging the arm patterns: `51 | 42`
|
-note: same as this
- --> $DIR/match_same_arms.rs:23:15
- |
-LL | 42 => 1,
- | ^
-help: consider refactoring into `42 | 51`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms.rs:23:9
|
LL | 42 => 1,
- | ^^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:26:15
- |
-LL | 52 => 2, //~ ERROR match arms have same body
- | ^
- |
-note: same as this
- --> $DIR/match_same_arms.rs:25:15
- |
-LL | 41 => 2,
- | ^
-help: consider refactoring into `41 | 52`
+error: this match arm has an identical body to another arm
--> $DIR/match_same_arms.rs:25:9
|
LL | 41 => 2,
- | ^^
- = help: ...or consider changing the match arm bodies
+ | --^^^^^
+ | |
+ | help: try merging the arm patterns: `41 | 52`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms.rs:26:9
+ |
+LL | 52 => 2, //~ ERROR match arms have same body
+ | ^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:32:14
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms.rs:32:9
|
LL | 2 => 2, //~ ERROR 2nd matched arms have same body
- | ^
- |
-note: same as this
- --> $DIR/match_same_arms.rs:31:14
+ | -^^^^^
+ | |
+ | help: try merging the arm patterns: `2 | 1`
|
-LL | 1 => 2,
- | ^
-help: consider refactoring into `1 | 2`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms.rs:31:9
|
LL | 1 => 2,
- | ^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:33:14
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms.rs:33:9
|
LL | 3 => 2, //~ ERROR 3rd matched arms have same body
- | ^
- |
-note: same as this
- --> $DIR/match_same_arms.rs:31:14
+ | -^^^^^
+ | |
+ | help: try merging the arm patterns: `3 | 1`
|
-LL | 1 => 2,
- | ^
-help: consider refactoring into `1 | 3`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms.rs:31:9
|
LL | 1 => 2,
- | ^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms.rs:50:55
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms.rs:32:9
|
-LL | CommandInfo::External { name, .. } => name.to_string(),
- | ^^^^^^^^^^^^^^^^
+LL | 2 => 2, //~ ERROR 2nd matched arms have same body
+ | -^^^^^
+ | |
+ | help: try merging the arm patterns: `2 | 3`
|
-note: same as this
- --> $DIR/match_same_arms.rs:49:54
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms.rs:33:9
|
-LL | CommandInfo::BuiltIn { name, .. } => name.to_string(),
- | ^^^^^^^^^^^^^^^^
-help: consider refactoring into `CommandInfo::BuiltIn { name, .. } | CommandInfo::External { name, .. }`
+LL | 3 => 2, //~ ERROR 3rd matched arms have same body
+ | ^^^^^^
+
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms.rs:50:17
+ |
+LL | CommandInfo::External { name, .. } => name.to_string(),
+ | ----------------------------------^^^^^^^^^^^^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `CommandInfo::External { name, .. } | CommandInfo::BuiltIn { name, .. }`
+ |
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms.rs:49:17
|
LL | CommandInfo::BuiltIn { name, .. } => name.to_string(),
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: aborting due to 7 previous errors
+error: aborting due to 8 previous errors
Some(2) => 2,
_ => 1,
};
+
+ enum Foo {
+ X(u32),
+ Y(u32),
+ Z(u32),
+ }
+
+ // Don't lint. `Foo::X(0)` and `Foo::Z(_)` overlap with the arm in between.
+ let _ = match Foo::X(0) {
+ Foo::X(0) => 1,
+ Foo::X(_) | Foo::Y(_) | Foo::Z(0) => 2,
+ Foo::Z(_) => 1,
+ _ => 0,
+ };
+
+ // Suggest moving `Foo::Z(_)` up.
+ let _ = match Foo::X(0) {
+ Foo::X(0) => 1,
+ Foo::X(_) | Foo::Y(_) => 2,
+ Foo::Z(_) => 1,
+ _ => 0,
+ };
+
+ // Suggest moving `Foo::X(0)` down.
+ let _ = match Foo::X(0) {
+ Foo::X(0) => 1,
+ Foo::Y(_) | Foo::Z(0) => 2,
+ Foo::Z(_) => 1,
+ _ => 0,
+ };
+
+ // Don't lint.
+ let _ = match 0 {
+ -2 => 1,
+ -5..=50 => 2,
+ -150..=88 => 1,
+ _ => 3,
+ };
+
+ struct Bar {
+ x: u32,
+ y: u32,
+ z: u32,
+ }
+
+ // Lint.
+ let _ = match None {
+ Some(Bar { x: 0, y: 5, .. }) => 1,
+ Some(Bar { y: 10, z: 0, .. }) => 2,
+ None => 50,
+ Some(Bar { y: 0, x: 5, .. }) => 1,
+ _ => 200,
+ };
}
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:20:14
+error: this match arm has an identical body to the `_` wildcard arm
+ --> $DIR/match_same_arms2.rs:11:9
|
-LL | _ => {
- | ______________^
-LL | | //~ ERROR match arms have same body
+LL | / 42 => {
LL | | foo();
LL | | let mut a = 42 + [23].len() as i32;
+LL | | if true {
... |
LL | | a
LL | | },
- | |_________^
+ | |_________^ help: try removing the arm
|
= note: `-D clippy::match-same-arms` implied by `-D warnings`
-note: same as this
- --> $DIR/match_same_arms2.rs:11:15
+ = help: or try changing either arm body
+note: `_` wildcard arm here
+ --> $DIR/match_same_arms2.rs:20:9
|
-LL | 42 => {
- | _______________^
-LL | | foo();
-LL | | let mut a = 42 + [23].len() as i32;
-LL | | if true {
-... |
-LL | | a
-LL | | },
- | |_________^
-note: `42` has the same arm body as the `_` wildcard, consider removing it
- --> $DIR/match_same_arms2.rs:11:15
- |
-LL | 42 => {
- | _______________^
+LL | / _ => {
+LL | | //~ ERROR match arms have same body
LL | | foo();
LL | | let mut a = 42 + [23].len() as i32;
-LL | | if true {
... |
LL | | a
LL | | },
| |_________^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:34:15
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:34:9
|
LL | 51 => foo(), //~ ERROR match arms have same body
- | ^^^^^
+ | --^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `51 | 42`
|
-note: same as this
- --> $DIR/match_same_arms2.rs:33:15
- |
-LL | 42 => foo(),
- | ^^^^^
-help: consider refactoring into `42 | 51`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms2.rs:33:9
|
LL | 42 => foo(),
- | ^^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:40:17
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:40:9
|
LL | None => 24, //~ ERROR match arms have same body
- | ^^
+ | ----^^^^^^
+ | |
+ | help: try merging the arm patterns: `None | Some(_)`
|
-note: same as this
- --> $DIR/match_same_arms2.rs:39:20
- |
-LL | Some(_) => 24,
- | ^^
-help: consider refactoring into `Some(_) | None`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms2.rs:39:9
|
LL | Some(_) => 24,
- | ^^^^^^^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:62:28
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:62:9
|
LL | (None, Some(a)) => bar(a), //~ ERROR match arms have same body
- | ^^^^^^
- |
-note: same as this
- --> $DIR/match_same_arms2.rs:61:28
+ | ---------------^^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `(None, Some(a)) | (Some(a), None)`
|
-LL | (Some(a), None) => bar(a),
- | ^^^^^^
-help: consider refactoring into `(Some(a), None) | (None, Some(a))`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms2.rs:61:9
|
LL | (Some(a), None) => bar(a),
- | ^^^^^^^^^^^^^^^
- = help: ...or consider changing the match arm bodies
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:68:26
- |
-LL | (.., Some(a)) => bar(a), //~ ERROR match arms have same body
- | ^^^^^^
- |
-note: same as this
- --> $DIR/match_same_arms2.rs:67:26
- |
-LL | (Some(a), ..) => bar(a),
- | ^^^^^^
-help: consider refactoring into `(Some(a), ..) | (.., Some(a))`
+error: this match arm has an identical body to another arm
--> $DIR/match_same_arms2.rs:67:9
|
LL | (Some(a), ..) => bar(a),
- | ^^^^^^^^^^^^^
- = help: ...or consider changing the match arm bodies
-
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:102:29
- |
-LL | (Ok(_), Some(x)) => println!("ok {}", x),
- | ^^^^^^^^^^^^^^^^^^^^
+ | -------------^^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `(Some(a), ..) | (.., Some(a))`
|
-note: same as this
- --> $DIR/match_same_arms2.rs:101:29
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:68:9
|
-LL | (Ok(x), Some(_)) => println!("ok {}", x),
- | ^^^^^^^^^^^^^^^^^^^^
-help: consider refactoring into `(Ok(x), Some(_)) | (Ok(_), Some(x))`
+LL | (.., Some(a)) => bar(a), //~ ERROR match arms have same body
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+
+error: this match arm has an identical body to another arm
--> $DIR/match_same_arms2.rs:101:9
|
LL | (Ok(x), Some(_)) => println!("ok {}", x),
- | ^^^^^^^^^^^^^^^^
- = help: ...or consider changing the match arm bodies
- = note: this error originates in the macro `println` (in Nightly builds, run with -Z macro-backtrace for more info)
+ | ----------------^^^^^^^^^^^^^^^^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `(Ok(x), Some(_)) | (Ok(_), Some(x))`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:102:9
+ |
+LL | (Ok(_), Some(x)) => println!("ok {}", x),
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:117:18
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:117:9
|
LL | Ok(_) => println!("ok"),
- | ^^^^^^^^^^^^^^
- |
-note: same as this
- --> $DIR/match_same_arms2.rs:116:18
+ | -----^^^^^^^^^^^^^^^^^^
+ | |
+ | help: try merging the arm patterns: `Ok(_) | Ok(3)`
|
-LL | Ok(3) => println!("ok"),
- | ^^^^^^^^^^^^^^
-help: consider refactoring into `Ok(3) | Ok(_)`
+ = help: or try changing either arm body
+note: other arm here
--> $DIR/match_same_arms2.rs:116:9
|
LL | Ok(3) => println!("ok"),
- | ^^^^^
- = help: ...or consider changing the match arm bodies
- = note: this error originates in the macro `println` (in Nightly builds, run with -Z macro-backtrace for more info)
+ | ^^^^^^^^^^^^^^^^^^^^^^^
-error: this `match` has identical arm bodies
- --> $DIR/match_same_arms2.rs:144:14
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:144:9
|
LL | 1 => {
- | ______________^
+ | ^ help: try merging the arm patterns: `1 | 0`
+ | _________|
+ | |
LL | | empty!(0);
LL | | },
| |_________^
|
-note: same as this
- --> $DIR/match_same_arms2.rs:141:14
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:141:9
|
-LL | 0 => {
- | ______________^
+LL | / 0 => {
LL | | empty!(0);
LL | | },
| |_________^
-help: consider refactoring into `0 | 1`
- --> $DIR/match_same_arms2.rs:141:9
- |
-LL | 0 => {
- | ^
- = help: ...or consider changing the match arm bodies
error: match expression looks like `matches!` macro
--> $DIR/match_same_arms2.rs:162:16
|
= note: `-D clippy::match-like-matches-macro` implied by `-D warnings`
-error: aborting due to 9 previous errors
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:194:9
+ |
+LL | Foo::X(0) => 1,
+ | ---------^^^^^
+ | |
+ | help: try merging the arm patterns: `Foo::X(0) | Foo::Z(_)`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:196:9
+ |
+LL | Foo::Z(_) => 1,
+ | ^^^^^^^^^^^^^^
+
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:204:9
+ |
+LL | Foo::Z(_) => 1,
+ | ---------^^^^^
+ | |
+ | help: try merging the arm patterns: `Foo::Z(_) | Foo::X(0)`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:202:9
+ |
+LL | Foo::X(0) => 1,
+ | ^^^^^^^^^^^^^^
+
+error: this match arm has an identical body to another arm
+ --> $DIR/match_same_arms2.rs:227:9
+ |
+LL | Some(Bar { y: 0, x: 5, .. }) => 1,
+ | ----------------------------^^^^^
+ | |
+ | help: try merging the arm patterns: `Some(Bar { y: 0, x: 5, .. }) | Some(Bar { x: 0, y: 5, .. })`
+ |
+ = help: or try changing either arm body
+note: other arm here
+ --> $DIR/match_same_arms2.rs:224:9
+ |
+LL | Some(Bar { x: 0, y: 5, .. }) => 1,
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 12 previous errors
--- /dev/null
+// run-rustfix
+#![warn(clippy::missing_spin_loop)]
+#![allow(clippy::bool_comparison)]
+#![allow(unused_braces)]
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+fn main() {
+ let b = AtomicBool::new(true);
+ // Those should lint
+ while b.load(Ordering::Acquire) { std::hint::spin_loop() }
+
+ while !b.load(Ordering::SeqCst) { std::hint::spin_loop() }
+
+ while b.load(Ordering::Acquire) == false { std::hint::spin_loop() }
+
+ while { true == b.load(Ordering::Acquire) } { std::hint::spin_loop() }
+
+ while b.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) != Ok(true) { std::hint::spin_loop() }
+
+ while Ok(false) != b.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) { std::hint::spin_loop() }
+
+ // This is OK, as the body is not empty
+ while b.load(Ordering::Acquire) {
+ std::hint::spin_loop()
+ }
+ // TODO: also match on loop+match or while let
+}
--- /dev/null
+// run-rustfix
+#![warn(clippy::missing_spin_loop)]
+#![allow(clippy::bool_comparison)]
+#![allow(unused_braces)]
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+fn main() {
+ let b = AtomicBool::new(true);
+ // Those should lint
+ while b.load(Ordering::Acquire) {}
+
+ while !b.load(Ordering::SeqCst) {}
+
+ while b.load(Ordering::Acquire) == false {}
+
+ while { true == b.load(Ordering::Acquire) } {}
+
+ while b.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) != Ok(true) {}
+
+ while Ok(false) != b.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) {}
+
+ // This is OK, as the body is not empty
+ while b.load(Ordering::Acquire) {
+ std::hint::spin_loop()
+ }
+ // TODO: also match on loop+match or while let
+}
--- /dev/null
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:11:37
+ |
+LL | while b.load(Ordering::Acquire) {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+ |
+ = note: `-D clippy::missing-spin-loop` implied by `-D warnings`
+
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:13:37
+ |
+LL | while !b.load(Ordering::SeqCst) {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:15:46
+ |
+LL | while b.load(Ordering::Acquire) == false {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:17:49
+ |
+LL | while { true == b.load(Ordering::Acquire) } {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:19:93
+ |
+LL | while b.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed) != Ok(true) {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop.rs:21:94
+ |
+LL | while Ok(false) != b.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) {}
+ | ^^ help: try this: `{ std::hint::spin_loop() }`
+
+error: aborting due to 6 previous errors
+
--- /dev/null
+// run-rustfix
+#![warn(clippy::missing_spin_loop)]
+#![feature(lang_items, start, libc)]
+#![no_std]
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ // This should trigger the lint
+ let b = AtomicBool::new(true);
+ // This should lint with `core::hint::spin_loop()`
+ while b.load(Ordering::Acquire) { core::hint::spin_loop() }
+ 0
+}
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+ loop {}
+}
+
+#[lang = "eh_personality"]
+extern "C" fn eh_personality() {}
--- /dev/null
+// run-rustfix
+#![warn(clippy::missing_spin_loop)]
+#![feature(lang_items, start, libc)]
+#![no_std]
+
+use core::sync::atomic::{AtomicBool, Ordering};
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ // This should trigger the lint
+ let b = AtomicBool::new(true);
+ // This should lint with `core::hint::spin_loop()`
+ while b.load(Ordering::Acquire) {}
+ 0
+}
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+ loop {}
+}
+
+#[lang = "eh_personality"]
+extern "C" fn eh_personality() {}
--- /dev/null
+error: busy-waiting loop should at least have a spin loop hint
+ --> $DIR/missing_spin_loop_no_std.rs:13:37
+ |
+LL | while b.load(Ordering::Acquire) {}
+ | ^^ help: try this: `{ core::hint::spin_loop() }`
+ |
+ = note: `-D clippy::missing-spin-loop` implied by `-D warnings`
+
+error: aborting due to previous error
+
--- /dev/null
+// run-rustfix
+#![warn(clippy::needless_match)]
+#![allow(clippy::manual_map)]
+#![allow(dead_code)]
+
+#[derive(Clone, Copy)]
+enum Choice {
+ A,
+ B,
+ C,
+ D,
+}
+
+#[allow(unused_mut)]
+fn useless_match() {
+ let mut i = 10;
+ let _: i32 = i;
+ let _: i32 = i;
+ let mut _i_mut = i;
+
+ let s = "test";
+ let _: &str = s;
+}
+
+fn custom_type_match(se: Choice) {
+ let _: Choice = se;
+ // Don't trigger
+ let _: Choice = match se {
+ Choice::A => Choice::A,
+ Choice::B => Choice::B,
+ _ => Choice::C,
+ };
+ // Mingled, don't trigger
+ let _: Choice = match se {
+ Choice::A => Choice::B,
+ Choice::B => Choice::C,
+ Choice::C => Choice::D,
+ Choice::D => Choice::A,
+ };
+}
+
+fn option_match(x: Option<i32>) {
+ let _: Option<i32> = x;
+ // Don't trigger, this is the case for manual_map_option
+ let _: Option<i32> = match x {
+ Some(a) => Some(-a),
+ None => None,
+ };
+}
+
+fn func_ret_err<T>(err: T) -> Result<i32, T> {
+ Err(err)
+}
+
+fn result_match() {
+ let _: Result<i32, i32> = Ok(1);
+ let _: Result<i32, i32> = func_ret_err(0_i32);
+}
+
+fn if_let_option() -> Option<i32> {
+ Some(1)
+}
+
+fn if_let_result(x: Result<(), i32>) {
+ let _: Result<(), i32> = x;
+ let _: Result<(), i32> = x;
+ // Input type mismatch, don't trigger
+ let _: Result<(), i32> = if let Err(e) = Ok(1) { Err(e) } else { x };
+}
+
+fn if_let_custom_enum(x: Choice) {
+ let _: Choice = x;
+ // Don't trigger
+ let _: Choice = if let Choice::A = x {
+ Choice::A
+ } else if true {
+ Choice::B
+ } else {
+ x
+ };
+}
+
+fn main() {}
--- /dev/null
+// run-rustfix
+#![warn(clippy::needless_match)]
+#![allow(clippy::manual_map)]
+#![allow(dead_code)]
+
+#[derive(Clone, Copy)]
+enum Choice {
+ A,
+ B,
+ C,
+ D,
+}
+
+#[allow(unused_mut)]
+fn useless_match() {
+ let mut i = 10;
+ let _: i32 = match i {
+ 0 => 0,
+ 1 => 1,
+ 2 => 2,
+ _ => i,
+ };
+ let _: i32 = match i {
+ 0 => 0,
+ 1 => 1,
+ ref i => *i,
+ };
+ let mut _i_mut = match i {
+ 0 => 0,
+ 1 => 1,
+ ref mut i => *i,
+ };
+
+ let s = "test";
+ let _: &str = match s {
+ "a" => "a",
+ "b" => "b",
+ s => s,
+ };
+}
+
+fn custom_type_match(se: Choice) {
+ let _: Choice = match se {
+ Choice::A => Choice::A,
+ Choice::B => Choice::B,
+ Choice::C => Choice::C,
+ Choice::D => Choice::D,
+ };
+ // Don't trigger
+ let _: Choice = match se {
+ Choice::A => Choice::A,
+ Choice::B => Choice::B,
+ _ => Choice::C,
+ };
+ // Mingled, don't trigger
+ let _: Choice = match se {
+ Choice::A => Choice::B,
+ Choice::B => Choice::C,
+ Choice::C => Choice::D,
+ Choice::D => Choice::A,
+ };
+}
+
+fn option_match(x: Option<i32>) {
+ let _: Option<i32> = match x {
+ Some(a) => Some(a),
+ None => None,
+ };
+ // Don't trigger, this is the case for manual_map_option
+ let _: Option<i32> = match x {
+ Some(a) => Some(-a),
+ None => None,
+ };
+}
+
+fn func_ret_err<T>(err: T) -> Result<i32, T> {
+ Err(err)
+}
+
+fn result_match() {
+ let _: Result<i32, i32> = match Ok(1) {
+ Ok(a) => Ok(a),
+ Err(err) => Err(err),
+ };
+ let _: Result<i32, i32> = match func_ret_err(0_i32) {
+ Err(err) => Err(err),
+ Ok(a) => Ok(a),
+ };
+}
+
+fn if_let_option() -> Option<i32> {
+ if let Some(a) = Some(1) { Some(a) } else { None }
+}
+
+fn if_let_result(x: Result<(), i32>) {
+ let _: Result<(), i32> = if let Err(e) = x { Err(e) } else { x };
+ let _: Result<(), i32> = if let Ok(val) = x { Ok(val) } else { x };
+ // Input type mismatch, don't trigger
+ let _: Result<(), i32> = if let Err(e) = Ok(1) { Err(e) } else { x };
+}
+
+fn if_let_custom_enum(x: Choice) {
+ let _: Choice = if let Choice::A = x {
+ Choice::A
+ } else if let Choice::B = x {
+ Choice::B
+ } else if let Choice::C = x {
+ Choice::C
+ } else {
+ x
+ };
+ // Don't trigger
+ let _: Choice = if let Choice::A = x {
+ Choice::A
+ } else if true {
+ Choice::B
+ } else {
+ x
+ };
+}
+
+fn main() {}
--- /dev/null
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:17:18
+ |
+LL | let _: i32 = match i {
+ | __________________^
+LL | | 0 => 0,
+LL | | 1 => 1,
+LL | | 2 => 2,
+LL | | _ => i,
+LL | | };
+ | |_____^ help: replace it with: `i`
+ |
+ = note: `-D clippy::needless-match` implied by `-D warnings`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:23:18
+ |
+LL | let _: i32 = match i {
+ | __________________^
+LL | | 0 => 0,
+LL | | 1 => 1,
+LL | | ref i => *i,
+LL | | };
+ | |_____^ help: replace it with: `i`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:28:22
+ |
+LL | let mut _i_mut = match i {
+ | ______________________^
+LL | | 0 => 0,
+LL | | 1 => 1,
+LL | | ref mut i => *i,
+LL | | };
+ | |_____^ help: replace it with: `i`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:35:19
+ |
+LL | let _: &str = match s {
+ | ___________________^
+LL | | "a" => "a",
+LL | | "b" => "b",
+LL | | s => s,
+LL | | };
+ | |_____^ help: replace it with: `s`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:43:21
+ |
+LL | let _: Choice = match se {
+ | _____________________^
+LL | | Choice::A => Choice::A,
+LL | | Choice::B => Choice::B,
+LL | | Choice::C => Choice::C,
+LL | | Choice::D => Choice::D,
+LL | | };
+ | |_____^ help: replace it with: `se`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:65:26
+ |
+LL | let _: Option<i32> = match x {
+ | __________________________^
+LL | | Some(a) => Some(a),
+LL | | None => None,
+LL | | };
+ | |_____^ help: replace it with: `x`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:81:31
+ |
+LL | let _: Result<i32, i32> = match Ok(1) {
+ | _______________________________^
+LL | | Ok(a) => Ok(a),
+LL | | Err(err) => Err(err),
+LL | | };
+ | |_____^ help: replace it with: `Ok(1)`
+
+error: this match expression is unnecessary
+ --> $DIR/needless_match.rs:85:31
+ |
+LL | let _: Result<i32, i32> = match func_ret_err(0_i32) {
+ | _______________________________^
+LL | | Err(err) => Err(err),
+LL | | Ok(a) => Ok(a),
+LL | | };
+ | |_____^ help: replace it with: `func_ret_err(0_i32)`
+
+error: this if-let expression is unnecessary
+ --> $DIR/needless_match.rs:92:5
+ |
+LL | if let Some(a) = Some(1) { Some(a) } else { None }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace it with: `Some(1)`
+
+error: this if-let expression is unnecessary
+ --> $DIR/needless_match.rs:96:30
+ |
+LL | let _: Result<(), i32> = if let Err(e) = x { Err(e) } else { x };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace it with: `x`
+
+error: this if-let expression is unnecessary
+ --> $DIR/needless_match.rs:97:30
+ |
+LL | let _: Result<(), i32> = if let Ok(val) = x { Ok(val) } else { x };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace it with: `x`
+
+error: this if-let expression is unnecessary
+ --> $DIR/needless_match.rs:103:21
+ |
+LL | let _: Choice = if let Choice::A = x {
+ | _____________________^
+LL | | Choice::A
+LL | | } else if let Choice::B = x {
+LL | | Choice::B
+... |
+LL | | x
+LL | | };
+ | |_____^ help: replace it with: `x`
+
+error: aborting due to 12 previous errors
+
--- /dev/null
+#![warn(clippy::only_used_in_recursion)]
+
+fn simple(a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { simple(a - 1, b) }
+}
+
+fn with_calc(a: usize, b: isize) -> usize {
+ if a == 0 { 1 } else { with_calc(a - 1, -b + 1) }
+}
+
+fn tuple((a, b): (usize, usize)) -> usize {
+ if a == 0 { 1 } else { tuple((a - 1, b + 1)) }
+}
+
+fn let_tuple(a: usize, b: usize) -> usize {
+ let (c, d) = (a, b);
+ if c == 0 { 1 } else { let_tuple(c - 1, d + 1) }
+}
+
+fn array([a, b]: [usize; 2]) -> usize {
+ if a == 0 { 1 } else { array([a - 1, b + 1]) }
+}
+
+fn index(a: usize, mut b: &[usize], c: usize) -> usize {
+ if a == 0 { 1 } else { index(a - 1, b, c + b[0]) }
+}
+
+fn break_(a: usize, mut b: usize, mut c: usize) -> usize {
+ let c = loop {
+ b += 1;
+ c += 1;
+ if c == 10 {
+ break b;
+ }
+ };
+
+ if a == 0 { 1 } else { break_(a - 1, c, c) }
+}
+
+// this has a side effect
+fn mut_ref(a: usize, b: &mut usize) -> usize {
+ *b = 1;
+ if a == 0 { 1 } else { mut_ref(a - 1, b) }
+}
+
+fn mut_ref2(a: usize, b: &mut usize) -> usize {
+ let mut c = *b;
+ if a == 0 { 1 } else { mut_ref2(a - 1, &mut c) }
+}
+
+fn not_primitive(a: usize, b: String) -> usize {
+ if a == 0 { 1 } else { not_primitive(a - 1, b) }
+}
+
+// this doesn't have a side effect,
+// but `String` is not primitive.
+fn not_primitive_op(a: usize, b: String, c: &str) -> usize {
+ if a == 1 { 1 } else { not_primitive_op(a, b + c, c) }
+}
+
+struct A;
+
+impl A {
+ fn method(a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { A::method(a - 1, b - 1) }
+ }
+
+ fn method2(&self, a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { self.method2(a - 1, b + 1) }
+ }
+}
+
+trait B {
+ fn hello(a: usize, b: usize) -> usize;
+
+ fn hello2(&self, a: usize, b: usize) -> usize;
+}
+
+impl B for A {
+ fn hello(a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { A::hello(a - 1, b + 1) }
+ }
+
+ fn hello2(&self, a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { self.hello2(a - 1, b + 1) }
+ }
+}
+
+trait C {
+ fn hello(a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { Self::hello(a - 1, b + 1) }
+ }
+
+ fn hello2(&self, a: usize, b: usize) -> usize {
+ if a == 0 { 1 } else { self.hello2(a - 1, b + 1) }
+ }
+}
+
+fn ignore(a: usize, _: usize) -> usize {
+ if a == 1 { 1 } else { ignore(a - 1, 0) }
+}
+
+fn ignore2(a: usize, _b: usize) -> usize {
+ if a == 1 { 1 } else { ignore2(a - 1, _b) }
+}
+
+fn f1(a: u32) -> u32 {
+ a
+}
+
+fn f2(a: u32) -> u32 {
+ f1(a)
+}
+
+fn inner_fn(a: u32) -> u32 {
+ fn inner_fn(a: u32) -> u32 {
+ a
+ }
+ inner_fn(a)
+}
+
+fn main() {}
--- /dev/null
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:3:21
+ |
+LL | fn simple(a: usize, b: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+ |
+ = note: `-D clippy::only-used-in-recursion` implied by `-D warnings`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:7:24
+ |
+LL | fn with_calc(a: usize, b: isize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:11:14
+ |
+LL | fn tuple((a, b): (usize, usize)) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:15:24
+ |
+LL | fn let_tuple(a: usize, b: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:20:14
+ |
+LL | fn array([a, b]: [usize; 2]) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:24:20
+ |
+LL | fn index(a: usize, mut b: &[usize], c: usize) -> usize {
+ | ^^^^^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:24:37
+ |
+LL | fn index(a: usize, mut b: &[usize], c: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_c`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:28:21
+ |
+LL | fn break_(a: usize, mut b: usize, mut c: usize) -> usize {
+ | ^^^^^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:46:23
+ |
+LL | fn mut_ref2(a: usize, b: &mut usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:51:28
+ |
+LL | fn not_primitive(a: usize, b: String) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:68:33
+ |
+LL | fn method2(&self, a: usize, b: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:90:24
+ |
+LL | fn hello(a: usize, b: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: parameter is only used in recursion
+ --> $DIR/only_used_in_recursion.rs:94:32
+ |
+LL | fn hello2(&self, a: usize, b: usize) -> usize {
+ | ^ help: if this is intentional, prefix with an underscore: `_b`
+
+error: aborting due to 13 previous errors
+
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::or_then_unwrap)]
+#![allow(clippy::map_identity)]
+
+struct SomeStruct {}
+impl SomeStruct {
+ fn or(self, _: Option<Self>) -> Self {
+ self
+ }
+ fn unwrap(&self) {}
+}
+
+struct SomeOtherStruct {}
+impl SomeOtherStruct {
+ fn or(self) -> Self {
+ self
+ }
+ fn unwrap(&self) {}
+}
+
+fn main() {
+ let option: Option<&str> = None;
+ let _ = option.unwrap_or("fallback"); // should trigger lint
+
+ let result: Result<&str, &str> = Err("Error");
+ let _ = result.unwrap_or("fallback"); // should trigger lint
+
+ // as part of a method chain
+ let option: Option<&str> = None;
+ let _ = option.map(|v| v).unwrap_or("fallback").to_string().chars(); // should trigger lint
+
+ // Not Option/Result
+ let instance = SomeStruct {};
+ let _ = instance.or(Some(SomeStruct {})).unwrap(); // should not trigger lint
+
+ // or takes no argument
+ let instance = SomeOtherStruct {};
+ let _ = instance.or().unwrap(); // should not trigger lint and should not panic
+
+ // None in or
+ let option: Option<&str> = None;
+ let _ = option.or(None).unwrap(); // should not trigger lint
+
+ // Not Err in or
+ let result: Result<&str, &str> = Err("Error");
+ let _ = result.or::<&str>(Err("Other Error")).unwrap(); // should not trigger lint
+
+ // other function between
+ let option: Option<&str> = None;
+ let _ = option.or(Some("fallback")).map(|v| v).unwrap(); // should not trigger lint
+}
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::or_then_unwrap)]
+#![allow(clippy::map_identity)]
+
+struct SomeStruct {}
+impl SomeStruct {
+ fn or(self, _: Option<Self>) -> Self {
+ self
+ }
+ fn unwrap(&self) {}
+}
+
+struct SomeOtherStruct {}
+impl SomeOtherStruct {
+ fn or(self) -> Self {
+ self
+ }
+ fn unwrap(&self) {}
+}
+
+fn main() {
+ let option: Option<&str> = None;
+ let _ = option.or(Some("fallback")).unwrap(); // should trigger lint
+
+ let result: Result<&str, &str> = Err("Error");
+ let _ = result.or::<&str>(Ok("fallback")).unwrap(); // should trigger lint
+
+ // as part of a method chain
+ let option: Option<&str> = None;
+ let _ = option.map(|v| v).or(Some("fallback")).unwrap().to_string().chars(); // should trigger lint
+
+ // Not Option/Result
+ let instance = SomeStruct {};
+ let _ = instance.or(Some(SomeStruct {})).unwrap(); // should not trigger lint
+
+ // or takes no argument
+ let instance = SomeOtherStruct {};
+ let _ = instance.or().unwrap(); // should not trigger lint and should not panic
+
+ // None in or
+ let option: Option<&str> = None;
+ let _ = option.or(None).unwrap(); // should not trigger lint
+
+ // Not Err in or
+ let result: Result<&str, &str> = Err("Error");
+ let _ = result.or::<&str>(Err("Other Error")).unwrap(); // should not trigger lint
+
+ // other function between
+ let option: Option<&str> = None;
+ let _ = option.or(Some("fallback")).map(|v| v).unwrap(); // should not trigger lint
+}
--- /dev/null
+error: found `.or(Some(…)).unwrap()`
+ --> $DIR/or_then_unwrap.rs:24:20
+ |
+LL | let _ = option.or(Some("fallback")).unwrap(); // should trigger lint
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try this: `unwrap_or("fallback")`
+ |
+ = note: `-D clippy::or-then-unwrap` implied by `-D warnings`
+
+error: found `.or(Ok(…)).unwrap()`
+ --> $DIR/or_then_unwrap.rs:27:20
+ |
+LL | let _ = result.or::<&str>(Ok("fallback")).unwrap(); // should trigger lint
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try this: `unwrap_or("fallback")`
+
+error: found `.or(Some(…)).unwrap()`
+ --> $DIR/or_then_unwrap.rs:31:31
+ |
+LL | let _ = option.map(|v| v).or(Some("fallback")).unwrap().to_string().chars(); // should trigger lint
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try this: `unwrap_or("fallback")`
+
+error: aborting due to 3 previous errors
+
a.push(0);
b.push(1);
}
+
+// Issue #8495
+fn cow_conditional_to_mut(a: &mut Cow<str>) {
+ if a.is_empty() {
+ a.to_mut().push_str("foo");
+ }
+}
// run-rustfix
#![allow(unused_parens)]
-
+#![allow(clippy::iter_with_drain)]
fn f() -> usize {
42
}
// run-rustfix
#![allow(unused_parens)]
-
+#![allow(clippy::iter_with_drain)]
fn f() -> usize {
42
}
+++ /dev/null
-// run-rustfix
-#![warn(clippy::single_component_path_imports)]
-#![allow(unused_imports)]
-
-// #7106: use statements exporting a macro within a crate should not trigger lint
-
-macro_rules! m1 {
- () => {};
-}
-pub(crate) use m1; // ok
-
-macro_rules! m2 {
- () => {};
-}
- // fail
-
-fn main() {
- m1!();
- m2!();
-}
-// run-rustfix
#![warn(clippy::single_component_path_imports)]
#![allow(unused_imports)]
// #7106: use statements exporting a macro within a crate should not trigger lint
+// #7923: normal `use` statements of macros should also not trigger the lint
macro_rules! m1 {
() => {};
macro_rules! m2 {
() => {};
}
-use m2; // fail
+use m2; // ok
fn main() {
m1!();
+++ /dev/null
-error: this import is redundant
- --> $DIR/single_component_path_imports_macro.rs:15:1
- |
-LL | use m2; // fail
- | ^^^^^^^ help: remove it entirely
- |
- = note: `-D clippy::single-component-path-imports` implied by `-D warnings`
-
-error: aborting due to previous error
-
#![warn(clippy::transmute_undefined_repr)]
#![allow(clippy::unit_arg, clippy::transmute_ptr_to_ref)]
+use core::any::TypeId;
use core::ffi::c_void;
-use core::mem::{size_of, transmute};
+use core::mem::{size_of, transmute, MaybeUninit};
fn value<T>() -> T {
unimplemented!()
let _: *const [u8] = transmute(value::<Box<[u8]>>()); // Ok
let _: Box<[u8]> = transmute(value::<*mut [u8]>()); // Ok
+
+ let _: Ty2<u32, u32> = transmute(value::<(Ty2<u32, u32>,)>()); // Ok
+ let _: (Ty2<u32, u32>,) = transmute(value::<Ty2<u32, u32>>()); // Ok
+
+ let _: Ty2<u32, u32> = transmute(value::<(Ty2<u32, u32>, ())>()); // Ok
+ let _: (Ty2<u32, u32>, ()) = transmute(value::<Ty2<u32, u32>>()); // Ok
+
+ let _: Ty2<u32, u32> = transmute(value::<((), Ty2<u32, u32>)>()); // Ok
+ let _: ((), Ty2<u32, u32>) = transmute(value::<Ty2<u32, u32>>()); // Ok
+
+ let _: (usize, usize) = transmute(value::<&[u8]>()); // Ok
+ let _: &[u8] = transmute(value::<(usize, usize)>()); // Ok
+
+ trait Trait {}
+ let _: (isize, isize) = transmute(value::<&dyn Trait>()); // Ok
+ let _: &dyn Trait = transmute(value::<(isize, isize)>()); // Ok
+
+ let _: MaybeUninit<Ty2<u32, u32>> = transmute(value::<Ty2<u32, u32>>()); // Ok
+ let _: Ty2<u32, u32> = transmute(value::<MaybeUninit<Ty2<u32, u32>>>()); // Ok
+
+ let _: Ty<&[u32]> = transmute::<&[u32], _>(value::<&Vec<u32>>()); // Ok
+ }
+}
+
+fn _with_generics<T: 'static, U: 'static>() {
+ if TypeId::of::<T>() != TypeId::of::<u32>() || TypeId::of::<T>() != TypeId::of::<U>() {
+ return;
+ }
+ unsafe {
+ let _: &u32 = transmute(value::<&T>()); // Ok
+ let _: &T = transmute(value::<&u32>()); // Ok
+
+ let _: Vec<U> = transmute(value::<Vec<T>>()); // Ok
+ let _: Vec<T> = transmute(value::<Vec<U>>()); // Ok
+
+ let _: Ty<&u32> = transmute(value::<&T>()); // Ok
+ let _: Ty<&T> = transmute(value::<&u32>()); // Ok
+
+ let _: Vec<u32> = transmute(value::<Vec<T>>()); // Ok
+ let _: Vec<T> = transmute(value::<Vec<u32>>()); // Ok
+
+ let _: &Ty2<u32, u32> = transmute(value::<&Ty2<T, U>>()); // Ok
+ let _: &Ty2<T, U> = transmute(value::<&Ty2<u32, u32>>()); // Ok
+
+ let _: Vec<Vec<u32>> = transmute(value::<Vec<Vec<T>>>()); // Ok
+ let _: Vec<Vec<T>> = transmute(value::<Vec<Vec<u32>>>()); // Ok
+
+ let _: Vec<Ty2<T, u32>> = transmute(value::<Vec<Ty2<U, i32>>>()); // Err
+ let _: Vec<Ty2<U, i32>> = transmute(value::<Vec<Ty2<T, u32>>>()); // Err
+
+ let _: *const u32 = transmute(value::<Box<T>>()); // Ok
+ let _: Box<T> = transmute(value::<*const u32>()); // Ok
}
}
error: transmute from `Ty2<u32, i32>` which has an undefined layout
- --> $DIR/transmute_undefined_repr.rs:26:33
+ --> $DIR/transmute_undefined_repr.rs:27:33
|
LL | let _: Ty2C<u32, i32> = transmute(value::<Ty2<u32, i32>>()); // Lint, Ty2 is unordered
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: `-D clippy::transmute-undefined-repr` implied by `-D warnings`
error: transmute into `Ty2<u32, i32>` which has an undefined layout
- --> $DIR/transmute_undefined_repr.rs:27:32
+ --> $DIR/transmute_undefined_repr.rs:28:32
|
LL | let _: Ty2<u32, i32> = transmute(value::<Ty2C<u32, i32>>()); // Lint, Ty2 is unordered
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
error: transmute from `Ty<Ty2<u32, i32>>` to `Ty2<u32, f32>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:32:32
+ --> $DIR/transmute_undefined_repr.rs:33:32
|
LL | let _: Ty2<u32, f32> = transmute(value::<Ty<Ty2<u32, i32>>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: two instances of the same generic type (`Ty2`) may have different layouts
error: transmute from `Ty2<u32, f32>` to `Ty<Ty2<u32, i32>>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:33:36
+ --> $DIR/transmute_undefined_repr.rs:34:36
|
LL | let _: Ty<Ty2<u32, i32>> = transmute(value::<Ty2<u32, f32>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: two instances of the same generic type (`Ty2`) may have different layouts
error: transmute from `Ty<&Ty2<u32, i32>>` to `&Ty2<u32, f32>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:38:33
+ --> $DIR/transmute_undefined_repr.rs:39:33
|
LL | let _: &Ty2<u32, f32> = transmute(value::<Ty<&Ty2<u32, i32>>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: two instances of the same generic type (`Ty2`) may have different layouts
error: transmute from `&Ty2<u32, f32>` to `Ty<&Ty2<u32, i32>>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:39:37
+ --> $DIR/transmute_undefined_repr.rs:40:37
|
LL | let _: Ty<&Ty2<u32, i32>> = transmute(value::<&Ty2<u32, f32>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: two instances of the same generic type (`Ty2`) may have different layouts
error: transmute from `std::boxed::Box<Ty2<u32, u32>>` to `&mut Ty2<u32, f32>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:56:45
+ --> $DIR/transmute_undefined_repr.rs:57:45
|
LL | let _: &'static mut Ty2<u32, f32> = transmute(value::<Box<Ty2<u32, u32>>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: two instances of the same generic type (`Ty2`) may have different layouts
error: transmute from `&mut Ty2<u32, f32>` to `std::boxed::Box<Ty2<u32, u32>>`, both of which have an undefined layout
- --> $DIR/transmute_undefined_repr.rs:57:37
+ --> $DIR/transmute_undefined_repr.rs:58:37
|
LL | let _: Box<Ty2<u32, u32>> = transmute(value::<&'static mut Ty2<u32, f32>>()); // Lint, different Ty2 instances
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
= note: two instances of the same generic type (`Ty2`) may have different layouts
-error: aborting due to 8 previous errors
+error: transmute from `std::vec::Vec<Ty2<U, i32>>` to `std::vec::Vec<Ty2<T, u32>>`, both of which have an undefined layout
+ --> $DIR/transmute_undefined_repr.rs:138:35
+ |
+LL | let _: Vec<Ty2<T, u32>> = transmute(value::<Vec<Ty2<U, i32>>>()); // Err
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: two instances of the same generic type (`Vec`) may have different layouts
+
+error: transmute from `std::vec::Vec<Ty2<T, u32>>` to `std::vec::Vec<Ty2<U, i32>>`, both of which have an undefined layout
+ --> $DIR/transmute_undefined_repr.rs:139:35
+ |
+LL | let _: Vec<Ty2<U, i32>> = transmute(value::<Vec<Ty2<T, u32>>>()); // Err
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: two instances of the same generic type (`Vec`) may have different layouts
+
+error: aborting due to 10 previous errors
let slice_ptr = &[0, 1, 2, 3] as *const [i32];
// ... or pointer_kind(T) = pointer_kind(U_0); ptr-ptr-cast
- let _ptr_to_unsized_transmute = unsafe { slice_ptr as *const [u16] };
- let _ptr_to_unsized = slice_ptr as *const [u16];
+ let _ptr_to_unsized_transmute = unsafe { slice_ptr as *const [u32] };
+ let _ptr_to_unsized = slice_ptr as *const [u32];
// TODO: We could try testing vtable casts here too, but maybe
// we should wait until std::raw::TraitObject is stabilized?
let slice_ptr = &[0, 1, 2, 3] as *const [i32];
// ... or pointer_kind(T) = pointer_kind(U_0); ptr-ptr-cast
- let _ptr_to_unsized_transmute = unsafe { transmute::<*const [i32], *const [u16]>(slice_ptr) };
- let _ptr_to_unsized = slice_ptr as *const [u16];
+ let _ptr_to_unsized_transmute = unsafe { transmute::<*const [i32], *const [u32]>(slice_ptr) };
+ let _ptr_to_unsized = slice_ptr as *const [u32];
// TODO: We could try testing vtable casts here too, but maybe
// we should wait until std::raw::TraitObject is stabilized?
error: transmute from a pointer to a pointer
--> $DIR/transmutes_expressible_as_ptr_casts.rs:28:46
|
-LL | let _ptr_to_unsized_transmute = unsafe { transmute::<*const [i32], *const [u16]>(slice_ptr) };
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `slice_ptr as *const [u16]`
+LL | let _ptr_to_unsized_transmute = unsafe { transmute::<*const [i32], *const [u32]>(slice_ptr) };
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `slice_ptr as *const [u32]`
error: transmute from `*const i32` to `usize` which could be expressed as a pointer cast instead
--> $DIR/transmutes_expressible_as_ptr_casts.rs:34:50
LL | let _array_ptr_transmute = unsafe { transmute::<&[i32; 4], *const [i32; 4]>(array_ref) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `array_ref as *const [i32; 4]`
-error: transmute from `fn(usize) -> u8 {main::foo}` to `*const usize` which could be expressed as a pointer cast instead
+error: transmute from `fn(usize) -> u8` to `*const usize` which could be expressed as a pointer cast instead
--> $DIR/transmutes_expressible_as_ptr_casts.rs:48:41
|
LL | let _usize_ptr_transmute = unsafe { transmute::<fn(usize) -> u8, *const usize>(foo) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `foo as *const usize`
-error: transmute from `fn(usize) -> u8 {main::foo}` to `usize` which could be expressed as a pointer cast instead
+error: transmute from `fn(usize) -> u8` to `usize` which could be expressed as a pointer cast instead
--> $DIR/transmutes_expressible_as_ptr_casts.rs:52:49
|
LL | let _usize_from_fn_ptr_transmute = unsafe { transmute::<fn(usize) -> u8, usize>(foo) };
+#![allow(dead_code)]
+
fn main() {
let _ = (0..4).filter_map(|x| if x > 1 { Some(x) } else { None });
let _ = (0..4).filter_map(|x| {
fn filter_map_none_changes_item_type() -> impl Iterator<Item = bool> {
"".chars().filter_map(|_| None)
}
+
+// https://github.com/rust-lang/rust-clippy/issues/4433#issue-483920107
+mod comment_483920107 {
+ enum Severity {
+ Warning,
+ Other,
+ }
+
+ struct ServerError;
+
+ impl ServerError {
+ fn severity(&self) -> Severity {
+ Severity::Warning
+ }
+ }
+
+ struct S {
+ warnings: Vec<ServerError>,
+ }
+
+ impl S {
+ fn foo(&mut self, server_errors: Vec<ServerError>) {
+ #[allow(unused_variables)]
+ let errors: Vec<ServerError> = server_errors
+ .into_iter()
+ .filter_map(|se| match se.severity() {
+ Severity::Warning => {
+ self.warnings.push(se);
+ None
+ },
+ _ => Some(se),
+ })
+ .collect();
+ }
+ }
+}
+
+// https://github.com/rust-lang/rust-clippy/issues/4433#issuecomment-611006622
+mod comment_611006622 {
+ struct PendingRequest {
+ reply_to: u8,
+ token: u8,
+ expires: u8,
+ group_id: u8,
+ }
+
+ enum Value {
+ Null,
+ }
+
+ struct Node;
+
+ impl Node {
+ fn send_response(&self, _reply_to: u8, _token: u8, _value: Value) -> &Self {
+ self
+ }
+ fn on_error_warn(&self) -> &Self {
+ self
+ }
+ }
+
+ struct S {
+ pending_requests: Vec<PendingRequest>,
+ }
+
+ impl S {
+ fn foo(&mut self, node: Node, now: u8, group_id: u8) {
+ // "drain_filter"
+ self.pending_requests = self
+ .pending_requests
+ .drain(..)
+ .filter_map(|pending| {
+ if pending.expires <= now {
+ return None; // Expired, remove
+ }
+
+ if pending.group_id == group_id {
+ // Matched - reuse strings and remove
+ node.send_response(pending.reply_to, pending.token, Value::Null)
+ .on_error_warn();
+ None
+ } else {
+ // Keep waiting
+ Some(pending)
+ }
+ })
+ .collect();
+ }
+ }
+}
+
+// https://github.com/rust-lang/rust-clippy/issues/4433#issuecomment-621925270
+// This extrapolation doesn't reproduce the false positive. Additional context seems necessary.
+mod comment_621925270 {
+ struct Signature(u8);
+
+ fn foo(sig_packets: impl Iterator<Item = Result<Signature, ()>>) -> impl Iterator<Item = u8> {
+ sig_packets.filter_map(|res| match res {
+ Ok(Signature(sig_packet)) => Some(sig_packet),
+ _ => None,
+ })
+ }
+}
+
+// https://github.com/rust-lang/rust-clippy/issues/4433#issuecomment-1052978898
+mod comment_1052978898 {
+ #![allow(clippy::redundant_closure)]
+
+ pub struct S(u8);
+
+ impl S {
+ pub fn consume(self) {
+ println!("yum");
+ }
+ }
+
+ pub fn filter_owned() -> impl Iterator<Item = S> {
+ (0..10).map(|i| S(i)).filter_map(|s| {
+ if s.0 & 1 == 0 {
+ s.consume();
+ None
+ } else {
+ Some(s)
+ }
+ })
+ }
+}
error: this `.filter_map` can be written more simply using `.filter`
- --> $DIR/unnecessary_filter_map.rs:2:13
+ --> $DIR/unnecessary_filter_map.rs:4:13
|
LL | let _ = (0..4).filter_map(|x| if x > 1 { Some(x) } else { None });
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
= note: `-D clippy::unnecessary-filter-map` implied by `-D warnings`
error: this `.filter_map` can be written more simply using `.filter`
- --> $DIR/unnecessary_filter_map.rs:3:13
+ --> $DIR/unnecessary_filter_map.rs:5:13
|
LL | let _ = (0..4).filter_map(|x| {
| _____________^
| |______^
error: this `.filter_map` can be written more simply using `.filter`
- --> $DIR/unnecessary_filter_map.rs:9:13
+ --> $DIR/unnecessary_filter_map.rs:11:13
|
LL | let _ = (0..4).filter_map(|x| match x {
| _____________^
| |______^
error: this `.filter_map` can be written more simply using `.map`
- --> $DIR/unnecessary_filter_map.rs:14:13
+ --> $DIR/unnecessary_filter_map.rs:16:13
|
LL | let _ = (0..4).filter_map(|x| Some(x + 1));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
--- /dev/null
+#![allow(dead_code)]
+
+fn main() {
+ let _ = (0..4).find_map(|x| if x > 1 { Some(x) } else { None });
+ let _ = (0..4).find_map(|x| {
+ if x > 1 {
+ return Some(x);
+ };
+ None
+ });
+ let _ = (0..4).find_map(|x| match x {
+ 0 | 1 => None,
+ _ => Some(x),
+ });
+
+ let _ = (0..4).find_map(|x| Some(x + 1));
+
+ let _ = (0..4).find_map(i32::checked_abs);
+}
+
+fn find_map_none_changes_item_type() -> Option<bool> {
+ "".chars().find_map(|_| None)
+}
--- /dev/null
+error: this `.find_map` can be written more simply using `.find`
+ --> $DIR/unnecessary_find_map.rs:4:13
+ |
+LL | let _ = (0..4).find_map(|x| if x > 1 { Some(x) } else { None });
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `-D clippy::unnecessary-find-map` implied by `-D warnings`
+
+error: this `.find_map` can be written more simply using `.find`
+ --> $DIR/unnecessary_find_map.rs:5:13
+ |
+LL | let _ = (0..4).find_map(|x| {
+ | _____________^
+LL | | if x > 1 {
+LL | | return Some(x);
+LL | | };
+LL | | None
+LL | | });
+ | |______^
+
+error: this `.find_map` can be written more simply using `.find`
+ --> $DIR/unnecessary_find_map.rs:11:13
+ |
+LL | let _ = (0..4).find_map(|x| match x {
+ | _____________^
+LL | | 0 | 1 => None,
+LL | | _ => Some(x),
+LL | | });
+ | |______^
+
+error: this `.find_map` can be written more simply using `.map(..).next()`
+ --> $DIR/unnecessary_find_map.rs:16:13
+ |
+LL | let _ = (0..4).find_map(|x| Some(x + 1));
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 4 previous errors
+
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::unnecessary_join)]
+
+fn main() {
+ // should be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<String>();
+ println!("{}", output);
+
+ // should be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<String>();
+ println!("{}", output);
+
+ // should not be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<Vec<String>>()
+ .join("\n");
+ println!("{}", output);
+
+ // should not be linted
+ let vector = vec!["hello", "world"];
+ let output = vector.iter().map(|item| item.to_uppercase()).collect::<String>();
+ println!("{}", output);
+}
--- /dev/null
+// run-rustfix
+
+#![warn(clippy::unnecessary_join)]
+
+fn main() {
+ // should be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<Vec<String>>()
+ .join("");
+ println!("{}", output);
+
+ // should be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<Vec<_>>()
+ .join("");
+ println!("{}", output);
+
+ // should not be linted
+ let vector = vec!["hello", "world"];
+ let output = vector
+ .iter()
+ .map(|item| item.to_uppercase())
+ .collect::<Vec<String>>()
+ .join("\n");
+ println!("{}", output);
+
+ // should not be linted
+ let vector = vec!["hello", "world"];
+ let output = vector.iter().map(|item| item.to_uppercase()).collect::<String>();
+ println!("{}", output);
+}
--- /dev/null
+error: called `.collect<Vec<String>>().join("")` on an iterator
+ --> $DIR/unnecessary_join.rs:11:10
+ |
+LL | .collect::<Vec<String>>()
+ | __________^
+LL | | .join("");
+ | |_________________^ help: try using: `collect::<String>()`
+ |
+ = note: `-D clippy::unnecessary-join` implied by `-D warnings`
+
+error: called `.collect<Vec<String>>().join("")` on an iterator
+ --> $DIR/unnecessary_join.rs:20:10
+ |
+LL | .collect::<Vec<_>>()
+ | __________^
+LL | | .join("");
+ | |_________________^ help: try using: `collect::<String>()`
+
+error: aborting due to 2 previous errors
+
let _: Result<usize, usize> = res.or(Ok(2));
let _: Result<usize, usize> = res.or(Ok(astronomers_pi));
let _: Result<usize, usize> = res.or(Ok(ext_str.some_field));
+ let _: Result<usize, usize> = res.
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ or(Ok(ext_str.some_field));
// neither bind_instead_of_map nor unnecessary_lazy_eval applies here
let _: Result<usize, usize> = res.and_then(|x| Err(x));
let _: Result<usize, usize> = res.or_else(|_| Ok(2));
let _: Result<usize, usize> = res.or_else(|_| Ok(astronomers_pi));
let _: Result<usize, usize> = res.or_else(|_| Ok(ext_str.some_field));
+ let _: Result<usize, usize> = res.
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ // some lines
+ or_else(|_| Ok(ext_str.some_field));
// neither bind_instead_of_map nor unnecessary_lazy_eval applies here
let _: Result<usize, usize> = res.and_then(|x| Err(x));
--> $DIR/unnecessary_lazy_eval.rs:35:13
|
LL | let _ = opt.unwrap_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `opt.unwrap_or(2)`
+ | ^^^^--------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
|
= note: `-D clippy::unnecessary-lazy-evaluations` implied by `-D warnings`
--> $DIR/unnecessary_lazy_eval.rs:36:13
|
LL | let _ = opt.unwrap_or_else(|| astronomers_pi);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `opt.unwrap_or(astronomers_pi)`
+ | ^^^^---------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(astronomers_pi)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:37:13
|
LL | let _ = opt.unwrap_or_else(|| ext_str.some_field);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `opt.unwrap_or(ext_str.some_field)`
+ | ^^^^-------------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(ext_str.some_field)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:39:13
|
LL | let _ = opt.and_then(|_| ext_opt);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `opt.and(ext_opt)`
+ | ^^^^---------------------
+ | |
+ | help: use `and(..)` instead: `and(ext_opt)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:40:13
|
LL | let _ = opt.or_else(|| ext_opt);
- | ^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `opt.or(ext_opt)`
+ | ^^^^-------------------
+ | |
+ | help: use `or(..)` instead: `or(ext_opt)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:41:13
|
LL | let _ = opt.or_else(|| None);
- | ^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `opt.or(None)`
+ | ^^^^----------------
+ | |
+ | help: use `or(..)` instead: `or(None)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:42:13
|
LL | let _ = opt.get_or_insert_with(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `get_or_insert` instead: `opt.get_or_insert(2)`
+ | ^^^^------------------------
+ | |
+ | help: use `get_or_insert(..)` instead: `get_or_insert(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:43:13
|
LL | let _ = opt.ok_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^ help: use `ok_or` instead: `opt.ok_or(2)`
+ | ^^^^----------------
+ | |
+ | help: use `ok_or(..)` instead: `ok_or(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:44:13
|
LL | let _ = nested_tuple_opt.unwrap_or_else(|| Some((1, 2)));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `nested_tuple_opt.unwrap_or(Some((1, 2)))`
+ | ^^^^^^^^^^^^^^^^^-------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(Some((1, 2)))`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:47:13
|
LL | let _ = Some(10).unwrap_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `Some(10).unwrap_or(2)`
+ | ^^^^^^^^^--------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:48:13
|
LL | let _ = Some(10).and_then(|_| ext_opt);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `Some(10).and(ext_opt)`
+ | ^^^^^^^^^---------------------
+ | |
+ | help: use `and(..)` instead: `and(ext_opt)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:49:28
|
LL | let _: Option<usize> = None.or_else(|| ext_opt);
- | ^^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `None.or(ext_opt)`
+ | ^^^^^-------------------
+ | |
+ | help: use `or(..)` instead: `or(ext_opt)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:50:13
|
LL | let _ = None.get_or_insert_with(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `get_or_insert` instead: `None.get_or_insert(2)`
+ | ^^^^^------------------------
+ | |
+ | help: use `get_or_insert(..)` instead: `get_or_insert(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:51:35
|
LL | let _: Result<usize, usize> = None.ok_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^ help: use `ok_or` instead: `None.ok_or(2)`
+ | ^^^^^----------------
+ | |
+ | help: use `ok_or(..)` instead: `ok_or(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:52:28
|
LL | let _: Option<usize> = None.or_else(|| None);
- | ^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `None.or(None)`
+ | ^^^^^----------------
+ | |
+ | help: use `or(..)` instead: `or(None)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:55:13
|
LL | let _ = deep.0.unwrap_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `deep.0.unwrap_or(2)`
+ | ^^^^^^^--------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:56:13
|
LL | let _ = deep.0.and_then(|_| ext_opt);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `deep.0.and(ext_opt)`
+ | ^^^^^^^---------------------
+ | |
+ | help: use `and(..)` instead: `and(ext_opt)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:57:13
|
LL | let _ = deep.0.or_else(|| None);
- | ^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `deep.0.or(None)`
+ | ^^^^^^^----------------
+ | |
+ | help: use `or(..)` instead: `or(None)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:58:13
|
LL | let _ = deep.0.get_or_insert_with(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `get_or_insert` instead: `deep.0.get_or_insert(2)`
+ | ^^^^^^^------------------------
+ | |
+ | help: use `get_or_insert(..)` instead: `get_or_insert(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:59:13
|
LL | let _ = deep.0.ok_or_else(|| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^ help: use `ok_or` instead: `deep.0.ok_or(2)`
+ | ^^^^^^^----------------
+ | |
+ | help: use `ok_or(..)` instead: `ok_or(2)`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:79:28
|
LL | let _: Option<usize> = None.or_else(|| Some(3));
- | ^^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `None.or(Some(3))`
+ | ^^^^^-------------------
+ | |
+ | help: use `or(..)` instead: `or(Some(3))`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:80:13
|
LL | let _ = deep.0.or_else(|| Some(3));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `deep.0.or(Some(3))`
+ | ^^^^^^^-------------------
+ | |
+ | help: use `or(..)` instead: `or(Some(3))`
error: unnecessary closure used to substitute value for `Option::None`
--> $DIR/unnecessary_lazy_eval.rs:81:13
|
LL | let _ = opt.or_else(|| Some(3));
- | ^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `opt.or(Some(3))`
+ | ^^^^-------------------
+ | |
+ | help: use `or(..)` instead: `or(Some(3))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:87:13
|
LL | let _ = res2.unwrap_or_else(|_| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `res2.unwrap_or(2)`
+ | ^^^^^---------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:88:13
|
LL | let _ = res2.unwrap_or_else(|_| astronomers_pi);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `res2.unwrap_or(astronomers_pi)`
+ | ^^^^^----------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(astronomers_pi)`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:89:13
|
LL | let _ = res2.unwrap_or_else(|_| ext_str.some_field);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `res2.unwrap_or(ext_str.some_field)`
+ | ^^^^^--------------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(ext_str.some_field)`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:111:35
|
LL | let _: Result<usize, usize> = res.and_then(|_| Err(2));
- | ^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `res.and(Err(2))`
+ | ^^^^--------------------
+ | |
+ | help: use `and(..)` instead: `and(Err(2))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:112:35
|
LL | let _: Result<usize, usize> = res.and_then(|_| Err(astronomers_pi));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `res.and(Err(astronomers_pi))`
+ | ^^^^---------------------------------
+ | |
+ | help: use `and(..)` instead: `and(Err(astronomers_pi))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:113:35
|
LL | let _: Result<usize, usize> = res.and_then(|_| Err(ext_str.some_field));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `and` instead: `res.and(Err(ext_str.some_field))`
+ | ^^^^-------------------------------------
+ | |
+ | help: use `and(..)` instead: `and(Err(ext_str.some_field))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:115:35
|
LL | let _: Result<usize, usize> = res.or_else(|_| Ok(2));
- | ^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `res.or(Ok(2))`
+ | ^^^^------------------
+ | |
+ | help: use `or(..)` instead: `or(Ok(2))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:116:35
|
LL | let _: Result<usize, usize> = res.or_else(|_| Ok(astronomers_pi));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `res.or(Ok(astronomers_pi))`
+ | ^^^^-------------------------------
+ | |
+ | help: use `or(..)` instead: `or(Ok(astronomers_pi))`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval.rs:117:35
|
LL | let _: Result<usize, usize> = res.or_else(|_| Ok(ext_str.some_field));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `or` instead: `res.or(Ok(ext_str.some_field))`
+ | ^^^^-----------------------------------
+ | |
+ | help: use `or(..)` instead: `or(Ok(ext_str.some_field))`
-error: aborting due to 32 previous errors
+error: unnecessary closure used to substitute value for `Result::Err`
+ --> $DIR/unnecessary_lazy_eval.rs:118:35
+ |
+LL | let _: Result<usize, usize> = res.
+ | ___________________________________^
+LL | | // some lines
+LL | | // some lines
+LL | | // some lines
+... |
+LL | | // some lines
+LL | | or_else(|_| Ok(ext_str.some_field));
+ | |_________----------------------------------^
+ | |
+ | help: use `or(..)` instead: `or(Ok(ext_str.some_field))`
+
+error: aborting due to 33 previous errors
--> $DIR/unnecessary_lazy_eval_unfixable.rs:12:13
|
LL | let _ = Ok(1).unwrap_or_else(|()| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `Ok(1).unwrap_or(2)`
+ | ^^^^^^----------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
|
= note: `-D clippy::unnecessary-lazy-evaluations` implied by `-D warnings`
--> $DIR/unnecessary_lazy_eval_unfixable.rs:16:13
|
LL | let _ = Ok(1).unwrap_or_else(|e::E| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `Ok(1).unwrap_or(2)`
+ | ^^^^^^------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
error: unnecessary closure used to substitute value for `Result::Err`
--> $DIR/unnecessary_lazy_eval_unfixable.rs:17:13
|
LL | let _ = Ok(1).unwrap_or_else(|SomeStruct { .. }| 2);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: use `unwrap_or` instead: `Ok(1).unwrap_or(2)`
+ | ^^^^^^-------------------------------------
+ | |
+ | help: use `unwrap_or(..)` instead: `unwrap_or(2)`
error: aborting due to 3 previous errors
#![allow(clippy::stable_sort_primitive)]
use std::cell::Ref;
-use std::cmp::Reverse;
fn unnecessary_sort_by() {
fn id(x: isize) -> isize {
vec.sort_unstable_by_key(|a| id(-a));
// Reverse examples
vec.sort_by(|a, b| b.cmp(a)); // not linted to avoid suggesting `Reverse(b)` which would borrow
- vec.sort_by_key(|b| Reverse((b + 5).abs()));
- vec.sort_unstable_by_key(|b| Reverse(id(-b)));
+ vec.sort_by_key(|b| std::cmp::Reverse((b + 5).abs()));
+ vec.sort_unstable_by_key(|b| std::cmp::Reverse(id(-b)));
// Negative examples (shouldn't be changed)
let c = &7;
vec.sort_by(|a, b| (b - a).cmp(&(a - b)));
// The closure parameter is not dereferenced anymore, so non-Copy types can be linted
mod issue_6001 {
- use super::*;
struct Test(String);
impl Test {
args.sort_by_key(|a| a.name());
args.sort_unstable_by_key(|a| a.name());
// Reverse
- args.sort_by_key(|b| Reverse(b.name()));
- args.sort_unstable_by_key(|b| Reverse(b.name()));
+ args.sort_by_key(|b| std::cmp::Reverse(b.name()));
+ args.sort_unstable_by_key(|b| std::cmp::Reverse(b.name()));
}
}
#![allow(clippy::stable_sort_primitive)]
use std::cell::Ref;
-use std::cmp::Reverse;
fn unnecessary_sort_by() {
fn id(x: isize) -> isize {
// The closure parameter is not dereferenced anymore, so non-Copy types can be linted
mod issue_6001 {
- use super::*;
struct Test(String);
impl Test {
error: use Vec::sort here instead
- --> $DIR/unnecessary_sort_by.rs:15:5
+ --> $DIR/unnecessary_sort_by.rs:14:5
|
LL | vec.sort_by(|a, b| a.cmp(b));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort()`
= note: `-D clippy::unnecessary-sort-by` implied by `-D warnings`
error: use Vec::sort here instead
- --> $DIR/unnecessary_sort_by.rs:16:5
+ --> $DIR/unnecessary_sort_by.rs:15:5
|
LL | vec.sort_unstable_by(|a, b| a.cmp(b));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_unstable()`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:17:5
+ --> $DIR/unnecessary_sort_by.rs:16:5
|
LL | vec.sort_by(|a, b| (a + 5).abs().cmp(&(b + 5).abs()));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_by_key(|a| (a + 5).abs())`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:18:5
+ --> $DIR/unnecessary_sort_by.rs:17:5
|
LL | vec.sort_unstable_by(|a, b| id(-a).cmp(&id(-b)));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_unstable_by_key(|a| id(-a))`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:21:5
+ --> $DIR/unnecessary_sort_by.rs:20:5
|
LL | vec.sort_by(|a, b| (b + 5).abs().cmp(&(a + 5).abs()));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_by_key(|b| Reverse((b + 5).abs()))`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_by_key(|b| std::cmp::Reverse((b + 5).abs()))`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:22:5
+ --> $DIR/unnecessary_sort_by.rs:21:5
|
LL | vec.sort_unstable_by(|a, b| id(-b).cmp(&id(-a)));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_unstable_by_key(|b| Reverse(id(-b)))`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_unstable_by_key(|b| std::cmp::Reverse(id(-b)))`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:32:5
+ --> $DIR/unnecessary_sort_by.rs:31:5
|
LL | vec.sort_by(|a, b| (***a).abs().cmp(&(***b).abs()));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_by_key(|a| (***a).abs())`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:33:5
+ --> $DIR/unnecessary_sort_by.rs:32:5
|
LL | vec.sort_unstable_by(|a, b| (***a).abs().cmp(&(***b).abs()));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `vec.sort_unstable_by_key(|a| (***a).abs())`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:93:9
+ --> $DIR/unnecessary_sort_by.rs:91:9
|
LL | args.sort_by(|a, b| a.name().cmp(&b.name()));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_by_key(|a| a.name())`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:94:9
+ --> $DIR/unnecessary_sort_by.rs:92:9
|
LL | args.sort_unstable_by(|a, b| a.name().cmp(&b.name()));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_unstable_by_key(|a| a.name())`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:96:9
+ --> $DIR/unnecessary_sort_by.rs:94:9
|
LL | args.sort_by(|a, b| b.name().cmp(&a.name()));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_by_key(|b| Reverse(b.name()))`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_by_key(|b| std::cmp::Reverse(b.name()))`
error: use Vec::sort_by_key here instead
- --> $DIR/unnecessary_sort_by.rs:97:9
+ --> $DIR/unnecessary_sort_by.rs:95:9
|
LL | args.sort_unstable_by(|a, b| b.name().cmp(&a.name()));
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_unstable_by_key(|b| Reverse(b.name()))`
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try: `args.sort_unstable_by_key(|b| std::cmp::Reverse(b.name()))`
error: aborting due to 12 previous errors
}
fn require_string(_: &String) {}
+
+// https://github.com/rust-lang/rust-clippy/issues/8507
+mod issue_8507 {
+ #![allow(dead_code)]
+
+ struct Opaque<P>(P);
+
+ pub trait Abstracted {}
+
+ impl<P> Abstracted for Opaque<P> {}
+
+ fn build<P>(p: P) -> Opaque<P>
+ where
+ P: AsRef<str>,
+ {
+ Opaque(p)
+ }
+
+ // Should not lint.
+ fn test_str(s: &str) -> Box<dyn Abstracted> {
+ Box::new(build(s.to_string()))
+ }
+
+ // Should not lint.
+ fn test_x(x: super::X) -> Box<dyn Abstracted> {
+ Box::new(build(x))
+ }
+
+ #[derive(Clone, Copy)]
+ struct Y(&'static str);
+
+ impl AsRef<str> for Y {
+ fn as_ref(&self) -> &str {
+ self.0
+ }
+ }
+
+ impl ToString for Y {
+ fn to_string(&self) -> String {
+ self.0.to_string()
+ }
+ }
+
+ // Should lint because Y is copy.
+ fn test_y(y: Y) -> Box<dyn Abstracted> {
+ Box::new(build(y))
+ }
+}
}
fn require_string(_: &String) {}
+
+// https://github.com/rust-lang/rust-clippy/issues/8507
+mod issue_8507 {
+ #![allow(dead_code)]
+
+ struct Opaque<P>(P);
+
+ pub trait Abstracted {}
+
+ impl<P> Abstracted for Opaque<P> {}
+
+ fn build<P>(p: P) -> Opaque<P>
+ where
+ P: AsRef<str>,
+ {
+ Opaque(p)
+ }
+
+ // Should not lint.
+ fn test_str(s: &str) -> Box<dyn Abstracted> {
+ Box::new(build(s.to_string()))
+ }
+
+ // Should not lint.
+ fn test_x(x: super::X) -> Box<dyn Abstracted> {
+ Box::new(build(x))
+ }
+
+ #[derive(Clone, Copy)]
+ struct Y(&'static str);
+
+ impl AsRef<str> for Y {
+ fn as_ref(&self) -> &str {
+ self.0
+ }
+ }
+
+ impl ToString for Y {
+ fn to_string(&self) -> String {
+ self.0.to_string()
+ }
+ }
+
+ // Should lint because Y is copy.
+ fn test_y(y: Y) -> Box<dyn Abstracted> {
+ Box::new(build(y.to_string()))
+ }
+}
LL + let path = match get_file_path(t) {
|
-error: aborting due to 76 previous errors
+error: unnecessary use of `to_string`
+ --> $DIR/unnecessary_to_owned.rs:260:24
+ |
+LL | Box::new(build(y.to_string()))
+ | ^^^^^^^^^^^^^ help: use: `y`
+
+error: aborting due to 77 previous errors
// aux-build:proc_macro_derive.rs
#![warn(clippy::use_self)]
-#![allow(dead_code)]
+#![allow(dead_code, unreachable_code)]
#![allow(
clippy::should_implement_trait,
clippy::upper_case_acronyms,
}
}
}
+
+mod use_self_in_pat {
+ enum Foo {
+ Bar,
+ Baz,
+ }
+
+ impl Foo {
+ fn do_stuff(self) {
+ match self {
+ Self::Bar => unimplemented!(),
+ Self::Baz => unimplemented!(),
+ }
+ match Some(1) {
+ Some(_) => unimplemented!(),
+ None => unimplemented!(),
+ }
+ if let Self::Bar = self {
+ unimplemented!()
+ }
+ }
+ }
+}
// aux-build:proc_macro_derive.rs
#![warn(clippy::use_self)]
-#![allow(dead_code)]
+#![allow(dead_code, unreachable_code)]
#![allow(
clippy::should_implement_trait,
clippy::upper_case_acronyms,
}
}
}
+
+mod use_self_in_pat {
+ enum Foo {
+ Bar,
+ Baz,
+ }
+
+ impl Foo {
+ fn do_stuff(self) {
+ match self {
+ Foo::Bar => unimplemented!(),
+ Foo::Baz => unimplemented!(),
+ }
+ match Some(1) {
+ Some(_) => unimplemented!(),
+ None => unimplemented!(),
+ }
+ if let Foo::Bar = self {
+ unimplemented!()
+ }
+ }
+ }
+}
LL | S2::new()
| ^^ help: use the applicable keyword: `Self`
-error: aborting due to 28 previous errors
+error: unnecessary structure name repetition
+ --> $DIR/use_self.rs:532:17
+ |
+LL | Foo::Bar => unimplemented!(),
+ | ^^^ help: use the applicable keyword: `Self`
+
+error: unnecessary structure name repetition
+ --> $DIR/use_self.rs:533:17
+ |
+LL | Foo::Baz => unimplemented!(),
+ | ^^^ help: use the applicable keyword: `Self`
+
+error: unnecessary structure name repetition
+ --> $DIR/use_self.rs:539:20
+ |
+LL | if let Foo::Bar = self {
+ | ^^^ help: use the applicable keyword: `Self`
+
+error: aborting due to 31 previous errors
output
};
- // Trigger a sucessful build, so Cargo would like to cache the build result.
+ // Trigger a successful build, so Cargo would like to cache the build result.
successful_build();
// Make sure there's no spurious rebuild when nothing changes.
blockquote { font-size: 1em; }
[ng\:cloak], [ng-cloak], [data-ng-cloak], [x-ng-cloak], .ng-cloak, .x-ng-cloak { display: none !important; }
- .form-inline .checkbox { margin-right: 0.6em }
+ .dropdown-menu {
+ color: var(--fg);
+ background: var(--theme-popup-bg);
+ border: 1px solid var(--theme-popup-border);
+ }
+
+ .dropdown-menu .divider {
+ background-color: var(--theme-popup-border);
+ }
+
+ .dropdown-menu .checkbox {
+ display: block;
+ white-space: nowrap;
+ margin: 0;
+ }
+ .dropdown-menu .checkbox label {
+ padding: 3px 20px;
+ width: 100%;
+ }
+
+ .dropdown-menu .checkbox input {
+ position: relative;
+ margin: 0 0.5rem 0;
+ padding: 0;
+ }
+
+ .dropdown-menu .checkbox:hover {
+ background-color: var(--theme-hover);
+ }
+
+ div.panel div.panel-body button.dropdown-toggle {
+ background: var(--searchbar-bg);
+ color: var(--searchbar-fg);
+ border-color: var(--theme-popup-border);
+ }
+
+ div.panel div.panel-body button.dropdown-toggle:hover {
+ box-shadow: 0 0 3px var(--searchbar-shadow-color);
+ }
+
+ div.panel div.panel-body .open button.dropdown-toggle {
+ background: var(--searchbar-bg);
+ color: var(--searchbar-fg);
+ border-color: var(--theme-popup-border);
+ filter: brightness(90%);
+ }
+
+ .dropdown-toggle .badge {
+ background-color: #777;
+ }
.panel-heading { cursor: pointer; }
.panel .panel-title-name .anchor { display: none; }
.panel:hover .panel-title-name .anchor { display: inline;}
+ .search-control {
+ margin-top: 15px;
+ }
+
+ @media (min-width: 992px) {
+ .search-control {
+ margin-top: 0;
+ }
+ }
+
.label {
padding-top: 0.3em;
padding-bottom: 0.3em;
--inline-code-bg: #191f26;
}
+ .theme-dropdown {
+ position: absolute;
+ margin: 0.7em;
+ z-index: 10;
+ }
+
/* Applying the mdBook theme */
.theme-icon {
- position: absolute;
text-align: center;
width: 2em;
height: 2em;
- margin: 0.7em;
line-height: 2em;
border: solid 1px var(--icons);
border-radius: 5px;
background: var(--theme-hover);
}
.theme-choice {
- position: absolute;
- margin-top: calc(2em + 0.7em);
- margin-left: 0.7em;
+ display: none;
list-style: none;
border: 1px solid var(--theme-popup-border);
border-radius: 5px;
color: var(--fg);
background: var(--theme-popup-bg);
padding: 0 0;
+ overflow: hidden;
}
+
+ .theme-dropdown.open .theme-choice {
+ display: block;
+ }
+
.theme-choice > li {
padding: 5px 10px;
font-size: 0.8em;
user-select: none;
cursor: pointer;
}
- .theme-choice > li:hover {
+
+ .theme-choice>li:hover {
background: var(--theme-hover);
}
</style>
</head>
-<body>
- <div id="theme-icon" class="theme-icon">🖌</div>
- <ul id="theme-menu" class="theme-choice" style="display: none;">
- <li id="light">Light</li>
- <li id="rust">Rust</li>
- <li id="coal">Coal</li>
- <li id="navy">Navy</li>
- <li id="ayu">Ayu</li>
- </ul>
-
- <div class="container" ng-app="clippy" ng-controller="lintList">
+<body ng-app="clippy" ng-controller="lintList">
+ <div theme-dropdown class="theme-dropdown">
+ <div id="theme-icon" class="theme-icon">🖌</div>
+ <ul id="theme-menu" class="theme-choice">
+ <li id="{{id}}" ng-repeat="(id, name) in themes" ng-click="selectTheme(id)">{{name}}</li>
+ </ul>
+ </div>
+
+ <div class="container">
<div class="page-header">
<h1>Clippy Lints</h1>
</div>
</div>
<div class="panel panel-default" ng-show="data">
- <div class="panel-body row filter-panel">
- <div class="col-md-6 form-inline">
- <div class="form-group form-group-lg">
- <p class="h4">
- Lint levels
- <a href="https://doc.rust-lang.org/rustc/lints/levels.html">(?)</a>
- </p>
- <div class="checkbox" ng-repeat="(level, enabled) in levels">
- <label class="text-capitalize">
- <input type="checkbox" ng-model="levels[level]" />
- {{level}}
- </label>
- </div>
+ <div class="panel-body row">
+ <div class="col-12 col-md-4">
+ <div class="btn-group" filter-dropdown>
+ <button type="button" class="btn btn-default dropdown-toggle">
+ Lint levels <span class="badge">{{selectedValuesCount(levels)}}</span> <span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu">
+ <li class="checkbox">
+ <label ng-click="toggleLevels(true)">
+ <input type="checkbox" class="invisible" />
+ All
+ </label>
+ </li>
+ <li class="checkbox">
+ <label ng-click="toggleLevels(false)">
+ <input type="checkbox" class="invisible" />
+ None
+ </label>
+ </li>
+ <li role="separator" class="divider"></li>
+ <li class="checkbox" ng-repeat="(level, enabled) in levels">
+ <label class="text-capitalize">
+ <input type="checkbox" ng-model="levels[level]" />
+ {{level}}
+ </label>
+ </li>
+ </ul>
</div>
- </div>
- <div class="col-md-6 form-inline">
- <div class="form-group form-group-lg">
- <p class="h4">
- Lint groups
- <a href="https://github.com/rust-lang/rust-clippy/#clippy">(?)</a>
- </p>
- <div class="checkbox" ng-repeat="(group, enabled) in groups">
- <label class="text-capitalize">
- <input type="checkbox" ng-model="groups[group]" />
- {{group}}
- </label>
- </div>
+ <div class="btn-group" filter-dropdown>
+ <button type="button" class="btn btn-default dropdown-toggle">
+ Lint groups <span class="badge">{{selectedValuesCount(groups)}}</span> <span class="caret"></span>
+ </button>
+ <ul class="dropdown-menu">
+ <li class="checkbox">
+ <label ng-click="toggleGroups(true)">
+ <input type="checkbox" class="invisible" />
+ All
+ </label>
+ </li>
+ <li class="checkbox">
+ <label ng-click="toggleGroups(false)">
+ <input type="checkbox" class="invisible" />
+ None
+ </label>
+ </li>
+ <li role="separator" class="divider"></li>
+ <li class="checkbox" ng-repeat="(group, enabled) in groups">
+ <label class="text-capitalize">
+ <input type="checkbox" ng-model="groups[group]" />
+ {{group}}
+ </label>
+ </li>
+ </ul>
</div>
</div>
- </div>
- <div class="panel-body row">
- <div class="col-md-12 form-horizontal">
+ <div class="col-12 col-md-8 search-control">
<div class="input-group">
<label class="input-group-addon" id="filter-label" for="filter-input">Filter:</label>
<input type="text" class="form-control" placeholder="Keywords or search string" id="filter-input" ng-model="search" ng-model-options="{debounce: 50}"/>
</h2>
</header>
- <div class="list-group lint-docs" ng-class="{collapse: true, in: open[lint.id]}">
+ <div class="list-group lint-docs" ng-if="open[lint.id]" ng-class="{collapse: true, in: open[lint.id]}">
<div class="list-group-item lint-doc-md" ng-bind-html="lint.docs | markdown"></div>
<div class="lint-additional-info-container">
<!-- Applicability -->
</div>
<a href="https://github.com/rust-lang/rust-clippy">
- <img style="position: absolute; top: 0; right: 0; border: 0;" src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png" alt="Fork me on Github"/>
+ <img style="position: absolute; top: 0; right: 0; border: 0; clip-path: polygon(0% 0%, 100% 0%, 100% 100%);" src="https://s3.amazonaws.com/github/ribbons/forkme_right_darkblue_121621.png" alt="Fork me on Github"/>
</a>
<script src="https://cdnjs.cloudflare.com/ajax/libs/markdown-it/12.3.2/markdown-it.min.js"></script>
);
};
})
+ .directive('themeDropdown', function ($document) {
+ return {
+ restrict: 'A',
+ link: function ($scope, $element, $attr) {
+ $element.bind('click', function () {
+ $element.toggleClass('open');
+ $element.addClass('open-recent');
+ });
+
+ $document.bind('click', function () {
+ if (!$element.hasClass('open-recent')) {
+ $element.removeClass('open');
+ }
+ $element.removeClass('open-recent');
+ })
+ }
+ }
+ })
+ .directive('filterDropdown', function ($document) {
+ return {
+ restrict: 'A',
+ link: function ($scope, $element, $attr) {
+ $element.bind('click', function (event) {
+ if (event.target.closest('button')) {
+ $element.toggleClass('open');
+ } else {
+ $element.addClass('open');
+ }
+ $element.addClass('open-recent');
+ });
+
+ $document.bind('click', function () {
+ if (!$element.hasClass('open-recent')) {
+ $element.removeClass('open');
+ }
+ $element.removeClass('open-recent');
+ })
+ }
+ }
+ })
.directive('onFinishRender', function ($timeout) {
return {
restrict: 'A',
suspicious: true,
};
$scope.groups = GROUPS_FILTER_DEFAULT;
+ const THEMES_DEFAULT = {
+ light: "Light",
+ rust: "Rust",
+ coal: "Coal",
+ navy: "Navy",
+ ayu: "Ayu"
+ };
+ $scope.themes = THEMES_DEFAULT;
+
+ $scope.selectTheme = function (theme) {
+ setTheme(theme, true);
+ }
+
+ $scope.toggleLevels = function (value) {
+ const levels = $scope.levels;
+ for (const key in levels) {
+ if (levels.hasOwnProperty(key)) {
+ levels[key] = value;
+ }
+ }
+ };
+ $scope.toggleGroups = function (value) {
+ const groups = $scope.groups;
+ for (const key in groups) {
+ if (groups.hasOwnProperty(key)) {
+ groups[key] = value;
+ }
+ }
+ };
+ $scope.selectedValuesCount = function (obj) {
+ return Object.values(obj).filter(x => x).length;
+ }
$scope.byGroups = function (lint) {
return $scope.groups[lint.group];
};
}
}
- function setupListeners() {
- let themeIcon = document.getElementById("theme-icon");
- let themeMenu = document.getElementById("theme-menu");
- themeIcon.addEventListener("click", function(e) {
- if (themeMenu.style.display == "none") {
- themeMenu.style.display = "block";
- } else {
- themeMenu.style.display = "none";
- }
- });
-
- let children = themeMenu.children;
- for (let index = 0; index < children.length; index++) {
- let child = children[index];
- child.addEventListener("click", function(e) {
- setTheme(child.id, true);
- });
- }
- }
-
- setupListeners();
-
function setTheme(theme, store) {
let enableHighlight = false;
let enableNight = false;
return None;
}
- // Some older versions of LLDB seem to have problems with multiple
- // instances running in parallel, so only run one test thread at a
- // time.
- env::set_var("RUST_TEST_THREADS", "1");
-
Some(Config { debugger: Some(Debugger::Lldb), ..config.clone() })
}
}
fn run_debuginfo_cdb_test_no_opt(&self) {
+ let exe_file = self.make_exe_name();
+
+        // Existing PDB files are updated in-place. When changing the debuginfo
+ // the compiler generates for something, this can lead to the situation
+ // where both the old and the new version of the debuginfo for the same
+ // type is present in the PDB, which is very confusing.
+ // Therefore we delete any existing PDB file before compiling the test
+ // case.
+        // FIXME: If we can reliably detect that MSVC's link.exe is used, then
+ // passing `/INCREMENTAL:NO` might be a cleaner way to do this.
+        let pdb_file = exe_file.with_extension("pdb");
+ if pdb_file.exists() {
+ std::fs::remove_file(pdb_file).unwrap();
+ }
+
// compile test file (it should have 'compile-flags:-g' in the header)
let should_run = self.run_if_enabled();
let compile_result = self.compile_test(should_run, EmitMetadata::No);
return;
}
- let exe_file = self.make_exe_name();
-
let prefixes = {
static PREFIXES: &[&str] = &["cdb", "cdbg"];
// No "native rust support" variation for CDB yet.
if let CommandKind::Count = self {
if args[2].parse::<usize>().is_err() {
- print_err(&format!("Third argument to @count must be a valid usize"), lineno);
+ print_err(
+ &format!("Third argument to @count must be a valid usize (got `{}`)", args[2]),
+ lineno,
+ );
return false;
}
}
assert_eq!(
results.len(),
1,
- "Didn't get 1 result for `{}`: got {:?}",
+ "Expected 1 match for `{}` (because of @set): matched to {:?}",
command.args[3],
results
);
-Subproject commit 722475ccc143d2dbf9fad5891207dcb5576e3d17
+Subproject commit 346f8f2219562dae3fce5a35cc7eed4df8353b6c
-Subproject commit 5fae65dd28b450a437ebc800a410164c3af1d516
+Subproject commit b594f9c441cf12319d10c14ba6a511d5c9db1b87
pub(crate) fn extract_pre_comment(pre_snippet: &str) -> (Option<String>, ListItemCommentStyle) {
let trimmed_pre_snippet = pre_snippet.trim();
// Both start and end are checked to support keeping a block comment inline with
- // the item, even if there are preceeding line comments, while still supporting
+ // the item, even if there are preceding line comments, while still supporting
// a snippet that starts with a block comment but also contains one or more
// trailing single line comments.
// https://github.com/rust-lang/rustfmt/issues/3025
mut self,
krate: &'ast ast::Crate,
) -> Result<FileModMap<'ast>, ModuleResolutionError> {
- let root_filename = self.parse_sess.span_to_filename(krate.span);
+ let root_filename = self.parse_sess.span_to_filename(krate.spans.inner_span);
self.directory.path = match root_filename {
FileName::Real(ref p) => p.parent().unwrap_or(Path::new("")).to_path_buf(),
_ => PathBuf::new(),
self.visit_mod_from_ast(&krate.items)?;
}
- let snippet_provider = self.parse_sess.snippet_provider(krate.span);
+ let snippet_provider = self.parse_sess.snippet_provider(krate.spans.inner_span);
self.file_map.insert(
root_filename,
($method:ident $(,)* $($arg:expr),* $(,)*) => {
match parser.$method($($arg,)*) {
Ok(val) => {
- if parser.sess.span_diagnostic.has_errors() {
+ if parser.sess.span_diagnostic.has_errors().is_some() {
parser.sess.span_diagnostic.reset_err_count();
return None;
} else {
let mut cloned_parser = (*parser).clone();
match $parser(&mut cloned_parser) {
Ok(x) => {
- if parser.sess.span_diagnostic.has_errors() {
+ if parser.sess.span_diagnostic.has_errors().is_some() {
parser.sess.span_diagnostic.reset_err_count();
} else {
// Parsing succeeded.
let result = catch_unwind(AssertUnwindSafe(|| {
let mut parser = new_parser_from_file(sess.inner(), path, Some(span));
match parser.parse_mod(&TokenKind::Eof) {
- Ok(result) => Some(result),
+ Ok((a, i, spans)) => Some((a, i, spans.inner_span)),
Err(mut e) => {
e.emit();
if sess.can_reset_errors() {
// Methods that should be restricted within the parse module.
impl ParseSess {
pub(super) fn emit_diagnostics(&self, diagnostics: Vec<Diagnostic>) {
- for diagnostic in diagnostics {
- self.parse_sess.span_diagnostic.emit_diagnostic(&diagnostic);
+ for mut diagnostic in diagnostics {
+ self.parse_sess
+ .span_diagnostic
+ .emit_diagnostic(&mut diagnostic);
}
}
}
pub(super) fn has_errors(&self) -> bool {
- self.parse_sess.span_diagnostic.has_errors()
+ self.parse_sess.span_diagnostic.has_errors().is_some()
}
pub(super) fn reset_errors(&self) {
match **args {
ast::GenericArgs::AngleBracketed(ref data) if !data.args.is_empty() => {
// HACK: squeeze out the span between the identifier and the parameters.
- // The hack is requried so that we don't remove the separator inside macro calls.
+ // The hack is required so that we don't remove the separator inside macro calls.
// This does not work in the presence of comment, hoping that people are
// sane about where to put their comment.
let separator_snippet = context
let ident_str = rewrite_ident(&self.get_context(), ident).to_owned();
self.push_str(&ident_str);
- if let ast::ModKind::Loaded(ref items, ast::Inline::Yes, inner_span) = mod_kind {
+ if let ast::ModKind::Loaded(ref items, ast::Inline::Yes, ref spans) = mod_kind {
+ let ast::ModSpans {
+ inner_span,
+ inject_use_span: _,
+ } = *spans;
match self.config.brace_style() {
BraceStyle::AlwaysNextLine => {
let indent_str = self.block_indent.to_string_with_newline(self.config);
"mach",
"memchr",
"object",
+ "once_cell",
"regalloc",
"region",
"rustc-hash",
continue;
}
- let preceeded_by_doc_comment = {
+ let preceded_by_doc_comment = {
let pre_contents = &contents[..idx];
let pre_newline = pre_contents.rfind('\n');
let pre_doc_comment = pre_contents.rfind("///");
}
};
- if preceeded_by_doc_comment {
+ if preceded_by_doc_comment {
continue;
}
};
suppressible_tidy_err!(err, skip_file_length, "");
} else if lines > (LINES * 7) / 10 {
- // Just set it to something that doesn't trigger the "unneccessarily ignored" warning.
+ // Just set it to something that doesn't trigger the "unnecessarily ignored" warning.
skip_file_length = Directive::Ignore(true);
}
const ENTRY_LIMIT: usize = 1000;
// FIXME: The following limits should be reduced eventually.
-const ROOT_ENTRY_LIMIT: usize = 983;
+const ROOT_ENTRY_LIMIT: usize = 985;
const ISSUES_ENTRY_LIMIT: usize = 2310;
fn check_entries(path: &Path, bad: &mut bool) {