[[package]]
name = "rustfmt-nightly"
-version = "1.4.24"
+version = "1.4.25"
dependencies = [
"annotate-snippets 0.6.1",
"anyhow",
// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger.
#[cfg(target_arch = "x86_64")]
-rustc_data_structures::static_assert_size!(Expr, 112);
+rustc_data_structures::static_assert_size!(Expr, 120);
impl Expr {
/// Returns `true` if this expression would be valid somewhere that expects a value;
Closed,
}
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum StructRest {
+ /// `..x`.
+ Base(P<Expr>),
+ /// `..`.
+ Rest(Span),
+ /// No trailing `..` or expression.
+ None,
+}
+
#[derive(Clone, Encodable, Decodable, Debug)]
pub enum ExprKind {
/// A `box x` expression.
Field(P<Expr>, Ident),
/// An indexing operation (e.g., `foo[2]`).
Index(P<Expr>, P<Expr>),
- /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
+ /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assignment).
Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
/// Variable reference, possibly containing `::` and/or type
/// A struct literal expression.
///
- /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. base}`,
- /// where `base` is the `Option<Expr>`.
- Struct(Path, Vec<Field>, Option<P<Expr>>),
+ /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`.
+ Struct(Path, Vec<Field>, StructRest),
/// An array literal constructed from one repeated element.
///
ExprKind::Struct(path, fields, expr) => {
vis.visit_path(path);
fields.flat_map_in_place(|field| vis.flat_map_field(field));
- visit_opt(expr, |expr| vis.visit_expr(expr));
+ match expr {
+ StructRest::Base(expr) => vis.visit_expr(expr),
+ StructRest::Rest(_span) => {}
+ StructRest::None => {}
+ }
}
ExprKind::Paren(expr) => {
vis.visit_expr(expr);
}
}
if let Some((pos, comma, sp)) = suggestion {
- let mut new_stream = vec![];
+ let mut new_stream = Vec::with_capacity(self.0.len() + 1);
let parts = self.0.split_at(pos + 1);
new_stream.extend_from_slice(parts.0);
new_stream.push(comma);
ExprKind::Struct(ref path, ref fields, ref optional_base) => {
visitor.visit_path(path, expression.id);
walk_list!(visitor, visit_field, fields);
- walk_list!(visitor, visit_expr, optional_base);
+ match optional_base {
+ StructRest::Base(expr) => visitor.visit_expr(expr),
+ StructRest::Rest(_span) => {}
+ StructRest::None => {}
+ }
}
ExprKind::Tup(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprKind::InlineAsm(ref asm) => self.lower_expr_asm(e.span, asm),
ExprKind::LlvmInlineAsm(ref asm) => self.lower_expr_llvm_asm(asm),
- ExprKind::Struct(ref path, ref fields, ref maybe_expr) => {
- let maybe_expr = maybe_expr.as_ref().map(|x| self.lower_expr(x));
+ ExprKind::Struct(ref path, ref fields, ref rest) => {
+ let rest = match rest {
+ StructRest::Base(e) => Some(self.lower_expr(e)),
+ StructRest::Rest(sp) => {
+ self.sess
+ .struct_span_err(*sp, "base expression required after `..`")
+ .span_label(*sp, "add a base expression here")
+ .emit();
+ Some(&*self.arena.alloc(self.expr_err(*sp)))
+ }
+ StructRest::None => None,
+ };
hir::ExprKind::Struct(
self.arena.alloc(self.lower_qpath(
e.id,
ImplTraitContext::disallowed(),
)),
self.arena.alloc_from_iter(fields.iter().map(|x| self.lower_field(x))),
- maybe_expr,
+ rest,
)
}
ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()),
whole_span: Span,
) -> hir::ExprKind<'hir> {
// Return early in case of an ordinary assignment.
- fn is_ordinary(lhs: &Expr) -> bool {
+ fn is_ordinary(lower_ctx: &mut LoweringContext<'_, '_>, lhs: &Expr) -> bool {
match &lhs.kind {
- ExprKind::Tup(..) => false,
+ ExprKind::Array(..) | ExprKind::Struct(..) | ExprKind::Tup(..) => false,
+ // Check for tuple struct constructor.
+ ExprKind::Call(callee, ..) => lower_ctx.extract_tuple_struct_path(callee).is_none(),
ExprKind::Paren(e) => {
match e.kind {
// We special-case `(..)` for consistency with patterns.
ExprKind::Range(None, None, RangeLimits::HalfOpen) => false,
- _ => is_ordinary(e),
+ _ => is_ordinary(lower_ctx, e),
}
}
_ => true,
}
}
- if is_ordinary(lhs) {
+ if is_ordinary(self, lhs) {
return hir::ExprKind::Assign(self.lower_expr(lhs), self.lower_expr(rhs), eq_sign_span);
}
if !self.sess.features_untracked().destructuring_assignment {
hir::ExprKind::Block(&self.block_all(whole_span, stmts, None), None)
}
+ /// If the given expression is a path to a tuple struct, returns that path.
+ /// It is not a complete check, but just tries to reject most paths early
+ /// if they are not tuple structs.
+ /// Type checking will take care of the full validation later.
+ fn extract_tuple_struct_path<'a>(&mut self, expr: &'a Expr) -> Option<&'a Path> {
+ // For tuple struct destructuring, it must be a non-qualified path (like in patterns).
+ if let ExprKind::Path(None, path) = &expr.kind {
+ // Does the path resolve to something disallowed in a tuple struct/variant pattern?
+ if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+ if partial_res.unresolved_segments() == 0
+ && !partial_res.base_res().expected_in_tuple_struct_pat()
+ {
+ return None;
+ }
+ }
+ return Some(path);
+ }
+ None
+ }
+
/// Convert the LHS of a destructuring assignment to a pattern.
/// Each sub-assignment is recorded in `assignments`.
fn destructure_assign(
assignments: &mut Vec<hir::Stmt<'hir>>,
) -> &'hir hir::Pat<'hir> {
match &lhs.kind {
+ // Slice patterns.
+ ExprKind::Array(elements) => {
+ let (pats, rest) =
+ self.destructure_sequence(elements, "slice", eq_sign_span, assignments);
+ let slice_pat = if let Some((i, span)) = rest {
+ let (before, after) = pats.split_at(i);
+ hir::PatKind::Slice(
+ before,
+ Some(self.pat_without_dbm(span, hir::PatKind::Wild)),
+ after,
+ )
+ } else {
+ hir::PatKind::Slice(pats, None, &[])
+ };
+ return self.pat_without_dbm(lhs.span, slice_pat);
+ }
+ // Tuple structs.
+ ExprKind::Call(callee, args) => {
+ if let Some(path) = self.extract_tuple_struct_path(callee) {
+ let (pats, rest) = self.destructure_sequence(
+ args,
+ "tuple struct or variant",
+ eq_sign_span,
+ assignments,
+ );
+ let qpath = self.lower_qpath(
+ callee.id,
+ &None,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::disallowed(),
+ );
+ // Destructure like a tuple struct.
+ let tuple_struct_pat =
+ hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0));
+ return self.pat_without_dbm(lhs.span, tuple_struct_pat);
+ }
+ }
+ // Structs.
+ ExprKind::Struct(path, fields, rest) => {
+ let field_pats = self.arena.alloc_from_iter(fields.iter().map(|f| {
+ let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments);
+ hir::FieldPat {
+ hir_id: self.next_id(),
+ ident: f.ident,
+ pat,
+ is_shorthand: f.is_shorthand,
+ span: f.span,
+ }
+ }));
+ let qpath = self.lower_qpath(
+ lhs.id,
+ &None,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::disallowed(),
+ );
+ let fields_omitted = match rest {
+ StructRest::Base(e) => {
+ self.sess
+ .struct_span_err(
+ e.span,
+ "functional record updates are not allowed in destructuring \
+ assignments",
+ )
+ .span_suggestion(
+ e.span,
+ "consider removing the trailing pattern",
+ String::new(),
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ true
+ }
+ StructRest::Rest(_) => true,
+ StructRest::None => false,
+ };
+ let struct_pat = hir::PatKind::Struct(qpath, field_pats, fields_omitted);
+ return self.pat_without_dbm(lhs.span, struct_pat);
+ }
// Tuples.
ExprKind::Tup(elements) => {
let (pats, rest) =
let mut used_input_regs = FxHashMap::default();
let mut used_output_regs = FxHashMap::default();
+ let mut required_features: Vec<&str> = vec![];
for (idx, op) in operands.iter().enumerate() {
let op_sp = asm.operands[idx].1;
if let Some(reg) = op.reg() {
+ // Make sure we don't accidentally carry features from the
+ // previous iteration.
+ required_features.clear();
+
// Validate register classes against currently enabled target
// features. We check that at least one type is available for
// the current target.
let reg_class = reg.reg_class();
- let mut required_features: Vec<&str> = vec![];
for &(_, feature) in reg_class.supported_types(asm_arch) {
if let Some(feature) = feature {
if self.sess.target_features.contains(&Symbol::intern(feature)) {
// Check if this is a binding pattern, if so, we can optimize and avoid adding a
// `let <pat> = __argN;` statement. In this case, we do not rename the parameter.
let (ident, is_simple_parameter) = match parameter.pat.kind {
- hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, _) => {
- (ident, true)
+ hir::PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ _,
+ ident,
+ _,
+ ) => (ident, true),
+ // For `ref mut` or wildcard arguments, we can't reuse the binding, but
+ // we can keep the same name for the parameter.
+ // This lets rustdoc render it correctly in documentation.
+ hir::PatKind::Binding(_, _, ident, _) => (ident, false),
+ hir::PatKind::Wild => {
+ (Ident::with_dummy_span(rustc_span::symbol::kw::Underscore), false)
}
_ => {
// Replace the ident for bindings that aren't simple.
//
// For the "output" lifetime parameters, we just want to
// generate `'_`.
- let mut generic_args: Vec<_> = lifetime_params[..input_lifetimes_count]
- .iter()
- .map(|&(span, hir_name)| {
+ let mut generic_args = Vec::with_capacity(lifetime_params.len());
+ generic_args.extend(lifetime_params[..input_lifetimes_count].iter().map(
+ |&(span, hir_name)| {
// Input lifetime like `'a` or `'1`:
GenericArg::Lifetime(hir::Lifetime {
hir_id: self.next_id(),
span,
name: hir::LifetimeName::Param(hir_name),
})
- })
- .collect();
+ },
+ ));
generic_args.extend(lifetime_params[input_lifetimes_count..].iter().map(|&(span, _)|
// Output lifetime like `'_`.
GenericArg::Lifetime(hir::Lifetime {
}
fn lower_block_noalloc(&mut self, b: &Block, targeted_by_break: bool) -> hir::Block<'hir> {
- let mut stmts = vec![];
let mut expr: Option<&'hir _> = None;
- for (index, stmt) in b.stmts.iter().enumerate() {
- if index == b.stmts.len() - 1 {
- if let StmtKind::Expr(ref e) = stmt.kind {
- expr = Some(self.lower_expr(e));
- } else {
- stmts.extend(self.lower_stmt(stmt));
- }
- } else {
- stmts.extend(self.lower_stmt(stmt));
- }
- }
+ let stmts = self.arena.alloc_from_iter(
+ b.stmts
+ .iter()
+ .enumerate()
+ .filter_map(|(index, stmt)| {
+ if index == b.stmts.len() - 1 {
+ if let StmtKind::Expr(ref e) = stmt.kind {
+ expr = Some(self.lower_expr(e));
+ None
+ } else {
+ Some(self.lower_stmt(stmt))
+ }
+ } else {
+ Some(self.lower_stmt(stmt))
+ }
+ })
+ .flatten(),
+ );
+ let rules = self.lower_block_check_mode(&b.rules);
+ let hir_id = self.lower_node_id(b.id);
- hir::Block {
- hir_id: self.lower_node_id(b.id),
- stmts: self.arena.alloc_from_iter(stmts),
- expr,
- rules: self.lower_block_check_mode(&b.rules),
- span: b.span,
- targeted_by_break,
- }
+ hir::Block { hir_id, stmts, expr, rules, span: b.span, targeted_by_break }
}
/// Lowers a block directly to an expression, presuming that it
gate_all!(const_trait_impl, "const trait impls are experimental");
gate_all!(half_open_range_patterns, "half-open range patterns are unstable");
gate_all!(inline_const, "inline-const is experimental");
+ gate_all!(destructuring_assignment, "destructuring assignments are unstable");
// All uses of `gate_all!` below this point were added in #65742,
// and subsequently disabled (with the non-early gating readded).
&mut self,
path: &ast::Path,
fields: &[ast::Field],
- wth: &Option<P<ast::Expr>>,
+ rest: &ast::StructRest,
attrs: &[ast::Attribute],
) {
self.print_path(path, true, 0);
},
|f| f.span,
);
- match *wth {
- Some(ref expr) => {
+ match rest {
+ ast::StructRest::Base(_) | ast::StructRest::Rest(_) => {
self.ibox(INDENT_UNIT);
if !fields.is_empty() {
self.s.word(",");
self.s.space();
}
self.s.word("..");
- self.print_expr(expr);
- self.end();
- }
- _ => {
- if !fields.is_empty() {
- self.s.word(",")
+ if let ast::StructRest::Base(ref expr) = *rest {
+ self.print_expr(expr);
}
+ self.end();
}
+ ast::StructRest::None if !fields.is_empty() => self.s.word(","),
+ _ => {}
}
self.s.word("}");
}
ast::ExprKind::Repeat(ref element, ref count) => {
self.print_expr_repeat(element, count, attrs);
}
- ast::ExprKind::Struct(ref path, ref fields, ref wth) => {
- self.print_expr_struct(path, &fields[..], wth, attrs);
+ ast::ExprKind::Struct(ref path, ref fields, ref rest) => {
+ self.print_expr_struct(path, &fields[..], rest, attrs);
}
ast::ExprKind::Tup(ref exprs) => {
self.print_expr_tup(&exprs[..], attrs);
let fmt = substr.nonself_args[0].clone();
- let mut stmts = vec![];
+ let mut stmts = Vec::with_capacity(fields.len() + 2);
match vdata {
ast::VariantData::Tuple(..) | ast::VariantData::Unit(..) => {
// tuple struct/"normal" variant
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
-use rustc_middle::span_bug;
use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;
InlineAsmArch::Nvptx64 => {}
InlineAsmArch::Hexagon => {}
InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+ InlineAsmArch::SpirV => {}
}
}
if !options.contains(InlineAsmOptions::NOMEM) {
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
.to_string(),
}
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
}
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
}
}
|| cgcx.opts.target_triple.triple().starts_with("asmjs")
{
// nothing to do here
- } else if cgcx.opts.target_triple.triple().contains("windows")
- || cgcx.opts.target_triple.triple().contains("uefi")
- {
+ } else if cgcx.is_pe_coff {
let asm = "
.section .llvmbc,\"n\"
.section .llvmcmd,\"n\"
pub allocator_module_config: Arc<ModuleConfig>,
pub tm_factory: TargetMachineFactory<B>,
pub msvc_imps_needed: bool,
+ pub is_pe_coff: bool,
pub target_pointer_width: u32,
pub target_arch: String,
pub debuginfo: config::DebugInfo,
tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol)),
total_cgus,
msvc_imps_needed: msvc_imps_needed(tcx),
+ is_pe_coff: tcx.sess.target.is_like_windows,
target_pointer_width: tcx.sess.target.pointer_width,
target_arch: tcx.sess.target.arch.clone(),
debuginfo: tcx.sess.opts.debuginfo,
path: ast::Path,
fields: Vec<ast::Field>,
) -> P<ast::Expr> {
- self.expr(span, ast::ExprKind::Struct(path, fields, None))
+ self.expr(span, ast::ExprKind::Struct(path, fields, ast::StructRest::None))
}
pub fn expr_struct_ident(
&self,
pub fn matches_ns(&self, ns: Namespace) -> bool {
self.ns().map_or(true, |actual_ns| actual_ns == ns)
}
+
+ /// Returns whether such a resolved path can occur in a tuple struct/variant pattern
+ pub fn expected_in_tuple_struct_pat(&self) -> bool {
+ matches!(self, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..))
+ }
}
struct Canonicalizer<'cx, 'tcx> {
infcx: Option<&'cx InferCtxt<'cx, 'tcx>>,
tcx: TyCtxt<'tcx>,
- variables: SmallVec<[CanonicalVarInfo; 8]>,
+ variables: SmallVec<[CanonicalVarInfo<'tcx>; 8]>,
query_state: &'cx mut OriginalQueryValues<'tcx>,
// Note that indices is only used once `var_values` is big enough to be
// heap-allocated.
/// or returns an existing variable if `kind` has already been
/// seen. `kind` is expected to be an unbound variable (or
/// potentially a free region).
- fn canonical_var(&mut self, info: CanonicalVarInfo, kind: GenericArg<'tcx>) -> BoundVar {
+ fn canonical_var(&mut self, info: CanonicalVarInfo<'tcx>, kind: GenericArg<'tcx>) -> BoundVar {
let Canonicalizer { variables, query_state, indices, .. } = self;
let var_values = &mut query_state.var_values;
/// representing the region `r`; return a region referencing it.
fn canonical_var_for_region(
&mut self,
- info: CanonicalVarInfo,
+ info: CanonicalVarInfo<'tcx>,
r: ty::Region<'tcx>,
) -> ty::Region<'tcx> {
let var = self.canonical_var(info, r.into());
/// if `ty_var` is bound to anything; if so, canonicalize
/// *that*. Otherwise, create a new canonical variable for
/// `ty_var`.
- fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo, ty_var: Ty<'tcx>) -> Ty<'tcx> {
+ fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo<'tcx>, ty_var: Ty<'tcx>) -> Ty<'tcx> {
let infcx = self.infcx.expect("encountered ty-var without infcx");
let bound_to = infcx.shallow_resolve(ty_var);
if bound_to != ty_var {
/// `const_var`.
fn canonicalize_const_var(
&mut self,
- info: CanonicalVarInfo,
+ info: CanonicalVarInfo<'tcx>,
const_var: &'tcx ty::Const<'tcx>,
) -> &'tcx ty::Const<'tcx> {
let infcx = self.infcx.expect("encountered const-var without infcx");
fn instantiate_canonical_vars(
&self,
span: Span,
- variables: &List<CanonicalVarInfo>,
+ variables: &List<CanonicalVarInfo<'tcx>>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> CanonicalVarValues<'tcx> {
let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
fn instantiate_canonical_var(
&self,
span: Span,
- cv_info: CanonicalVarInfo,
+ cv_info: CanonicalVarInfo<'tcx>,
universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
) -> GenericArg<'tcx> {
match cv_info.kind {
self.tcx
.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(placeholder_mapped),
- ty: self.tcx.ty_error(), // FIXME(const_generics)
+ ty: name.ty,
})
.into()
}
self.tcx.mk_const(ty::Const {
val: ty::ConstKind::Placeholder(ty::PlaceholderConst {
universe: next_universe,
- name: bound_var,
+ name: ty::BoundConst { var: bound_var, ty },
}),
ty,
})
let mut builder = LintLevelMapBuilder { levels, tcx, store };
let krate = tcx.hir().krate();
+ builder.levels.id_to_set.reserve(krate.exported_macros.len() + 1);
+
let push = builder.levels.push(&krate.item.attrs, &store, true);
builder.levels.register_id(hir::CRATE_HIR_ID);
for macro_def in krate.exported_macros {
}
};
+ let attrs: Vec<_> = self.get_item_attrs(id, sess).collect();
SyntaxExtension::new(
sess,
kind,
helper_attrs,
self.root.edition,
Symbol::intern(name),
- &self.get_item_attrs(id, sess),
+ &attrs,
)
}
// within the crate. We only need this for fictive constructors,
// for other constructors correct visibilities
// were already encoded in metadata.
- let attrs = self.get_item_attrs(def_id.index, sess);
+ let attrs: Vec<_> =
+ self.get_item_attrs(def_id.index, sess).collect();
if sess.contains_name(&attrs, sym::non_exhaustive) {
let crate_def_id = self.local_def_id(CRATE_DEF_INDEX);
vis = ty::Visibility::Restricted(crate_def_id);
}
}
- fn get_item_variances(&self, id: DefIndex) -> Vec<ty::Variance> {
- self.root.tables.variances.get(self, id).unwrap_or_else(Lazy::empty).decode(self).collect()
+ fn get_item_variances(&'a self, id: DefIndex) -> impl Iterator<Item = ty::Variance> + 'a {
+ self.root.tables.variances.get(self, id).unwrap_or_else(Lazy::empty).decode(self)
}
fn get_ctor_kind(&self, node_id: DefIndex) -> CtorKind {
}
}
- fn get_item_attrs(&self, node_id: DefIndex, sess: &Session) -> Vec<ast::Attribute> {
+ fn get_item_attrs(
+ &'a self,
+ node_id: DefIndex,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = ast::Attribute> + 'a {
// The attributes for a tuple struct/variant are attached to the definition, not the ctor;
// we assume that someone passing in a tuple struct ctor is actually wanting to
// look at the definition
.get(self, item_id)
.unwrap_or_else(Lazy::empty)
.decode((self, sess))
- .collect::<Vec<_>>()
}
fn get_struct_field_names(&self, id: DefIndex, sess: &Session) -> Vec<Spanned<Symbol>> {
cdata.get_deprecation(def_id.index).map(DeprecationEntry::external)
}
item_attrs => { tcx.arena.alloc_from_iter(
- cdata.get_item_attrs(def_id.index, tcx.sess).into_iter()
+ cdata.get_item_attrs(def_id.index, tcx.sess)
) }
fn_arg_names => { cdata.get_fn_param_names(tcx, def_id.index) }
rendered_const => { cdata.get_rendered_const(def_id.index) }
let span = data.get_span(id.index, sess);
- // Mark the attrs as used
- let attrs = data.get_item_attrs(id.index, sess);
- for attr in attrs.iter() {
- sess.mark_attr_used(attr);
- }
+ let attrs = data.get_item_attrs(id.index, sess).collect();
let ident = data.item_ident(id.index, sess);
ident,
id: ast::DUMMY_NODE_ID,
span,
- attrs: attrs.to_vec(),
+ attrs,
kind: ast::ItemKind::MacroDef(data.get_macro(id.index, sess)),
vis: ast::Visibility {
span: span.shrink_to_lo(),
pub value: V,
}
-pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo>;
+pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>;
/// A set of values corresponding to the canonical variables from some
/// `Canonical`. You can give these values to
/// a copy of the canonical value in some other inference context,
/// with fresh inference variables replacing the canonical values.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
-pub struct CanonicalVarInfo {
- pub kind: CanonicalVarKind,
+pub struct CanonicalVarInfo<'tcx> {
+ pub kind: CanonicalVarKind<'tcx>,
}
-impl CanonicalVarInfo {
+impl<'tcx> CanonicalVarInfo<'tcx> {
pub fn universe(&self) -> ty::UniverseIndex {
self.kind.universe()
}
/// in the type-theory sense of the term -- i.e., a "meta" type system
/// that analyzes type-like values.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
-pub enum CanonicalVarKind {
+pub enum CanonicalVarKind<'tcx> {
/// Some kind of type inference variable.
Ty(CanonicalTyVarKind),
Const(ty::UniverseIndex),
/// A "placeholder" that represents "any const".
- PlaceholderConst(ty::PlaceholderConst),
+ PlaceholderConst(ty::PlaceholderConst<'tcx>),
}
-impl CanonicalVarKind {
+impl<'tcx> CanonicalVarKind<'tcx> {
pub fn universe(self) -> ty::UniverseIndex {
match self {
CanonicalVarKind::Ty(kind) => match kind {
ty::Binder<ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>;
CloneTypeFoldableAndLiftImpls! {
- crate::infer::canonical::Certainty,
- crate::infer::canonical::CanonicalVarInfo,
- crate::infer::canonical::CanonicalVarKind,
+ for <'tcx> {
+ crate::infer::canonical::Certainty,
+ crate::infer::canonical::CanonicalVarInfo<'tcx>,
+ crate::infer::canonical::CanonicalVarKind<'tcx>,
+ }
}
CloneTypeFoldableImpls! {
impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for CanonicalVarInfos<'tcx> {
fn decode(decoder: &mut D) -> Result<Self, D::Error> {
let len = decoder.read_usize()?;
- let interned: Result<Vec<CanonicalVarInfo>, _> =
+ let interned: Result<Vec<CanonicalVarInfo<'tcx>>, _> =
(0..len).map(|_| Decodable::decode(decoder)).collect();
Ok(decoder.tcx().intern_canonical_var_infos(interned?.as_slice()))
}
Bound(ty::DebruijnIndex, ty::BoundVar),
/// A placeholder const - universally quantified higher-ranked const.
- Placeholder(ty::PlaceholderConst),
+ Placeholder(ty::PlaceholderConst<'tcx>),
/// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other
/// variants when the code is monomorphic enough for that.
type_: InternedSet<'tcx, TyS<'tcx>>,
type_list: InternedSet<'tcx, List<Ty<'tcx>>>,
substs: InternedSet<'tcx, InternalSubsts<'tcx>>,
- canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo>>,
+ canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo<'tcx>>>,
region: InternedSet<'tcx, RegionKind>,
existential_predicates: InternedSet<'tcx, List<ExistentialPredicate<'tcx>>>,
predicate: InternedSet<'tcx, PredicateInner<'tcx>>,
nop_list_lift! {type_list; Ty<'a> => Ty<'tcx>}
nop_list_lift! {existential_predicates; ExistentialPredicate<'a> => ExistentialPredicate<'tcx>}
nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>}
-nop_list_lift! {canonical_var_infos; CanonicalVarInfo => CanonicalVarInfo}
+nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>}
nop_list_lift! {projs; ProjectionKind => ProjectionKind}
// This is the impl for `&'a InternalSubsts<'a>`.
slice_interners!(
type_list: _intern_type_list(Ty<'tcx>),
substs: _intern_substs(GenericArg<'tcx>),
- canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo),
+ canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo<'tcx>),
existential_predicates: _intern_existential_predicates(ExistentialPredicate<'tcx>),
predicates: _intern_predicates(Predicate<'tcx>),
projs: _intern_projs(ProjectionKind),
if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) }
}
- pub fn intern_canonical_var_infos(self, ts: &[CanonicalVarInfo]) -> CanonicalVarInfos<'tcx> {
+ pub fn intern_canonical_var_infos(
+ self,
+ ts: &[CanonicalVarInfo<'tcx>],
+ ) -> CanonicalVarInfos<'tcx> {
if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) }
}
}
}
-/// The "placeholder index" fully defines a placeholder region.
-/// Placeholder regions are identified by both a **universe** as well
-/// as a "bound-region" within that universe. The `bound_region` is
-/// basically a name -- distinct bound regions within the same
-/// universe are just two regions with an unknown relationship to one
+/// The "placeholder index" fully defines a placeholder region, type, or const. Placeholders are
+/// identified by both a universe, as well as a name residing within that universe. Distinct bound
+/// regions/types/consts within the same universe simply have an unknown relationship to one
/// another.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)]
pub struct Placeholder<T> {
pub type PlaceholderType = Placeholder<BoundVar>;
-pub type PlaceholderConst = Placeholder<BoundVar>;
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+#[derive(TyEncodable, TyDecodable, PartialOrd, Ord)]
+pub struct BoundConst<'tcx> {
+ pub var: BoundVar,
+ pub ty: Ty<'tcx>,
+}
+
+pub type PlaceholderConst<'tcx> = Placeholder<BoundConst<'tcx>>;
/// A `DefId` which is potentially bundled with its corresponding generic parameter
/// in case `did` is a const argument.
recover: bool,
) -> PResult<'a, P<Expr>> {
let mut fields = Vec::new();
- let mut base = None;
+ let mut base = ast::StructRest::None;
let mut recover_async = false;
attrs.extend(self.parse_inner_attributes()?);
while self.token != token::CloseDelim(token::Brace) {
if self.eat(&token::DotDot) {
let exp_span = self.prev_token.span;
+ // We permit `.. }` on the left-hand side of a destructuring assignment.
+ if self.check(&token::CloseDelim(token::Brace)) {
+ self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span);
+ base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi());
+ break;
+ }
match self.parse_expr() {
- Ok(e) => base = Some(e),
+ Ok(e) => base = ast::StructRest::Base(e),
Err(mut e) if recover => {
e.emit();
self.recover_stmt();
/// Records all tokens consumed by the provided callback,
/// including the current token. These tokens are collected
/// into a `LazyTokenStream`, and returned along with the result
- /// of the callback. The returned `LazyTokenStream` will be `None`
- /// if not tokens were captured.
+ /// of the callback.
///
/// Note: If your callback consumes an opening delimiter
/// (including the case where you call `collect_tokens`
let ret = f(self)?;
- // We didn't capture any tokens
- let num_calls = self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls;
- if num_calls == 0 {
- return Ok((ret, None));
- }
-
// Produces a `TokenStream` on-demand. Using `cursor_snapshot`
// and `num_calls`, we can reconstruct the `TokenStream` seen
// by the callback. This allows us to avoid producing a `TokenStream`
// if it is never needed - for example, a captured `macro_rules!`
// argument that is never passed to a proc macro.
+ // In practice token stream creation happens rarely compared to
+ // calls to `collect_tokens` (see some statistics in #78736),
+ // so we are doing as little up-front work as possible.
//
// This also makes `Parser` very cheap to clone, since
// there is no intermediate collection buffer to clone.
let lazy_impl = LazyTokenStreamImpl {
start_token,
+ num_calls: self.token_cursor.num_next_calls - cursor_snapshot.num_next_calls,
cursor_snapshot,
- num_calls,
desugar_doc_comments: self.desugar_doc_comments,
};
Ok((ret, Some(LazyTokenStream::new(lazy_impl))))
use crate::def_collector::collect_definitions;
use crate::imports::{Import, ImportKind};
-use crate::macros::{MacroRulesBinding, MacroRulesScope};
+use crate::macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
use crate::Namespace::{self, MacroNS, TypeNS, ValueNS};
use crate::{CrateLint, Determinacy, PathResult, ResolutionError, VisResolutionError};
use crate::{
&mut self,
fragment: &AstFragment,
parent_scope: ParentScope<'a>,
- ) -> MacroRulesScope<'a> {
+ ) -> MacroRulesScopeRef<'a> {
collect_definitions(self, fragment, parent_scope.expansion);
let mut visitor = BuildReducedGraphVisitor { r: self, parent_scope };
fragment.visit_with(&mut visitor);
let def_id = module.def_id().expect("unpopulated module without a def-id");
for child in self.cstore().item_children_untracked(def_id, self.session) {
let child = child.map_id(|_| panic!("unexpected id"));
- BuildReducedGraphVisitor { r: self, parent_scope: ParentScope::module(module) }
+ let parent_scope = ParentScope::module(module, self);
+ BuildReducedGraphVisitor { r: self, parent_scope }
.build_reduced_graph_for_external_crate_res(child);
}
}
false
}
- fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScope<'a> {
+ fn visit_invoc(&mut self, id: NodeId) -> MacroRulesScopeRef<'a> {
let invoc_id = id.placeholder_to_expn_id();
self.parent_scope.module.unexpanded_invocations.borrow_mut().insert(invoc_id);
let old_parent_scope = self.r.invocation_parent_scopes.insert(invoc_id, self.parent_scope);
assert!(old_parent_scope.is_none(), "invocation data is reset for an invocation");
- MacroRulesScope::Invocation(invoc_id)
+ let scope = self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Invocation(invoc_id));
+ self.r.invocation_macro_rules_scopes.entry(invoc_id).or_default().insert(scope);
+ scope
}
fn proc_macro_stub(&self, item: &ast::Item) -> Option<(MacroKind, Ident, Span)> {
}
}
- fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScope<'a> {
+ fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScopeRef<'a> {
let parent_scope = self.parent_scope;
let expansion = parent_scope.expansion;
let def_id = self.r.local_def_id(item.id);
self.insert_unused_macro(ident, def_id, item.id, span);
}
self.r.visibilities.insert(def_id, vis);
- MacroRulesScope::Binding(self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding {
- parent_macro_rules_scope: parent_scope.macro_rules,
- binding,
- ident,
- }))
+ self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Binding(
+ self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding {
+ parent_macro_rules_scope: parent_scope.macro_rules,
+ binding,
+ ident,
+ }),
+ ))
} else {
let module = parent_scope.module;
let vis = match item.kind {
}
}
Scope::MacroRules(macro_rules_scope) => {
- if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope {
+ if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope.get() {
let res = macro_rules_binding.binding.res();
if filter_fn(res) {
suggestions
_,
)
| Res::SelfCtor(..)),
- PathSource::TupleStruct(..) => {
- matches!(res, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..))
- }
+ PathSource::TupleStruct(..) => res.expected_in_tuple_struct_pat(),
PathSource::Struct => matches!(res, Res::Def(
DefKind::Struct
| DefKind::Union
// During late resolution we only track the module component of the parent scope,
// although it may be useful to track other components as well for diagnostics.
let graph_root = resolver.graph_root;
- let parent_scope = ParentScope::module(graph_root);
+ let parent_scope = ParentScope::module(graph_root, resolver);
let start_rib_kind = ModuleRibKind(graph_root);
LateResolutionVisitor {
r: resolver,
use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion};
use imports::{Import, ImportKind, ImportResolver, NameResolution};
use late::{HasGenericParams, PathSource, Rib, RibKind::*};
-use macros::{MacroRulesBinding, MacroRulesScope};
+use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
type Res = def::Res<NodeId>;
enum Scope<'a> {
DeriveHelpers(ExpnId),
DeriveHelpersCompat,
- MacroRules(MacroRulesScope<'a>),
+ MacroRules(MacroRulesScopeRef<'a>),
CrateRoot,
Module(Module<'a>),
RegisteredAttrs,
pub struct ParentScope<'a> {
module: Module<'a>,
expansion: ExpnId,
- macro_rules: MacroRulesScope<'a>,
+ macro_rules: MacroRulesScopeRef<'a>,
derives: &'a [ast::Path],
}
impl<'a> ParentScope<'a> {
/// Creates a parent scope with the passed argument used as the module scope component,
/// and other scope components set to default empty values.
- pub fn module(module: Module<'a>) -> ParentScope<'a> {
+ pub fn module(module: Module<'a>, resolver: &Resolver<'a>) -> ParentScope<'a> {
ParentScope {
module,
expansion: ExpnId::root(),
- macro_rules: MacroRulesScope::Empty,
+ macro_rules: resolver.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty),
derives: &[],
}
}
invocation_parent_scopes: FxHashMap<ExpnId, ParentScope<'a>>,
/// `macro_rules` scopes *produced* by expanding the macro invocations,
/// include all the `macro_rules` items and other invocations generated by them.
- output_macro_rules_scopes: FxHashMap<ExpnId, MacroRulesScope<'a>>,
+ output_macro_rules_scopes: FxHashMap<ExpnId, MacroRulesScopeRef<'a>>,
+ /// References to all `MacroRulesScope::Invocation(invoc_id)`s, used to update such scopes
+ /// when their corresponding `invoc_id`s get expanded.
+ invocation_macro_rules_scopes: FxHashMap<ExpnId, FxHashSet<MacroRulesScopeRef<'a>>>,
/// Helper attributes that are in scope for the given expansion.
helper_attrs: FxHashMap<ExpnId, Vec<Ident>>,
fn alloc_name_resolution(&'a self) -> &'a RefCell<NameResolution<'a>> {
self.name_resolutions.alloc(Default::default())
}
+ fn alloc_macro_rules_scope(&'a self, scope: MacroRulesScope<'a>) -> MacroRulesScopeRef<'a> {
+ PtrKey(self.dropless.alloc(Cell::new(scope)))
+ }
fn alloc_macro_rules_binding(
&'a self,
binding: MacroRulesBinding<'a>,
let (registered_attrs, registered_tools) =
macros::registered_attrs_and_tools(session, &krate.attrs);
- let mut invocation_parent_scopes = FxHashMap::default();
- invocation_parent_scopes.insert(ExpnId::root(), ParentScope::module(graph_root));
-
let features = session.features_untracked();
let non_macro_attr =
|mark_used| Lrc::new(SyntaxExtension::non_macro_attr(mark_used, session.edition()));
- Resolver {
+ let mut resolver = Resolver {
session,
definitions,
dummy_ext_bang: Lrc::new(SyntaxExtension::dummy_bang(session.edition())),
dummy_ext_derive: Lrc::new(SyntaxExtension::dummy_derive(session.edition())),
non_macro_attrs: [non_macro_attr(false), non_macro_attr(true)],
- invocation_parent_scopes,
+ invocation_parent_scopes: Default::default(),
output_macro_rules_scopes: Default::default(),
+ invocation_macro_rules_scopes: Default::default(),
helper_attrs: Default::default(),
local_macro_def_scopes: FxHashMap::default(),
name_already_seen: FxHashMap::default(),
invocation_parents,
next_disambiguator: Default::default(),
trait_impl_items: Default::default(),
- }
+ };
+
+ let root_parent_scope = ParentScope::module(graph_root, &resolver);
+ resolver.invocation_parent_scopes.insert(ExpnId::root(), root_parent_scope);
+
+ resolver
}
pub fn next_node_id(&mut self) -> NodeId {
}
Scope::DeriveHelpers(..) => Scope::DeriveHelpersCompat,
Scope::DeriveHelpersCompat => Scope::MacroRules(parent_scope.macro_rules),
- Scope::MacroRules(macro_rules_scope) => match macro_rules_scope {
+ Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
MacroRulesScope::Binding(binding) => {
Scope::MacroRules(binding.parent_macro_rules_scope)
}
}
};
let module = self.get_module(module_id);
- let parent_scope = &ParentScope::module(module);
+ let parent_scope = &ParentScope::module(module, self);
let res = self.resolve_ast_path(&path, ns, parent_scope).map_err(|_| ())?;
Ok((path, res))
}
use rustc_ast_pretty::pprust;
use rustc_attr::StabilityLevel;
use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::ptr_key::PtrKey;
use rustc_errors::struct_span_err;
use rustc_expand::base::{Indeterminate, InvocationRes, ResolverExpand, SyntaxExtension};
use rustc_expand::compile_declarative_macro;
use rustc_data_structures::sync::Lrc;
use rustc_span::hygiene::{AstPass, MacroKind};
+use std::cell::Cell;
use std::{mem, ptr};
type Res = def::Res<NodeId>;
pub struct MacroRulesBinding<'a> {
crate binding: &'a NameBinding<'a>,
/// `macro_rules` scope into which the `macro_rules` item was planted.
- crate parent_macro_rules_scope: MacroRulesScope<'a>,
+ crate parent_macro_rules_scope: MacroRulesScopeRef<'a>,
crate ident: Ident,
}
Invocation(ExpnId),
}
+/// `macro_rules!` scopes are always kept by reference and inside a cell.
+/// The reason is that we update all scopes with value `MacroRulesScope::Invocation(invoc_id)`
+/// in-place immediately after `invoc_id` gets expanded.
+/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
+/// which usually grow linearly with the number of macro invocations
+/// in a module (including derives) and hurt performance.
+pub(crate) type MacroRulesScopeRef<'a> = PtrKey<'a, Cell<MacroRulesScope<'a>>>;
+
// Macro namespace is separated into two sub-namespaces, one for bang macros and
// one for attribute-like macros (attributes, derives).
// We ignore resolutions from one sub-namespace when searching names in scope for another.
let output_macro_rules_scope = self.build_reduced_graph(fragment, parent_scope);
self.output_macro_rules_scopes.insert(expansion, output_macro_rules_scope);
+ // Update all `macro_rules` scopes referring to this invocation. This is an optimization
+ // used to avoid long scope chains, see the comments on `MacroRulesScopeRef`.
+ if let Some(invocation_scopes) = self.invocation_macro_rules_scopes.remove(&expansion) {
+ for invocation_scope in &invocation_scopes {
+ invocation_scope.set(output_macro_rules_scope.get());
+ }
+ // All `macro_rules` scopes that previously referred to `expansion`
+ // are now rerouted to its output scope, if it's also an invocation.
+ if let MacroRulesScope::Invocation(invoc_id) = output_macro_rules_scope.get() {
+ self.invocation_macro_rules_scopes
+ .entry(invoc_id)
+ .or_default()
+ .extend(invocation_scopes);
+ }
+ }
+
parent_scope.module.unexpanded_invocations.borrow_mut().remove(&expansion);
}
}
result
}
- Scope::MacroRules(macro_rules_scope) => match macro_rules_scope {
+ Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
MacroRulesScope::Binding(macro_rules_binding)
if ident == macro_rules_binding.ident =>
{
path: &'tcx hir::QPath<'tcx>,
fields: &'tcx [hir::Field<'tcx>],
variant: &'tcx ty::VariantDef,
- base: Option<&'tcx hir::Expr<'tcx>>,
+ rest: Option<&'tcx hir::Expr<'tcx>>,
) {
if let Some(struct_lit_data) = self.save_ctxt.get_expr_data(ex) {
if let hir::QPath::Resolved(_, path) = path {
}
}
- walk_list!(self, visit_expr, base);
+ if let Some(base) = rest {
+ self.visit_expr(&base);
+ }
}
fn process_method_call(
debug!("visit_expr {:?}", ex.kind);
self.process_macro_use(ex.span);
match ex.kind {
- hir::ExprKind::Struct(ref path, ref fields, ref base) => {
+ hir::ExprKind::Struct(ref path, ref fields, ref rest) => {
let hir_expr = self.save_ctxt.tcx.hir().expect_expr(ex.hir_id);
let adt = match self.save_ctxt.typeck_results().expr_ty_opt(&hir_expr) {
Some(ty) if ty.ty_adt_def().is_some() => ty.ty_adt_def().unwrap(),
}
};
let res = self.save_ctxt.get_path_res(hir_expr.hir_id);
- self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *base)
+ self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *rest)
}
hir::ExprKind::MethodCall(ref seg, _, args, _) => {
self.process_method_call(ex, seg, args)
return;
}
+ let (_, line_hi, col_hi) = match ctx.byte_pos_to_line_and_col(span.hi) {
+ Some(pos) => pos,
+ None => {
+ Hash::hash(&TAG_INVALID_SPAN, hasher);
+ span.ctxt.hash_stable(ctx, hasher);
+ return;
+ }
+ };
+
Hash::hash(&TAG_VALID_SPAN, hasher);
// We truncate the stable ID hash and line and column numbers. The chances
// of causing a collision this way should be minimal.
Hash::hash(&(file_lo.name_hash as u64), hasher);
- let col = (col_lo.0 as u64) & 0xFF;
- let line = ((line_lo as u64) & 0xFF_FF_FF) << 8;
- let len = ((span.hi - span.lo).0 as u64) << 32;
- let line_col_len = col | line | len;
- Hash::hash(&line_col_len, hasher);
+ // Hash both the length and the end location (line/column) of a span. If we
+ // hash only the length, for example, then two otherwise equal spans with
+ // different end locations will have the same hash. This can cause a problem
+ // during incremental compilation wherein a previous result for a query that
+ // depends on the end location of a span will be incorrectly reused when the
+ // end location of the span it depends on has changed (see issue #74890). A
+ // similar analysis applies if some query depends specifically on the length
+ // of the span, but we only hash the end location. So hash both.
+
+ let col_lo_trunc = (col_lo.0 as u64) & 0xFF;
+ let line_lo_trunc = ((line_lo as u64) & 0xFF_FF_FF) << 8;
+ let col_hi_trunc = ((col_hi.0 as u64) & 0xFF) << 32;
+ let line_hi_trunc = ((line_hi as u64) & 0xFF_FF_FF) << 40;
+ let col_line = col_lo_trunc | line_lo_trunc | col_hi_trunc | line_hi_trunc;
+ let len = (span.hi - span.lo).0;
+ Hash::hash(&col_line, hasher);
+ Hash::hash(&len, hasher);
span.ctxt.hash_stable(ctx, hasher);
}
}
mod mips;
mod nvptx;
mod riscv;
+mod spirv;
mod x86;
pub use aarch64::{AArch64InlineAsmReg, AArch64InlineAsmRegClass};
pub use mips::{MipsInlineAsmReg, MipsInlineAsmRegClass};
pub use nvptx::{NvptxInlineAsmReg, NvptxInlineAsmRegClass};
pub use riscv::{RiscVInlineAsmReg, RiscVInlineAsmRegClass};
+pub use spirv::{SpirVInlineAsmReg, SpirVInlineAsmRegClass};
pub use x86::{X86InlineAsmReg, X86InlineAsmRegClass};
#[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash)]
Hexagon,
Mips,
Mips64,
+ SpirV,
}
impl FromStr for InlineAsmArch {
"hexagon" => Ok(Self::Hexagon),
"mips" => Ok(Self::Mips),
"mips64" => Ok(Self::Mips64),
+ "spirv" => Ok(Self::SpirV),
_ => Err(()),
}
}
Nvptx(NvptxInlineAsmReg),
Hexagon(HexagonInlineAsmReg),
Mips(MipsInlineAsmReg),
+ SpirV(SpirVInlineAsmReg),
}
impl InlineAsmReg {
InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
Self::Mips(MipsInlineAsmReg::parse(arch, has_feature, target, &name)?)
}
+ InlineAsmArch::SpirV => {
+ Self::SpirV(SpirVInlineAsmReg::parse(arch, has_feature, target, &name)?)
+ }
})
}
Nvptx(NvptxInlineAsmRegClass),
Hexagon(HexagonInlineAsmRegClass),
Mips(MipsInlineAsmRegClass),
+ SpirV(SpirVInlineAsmRegClass),
}
impl InlineAsmRegClass {
Self::Nvptx(r) => r.name(),
Self::Hexagon(r) => r.name(),
Self::Mips(r) => r.name(),
+ Self::SpirV(r) => r.name(),
}
}
Self::Nvptx(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Nvptx),
Self::Hexagon(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Hexagon),
Self::Mips(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Mips),
+ Self::SpirV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::SpirV),
}
}
Self::Nvptx(r) => r.suggest_modifier(arch, ty),
Self::Hexagon(r) => r.suggest_modifier(arch, ty),
Self::Mips(r) => r.suggest_modifier(arch, ty),
+ Self::SpirV(r) => r.suggest_modifier(arch, ty),
}
}
Self::Nvptx(r) => r.default_modifier(arch),
Self::Hexagon(r) => r.default_modifier(arch),
Self::Mips(r) => r.default_modifier(arch),
+ Self::SpirV(r) => r.default_modifier(arch),
}
}
Self::Nvptx(r) => r.supported_types(arch),
Self::Hexagon(r) => r.supported_types(arch),
Self::Mips(r) => r.supported_types(arch),
+ Self::SpirV(r) => r.supported_types(arch),
}
}
InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
Self::Mips(MipsInlineAsmRegClass::parse(arch, name)?)
}
+ InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(arch, name)?),
})
})
}
Self::Nvptx(r) => r.valid_modifiers(arch),
Self::Hexagon(r) => r.valid_modifiers(arch),
Self::Mips(r) => r.valid_modifiers(arch),
+ Self::SpirV(r) => r.valid_modifiers(arch),
}
}
}
mips::fill_reg_map(arch, has_feature, target, &mut map);
map
}
+ InlineAsmArch::SpirV => {
+ let mut map = spirv::regclass_map();
+ spirv::fill_reg_map(arch, has_feature, target, &mut map);
+ map
+ }
}
}
--- /dev/null
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+
+def_reg_class! {
+ SpirV SpirVInlineAsmRegClass {
+ reg,
+ }
+}
+
+impl SpirVInlineAsmRegClass {
+ // SPIR-V inline asm defines no template modifiers, so every
+ // modifier-related query below returns an empty/None answer.
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ // There is only one register class (`reg`), so there is never an
+ // alternative class to suggest for any operand type.
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ // No modifiers exist (see `valid_modifiers`), so none can be suggested.
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ // Likewise, no default modifier.
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ // Scalar integers and floats are the supported operand types for the
+ // single `reg` class; `_` means no target feature is required for them.
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<&'static str>)] {
+ match self {
+ Self::reg => {
+ types! { _: I8, I16, I32, I64, F32, F64; }
+ }
+ }
+ }
+}
+
+def_regs! {
+ // SPIR-V is SSA-based, it does not have registers.
+ SpirV SpirVInlineAsmReg SpirVInlineAsmRegClass {}
+}
pub fn target() -> Target {
let opts = TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_owned()),
features: "+strict-align,+neon,+fp-armv8".to_string(),
pub fn target() -> Target {
let opts = TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_owned()),
features: "+strict-align,-neon,-fp-armv8".to_string(),
options: TargetOptions {
endian: "big".to_string(),
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
executables: true,
linker: Some("rust-lld".to_owned()),
options: TargetOptions {
endian: "big".to_string(),
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
executables: true,
linker: Some("rust-lld".to_owned()),
// bare-metal binaries (the `gcc` linker has the advantage that it knows where C
// libraries and crt*.o are but it's not much of an advantage here); LLD is also
// faster
-// - `os` set to `none`. rationale: matches `thumb` targets
-// - `env` and `vendor` are set to an empty string. rationale: matches `thumb`
-// targets
// - `panic_strategy` set to `abort`. rationale: matches `thumb` targets
// - `relocation-model` set to `static`; also no PIE, no relro and no dynamic
// linking. rationale: matches `thumb` targets
pub fn target() -> Target {
let opts = TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_owned()),
features: "+v7,+thumb2,+soft-float,-neon,+strict-align".to_string(),
pub fn target() -> Target {
let opts = TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_owned()),
features: "+v7,+vfp3,-d32,+thumb2,-neon,+strict-align".to_string(),
arch: "arm".to_string(),
options: TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
executables: true,
linker: Some("rust-lld".to_owned()),
arch: "arm".to_string(),
options: TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
executables: true,
linker: Some("rust-lld".to_owned()),
pointer_width: 16,
options: TargetOptions {
c_int_width: "16".to_string(),
- os: "unknown".to_string(),
cpu: target_cpu.clone(),
exe_suffix: ".elf".to_string(),
TargetOptions {
os: "fuchsia".to_string(),
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
linker: Some("rust-lld".to_owned()),
lld_flavor: LldFlavor::Ld,
arch: "mips".to_string(),
options: TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
cpu: "mips32r2".to_string(),
features: "+mips32r2,+soft-float,+noabicalls".to_string(),
pub endian: String,
/// Width of c_int type. Defaults to "32".
pub c_int_width: String,
- /// OS name to use for conditional compilation. Defaults to "none".
+ /// OS name to use for conditional compilation (`target_os`). Defaults to "none".
+ /// "none" implies a bare metal target without `std` library.
+ /// A couple of targets having `std` also use "unknown" as an `os` value,
+ /// but they are exceptions.
pub os: String,
- /// Environment name to use for conditional compilation. Defaults to "".
+ /// Environment name to use for conditional compilation (`target_env`). Defaults to "".
pub env: String,
- /// Vendor name to use for conditional compilation. Defaults to "unknown".
+ /// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown".
pub vendor: String,
/// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
/// on the command line. Defaults to `LinkerFlavor::Gcc`.
/// Only useful for compiling against Illumos/Solaris,
/// as they have a different set of linker flags. Defaults to false.
pub is_like_solaris: bool,
- /// Whether the target toolchain is like Windows'. Only useful for compiling against Windows,
- /// only really used for figuring out how to find libraries, since Windows uses its own
- /// library naming convention. Defaults to false.
+ /// Whether the target is like Windows.
+ /// This is a combination of several more specific properties represented as a single flag:
+ /// - The target uses a Windows ABI,
+ /// - uses PE/COFF as a format for object code,
+ /// - uses Windows-style dllexport/dllimport for shared libraries,
+ /// - uses import libraries and .def files for symbol exports,
+ /// - executables support setting a subsystem.
pub is_like_windows: bool,
+ /// Whether the target is like MSVC.
+ /// This is a combination of several more specific properties represented as a single flag:
+ /// - The target has all the properties from `is_like_windows`
+ /// (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test),
+ /// - has some MSVC-specific Windows ABI properties,
+ /// - uses a link.exe-like linker,
+ /// - uses CodeView/PDB for debuginfo and natvis for its visualization,
+ /// - uses SEH-based unwinding,
+ /// - supports control flow guard mechanism.
pub is_like_msvc: bool,
/// Whether the target toolchain is like Emscripten's. Only useful for compiling with
/// Emscripten toolchain.
options: TargetOptions {
c_int_width: "16".to_string(),
- vendor: String::new(),
executables: true,
// The LLVM backend currently can't generate object files. To
impl Target {
fn check_consistency(&self) {
+ assert!(self.is_like_windows || !self.is_like_msvc);
// Check that LLD with the given flavor is treated identically to the linker it emulates.
// If your target really needs to deviate from the rules below, except it and document the
// reasons.
|| self.linker_flavor == LinkerFlavor::Lld(LldFlavor::Link),
self.lld_flavor == LldFlavor::Link,
);
+ assert_eq!(self.is_like_msvc, self.lld_flavor == LldFlavor::Link);
for args in &[
&self.pre_link_args,
&self.late_link_args,
&& self.post_link_objects_fallback.is_empty())
|| self.crt_objects_fallback.is_some()
);
+ // Keep the default "unknown" vendor instead.
+ assert_ne!(self.vendor, "");
+ if !self.can_use_os_unknown() {
+ // Keep the default "none" for bare metal targets instead.
+ assert_ne!(self.os, "unknown");
+ }
+ }
+
+ // Add your target to the whitelist if it has `std` library
+ // and you certainly want "unknown" for the OS name.
+ fn can_use_os_unknown(&self) -> bool {
+ self.llvm_target == "wasm32-unknown-unknown"
+ || (self.env == "sgx" && self.vendor == "fortanix")
}
}
pub fn opts() -> TargetOptions {
// See rust-lang/rfcs#1645 for a discussion about these defaults
TargetOptions {
- vendor: String::new(),
linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
executables: true,
// In most cases, LLD is good enough
stack_probes: true,
singlethread: true,
linker: Some("rust-lld".to_string()),
- // FIXME: This should likely be `true` inherited from `msvc_base`
- // because UEFI follows Windows ABI and uses PE/COFF.
- // The `false` is probably causing ABI bugs right now.
- is_like_windows: false,
- // FIXME: This should likely be `true` inherited from `msvc_base`
- // because UEFI follows Windows ABI and uses PE/COFF.
- // The `false` is probably causing ABI bugs right now.
- is_like_msvc: false,
-
..base
}
}
let mut options = wasm32_base::options();
options.os = "wasi".to_string();
- options.vendor = String::new();
options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
options
.pre_link_args
all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());
// Keep only those bounds which may apply, and propagate overflow if it occurs.
- let mut param_candidates = vec![];
for bound in matching_bounds {
let wc = self.evaluate_where_clause(stack, bound)?;
if wc.may_apply() {
- param_candidates.push(ParamCandidate(bound));
+ candidates.vec.push(ParamCandidate(bound));
}
}
- candidates.vec.extend(param_candidates);
-
Ok(())
}
let mut end = self.len();
while end > 1 {
end -= 1;
- self.data.swap(0, end);
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
+ // so it's always a valid index to access.
+ // It is safe to access index 0 (i.e. `ptr`), because
+ // 1 <= end < self.len(), which means self.len() >= 2.
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ ptr::swap(ptr, ptr.add(end));
+ }
self.sift_down_range(0, end);
}
self.into_vec()
unsafe {
let mut hole = Hole::new(&mut self.data, pos);
let mut child = 2 * pos + 1;
- while child < end {
- let right = child + 1;
+ while child < end - 1 {
// compare with the greater of the two children
- if right < end && hole.get(child) <= hole.get(right) {
- child = right;
- }
+ child += (hole.get(child) <= hole.get(child + 1)) as usize;
// if we are already in order, stop.
if hole.element() >= hole.get(child) {
- break;
+ return;
}
hole.move_to(child);
child = 2 * hole.pos() + 1;
}
+ if child == end - 1 && hole.element() < hole.get(child) {
+ hole.move_to(child);
+ }
}
}
unsafe {
let mut hole = Hole::new(&mut self.data, pos);
let mut child = 2 * pos + 1;
- while child < end {
- let right = child + 1;
- // compare with the greater of the two children
- if right < end && hole.get(child) <= hole.get(right) {
- child = right;
- }
+ while child < end - 1 {
+ child += (hole.get(child) <= hole.get(child + 1)) as usize;
hole.move_to(child);
child = 2 * hole.pos() + 1;
}
+ if child == end - 1 {
+ hole.move_to(child);
+ }
pos = hole.pos;
}
self.sift_up(start, pos);
/// attacks such as HashDoS.
///
/// The hashing algorithm can be replaced on a per-`HashMap` basis using the
-/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many
-/// alternative algorithms are available on crates.io, such as the [`fnv`] crate.
+/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods.
+/// There are many alternative [hashing algorithms available on crates.io].
///
/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although
/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`.
/// The original C++ version of SwissTable can be found [here], and this
/// [CppCon talk] gives an overview of how the algorithm works.
///
+/// [hashing algorithms available on crates.io]: https://crates.io/keywords/hasher
/// [SwissTable]: https://abseil.io/blog/20180927-swisstables
/// [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
/// [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
/// [`default`]: Default::default
/// [`with_hasher`]: Self::with_hasher
/// [`with_capacity_and_hasher`]: Self::with_capacity_and_hasher
-/// [`fnv`]: https://crates.io/crates/fnv
///
/// ```
/// use std::collections::HashMap;
/// the length of the `to` file as reported by `metadata`.
///
/// If you’re wanting to copy the contents of one file to another and you’re
-/// working with [`File`]s, see the [`io::copy`] function.
+/// working with [`File`]s, see the [`io::copy()`] function.
///
/// # Platform-specific behavior
///
--- /dev/null
+use crate::io::{self, ErrorKind, Read, Write};
+use crate::mem::MaybeUninit;
+
+/// Copies the entire contents of a reader into a writer.
+///
+/// This function will continuously read data from `reader` and then
+/// write it into `writer` in a streaming fashion until `reader`
+/// returns EOF.
+///
+/// On success, the total number of bytes that were copied from
+/// `reader` to `writer` is returned.
+///
+/// If you’re wanting to copy the contents of one file to another and you’re
+/// working with filesystem paths, see the [`fs::copy`] function.
+///
+/// [`fs::copy`]: crate::fs::copy
+///
+/// # Errors
+///
+/// This function will return an error immediately if any call to [`read`] or
+/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are
+/// handled by this function and the underlying operation is retried.
+///
+/// [`read`]: Read::read
+/// [`write`]: Write::write
+///
+/// # Examples
+///
+/// ```
+/// use std::io;
+///
+/// fn main() -> io::Result<()> {
+///     let mut reader: &[u8] = b"hello";
+///     let mut writer: Vec<u8> = vec![];
+///
+///     io::copy(&mut reader, &mut writer)?;
+///
+///     assert_eq!(&b"hello"[..], &writer[..]);
+///     Ok(())
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> io::Result<u64>
+where
+ R: Read,
+ W: Write,
+{
+ cfg_if::cfg_if! {
+ if #[cfg(any(target_os = "linux", target_os = "android"))] {
+ // Linux/Android: delegate to the platform specialization in
+ // `sys::kernel_copy` (chosen at compile time by this cfg).
+ crate::sys::kernel_copy::copy_spec(reader, writer)
+ } else {
+ // Every other platform: plain read/write loop below.
+ generic_copy(reader, writer)
+ }
+ }
+}
+
+/// The general read-write-loop implementation of
+/// `io::copy` that is used when specializations are not available or not applicable.
+///
+/// Returns the total number of bytes copied, or the first non-`Interrupted`
+/// error produced by `reader.read` / `writer.write_all`.
+pub(crate) fn generic_copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> io::Result<u64>
+where
+ R: Read,
+ W: Write,
+{
+ // Stack-allocated scratch buffer, reused across iterations.
+ let mut buf = MaybeUninit::<[u8; super::DEFAULT_BUF_SIZE]>::uninit();
+ // FIXME: #42788
+ //
+ // - This creates a (mut) reference to a slice of
+ // _uninitialized_ integers, which is **undefined behavior**
+ //
+ // - Only the standard library gets to soundly "ignore" this,
+ // based on its privileged knowledge of unstable rustc
+ // internals;
+ unsafe {
+ reader.initializer().initialize(buf.assume_init_mut());
+ }
+
+ // Running total of bytes successfully written, returned at EOF.
+ let mut written = 0;
+ loop {
+ let len = match reader.read(unsafe { buf.assume_init_mut() }) {
+ // A zero-byte read signals EOF: the copy is complete.
+ Ok(0) => return Ok(written),
+ Ok(len) => len,
+ // Per the documented contract, `Interrupted` is retried.
+ Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+ Err(e) => return Err(e),
+ };
+ // Only the first `len` bytes were filled by this `read` call.
+ writer.write_all(unsafe { &buf.assume_init_ref()[..len] })?;
+ written += len as u64;
+ }
+}
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::buffered::{BufReader, BufWriter, LineWriter};
#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::copy::copy;
+#[stable(feature = "rust1", since = "1.0.0")]
pub use self::cursor::Cursor;
#[stable(feature = "rust1", since = "1.0.0")]
pub use self::error::{Error, ErrorKind, Result};
#[doc(no_inline, hidden)]
pub use self::stdio::{set_panic, set_print, LocalOutput};
#[stable(feature = "rust1", since = "1.0.0")]
-pub use self::util::{copy, empty, repeat, sink, Empty, Repeat, Sink};
+pub use self::util::{empty, repeat, sink, Empty, Repeat, Sink};
pub(crate) use self::stdio::clone_io;
mod buffered;
+pub(crate) mod copy;
mod cursor;
mod error;
mod impls;
}
}
+// only used by platform-dependent io::copy specializations, i.e. unused on some platforms
+#[cfg(any(target_os = "linux", target_os = "android"))]
+impl StdinLock<'_> {
+ pub(crate) fn as_mut_buf(&mut self) -> &mut BufReader<impl Read> {
+ &mut self.inner
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Read for StdinLock<'_> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
use super::{repeat, Cursor, SeekFrom};
use crate::cmp::{self, min};
-use crate::io::prelude::*;
use crate::io::{self, IoSlice, IoSliceMut};
+use crate::io::{BufRead, Read, Seek, Write};
use crate::ops::Deref;
#[test]
mod tests;
use crate::fmt;
-use crate::io::{self, BufRead, ErrorKind, Initializer, IoSlice, IoSliceMut, Read, Write};
-use crate::mem::MaybeUninit;
-
-/// Copies the entire contents of a reader into a writer.
-///
-/// This function will continuously read data from `reader` and then
-/// write it into `writer` in a streaming fashion until `reader`
-/// returns EOF.
-///
-/// On success, the total number of bytes that were copied from
-/// `reader` to `writer` is returned.
-///
-/// If you’re wanting to copy the contents of one file to another and you’re
-/// working with filesystem paths, see the [`fs::copy`] function.
-///
-/// [`fs::copy`]: crate::fs::copy
-///
-/// # Errors
-///
-/// This function will return an error immediately if any call to [`read`] or
-/// [`write`] returns an error. All instances of [`ErrorKind::Interrupted`] are
-/// handled by this function and the underlying operation is retried.
-///
-/// [`read`]: Read::read
-/// [`write`]: Write::write
-///
-/// # Examples
-///
-/// ```
-/// use std::io;
-///
-/// fn main() -> io::Result<()> {
-/// let mut reader: &[u8] = b"hello";
-/// let mut writer: Vec<u8> = vec![];
-///
-/// io::copy(&mut reader, &mut writer)?;
-///
-/// assert_eq!(&b"hello"[..], &writer[..]);
-/// Ok(())
-/// }
-/// ```
-#[stable(feature = "rust1", since = "1.0.0")]
-pub fn copy<R: ?Sized, W: ?Sized>(reader: &mut R, writer: &mut W) -> io::Result<u64>
-where
- R: Read,
- W: Write,
-{
- let mut buf = MaybeUninit::<[u8; super::DEFAULT_BUF_SIZE]>::uninit();
- // FIXME: #42788
- //
- // - This creates a (mut) reference to a slice of
- // _uninitialized_ integers, which is **undefined behavior**
- //
- // - Only the standard library gets to soundly "ignore" this,
- // based on its privileged knowledge of unstable rustc
- // internals;
- unsafe {
- reader.initializer().initialize(buf.assume_init_mut());
- }
-
- let mut written = 0;
- loop {
- let len = match reader.read(unsafe { buf.assume_init_mut() }) {
- Ok(0) => return Ok(written),
- Ok(len) => len,
- Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
- Err(e) => return Err(e),
- };
- writer.write_all(unsafe { &buf.assume_init_ref()[..len] })?;
- written += len as u64;
- }
-}
+use crate::io::{self, BufRead, Initializer, IoSlice, IoSliceMut, Read, Write};
/// A reader which is always at EOF.
///
#![feature(toowned_clone_into)]
#![feature(total_cmp)]
#![feature(trace_macros)]
+#![feature(try_blocks)]
#![feature(try_reserve)]
#![feature(unboxed_closures)]
#![feature(unsafe_block_in_unsafe_fn)]
#[cfg(any(target_os = "linux", target_os = "android"))]
pub fn copy(from: &Path, to: &Path) -> io::Result<u64> {
- use crate::cmp;
- use crate::sync::atomic::{AtomicBool, Ordering};
-
- // Kernel prior to 4.5 don't have copy_file_range
- // We store the availability in a global to avoid unnecessary syscalls
- static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true);
-
- unsafe fn copy_file_range(
- fd_in: libc::c_int,
- off_in: *mut libc::loff_t,
- fd_out: libc::c_int,
- off_out: *mut libc::loff_t,
- len: libc::size_t,
- flags: libc::c_uint,
- ) -> libc::c_long {
- libc::syscall(libc::SYS_copy_file_range, fd_in, off_in, fd_out, off_out, len, flags)
- }
-
let (mut reader, reader_metadata) = open_from(from)?;
let max_len = u64::MAX;
let (mut writer, _) = open_to_and_set_permissions(to, reader_metadata)?;
- let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed);
- let mut written = 0u64;
- while written < max_len {
- let copy_result = if has_copy_file_range {
- let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64) as usize;
- let copy_result = unsafe {
- // We actually don't have to adjust the offsets,
- // because copy_file_range adjusts the file offset automatically
- cvt(copy_file_range(
- reader.as_raw_fd(),
- ptr::null_mut(),
- writer.as_raw_fd(),
- ptr::null_mut(),
- bytes_to_copy,
- 0,
- ))
- };
- if let Err(ref copy_err) = copy_result {
- match copy_err.raw_os_error() {
- Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => {
- HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed);
- }
- _ => {}
- }
- }
- copy_result
- } else {
- Err(io::Error::from_raw_os_error(libc::ENOSYS))
- };
- match copy_result {
- Ok(0) if written == 0 => {
- // fallback to work around several kernel bugs where copy_file_range will fail to
- // copy any bytes and return 0 instead of an error if
- // - reading virtual files from the proc filesystem which appear to have 0 size
- // but are not empty. noted in coreutils to affect kernels at least up to 5.6.19.
- // - copying from an overlay filesystem in docker. reported to occur on fedora 32.
- return io::copy(&mut reader, &mut writer);
- }
- Ok(0) => return Ok(written), // reached EOF
- Ok(ret) => written += ret as u64,
- Err(err) => {
- match err.raw_os_error() {
- Some(
- libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP,
- ) => {
- // Try fallback io::copy if either:
- // - Kernel version is < 4.5 (ENOSYS)
- // - Files are mounted on different fs (EXDEV)
- // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP)
- // - copy_file_range is disallowed, for example by seccomp (EPERM)
- // - copy_file_range cannot be used with pipes or device nodes (EINVAL)
- assert_eq!(written, 0);
- return io::copy(&mut reader, &mut writer);
- }
- _ => return Err(err),
- }
- }
- }
+ use super::kernel_copy::{copy_regular_files, CopyResult};
+
+ match copy_regular_files(reader.as_raw_fd(), writer.as_raw_fd(), max_len) {
+ CopyResult::Ended(result) => result,
+ CopyResult::Fallback(written) => match io::copy::generic_copy(&mut reader, &mut writer) {
+ Ok(bytes) => Ok(bytes + written),
+ Err(e) => Err(e),
+ },
}
- Ok(written)
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
--- /dev/null
+//! This module contains specializations that can offload `io::copy()` operations on file descriptor
+//! containing types (`File`, `TcpStream`, etc.) to more efficient syscalls than `read(2)` and `write(2)`.
+//!
+//! Specialization is only applied to wholly std-owned types so that user code can't observe
+//! that the `Read` and `Write` traits are not used.
+//!
+//! Since a copy operation involves a reader and writer side where each can consist of different types
+//! and also involve generic wrappers (e.g. `Take`, `BufReader`) it is not practical to specialize
+//! a single method on all possible combinations.
+//!
+//! Instead readers and writers are handled separately by the `CopyRead` and `CopyWrite` specialization
+//! traits and then specialized on by the `Copier::copy` method.
+//!
+//! `Copier` uses the specialization traits to unpack the underlying file descriptors and
+//! additional prerequisites and constraints imposed by the wrapper types.
+//!
+//! Once it has obtained all necessary pieces and brought any wrapper types into a state where they
+//! can be safely bypassed it will attempt to use the `copy_file_range(2)`,
+//! `sendfile(2)` or `splice(2)` syscalls to move data directly between file descriptors.
+//! Since those syscalls have requirements that cannot be fully checked in advance and
+//! gathering additional information about file descriptors would require additional syscalls
+//! anyway it simply attempts to use them one after another (guided by inaccurate hints) to
+//! figure out which one works and falls back to the generic read-write copy loop if none of them
+//! does.
+//! Once a working syscall is found for a pair of file descriptors it will be called in a loop
+//! until the copy operation is completed.
+//!
+//! Advantages of using these syscalls:
+//!
+//! * fewer context switches since reads and writes are coalesced into a single syscall
+//! and more bytes are transferred per syscall. This translates to higher throughput
+//! and fewer CPU cycles, at least for sufficiently large transfers to amortize the initial probing.
+//! * `copy_file_range` creates reflink copies on CoW filesystems, thus moving less data and
+//! consuming less disk space
+//! * `sendfile` and `splice` can perform zero-copy IO under some circumstances while
+//! a naive copy loop would move every byte through the CPU.
+//!
+//! Drawbacks:
+//!
+//! * copy operations smaller than the default buffer size can under some circumstances, especially
+//! on older kernels, incur more syscalls than the naive approach would. As mentioned above
+//! the syscall selection is guided by hints to minimize this possibility but they are not perfect.
+//! * optimizations only apply to std types. If a user adds a custom wrapper type, e.g. to report
+//! progress, they can hit a performance cliff.
+//! * complexity
+
+use crate::cmp::min;
+use crate::convert::TryInto;
+use crate::fs::{File, Metadata};
+use crate::io::copy::generic_copy;
+use crate::io::{
+ BufRead, BufReader, BufWriter, Error, Read, Result, StderrLock, StdinLock, StdoutLock, Take,
+ Write,
+};
+use crate::mem::ManuallyDrop;
+use crate::net::TcpStream;
+use crate::os::unix::fs::FileTypeExt;
+use crate::os::unix::io::{AsRawFd, FromRawFd, RawFd};
+use crate::process::{ChildStderr, ChildStdin, ChildStdout};
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, Ordering};
+use crate::sys::cvt;
+
+#[cfg(test)]
+mod tests;
+
+/// Entry point for specialized copies: wraps the reader and writer in a `Copier`
+/// and dispatches through the `SpecCopy` trait. The default impl runs the generic
+/// read/write loop; the specialized impl (for `CopyRead`/`CopyWrite` types below)
+/// attempts fd-based offloading first.
+pub(crate) fn copy_spec<R: Read + ?Sized, W: Write + ?Sized>(
+    read: &mut R,
+    write: &mut W,
+) -> Result<u64> {
+    let copier = Copier { read, write };
+    SpecCopy::copy(copier)
+}
+
+/// This type represents either the inferred `FileType` of a `RawFd` based on the source
+/// type from which it was extracted or the actual metadata
+///
+/// The methods on this type only provide hints, due to `AsRawFd` and `FromRawFd` the inferred
+/// type may be wrong.
+enum FdMeta {
+    /// We obtained the FD from a type that can contain any type of `FileType` and queried the metadata
+    /// because it is cheaper than probing all possible syscalls (reader side)
+    Metadata(Metadata),
+    /// The fd came from a type assumed to be a socket (e.g. `TcpStream`); no metadata was queried
+    Socket,
+    /// The fd came from a type assumed to be a pipe (e.g. child-process stdio); no metadata was queried
+    Pipe,
+    /// We don't have any metadata, e.g. because the original type was `File` which can represent
+    /// any `FileType` and we did not query the metadata either since it did not seem beneficial
+    /// (writer side)
+    NoneObtained,
+}
+
+impl FdMeta {
+    /// Returns `true` if the fd could be a FIFO/pipe, which decides whether `splice(2)`
+    /// is worth attempting. `NoneObtained` answers `true` since a pipe can't be ruled out.
+    fn maybe_fifo(&self) -> bool {
+        match self {
+            FdMeta::Metadata(meta) => meta.file_type().is_fifo(),
+            FdMeta::Socket => false,
+            FdMeta::Pipe => true,
+            FdMeta::NoneObtained => true,
+        }
+    }
+
+    /// Returns `true` if `sendfile(2)` is likely to accept this fd as the source:
+    /// a non-empty regular file or a block device.
+    fn potential_sendfile_source(&self) -> bool {
+        match self {
+            // procfs erroneously shows 0 length on non-empty readable files.
+            // and if a file is truly empty then a `read` syscall will determine that and skip the write syscall
+            // thus there would be benefit from attempting sendfile
+            FdMeta::Metadata(meta)
+                if meta.file_type().is_file() && meta.len() > 0
+                    || meta.file_type().is_block_device() =>
+            {
+                true
+            }
+            _ => false,
+        }
+    }
+
+    /// Returns `true` if `copy_file_range(2)` is worth attempting with this fd.
+    fn copy_file_range_candidate(&self) -> bool {
+        match self {
+            // copy_file_range will fail on empty procfs files. `read` can determine whether EOF has been reached
+            // without extra cost and skip the write, thus there is no benefit in attempting copy_file_range
+            FdMeta::Metadata(meta) if meta.is_file() && meta.len() > 0 => true,
+            FdMeta::NoneObtained => true,
+            _ => false,
+        }
+    }
+}
+
+/// Hint/metadata for one copy endpoint plus its raw fd, if one could be extracted
+/// (`None` means the specialized syscalls cannot be used for that side).
+struct CopyParams(FdMeta, Option<RawFd>);
+
+/// Pairs a reader and a writer so specialization can apply to the combination.
+struct Copier<'a, 'b, R: Read + ?Sized, W: Write + ?Sized> {
+    read: &'a mut R,
+    write: &'b mut W,
+}
+
+/// Specialization point: the `default` impl runs the generic copy loop,
+/// the `CopyRead`/`CopyWrite` impl below tries fd-based offloading first.
+trait SpecCopy {
+    fn copy(self) -> Result<u64>;
+}
+
+impl<R: Read + ?Sized, W: Write + ?Sized> SpecCopy for Copier<'_, '_, R, W> {
+    // Fallback for arbitrary reader/writer combinations: plain generic copy loop.
+    default fn copy(self) -> Result<u64> {
+        generic_copy(self.read, self.write)
+    }
+}
+
+// Specialized path: both endpoints are fd-backed std types. Guided by the `FdMeta`
+// hints, try copy_file_range, then sendfile, then splice; fall back to the generic
+// loop only if none of the syscalls accepts this pair of descriptors. `written`
+// accumulates progress made by buffer draining and partial syscall attempts.
+impl<R: CopyRead, W: CopyWrite> SpecCopy for Copier<'_, '_, R, W> {
+    fn copy(self) -> Result<u64> {
+        let (reader, writer) = (self.read, self.write);
+        let r_cfg = reader.properties();
+        let w_cfg = writer.properties();
+
+        // before direct operations on file descriptors ensure that all source and sink buffers are empty
+        let mut flush = || -> crate::io::Result<u64> {
+            let bytes = reader.drain_to(writer, u64::MAX)?;
+            // BufWriter buffered bytes have already been accounted for in earlier write() calls
+            writer.flush()?;
+            Ok(bytes)
+        };
+
+        let mut written = 0u64;
+
+        if let (CopyParams(input_meta, Some(readfd)), CopyParams(output_meta, Some(writefd))) =
+            (r_cfg, w_cfg)
+        {
+            written += flush()?;
+            let max_write = reader.min_limit();
+
+            if input_meta.copy_file_range_candidate() && output_meta.copy_file_range_candidate() {
+                let result = copy_regular_files(readfd, writefd, max_write);
+
+                match result {
+                    CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written),
+                    CopyResult::Ended(err) => return err,
+                    CopyResult::Fallback(bytes) => written += bytes,
+                }
+            }
+
+            // on modern kernels sendfile can copy from any mmapable type (some but not all regular files and block devices)
+            // to any writable file descriptor. On older kernels the writer side can only be a socket.
+            // So we just try and fallback if needed.
+            // If current file offsets + write sizes overflow it may also fail, we do not try to fix that and instead
+            // fall back to the generic copy loop.
+            if input_meta.potential_sendfile_source() {
+                let result = sendfile_splice(SpliceMode::Sendfile, readfd, writefd, max_write);
+
+                match result {
+                    CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written),
+                    CopyResult::Ended(err) => return err,
+                    CopyResult::Fallback(bytes) => written += bytes,
+                }
+            }
+
+            // last syscall candidate: splice, attempted when either side may be a pipe
+            if input_meta.maybe_fifo() || output_meta.maybe_fifo() {
+                let result = sendfile_splice(SpliceMode::Splice, readfd, writefd, max_write);
+
+                match result {
+                    CopyResult::Ended(Ok(bytes_copied)) => return Ok(bytes_copied + written),
+                    CopyResult::Ended(err) => return err,
+                    CopyResult::Fallback(0) => { /* use the fallback below */ }
+                    CopyResult::Fallback(_) => {
+                        unreachable!("splice should not return > 0 bytes on the fallback path")
+                    }
+                }
+            }
+        }
+
+        // fallback if none of the more specialized syscalls wants to work with these file descriptors
+        match generic_copy(reader, writer) {
+            Ok(bytes) => Ok(bytes + written),
+            err => err,
+        }
+    }
+}
+
+/// Readers that can expose a raw file descriptor (plus hints) for copy offloading.
+/// The attribute marks the trait so it may serve as a bound in the specialized
+/// `SpecCopy` impl above.
+#[rustc_specialization_trait]
+trait CopyRead: Read {
+    /// Implementations that contain buffers (i.e. `BufReader`) must transfer data from their internal
+    /// buffers into `writer` until either the buffers are emptied or `limit` bytes have been
+    /// transferred, whichever occurs sooner.
+    /// If nested buffers are present the outer buffers must be drained first.
+    ///
+    /// This is necessary to directly bypass the wrapper types while preserving the data order
+    /// when operating directly on the underlying file descriptors.
+    fn drain_to<W: Write>(&mut self, _writer: &mut W, _limit: u64) -> Result<u64> {
+        Ok(0)
+    }
+
+    /// The minimum of the limit of all `Take<_>` wrappers, `u64::MAX` otherwise.
+    /// This method does not account for data `BufReader` buffers and would underreport
+    /// the limit of a `Take<BufReader<Take<_>>>` type. Thus its result is only valid
+    /// after draining the buffers via `drain_to`.
+    fn min_limit(&self) -> u64 {
+        u64::MAX
+    }
+
+    /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary.
+    fn properties(&self) -> CopyParams;
+}
+
+/// Writers that can expose a raw file descriptor (plus hints) for copy offloading.
+#[rustc_specialization_trait]
+trait CopyWrite: Write {
+    /// Extracts the file descriptor and hints/metadata, delegating through wrappers if necessary.
+    fn properties(&self) -> CopyParams;
+}
+
+// Forward through mutable references so that e.g. `&mut File` or `&mut TcpStream`
+// still hits the specialized path. All methods delegate to the referent.
+impl<T> CopyRead for &mut T
+where
+    T: CopyRead,
+{
+    fn drain_to<W: Write>(&mut self, writer: &mut W, limit: u64) -> Result<u64> {
+        (**self).drain_to(writer, limit)
+    }
+
+    fn min_limit(&self) -> u64 {
+        (**self).min_limit()
+    }
+
+    fn properties(&self) -> CopyParams {
+        (**self).properties()
+    }
+}
+
+impl<T> CopyWrite for &mut T
+where
+    T: CopyWrite,
+{
+    fn properties(&self) -> CopyParams {
+        (**self).properties()
+    }
+}
+
+// ---- concrete fd-owning std types ----
+
+// `File` readers query metadata (the hints matter on the read side, per `FdMeta` docs);
+// writers skip the stat call and report `NoneObtained`.
+impl CopyRead for File {
+    fn properties(&self) -> CopyParams {
+        CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for &File {
+    fn properties(&self) -> CopyParams {
+        CopyParams(fd_to_meta(*self), Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for File {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for &File {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for TcpStream {
+    fn properties(&self) -> CopyParams {
+        // avoid the stat syscall since we can be fairly sure it's a socket
+        CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for &TcpStream {
+    fn properties(&self) -> CopyParams {
+        // avoid the stat syscall since we can be fairly sure it's a socket
+        CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for TcpStream {
+    fn properties(&self) -> CopyParams {
+        // avoid the stat syscall since we can be fairly sure it's a socket
+        CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for &TcpStream {
+    fn properties(&self) -> CopyParams {
+        // avoid the stat syscall since we can be fairly sure it's a socket
+        CopyParams(FdMeta::Socket, Some(self.as_raw_fd()))
+    }
+}
+
+// Child-process stdio handles are reported as pipes without querying metadata.
+impl CopyWrite for ChildStdin {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for ChildStdout {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for ChildStderr {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::Pipe, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyRead for StdinLock<'_> {
+    // Stdin is internally buffered; forward that buffer to the writer (up to
+    // `outer_limit` bytes) so data order is preserved once the fd is used directly.
+    fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+        let buf_reader = self.as_mut_buf();
+        let buf = buf_reader.buffer();
+        let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))];
+        let bytes_drained = buf.len();
+        writer.write_all(buf)?;
+        buf_reader.consume(bytes_drained);
+
+        Ok(bytes_drained as u64)
+    }
+
+    fn properties(&self) -> CopyParams {
+        CopyParams(fd_to_meta(self), Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for StdoutLock<'_> {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+    }
+}
+
+impl CopyWrite for StderrLock<'_> {
+    fn properties(&self) -> CopyParams {
+        CopyParams(FdMeta::NoneObtained, Some(self.as_raw_fd()))
+    }
+}
+
+// ---- wrapper types: delegate to the inner value while honoring their own state ----
+
+impl<T: CopyRead> CopyRead for Take<T> {
+    fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+        // never drain more than both the caller's and our own remaining limit allow
+        let local_limit = self.limit();
+        let combined_limit = min(outer_limit, local_limit);
+        let bytes_drained = self.get_mut().drain_to(writer, combined_limit)?;
+        // update limit since read() was bypassed
+        self.set_limit(local_limit - bytes_drained);
+
+        Ok(bytes_drained)
+    }
+
+    fn min_limit(&self) -> u64 {
+        min(Take::limit(self), self.get_ref().min_limit())
+    }
+
+    fn properties(&self) -> CopyParams {
+        self.get_ref().properties()
+    }
+}
+
+impl<T: CopyRead> CopyRead for BufReader<T> {
+    fn drain_to<W: Write>(&mut self, writer: &mut W, outer_limit: u64) -> Result<u64> {
+        // write our own buffered bytes first, clamped to `outer_limit`
+        let buf = self.buffer();
+        let buf = &buf[0..min(buf.len(), outer_limit.try_into().unwrap_or(usize::MAX))];
+        let bytes = buf.len();
+        writer.write_all(buf)?;
+        self.consume(bytes);
+
+        let remaining = outer_limit - bytes as u64;
+
+        // in case of nested bufreaders we also need to drain the ones closer to the source
+        let inner_bytes = self.get_mut().drain_to(writer, remaining)?;
+
+        Ok(bytes as u64 + inner_bytes)
+    }
+
+    fn min_limit(&self) -> u64 {
+        self.get_ref().min_limit()
+    }
+
+    fn properties(&self) -> CopyParams {
+        self.get_ref().properties()
+    }
+}
+
+impl<T: CopyWrite> CopyWrite for BufWriter<T> {
+    // No drain needed on the write side: buffered bytes are flushed by the `flush`
+    // closure in `SpecCopy::copy` before the fd is used directly.
+    fn properties(&self) -> CopyParams {
+        self.get_ref().properties()
+    }
+}
+
+/// Queries `Metadata` for a borrowed raw fd without taking ownership of it.
+fn fd_to_meta<T: AsRawFd>(fd: &T) -> FdMeta {
+    let fd = fd.as_raw_fd();
+    // SAFETY: `from_raw_fd` nominally assumes ownership of the fd (and would close it
+    // on drop); wrapping the `File` in `ManuallyDrop` suppresses the drop, so the
+    // borrowed fd stays open and owned by the caller.
+    let file: ManuallyDrop<File> = ManuallyDrop::new(unsafe { File::from_raw_fd(fd) });
+    match file.metadata() {
+        Ok(meta) => FdMeta::Metadata(meta),
+        Err(_) => FdMeta::NoneObtained,
+    }
+}
+
+/// Outcome of one offloading attempt.
+pub(super) enum CopyResult {
+    /// The copy ran to completion (or hard-failed); no fallback should be attempted.
+    Ended(Result<u64>),
+    /// Offloading was not possible; payload is the number of bytes already written,
+    /// which the caller must add to whatever a subsequent generic copy moves.
+    Fallback(u64),
+}
+
+/// linux-specific implementation that will attempt to use copy_file_range for copy offloading
+/// as the name says, it only works on regular files
+///
+/// Callers must handle fallback to a generic copy loop.
+/// `Fallback` may indicate non-zero number of bytes already written
+/// if one of the files' cursor + `max_len` would exceed u64::MAX (`EOVERFLOW`).
+pub(super) fn copy_regular_files(reader: RawFd, writer: RawFd, max_len: u64) -> CopyResult {
+    use crate::cmp;
+
+    // Kernel prior to 4.5 don't have copy_file_range
+    // We store the availability in a global to avoid unnecessary syscalls
+    static HAS_COPY_FILE_RANGE: AtomicBool = AtomicBool::new(true);
+
+    // raw wrapper invoking the syscall by number via libc::syscall
+    unsafe fn copy_file_range(
+        fd_in: libc::c_int,
+        off_in: *mut libc::loff_t,
+        fd_out: libc::c_int,
+        off_out: *mut libc::loff_t,
+        len: libc::size_t,
+        flags: libc::c_uint,
+    ) -> libc::c_long {
+        libc::syscall(libc::SYS_copy_file_range, fd_in, off_in, fd_out, off_out, len, flags)
+    }
+
+    let has_copy_file_range = HAS_COPY_FILE_RANGE.load(Ordering::Relaxed);
+    let mut written = 0u64;
+    while written < max_len {
+        let copy_result = if has_copy_file_range {
+            let bytes_to_copy = cmp::min(max_len - written, usize::MAX as u64);
+            // cap to 1GB chunks in case u64::MAX is passed as max_len and the file has a non-zero seek position
+            // this allows us to copy large chunks without hitting EOVERFLOW,
+            // unless someone sets a file offset close to u64::MAX - 1GB, in which case a fallback would be required
+            let bytes_to_copy = cmp::min(bytes_to_copy as usize, 0x4000_0000usize);
+            let copy_result = unsafe {
+                // We actually don't have to adjust the offsets,
+                // because copy_file_range adjusts the file offset automatically
+                cvt(copy_file_range(
+                    reader,
+                    ptr::null_mut(),
+                    writer,
+                    ptr::null_mut(),
+                    bytes_to_copy,
+                    0,
+                ))
+            };
+            if let Err(ref copy_err) = copy_result {
+                match copy_err.raw_os_error() {
+                    // remember that the syscall is unavailable so later calls skip it entirely
+                    Some(libc::ENOSYS | libc::EPERM | libc::EOPNOTSUPP) => {
+                        HAS_COPY_FILE_RANGE.store(false, Ordering::Relaxed);
+                    }
+                    _ => {}
+                }
+            }
+            copy_result
+        } else {
+            Err(Error::from_raw_os_error(libc::ENOSYS))
+        };
+        match copy_result {
+            Ok(0) if written == 0 => {
+                // fallback to work around several kernel bugs where copy_file_range will fail to
+                // copy any bytes and return 0 instead of an error if
+                // - reading virtual files from the proc filesystem which appear to have 0 size
+                // but are not empty. noted in coreutils to affect kernels at least up to 5.6.19.
+                // - copying from an overlay filesystem in docker. reported to occur on fedora 32.
+                return CopyResult::Fallback(0);
+            }
+            Ok(0) => return CopyResult::Ended(Ok(written)), // reached EOF
+            Ok(ret) => written += ret as u64,
+            Err(err) => {
+                return match err.raw_os_error() {
+                    // when file offset + max_length > u64::MAX
+                    Some(libc::EOVERFLOW) => CopyResult::Fallback(written),
+                    Some(
+                        libc::ENOSYS | libc::EXDEV | libc::EINVAL | libc::EPERM | libc::EOPNOTSUPP,
+                    ) => {
+                        // Try fallback io::copy if either:
+                        // - Kernel version is < 4.5 (ENOSYS)
+                        // - Files are mounted on different fs (EXDEV)
+                        // - copy_file_range is broken in various ways on RHEL/CentOS 7 (EOPNOTSUPP)
+                        // - copy_file_range is disallowed, for example by seccomp (EPERM)
+                        // - copy_file_range cannot be used with pipes or device nodes (EINVAL)
+                        assert_eq!(written, 0);
+                        CopyResult::Fallback(0)
+                    }
+                    _ => CopyResult::Ended(Err(err)),
+                };
+            }
+        }
+    }
+    CopyResult::Ended(Ok(written))
+}
+
+/// Selects which zero-copy syscall `sendfile_splice` should use.
+#[derive(PartialEq)]
+enum SpliceMode {
+    Sendfile,
+    Splice,
+}
+
+/// performs splice or sendfile between file descriptors
+/// Does _not_ fall back to a generic copy loop.
+fn sendfile_splice(mode: SpliceMode, reader: RawFd, writer: RawFd, len: u64) -> CopyResult {
+    // cached availability of each syscall, flipped to false on ENOSYS/EPERM so
+    // later calls don't keep probing a kernel/sandbox that rejects them
+    static HAS_SENDFILE: AtomicBool = AtomicBool::new(true);
+    static HAS_SPLICE: AtomicBool = AtomicBool::new(true);
+
+    syscall! {
+        fn splice(
+            srcfd: libc::c_int,
+            src_offset: *const i64,
+            dstfd: libc::c_int,
+            dst_offset: *const i64,
+            len: libc::size_t,
+            flags: libc::c_int
+        ) -> libc::ssize_t
+    }
+
+    // bail out immediately if a previous call already learned the syscall is unavailable
+    match mode {
+        SpliceMode::Sendfile if !HAS_SENDFILE.load(Ordering::Relaxed) => {
+            return CopyResult::Fallback(0);
+        }
+        SpliceMode::Splice if !HAS_SPLICE.load(Ordering::Relaxed) => {
+            return CopyResult::Fallback(0);
+        }
+        _ => (),
+    }
+
+    let mut written = 0u64;
+    while written < len {
+        // according to its manpage that's the maximum size sendfile() will copy per invocation
+        let chunk_size = crate::cmp::min(len - written, 0x7ffff000_u64) as usize;
+
+        let result = match mode {
+            SpliceMode::Sendfile => {
+                cvt(unsafe { libc::sendfile(writer, reader, ptr::null_mut(), chunk_size) })
+            }
+            SpliceMode::Splice => cvt(unsafe {
+                splice(reader, ptr::null_mut(), writer, ptr::null_mut(), chunk_size, 0)
+            }),
+        };
+
+        match result {
+            Ok(0) => break, // EOF
+            Ok(ret) => written += ret as u64,
+            Err(err) => {
+                return match err.raw_os_error() {
+                    Some(libc::ENOSYS | libc::EPERM) => {
+                        // syscall not supported (ENOSYS)
+                        // syscall is disallowed, e.g. by seccomp (EPERM)
+                        match mode {
+                            SpliceMode::Sendfile => HAS_SENDFILE.store(false, Ordering::Relaxed),
+                            SpliceMode::Splice => HAS_SPLICE.store(false, Ordering::Relaxed),
+                        }
+                        assert_eq!(written, 0);
+                        CopyResult::Fallback(0)
+                    }
+                    Some(libc::EINVAL) => {
+                        // splice/sendfile do not support this particular file descriptor (EINVAL)
+                        assert_eq!(written, 0);
+                        CopyResult::Fallback(0)
+                    }
+                    // sendfile overflowed: report partial progress so the caller finishes generically
+                    Some(os_err) if mode == SpliceMode::Sendfile && os_err == libc::EOVERFLOW => {
+                        CopyResult::Fallback(written)
+                    }
+                    _ => CopyResult::Ended(Err(err)),
+                };
+            }
+        }
+    }
+    CopyResult::Ended(Ok(written))
+}
--- /dev/null
+use crate::env::temp_dir;
+use crate::fs::OpenOptions;
+use crate::io;
+use crate::io::Result;
+use crate::io::SeekFrom;
+use crate::io::{BufRead, Read, Seek, Write};
+use crate::os::unix::io::AsRawFd;
+
+/// Exercises the specialized copy path with layered wrappers: the reader side is a
+/// `Take<BufReader<Take<File>>>` with a pre-filled buffer and limits, the writer side a
+/// `BufWriter<File>` with pending buffered bytes. Checks that buffers and limits are
+/// drained in the right order; temp files are removed even when the `try` block fails.
+#[test]
+fn copy_specialization() -> Result<()> {
+    use crate::io::{BufReader, BufWriter};
+
+    let path = crate::env::temp_dir();
+    let source_path = path.join("copy-spec.source");
+    let sink_path = path.join("copy-spec.sink");
+
+    let result: Result<()> = try {
+        let mut source = crate::fs::OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .open(&source_path)?;
+        source.write_all(b"abcdefghiklmnopqr")?;
+        source.seek(SeekFrom::Start(8))?;
+        let mut source = BufReader::with_capacity(8, source.take(5));
+        source.fill_buf()?;
+        assert_eq!(source.buffer(), b"iklmn");
+        source.get_mut().set_limit(6);
+        source.get_mut().get_mut().seek(SeekFrom::Start(1))?; // "bcdefg"
+        let mut source = source.take(10); // "iklmnbcdef"
+
+        let mut sink = crate::fs::OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .open(&sink_path)?;
+        sink.write_all(b"000000")?;
+        let mut sink = BufWriter::with_capacity(5, sink);
+        sink.write_all(b"wxyz")?;
+        assert_eq!(sink.buffer(), b"wxyz");
+
+        // copy must flush "wxyz", then the buffered "iklmn", then the remaining "bcdef"
+        let copied = crate::io::copy(&mut source, &mut sink)?;
+        assert_eq!(copied, 10);
+        assert_eq!(sink.buffer().len(), 0);
+
+        let mut sink = sink.into_inner()?;
+        sink.seek(SeekFrom::Start(0))?;
+        let mut copied = Vec::new();
+        sink.read_to_end(&mut copied)?;
+        assert_eq!(&copied, b"000000wxyziklmnbcdef");
+    };
+
+    // best-effort cleanup; surface removal errors alongside the test result
+    let rm1 = crate::fs::remove_file(source_path);
+    let rm2 = crate::fs::remove_file(sink_path);
+
+    result.and(rm1).and(rm2)
+}
+
+/// Measures `io::copy` between two regular files, rewinding both between iterations.
+#[bench]
+fn bench_file_to_file_copy(b: &mut test::Bencher) {
+    const BYTES: usize = 128 * 1024;
+    let src_path = temp_dir().join("file-copy-bench-src");
+    let mut src = crate::fs::OpenOptions::new()
+        .create(true)
+        .truncate(true)
+        .read(true)
+        .write(true)
+        .open(src_path)
+        .unwrap();
+    src.write(&vec![0u8; BYTES]).unwrap();
+
+    let sink_path = temp_dir().join("file-copy-bench-sink");
+    let mut sink = crate::fs::OpenOptions::new()
+        .create(true)
+        .truncate(true)
+        .write(true)
+        .open(sink_path)
+        .unwrap();
+
+    b.bytes = BYTES as u64;
+    b.iter(|| {
+        src.seek(SeekFrom::Start(0)).unwrap();
+        sink.seek(SeekFrom::Start(0)).unwrap();
+        assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap());
+    });
+}
+
+/// Measures `io::copy` from a regular file into a TCP socket; a background thread
+/// drains the receiving end so the sender does not stall on a full socket buffer.
+#[bench]
+fn bench_file_to_socket_copy(b: &mut test::Bencher) {
+    const BYTES: usize = 128 * 1024;
+    let src_path = temp_dir().join("pipe-copy-bench-src");
+    let mut src = OpenOptions::new()
+        .create(true)
+        .truncate(true)
+        .read(true)
+        .write(true)
+        .open(src_path)
+        .unwrap();
+    src.write(&vec![0u8; BYTES]).unwrap();
+
+    let sink_drainer = crate::net::TcpListener::bind("localhost:0").unwrap();
+    let mut sink = crate::net::TcpStream::connect(sink_drainer.local_addr().unwrap()).unwrap();
+    let mut sink_drainer = sink_drainer.accept().unwrap().0;
+
+    // drain the receiving side of the connection for the lifetime of the benchmark
+    crate::thread::spawn(move || {
+        let mut sink_buf = vec![0u8; 1024 * 1024];
+        loop {
+            sink_drainer.read(&mut sink_buf[..]).unwrap();
+        }
+    });
+
+    b.bytes = BYTES as u64;
+    b.iter(|| {
+        src.seek(SeekFrom::Start(0)).unwrap();
+        assert_eq!(BYTES as u64, io::copy(&mut src, &mut sink).unwrap());
+    });
+}
+
+/// Measures `io::copy` from a pipe into a socket (the splice path). The setup probes
+/// whether splice actually works first and skips the benchmark otherwise, since the
+/// copy loop would hang without it.
+#[cfg(any(target_os = "linux", target_os = "android"))]
+#[bench]
+fn bench_socket_pipe_socket_copy(b: &mut test::Bencher) {
+    use super::CopyResult;
+    use crate::io::ErrorKind;
+    use crate::process::{ChildStdin, ChildStdout};
+    use crate::sys_common::FromInner;
+
+    let (read_end, write_end) = crate::sys::pipe::anon_pipe().unwrap();
+
+    let mut read_end = ChildStdout::from_inner(read_end);
+    let write_end = ChildStdin::from_inner(write_end);
+
+    let acceptor = crate::net::TcpListener::bind("localhost:0").unwrap();
+    let mut remote_end = crate::net::TcpStream::connect(acceptor.local_addr().unwrap()).unwrap();
+
+    let local_end = crate::sync::Arc::new(acceptor.accept().unwrap().0);
+
+    // the data flow in this benchmark:
+    //
+    //                      socket(tx)  local_source
+    // remote_end (write)  +--------->  (splice to)
+    //                                  write_end
+    //                                      +
+    //                                      |
+    //                                      | pipe
+    //                                      v
+    //                                  read_end
+    // remote_end (read)   <---------+  (splice to) *
+    //                      socket(rx)  local_end
+    //
+    //                                  * benchmark loop using io::copy
+
+    // nonblocking helper thread: keeps the tx socket supplied with data and drains
+    // the rx socket so neither direction stalls
+    crate::thread::spawn(move || {
+        let mut sink_buf = vec![0u8; 1024 * 1024];
+        remote_end.set_nonblocking(true).unwrap();
+        loop {
+            match remote_end.write(&mut sink_buf[..]) {
+                Err(err) if err.kind() == ErrorKind::WouldBlock => {}
+                Ok(_) => {}
+                err => {
+                    err.expect("write failed");
+                }
+            };
+            match remote_end.read(&mut sink_buf[..]) {
+                Err(err) if err.kind() == ErrorKind::WouldBlock => {}
+                Ok(_) => {}
+                err => {
+                    err.expect("read failed");
+                }
+            };
+        }
+    });
+
+    // check that splice works, otherwise the benchmark would hang
+    let probe = super::sendfile_splice(
+        super::SpliceMode::Splice,
+        local_end.as_raw_fd(),
+        write_end.as_raw_fd(),
+        1,
+    );
+
+    match probe {
+        CopyResult::Ended(Ok(1)) => {
+            // splice works
+        }
+        _ => {
+            eprintln!("splice failed, skipping benchmark");
+            return;
+        }
+    }
+
+    // feeder thread: continuously splices from the rx socket into the pipe's write end
+    let local_source = local_end.clone();
+    crate::thread::spawn(move || {
+        loop {
+            super::sendfile_splice(
+                super::SpliceMode::Splice,
+                local_source.as_raw_fd(),
+                write_end.as_raw_fd(),
+                u64::MAX,
+            );
+        }
+    });
+
+    const BYTES: usize = 128 * 1024;
+    b.bytes = BYTES as u64;
+    b.iter(|| {
+        assert_eq!(
+            BYTES as u64,
+            io::copy(&mut (&mut read_end).take(BYTES as u64), &mut &*local_end).unwrap()
+        );
+    });
+}
pub mod fs;
pub mod futex;
pub mod io;
+#[cfg(any(target_os = "linux", target_os = "android"))]
+pub mod kernel_copy;
#[cfg(target_os = "l4re")]
mod l4re;
pub mod memchr;
"invoking predicate_may_hold: param_env={:?}, trait_ref={:?}, ty={:?}",
param_env, trait_ref, ty
);
- match infcx.evaluate_obligation(&traits::Obligation::new(
- cause,
- param_env,
- trait_ref.without_const().to_predicate(infcx.tcx),
- )) {
- Ok(eval_result) => eval_result.may_apply(),
- Err(traits::OverflowError) => true, // overflow doesn't mean yes *or* no
+ let predicates = self
+ .cx
+ .tcx
+ .predicates_of(impl_def_id)
+ .instantiate(self.cx.tcx, impl_substs)
+ .predicates
+ .into_iter()
+ .chain(Some(trait_ref.without_const().to_predicate(infcx.tcx)));
+ for predicate in predicates {
+ debug!("testing predicate {:?}", predicate);
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::dummy(),
+ param_env,
+ predicate,
+ );
+ match infcx.evaluate_obligation(&obligation) {
+ Ok(eval_result) if eval_result.may_apply() => {}
+ Err(traits::OverflowError) => {}
+ _ => {
+ return false;
+ }
+ }
}
+ true
} else {
false
}
pub render_options: RenderOptions,
/// Output format rendering (used only for "show-coverage" option for the moment)
pub output_format: Option<OutputFormat>,
+ /// If this option is set to `true`, rustdoc will only run checks and not generate
+ /// documentation.
+ pub run_check: bool,
}
impl fmt::Debug for Options {
.field("runtool", &self.runtool)
.field("runtool_args", &self.runtool_args)
.field("enable-per-target-ignores", &self.enable_per_target_ignores)
+ .field("run_check", &self.run_check)
.finish()
}
}
let enable_per_target_ignores = matches.opt_present("enable-per-target-ignores");
let document_private = matches.opt_present("document-private-items");
let document_hidden = matches.opt_present("document-hidden-items");
+ let run_check = matches.opt_present("check");
let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format);
runtool_args,
enable_per_target_ignores,
test_builder,
+ run_check,
render_options: RenderOptions {
output,
external_html,
"specified the rustc-like binary to use as the test builder",
)
}),
+ unstable("check", |o| o.optflag("", "check", "Run rustdoc checks")),
]
}
// but we can't crates the Handler ahead of time because it's not Send
let diag_opts = (options.error_format, options.edition, options.debugging_opts.clone());
let show_coverage = options.show_coverage;
+ let run_check = options.run_check;
// First, parse the crate and extract all relevant information.
info!("starting to run rustc");
// if we ran coverage, bail early, we don't need to also generate docs at this point
// (also we didn't load in any of the useful passes)
return Ok(());
+ } else if run_check {
+ // Since we're in "check" mode, no need to generate anything beyond this point.
+ return Ok(());
}
info!("going to format");
if let Ok((Some(ext), res)) = resolver.resolve_macro_path(
&path,
None,
- &ParentScope::module(resolver.graph_root()),
+ &ParentScope::module(resolver.graph_root(), resolver),
false,
false,
) {
BASEDIR=../coverage-reports-base
SOURCEDIR=../coverage
-ifeq ($(UNAME),Darwin)
-# FIXME(richkadel): It appears that --debug is not available on MacOS even when not running
-# under CI.
-NO_LLVM_ASSERTIONS=1
-endif
-
# The `llvm-cov show` flag `--debug`, used to generate the `counters` output files, is only enabled
-# if LLVM assertions are enabled. Some CI builds disable debug assertions.
-ifndef NO_LLVM_ASSERTIONS
+# if LLVM assertions are enabled. Requires Rust config `llvm/optimize` and not
+# `llvm/release_debuginfo`. Note that some CI builds disable debug assertions (by setting
+# `NO_LLVM_ASSERTIONS=1`), so it is not OK to fail the test, but `bless`ed test results cannot be
+# generated without debug assertions.
+LLVM_COV_DEBUG := $(shell "$(LLVM_BIN_DIR)"/llvm-cov show --debug 2>&1 | grep -q "Unknown command line argument '--debug'"; echo $$?)
+ifeq ($(LLVM_COV_DEBUG), 1)
DEBUG_FLAG=--debug
endif
BASEDIR=../coverage-spanview-base
SOURCEDIR=../coverage
-ifeq ($(UNAME),Darwin)
-SED_HAS_ISSUES=1
-endif
+define SPANVIEW_HEADER
+<!DOCTYPE html>
+<!--
+
+Preview this file as rendered HTML from the github source at:
+https://htmlpreview.github.io/?https://github.com/rust-lang/rust/blob/master/src/test/run-make-fulldeps/coverage-spanview-base/expected_mir_dump.%s/%s
+
+For revisions in Pull Requests (PR):
+ * Replace "rust-lang" with the github PR author
+ * Replace "master" with the PR branch name
+
+-->
+endef
+export SPANVIEW_HEADER
all: $(patsubst $(SOURCEDIR)/%.rs,%,$(wildcard $(SOURCEDIR)/*.rs))
-Zdump-mir-spanview \
-Zdump-mir-dir="$(TMPDIR)"/mir_dump.$@
-ifdef SED_HAS_ISSUES
- # FIXME(richkadel): MacOS's default sed has some significant limitations. Until I've come up
- # with a better workaround, I'm disabling this test for MacOS.
- #
- # For future reference, see if `gsed` is available as an alternative.
- which gsed || echo "no gsed"
-else
-
for path in "$(TMPDIR)"/mir_dump.$@/*; do \
- echo $$path; \
file="$$(basename "$$path")"; \
- echo $$file; \
urlescaped="$$("$(PYTHON)" $(BASEDIR)/escape_url.py $$file)" || exit $$?; \
- echo $$urlescaped; \
- sed -i -e '1a\
-<!--\
-\
-Preview this file as rendered HTML from the github source at:\
-https://htmlpreview.github.io/?https://github.com/rust-lang/rust/blob/master/src/test/run-make-fulldeps/coverage-spanview-base/expected_mir_dump.$@/'"$$urlescaped"'\
-\
-For revisions in Pull Requests (PR):\
- * Replace "rust-lang" with the github PR author\
- * Replace "master" with the PR branch name\
-\
--->' "$$path"; \
+ printf "$$SPANVIEW_HEADER\n" "$@" "$$urlescaped" > "$$path".modified; \
+ tail -n +2 "$$path" >> "$$path".modified; \
+ mv "$$path".modified "$$path"; \
done && true # for/done ends in non-zero status
ifdef RUSTC_BLESS_TEST
cp "$(TMPDIR)"/mir_dump.$@/*InstrumentCoverage.0.html "$(TMPDIR)"/actual_mir_dump.$@/
$(DIFF) -r expected_mir_dump.$@/ "$(TMPDIR)"/actual_mir_dump.$@/
endif
-
-endif
--- /dev/null
+include ../../run-make-fulldeps/tools.mk
+
+# FIXME https://github.com/rust-lang/rust/issues/78911
+# ignore-32bit wrong/no cross compiler and sometimes we pass wrong gcc args (-m64)
+
+# Tests that we don't ICE during incremental compilation after modifying a
+# function span such that its previous end line exceeds the number of lines
+# in the new file, but its start line/column and length remain the same.
+
+SRC=$(TMPDIR)/src
+INCR=$(TMPDIR)/incr
+
+all:
+ mkdir $(SRC)
+ mkdir $(INCR)
+# First compile: populate the incremental cache from a.rs.
+ cp a.rs $(SRC)/main.rs
+ $(RUSTC) -C incremental=$(INCR) $(SRC)/main.rs
+# Second compile: b.rs is shorter, so the cached end line of foo's span
+# no longer exists in the new file — this must not ICE.
+ cp b.rs $(SRC)/main.rs
+ $(RUSTC) -C incremental=$(INCR) $(SRC)/main.rs
--- /dev/null
+fn main() {
+ // foo must be used.
+ foo();
+}
+
+// For this test to operate correctly, foo's body must start on exactly the same
+// line and column and have the exact same length in bytes in a.rs and b.rs. In
+// a.rs, the body must end on a line number which does not exist in b.rs.
+// Basically, avoid modifying this file, including adding or removing whitespace!
+fn foo() {
+ assert_eq!(1, 1);
+
+
+
+
+}
--- /dev/null
+fn main() {
+ // foo must be used.
+ foo();
+}
+
+// For this test to operate correctly, foo's body must start on exactly the same
+// line and column and have the exact same length in bytes in a.rs and b.rs. In
+// a.rs, the body must end on a line number which does not exist in b.rs.
+// Basically, avoid modifying this file, including adding or removing whitespace!
+fn foo() {
+ assert_eq!(1, 1);////
+}
include ../../run-make-fulldeps/tools.mk
+# FIXME https://github.com/rust-lang/rust/issues/78911
# ignore-32bit wrong/no cross compiler and sometimes we pass wrong gcc args (-m64)
all: foo
--- /dev/null
+// compile-flags: -Z unstable-options --check
+
+#![deny(missing_docs)]
+#![deny(rustdoc)]
+
+//! ```rust,testharness
+//~^ ERROR
+//! let x = 12;
+//! ```
+
+pub fn foo() {}
+//~^ ERROR
+//~^^ ERROR
+
+/// hello
+//~^ ERROR
+///
+/// ```rust,testharness
+/// let x = 12;
+/// ```
+pub fn bar() {}
--- /dev/null
+error: missing documentation for a function
+ --> $DIR/check-fail.rs:11:1
+ |
+LL | pub fn foo() {}
+ | ^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/check-fail.rs:3:9
+ |
+LL | #![deny(missing_docs)]
+ | ^^^^^^^^^^^^
+
+error: missing code example in this documentation
+ --> $DIR/check-fail.rs:11:1
+ |
+LL | pub fn foo() {}
+ | ^^^^^^^^^^^^^^^
+ |
+note: the lint level is defined here
+ --> $DIR/check-fail.rs:4:9
+ |
+LL | #![deny(rustdoc)]
+ | ^^^^^^^
+ = note: `#[deny(missing_doc_code_examples)]` implied by `#[deny(rustdoc)]`
+
+error: unknown attribute `testharness`. Did you mean `test_harness`?
+ --> $DIR/check-fail.rs:6:1
+ |
+LL | / //! ```rust,testharness
+LL | |
+LL | | //! let x = 12;
+LL | | //! ```
+ | |_______^
+ |
+note: the lint level is defined here
+ --> $DIR/check-fail.rs:4:9
+ |
+LL | #![deny(rustdoc)]
+ | ^^^^^^^
+ = note: `#[deny(invalid_codeblock_attributes)]` implied by `#[deny(rustdoc)]`
+ = help: the code block will either not be tested if not marked as a rust one or the code will be wrapped inside a main function
+
+error: unknown attribute `testharness`. Did you mean `test_harness`?
+ --> $DIR/check-fail.rs:15:1
+ |
+LL | / /// hello
+LL | |
+LL | | ///
+LL | | /// ```rust,testharness
+LL | | /// let x = 12;
+LL | | /// ```
+ | |_______^
+ |
+ = help: the code block will either not be tested if not marked as a rust one or the code will be wrapped inside a main function
+
+error: aborting due to 4 previous errors
+
--- /dev/null
+// check-pass
+// compile-flags: -Z unstable-options --check
+
+#![warn(missing_docs)]
+//~^ WARN
+//~^^ WARN
+#![warn(rustdoc)]
+
+pub fn foo() {}
+//~^ WARN
+//~^^ WARN
--- /dev/null
+warning: missing documentation for the crate
+ --> $DIR/check.rs:4:1
+ |
+LL | / #![warn(missing_docs)]
+LL | |
+LL | |
+LL | | #![warn(rustdoc)]
+LL | |
+LL | | pub fn foo() {}
+ | |_______________^
+ |
+note: the lint level is defined here
+ --> $DIR/check.rs:4:9
+ |
+LL | #![warn(missing_docs)]
+ | ^^^^^^^^^^^^
+
+warning: missing documentation for a function
+ --> $DIR/check.rs:9:1
+ |
+LL | pub fn foo() {}
+ | ^^^^^^^^^^^^
+
+warning: missing code example in this documentation
+ --> $DIR/check.rs:4:1
+ |
+LL | / #![warn(missing_docs)]
+LL | |
+LL | |
+LL | | #![warn(rustdoc)]
+LL | |
+LL | | pub fn foo() {}
+ | |_______________^
+ |
+note: the lint level is defined here
+ --> $DIR/check.rs:7:9
+ |
+LL | #![warn(rustdoc)]
+ | ^^^^^^^
+ = note: `#[warn(missing_doc_code_examples)]` implied by `#[warn(rustdoc)]`
+
+warning: missing code example in this documentation
+ --> $DIR/check.rs:9:1
+ |
+LL | pub fn foo() {}
+ | ^^^^^^^^^^^^^^^
+
+warning: 4 warnings emitted
+
// edition:2018
+#![feature(min_const_generics)]
// @has async_fn/fn.foo.html '//pre[@class="rust fn"]' 'pub async fn foo() -> Option<Foo>'
pub async fn foo() -> Option<Foo> {
'âš '
}
+// @has async_fn/fn.mut_args.html '//pre[@class="rust fn"]' 'pub async fn mut_args(a: usize)'
+pub async fn mut_args(mut a: usize) {}
+
+// @has async_fn/fn.mut_ref.html '//pre[@class="rust fn"]' 'pub async fn mut_ref(x: i32)'
+pub async fn mut_ref(ref mut x: i32) {}
+
trait Bar {}
impl Bar for () {}
// @has async_fn/struct.Foo.html
// @matches - '//code' 'pub async fn f\(\)$'
// @matches - '//code' 'pub async unsafe fn g\(\)$'
+// @matches - '//code' 'pub async fn mut_self\(self, first: usize\)$'
pub struct Foo;
impl Foo {
pub async fn f() {}
pub async unsafe fn g() {}
+ pub async fn mut_self(mut self, mut first: usize) {}
}
+
+pub trait Trait<const N: usize> {}
+// @has async_fn/fn.const_generics.html
+// @has - '//pre[@class="rust fn"]' 'pub async fn const_generics<const N: usize>(_: impl Trait<N>)'
+pub async fn const_generics<const N: usize>(_: impl Trait<N>) {}
--- /dev/null
+// compile-flags: -Z unstable-options --check
+
+// @!has check/fn.foo.html
+// @!has check/index.html
+pub fn foo() {}
}
// @has foo/fn.b_sink.html '//pre[@class="rust fn"]' \
-// 'pub async fn b_sink<const N: usize>(__arg0: impl Trait<N>)'
-// FIXME(const_generics): This should be `_` not `__arg0`.
+// 'pub async fn b_sink<const N: usize>(_: impl Trait<N>)'
pub async fn b_sink<const N: usize>(_: impl Trait<N>) {}
// @has foo/fn.concrete.html '//pre[@class="rust fn"]' \
--- /dev/null
+#![crate_name = "issue_78673"]
+
+pub trait Something {}
+
+pub trait AnAmazingTrait {}
+
+impl<T: Something> AnAmazingTrait for T {}
+
+// @has 'issue_78673/struct.MyStruct.html'
+// @has - '//*[@class="impl"]' 'AnAmazingTrait for MyStruct'
+// @!has - '//*[@class="impl"]' 'AnAmazingTrait for T'
+pub struct MyStruct;
+
+impl AnAmazingTrait for MyStruct {}
+
+// generic structs may have _both_ specific and blanket impls that apply
+
+// @has 'issue_78673/struct.AnotherStruct.html'
+// @has - '//*[@class="impl"]' 'AnAmazingTrait for AnotherStruct<()>'
+// @has - '//*[@class="impl"]' 'AnAmazingTrait for T'
+pub struct AnotherStruct<T>(T);
+
+impl<T: Something> Something for AnotherStruct<T> {}
+impl AnAmazingTrait for AnotherStruct<()> {}
},
17 => {
let path = Path::from_ident(Ident::from_str("S"));
- g(ExprKind::Struct(path, vec![], Some(make_x())));
+ g(ExprKind::Struct(path, vec![], StructRest::Base(make_x())));
},
18 => {
iter_exprs(depth - 1, &mut |e| g(ExprKind::Try(e)));
--- /dev/null
+// Check that all types allowed with `min_const_generics` work.
+// run-pass
+// revisions: full min
+
+#![cfg_attr(full, feature(const_generics))]
+#![cfg_attr(full, allow(incomplete_features))]
+#![cfg_attr(min, feature(min_const_generics))]
+
+// One struct per permitted const-parameter type: every integer width
+// (signed and unsigned), `usize`/`isize`, plus `char` and `bool`.
+struct A<const N: u8>;
+struct B<const N: u16>;
+struct C<const N: u32>;
+struct D<const N: u64>;
+struct E<const N: u128>;
+struct F<const N: usize>;
+struct G<const N: i8>;
+struct H<const N: i16>;
+struct I<const N: i32>;
+struct J<const N: i64>;
+struct K<const N: i128>;
+struct L<const N: isize>;
+struct M<const N: char>;
+struct N<const N: bool>;
+
+fn main() {
+ // Instantiate each struct at the extreme values of its parameter type.
+ let _ = A::<{u8::MIN}>;
+ let _ = A::<{u8::MAX}>;
+ let _ = B::<{u16::MIN}>;
+ let _ = B::<{u16::MAX}>;
+ let _ = C::<{u32::MIN}>;
+ let _ = C::<{u32::MAX}>;
+ let _ = D::<{u64::MIN}>;
+ let _ = D::<{u64::MAX}>;
+ let _ = E::<{u128::MIN}>;
+ let _ = E::<{u128::MAX}>;
+ let _ = F::<{usize::MIN}>;
+ let _ = F::<{usize::MAX}>;
+ let _ = G::<{i8::MIN}>;
+ let _ = G::<{i8::MAX}>;
+ let _ = H::<{i16::MIN}>;
+ let _ = H::<{i16::MAX}>;
+ let _ = I::<{i32::MIN}>;
+ let _ = I::<{i32::MAX}>;
+ let _ = J::<{i64::MIN}>;
+ let _ = J::<{i64::MAX}>;
+ let _ = K::<{i128::MIN}>;
+ let _ = K::<{i128::MAX}>;
+ let _ = L::<{isize::MIN}>;
+ let _ = L::<{isize::MAX}>;
+ let _ = M::<'A'>;
+ let _ = N::<true>;
+}
#![feature(min_const_generics)]
+#![feature(never_type)]
struct Foo<const N: [u8; 0]>;
//~^ ERROR `[u8; 0]` is forbidden
struct Faz<const N: &'static u8>;
//~^ ERROR `&'static u8` is forbidden
+struct Fiz<const N: !>;
+//~^ ERROR `!` is forbidden
+
+enum Goo<const N: ()> { A, B }
+//~^ ERROR `()` is forbidden
+
+union Boo<const N: ()> { a: () }
+//~^ ERROR `()` is forbidden
+
+
fn main() {}
error: `[u8; 0]` is forbidden as the type of a const generic parameter
- --> $DIR/complex-types.rs:3:21
+ --> $DIR/complex-types.rs:4:21
|
LL | struct Foo<const N: [u8; 0]>;
| ^^^^^^^
= note: more complex types are supported with `#[feature(const_generics)]`
error: `()` is forbidden as the type of a const generic parameter
- --> $DIR/complex-types.rs:6:21
+ --> $DIR/complex-types.rs:7:21
|
LL | struct Bar<const N: ()>;
| ^^
= note: more complex types are supported with `#[feature(const_generics)]`
error: `No` is forbidden as the type of a const generic parameter
- --> $DIR/complex-types.rs:11:21
+ --> $DIR/complex-types.rs:12:21
|
LL | struct Fez<const N: No>;
| ^^
= note: more complex types are supported with `#[feature(const_generics)]`
error: `&'static u8` is forbidden as the type of a const generic parameter
- --> $DIR/complex-types.rs:14:21
+ --> $DIR/complex-types.rs:15:21
|
LL | struct Faz<const N: &'static u8>;
| ^^^^^^^^^^^
= note: the only supported types are integers, `bool` and `char`
= note: more complex types are supported with `#[feature(const_generics)]`
-error: aborting due to 4 previous errors
+error: `!` is forbidden as the type of a const generic parameter
+ --> $DIR/complex-types.rs:18:21
+ |
+LL | struct Fiz<const N: !>;
+ | ^
+ |
+ = note: the only supported types are integers, `bool` and `char`
+ = note: more complex types are supported with `#[feature(const_generics)]`
+
+error: `()` is forbidden as the type of a const generic parameter
+ --> $DIR/complex-types.rs:21:19
+ |
+LL | enum Goo<const N: ()> { A, B }
+ | ^^
+ |
+ = note: the only supported types are integers, `bool` and `char`
+ = note: more complex types are supported with `#[feature(const_generics)]`
+
+error: `()` is forbidden as the type of a const generic parameter
+ --> $DIR/complex-types.rs:24:20
+ |
+LL | union Boo<const N: ()> { a: () }
+ | ^^
+ |
+ = note: the only supported types are integers, `bool` and `char`
+ = note: more complex types are supported with `#[feature(const_generics)]`
+
+error: aborting due to 7 previous errors
--- /dev/null
+// run-pass
+// Tests that promoting expressions containing const parameters is allowed.
+#![feature(min_const_generics)]
+
+fn promotion_test<const N: usize>() -> &'static usize {
+ // `3 + N` must be promotable even though it mentions the const
+ // parameter `N`, so this borrow can have `'static` lifetime.
+ &(3 + N)
+}
+
+fn main() {
+ assert_eq!(promotion_test::<13>(), &16);
+}
--- /dev/null
+// run-pass
+// Tests destructuring assignment through nested patterns: a struct
+// pattern containing a tuple-struct pattern and a slice pattern.
+
+#![feature(destructuring_assignment)]
+
+struct Struct<S, T> {
+ a: S,
+ b: T,
+}
+
+struct TupleStruct<S, T>(S, T);
+
+fn main() {
+ let (a, b, c, d);
+ // A single assignment binds all four previously-declared variables.
+ Struct { a: TupleStruct((a, b), c), b: [d] } =
+ Struct { a: TupleStruct((0, 1), 2), b: [3] };
+ assert_eq!((a, b, c, d), (0, 1, 2, 3));
+}
(a, b) += (3, 4); //~ ERROR invalid left-hand side of assignment
//~| ERROR binary assignment operation `+=` cannot be applied
- [a, b] = [3, 4]; //~ ERROR invalid left-hand side of assignment
+ [a, b] = [3, 4]; //~ ERROR destructuring assignments are unstable
[a, b] += [3, 4]; //~ ERROR invalid left-hand side of assignment
//~| ERROR binary assignment operation `+=` cannot be applied
let s = S { x: 3, y: 4 };
- S { x: a, y: b } = s; //~ ERROR invalid left-hand side of assignment
+ S { x: a, y: b } = s; //~ ERROR destructuring assignments are unstable
S { x: a, y: b } += s; //~ ERROR invalid left-hand side of assignment
//~| ERROR binary assignment operation `+=` cannot be applied
S { x: a, ..s } = S { x: 3, y: 4 };
- //~^ ERROR invalid left-hand side of assignment
+ //~^ ERROR functional record updates are not allowed in destructuring assignments
+ //~| ERROR destructuring assignments are unstable
let c = 3;
= help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
error[E0658]: destructuring assignments are unstable
- --> $DIR/note-unsupported.rs:25:17
+ --> $DIR/note-unsupported.rs:10:12
+ |
+LL | [a, b] = [3, 4];
+ | ------ ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/note-unsupported.rs:16:22
+ |
+LL | S { x: a, y: b } = s;
+ | ---------------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/note-unsupported.rs:20:21
+ |
+LL | S { x: a, ..s } = S { x: 3, y: 4 };
+ | --------------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error: functional record updates are not allowed in destructuring assignments
+ --> $DIR/note-unsupported.rs:20:17
+ |
+LL | S { x: a, ..s } = S { x: 3, y: 4 };
+ | ^ help: consider removing the trailing pattern
+
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/note-unsupported.rs:26:17
|
LL | ((a, b), c) = ((3, 4), 5);
| ----------- ^
| |
| cannot assign to this expression
-error[E0070]: invalid left-hand side of assignment
- --> $DIR/note-unsupported.rs:10:12
- |
-LL | [a, b] = [3, 4];
- | ------ ^
- | |
- | cannot assign to this expression
-
error[E0368]: binary assignment operation `+=` cannot be applied to type `[{integer}; 2]`
--> $DIR/note-unsupported.rs:11:5
|
| |
| cannot assign to this expression
-error[E0070]: invalid left-hand side of assignment
- --> $DIR/note-unsupported.rs:16:22
- |
-LL | S { x: a, y: b } = s;
- | ---------------- ^
- | |
- | cannot assign to this expression
-
error[E0368]: binary assignment operation `+=` cannot be applied to type `S`
--> $DIR/note-unsupported.rs:17:5
|
| |
| cannot assign to this expression
-error[E0070]: invalid left-hand side of assignment
- --> $DIR/note-unsupported.rs:20:21
- |
-LL | S { x: a, ..s } = S { x: 3, y: 4 };
- | --------------- ^
- | |
- | cannot assign to this expression
-
-error: aborting due to 11 previous errors
+error: aborting due to 12 previous errors
-Some errors have detailed explanations: E0067, E0070, E0368, E0658.
+Some errors have detailed explanations: E0067, E0368, E0658.
For more information about an error, try `rustc --explain E0067`.
--- /dev/null
+// run-pass
+// Tests destructuring assignment with slice patterns, including the
+// rest pattern `..` in leading, middle, and trailing positions.
+
+#![feature(destructuring_assignment)]
+
+fn main() {
+ let (mut a, mut b);
+ [a, b] = [0, 1];
+ assert_eq!((a, b), (0, 1));
+ let mut c;
+ // `..` in the middle skips the unmatched elements 2 and 3.
+ [a, .., b, c] = [1, 2, 3, 4, 5];
+ assert_eq!((a, b, c), (1, 4, 5));
+ // A bare `..` matches the whole slice and binds nothing.
+ [..] = [1, 2, 3];
+ [c, ..] = [5, 6, 6];
+ assert_eq!(c, 5);
+}
--- /dev/null
+#![feature(destructuring_assignment)]
+
+fn main() {
+ let (mut a, mut b);
+ [a, .., b, ..] = [0, 1]; //~ ERROR `..` can only be used once per slice pattern
+ [a, a, b] = [1, 2]; //~ ERROR pattern requires 3 elements but array has 2
+}
--- /dev/null
+error: `..` can only be used once per slice pattern
+ --> $DIR/slice_destructure_fail.rs:5:14
+ |
+LL | [a, .., b, ..] = [0, 1];
+ | -- ^^ can only be used once per slice pattern
+ | |
+ | previously used here
+
+error[E0527]: pattern requires 3 elements but array has 2
+ --> $DIR/slice_destructure_fail.rs:6:3
+ |
+LL | [a, a, b] = [1, 2];
+ | ^^^^^^^^^ expected 2 elements
+
+error: aborting due to 2 previous errors
+
+For more information about this error, try `rustc --explain E0527`.
--- /dev/null
+// run-pass
+// Tests destructuring assignment with struct patterns: shorthand
+// fields, explicitly renamed (swapped) bindings, and the `..` rest.
+
+#![feature(destructuring_assignment)]
+struct Struct<S, T> {
+ a: S,
+ b: T,
+}
+
+fn main() {
+ let (mut a, mut b);
+ Struct { a, b } = Struct { a: 0, b: 1 };
+ assert_eq!((a, b), (0, 1));
+ // Field `a` is assigned into `b` and vice versa.
+ Struct { a: b, b: a } = Struct { a: 1, b: 2 };
+ assert_eq!((a,b), (2, 1));
+ // `..` ignores the remaining fields, so `b` keeps its old value.
+ Struct { a, .. } = Struct { a: 1, b: 3 };
+ assert_eq!((a, b), (1, 1));
+ Struct { .. } = Struct { a: 1, b: 4 };
+ assert_eq!((a, b), (1, 1));
+}
--- /dev/null
+#![feature(destructuring_assignment)]
+struct Struct<S, T> {
+ a: S,
+ b: T,
+}
+
+fn main() {
+ let (mut a, b);
+ let mut c;
+ let d = Struct { a: 0, b: 1 };
+ Struct { a, b, c } = Struct { a: 0, b: 1 }; //~ ERROR does not have a field named `c`
+ Struct { a, ..d } = Struct { a: 1, b: 2 };
+ //~^ ERROR functional record updates are not allowed in destructuring assignments
+ Struct { a, .. }; //~ ERROR base expression required after `..`
+}
--- /dev/null
+error: functional record updates are not allowed in destructuring assignments
+ --> $DIR/struct_destructure_fail.rs:12:19
+ |
+LL | Struct { a, ..d } = Struct { a: 1, b: 2 };
+ | ^ help: consider removing the trailing pattern
+
+error: base expression required after `..`
+ --> $DIR/struct_destructure_fail.rs:14:19
+ |
+LL | Struct { a, .. };
+ | ^ add a base expression here
+
+error[E0026]: struct `Struct` does not have a field named `c`
+ --> $DIR/struct_destructure_fail.rs:11:20
+ |
+LL | Struct { a, b, c } = Struct { a: 0, b: 1 };
+ | ^ struct `Struct` does not have this field
+
+error: aborting due to 3 previous errors
+
+For more information about this error, try `rustc --explain E0026`.
--- /dev/null
+// run-pass
+// Tests destructuring assignment with tuple struct and enum variant
+// patterns, including `Self`, the `..` rest, and paths through a
+// type alias.
+
+#![feature(destructuring_assignment)]
+
+struct TupleStruct<S, T>(S, T);
+
+impl<S, T> TupleStruct<S, T> {
+ fn assign(self, first: &mut S, second: &mut T) {
+ // Test usage of `Self` instead of the struct name:
+ Self(*first, *second) = self
+ }
+}
+
+enum Enum<S, T> {
+ SingleVariant(S, T)
+}
+
+type Alias<S> = Enum<S, isize>;
+
+fn main() {
+ let (mut a, mut b);
+ TupleStruct(a, b) = TupleStruct(0, 1);
+ assert_eq!((a, b), (0, 1));
+ // `..` between bound fields is allowed (here it skips nothing).
+ TupleStruct(a, .., b) = TupleStruct(1, 2);
+ assert_eq!((a, b), (1, 2));
+ // A bare `..` matches all fields and binds nothing.
+ TupleStruct(..) = TupleStruct(3, 4);
+ assert_eq!((a, b), (1, 2));
+ TupleStruct(5,6).assign(&mut a, &mut b);
+ assert_eq!((a, b), (5, 6));
+ Enum::SingleVariant(a, b) = Enum::SingleVariant(7, 8);
+ assert_eq!((a, b), (7, 8));
+ // The variant can also be reached through a type alias path.
+ Alias::SingleVariant(a, b) = Alias::SingleVariant(9, 10);
+ assert_eq!((a, b), (9, 10));
+}
--- /dev/null
+#![feature(destructuring_assignment)]
+
+struct TupleStruct<S, T>(S, T);
+
+enum Enum<S, T> {
+ SingleVariant(S, T)
+}
+
+type Alias<S> = Enum<S, isize>;
+
+trait Test {
+ fn test() -> TupleStruct<isize, isize> {
+ TupleStruct(0, 0)
+ }
+}
+
+impl Test for Alias<isize> {}
+
+fn test() -> TupleStruct<isize, isize> {
+ TupleStruct(0, 0)
+}
+
+fn main() {
+ let (mut a, mut b);
+ TupleStruct(a, .., b, ..) = TupleStruct(0, 1);
+ //~^ ERROR `..` can only be used once per tuple struct or variant pattern
+ Enum::SingleVariant(a, .., b, ..) = Enum::SingleVariant(0, 1);
+ //~^ ERROR `..` can only be used once per tuple struct or variant pattern
+
+ TupleStruct(a, a, b) = TupleStruct(1, 2);
+ //~^ ERROR this pattern has 3 fields, but the corresponding tuple struct has 2 fields
+ Enum::SingleVariant(a, a, b) = Enum::SingleVariant(1, 2);
+ //~^ ERROR this pattern has 3 fields, but the corresponding tuple variant has 2 fields
+
+ // Check that `test` is resolved as a function call, not as a tuple struct pattern:
+ test() = TupleStruct(0, 0);
+ //~^ ERROR invalid left-hand side of assignment
+ (test)() = TupleStruct(0, 0);
+ //~^ ERROR invalid left-hand side of assignment
+ <Alias::<isize> as Test>::test() = TupleStruct(0, 0);
+ //~^ ERROR invalid left-hand side of assignment
+}
--- /dev/null
+error: `..` can only be used once per tuple struct or variant pattern
+ --> $DIR/tuple_struct_destructure_fail.rs:25:27
+ |
+LL | TupleStruct(a, .., b, ..) = TupleStruct(0, 1);
+ | -- ^^ can only be used once per tuple struct or variant pattern
+ | |
+ | previously used here
+
+error: `..` can only be used once per tuple struct or variant pattern
+ --> $DIR/tuple_struct_destructure_fail.rs:27:35
+ |
+LL | Enum::SingleVariant(a, .., b, ..) = Enum::SingleVariant(0, 1);
+ | -- ^^ can only be used once per tuple struct or variant pattern
+ | |
+ | previously used here
+
+error[E0023]: this pattern has 3 fields, but the corresponding tuple struct has 2 fields
+ --> $DIR/tuple_struct_destructure_fail.rs:30:5
+ |
+LL | struct TupleStruct<S, T>(S, T);
+ | ------------------------------- tuple struct defined here
+...
+LL | TupleStruct(a, a, b) = TupleStruct(1, 2);
+ | ^^^^^^^^^^^^^^^^^^^^ expected 2 fields, found 3
+
+error[E0023]: this pattern has 3 fields, but the corresponding tuple variant has 2 fields
+ --> $DIR/tuple_struct_destructure_fail.rs:32:5
+ |
+LL | SingleVariant(S, T)
+ | ------------------- tuple variant defined here
+...
+LL | Enum::SingleVariant(a, a, b) = Enum::SingleVariant(1, 2);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected 2 fields, found 3
+
+error[E0070]: invalid left-hand side of assignment
+ --> $DIR/tuple_struct_destructure_fail.rs:36:12
+ |
+LL | test() = TupleStruct(0, 0);
+ | ------ ^
+ | |
+ | cannot assign to this expression
+
+error[E0070]: invalid left-hand side of assignment
+ --> $DIR/tuple_struct_destructure_fail.rs:38:14
+ |
+LL | (test)() = TupleStruct(0, 0);
+ | -------- ^
+ | |
+ | cannot assign to this expression
+
+error[E0070]: invalid left-hand side of assignment
+ --> $DIR/tuple_struct_destructure_fail.rs:40:38
+ |
+LL | <Alias::<isize> as Test>::test() = TupleStruct(0, 0);
+ | -------------------------------- ^
+ | |
+ | cannot assign to this expression
+
+error: aborting due to 7 previous errors
+
+Some errors have detailed explanations: E0023, E0070.
+For more information about an error, try `rustc --explain E0023`.
--- /dev/null
+fn main() {}
+
+struct S { x : u32 }
+
+#[cfg(FALSE)]
+fn foo() {
+ S { x: 5, .. }; //~ ERROR destructuring assignments are unstable
+}
--- /dev/null
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/underscore-range-expr-gating.rs:7:15
+ |
+LL | S { x: 5, .. };
+ | ^^
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0658`.
--- /dev/null
+pub struct List<T> {
+ head: T,
+}
+
+impl Drop for List<i32> { //~ ERROR E0366
+ fn drop(&mut self) {
+ panic!()
+ }
+}
+
+fn main() {
+ List { head: 0 };
+}
--- /dev/null
+error[E0366]: `Drop` impls cannot be specialized
+ --> $DIR/issue-38868.rs:5:1
+ |
+LL | / impl Drop for List<i32> {
+LL | | fn drop(&mut self) {
+LL | | panic!()
+LL | | }
+LL | | }
+ | |_^
+ |
+note: use the same sequence of generic type, lifetime and const parameters as the struct definition
+ --> $DIR/issue-38868.rs:1:1
+ |
+LL | / pub struct List<T> {
+LL | | head: T,
+LL | | }
+ | |_^
+
+error: aborting due to previous error
+
+For more information about this error, try `rustc --explain E0366`.
--- /dev/null
+// Issue 8142: Test that Drop impls cannot be specialized beyond the
+// predicates attached to the type definition itself.
+#![feature(min_const_generics)]
+
+trait Bound { fn foo(&self) { } }
+struct K<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
+struct L<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
+struct M<'m> { x: &'m i8 }
+struct N<'n> { x: &'n i8 }
+struct O<To> { x: *const To }
+struct P<Tp> { x: *const Tp }
+struct Q<Tq> { x: *const Tq }
+struct R<Tr> { x: *const Tr }
+struct S<Ts:Bound> { x: *const Ts }
+struct T<'t,Ts:'t> { x: &'t Ts }
+struct U;
+struct V<Tva, Tvb> { x: *const Tva, y: *const Tvb }
+struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
+struct X<const Ca: usize>;
+struct Y<const Ca: usize, const Cb: usize>;
+
+enum Enum<T> { Variant(T) }
+struct TupleStruct<T>(T);
+union Union<T: Copy> { f: T }
+
+impl<'al,'adds_bnd:'al> Drop for K<'al,'adds_bnd> { // REJECT
+ //~^ ERROR `Drop` impl requires `'adds_bnd: 'al`
+ fn drop(&mut self) { } }
+
+impl<'al,'adds_bnd> Drop for L<'al,'adds_bnd> where 'adds_bnd:'al { // REJECT
+ //~^ ERROR `Drop` impl requires `'adds_bnd: 'al`
+ fn drop(&mut self) { } }
+
+impl<'ml> Drop for M<'ml> { fn drop(&mut self) { } } // ACCEPT
+
+impl Drop for N<'static> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR mismatched types
+//~| expected struct `N<'n>`
+//~| found struct `N<'static>`
+
+impl<COkNoBound> Drop for O<COkNoBound> { fn drop(&mut self) { } } // ACCEPT
+
+impl Drop for P<i8> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impls cannot be specialized
+
+impl<AddsBnd:Bound> Drop for Q<AddsBnd> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
+
+impl<'rbnd,AddsRBnd:'rbnd> Drop for R<AddsRBnd> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impl requires `AddsRBnd: 'rbnd`
+
+impl<Bs:Bound> Drop for S<Bs> { fn drop(&mut self) { } } // ACCEPT
+
+impl<'t,Bt:'t> Drop for T<'t,Bt> { fn drop(&mut self) { } } // ACCEPT
+
+impl Drop for U { fn drop(&mut self) { } } // ACCEPT
+
+impl<One> Drop for V<One,One> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impls cannot be specialized
+
+impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR cannot infer an appropriate lifetime for lifetime parameter `'lw`
+
+impl Drop for X<3> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impls cannot be specialized
+
+impl<const Ca: usize> Drop for Y<Ca, Ca> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impls cannot be specialized
+
+impl<AddsBnd:Bound> Drop for Enum<AddsBnd> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
+
+impl<AddsBnd:Bound> Drop for TupleStruct<AddsBnd> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
+
+impl<AddsBnd:Copy + Bound> Drop for Union<AddsBnd> { fn drop(&mut self) { } } // REJECT
+//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
+
+pub fn main() { }
--- /dev/null
+error[E0367]: `Drop` impl requires `'adds_bnd: 'al` but the struct it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:26:20
+ |
+LL | impl<'al,'adds_bnd:'al> Drop for K<'al,'adds_bnd> { // REJECT
+ | ^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:6:1
+ |
+LL | struct K<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `'adds_bnd: 'al` but the struct it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:30:67
+ |
+LL | impl<'al,'adds_bnd> Drop for L<'al,'adds_bnd> where 'adds_bnd:'al { // REJECT
+ | ^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:7:1
+ |
+LL | struct L<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0308]: mismatched types
+ --> $DIR/reject-specialized-drops-8142.rs:36:1
+ |
+LL | impl Drop for N<'static> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetime mismatch
+ |
+ = note: expected struct `N<'n>`
+ found struct `N<'static>`
+note: the lifetime `'n` as defined on the struct at 9:10...
+ --> $DIR/reject-specialized-drops-8142.rs:9:10
+ |
+LL | struct N<'n> { x: &'n i8 }
+ | ^^
+ = note: ...does not necessarily outlive the static lifetime
+
+error[E0366]: `Drop` impls cannot be specialized
+ --> $DIR/reject-specialized-drops-8142.rs:43:1
+ |
+LL | impl Drop for P<i8> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: use the same sequence of generic type, lifetime and const parameters as the struct definition
+ --> $DIR/reject-specialized-drops-8142.rs:11:1
+ |
+LL | struct P<Tp> { x: *const Tp }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the struct it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:46:14
+ |
+LL | impl<AddsBnd:Bound> Drop for Q<AddsBnd> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:12:1
+ |
+LL | struct Q<Tq> { x: *const Tq }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `AddsRBnd: 'rbnd` but the struct it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:49:21
+ |
+LL | impl<'rbnd,AddsRBnd:'rbnd> Drop for R<AddsRBnd> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:13:1
+ |
+LL | struct R<Tr> { x: *const Tr }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0366]: `Drop` impls cannot be specialized
+ --> $DIR/reject-specialized-drops-8142.rs:58:1
+ |
+LL | impl<One> Drop for V<One,One> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: use the same sequence of generic type, lifetime and const parameters as the struct definition
+ --> $DIR/reject-specialized-drops-8142.rs:17:1
+ |
+LL | struct V<Tva, Tvb> { x: *const Tva, y: *const Tvb }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0495]: cannot infer an appropriate lifetime for lifetime parameter `'lw` due to conflicting requirements
+ --> $DIR/reject-specialized-drops-8142.rs:61:1
+ |
+LL | impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: first, the lifetime cannot outlive the lifetime `'l1` as defined on the struct at 18:10...
+ --> $DIR/reject-specialized-drops-8142.rs:18:10
+ |
+LL | struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
+ | ^^^
+note: ...but the lifetime must also be valid for the lifetime `'l2` as defined on the struct at 18:15...
+ --> $DIR/reject-specialized-drops-8142.rs:18:15
+ |
+LL | struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
+ | ^^^
+note: ...so that the types are compatible
+ --> $DIR/reject-specialized-drops-8142.rs:61:1
+ |
+LL | impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = note: expected `W<'l1, 'l2>`
+ found `W<'_, '_>`
+
+error[E0366]: `Drop` impls cannot be specialized
+ --> $DIR/reject-specialized-drops-8142.rs:64:1
+ |
+LL | impl Drop for X<3> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: use the same sequence of generic type, lifetime and const parameters as the struct definition
+ --> $DIR/reject-specialized-drops-8142.rs:19:1
+ |
+LL | struct X<const Ca: usize>;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0366]: `Drop` impls cannot be specialized
+ --> $DIR/reject-specialized-drops-8142.rs:67:1
+ |
+LL | impl<const Ca: usize> Drop for Y<Ca, Ca> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+note: use the same sequence of generic type, lifetime and const parameters as the struct definition
+ --> $DIR/reject-specialized-drops-8142.rs:20:1
+ |
+LL | struct Y<const Ca: usize, const Cb: usize>;
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the enum it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:70:14
+ |
+LL | impl<AddsBnd:Bound> Drop for Enum<AddsBnd> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:22:1
+ |
+LL | enum Enum<T> { Variant(T) }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the struct it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:73:14
+ |
+LL | impl<AddsBnd:Bound> Drop for TupleStruct<AddsBnd> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:23:1
+ |
+LL | struct TupleStruct<T>(T);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the union it is implemented for does not
+ --> $DIR/reject-specialized-drops-8142.rs:76:21
+ |
+LL | impl<AddsBnd:Copy + Bound> Drop for Union<AddsBnd> { fn drop(&mut self) { } } // REJECT
+ | ^^^^^
+ |
+note: the implementor must specify the same requirement
+ --> $DIR/reject-specialized-drops-8142.rs:24:1
+ |
+LL | union Union<T: Copy> { f: T }
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+error: aborting due to 13 previous errors
+
+Some errors have detailed explanations: E0308, E0366, E0367, E0495.
+For more information about an error, try `rustc --explain E0308`.
+++ /dev/null
-pub struct List<T> {
- head: T,
-}
-
-impl Drop for List<i32> { //~ ERROR E0366
- fn drop(&mut self) {
- panic!()
- }
-}
-
-fn main() {
- List { head: 0 };
-}
+++ /dev/null
-error[E0366]: `Drop` impls cannot be specialized
- --> $DIR/issue-38868.rs:5:1
- |
-LL | / impl Drop for List<i32> {
-LL | | fn drop(&mut self) {
-LL | | panic!()
-LL | | }
-LL | | }
- | |_^
- |
-note: use the same sequence of generic type, lifetime and const parameters as the struct definition
- --> $DIR/issue-38868.rs:1:1
- |
-LL | / pub struct List<T> {
-LL | | head: T,
-LL | | }
- | |_^
-
-error: aborting due to previous error
-
-For more information about this error, try `rustc --explain E0366`.
--- /dev/null
+// run-pass
+// compile-flags: -Coverflow-checks=off -Ccodegen-units=1 -Copt-level=0
+
+fn foo(a: i128, b: i128, s: u32) -> (i128, i128) {
+ if s == 128 {
+ (0, 0)
+ } else {
+ (b >> s, a >> s)
+ }
+}
+fn main() {
+ let r = foo(0, 8, 1);
+ if r.0 != 4 {
+ panic!();
+ }
+}
fn main() {
let value = [7u8];
- while Some(0) = value.get(0) { //~ ERROR mismatched types
- //~^ NOTE expected `bool`, found `()`
- //~| HELP you might have meant to use pattern matching
+ while Some(0) = value.get(0) { //~ ERROR destructuring assignments are unstable
+ //~| ERROR invalid left-hand side of assignment
+ //~| ERROR mismatched types
+ //~| ERROR mismatched types
+
+ // FIXME The following diagnostic should also be emitted
+ // HELP you might have meant to use pattern matching
}
}
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/issue-77218.rs:3:19
+ |
+LL | while Some(0) = value.get(0) {
+ | ------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0070]: invalid left-hand side of assignment
+ --> $DIR/issue-77218.rs:3:19
+ |
+LL | while Some(0) = value.get(0) {
+ | - ^
+ | |
+ | cannot assign to this expression
+
+error[E0308]: mismatched types
+ --> $DIR/issue-77218.rs:3:16
+ |
+LL | while Some(0) = value.get(0) {
+ | ^
+ | |
+ | expected integer, found `&u8`
+ | help: consider dereferencing the borrow: `*0`
+
error[E0308]: mismatched types
--> $DIR/issue-77218.rs:3:11
|
LL | while Some(0) = value.get(0) {
| ^^^^^^^^^^^^^^^^^^^^^^ expected `bool`, found `()`
- |
-help: you might have meant to use pattern matching
- |
-LL | while let Some(0) = value.get(0) {
- | ^^^
-error: aborting due to previous error
+error: aborting due to 4 previous errors
-For more information about this error, try `rustc --explain E0308`.
+Some errors have detailed explanations: E0070, E0308, E0658.
+For more information about an error, try `rustc --explain E0070`.
+++ /dev/null
-// Issue 8142: Test that Drop impls cannot be specialized beyond the
-// predicates attached to the type definition itself.
-
-trait Bound { fn foo(&self) { } }
-struct K<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
-struct L<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
-struct M<'m> { x: &'m i8 }
-struct N<'n> { x: &'n i8 }
-struct O<To> { x: *const To }
-struct P<Tp> { x: *const Tp }
-struct Q<Tq> { x: *const Tq }
-struct R<Tr> { x: *const Tr }
-struct S<Ts:Bound> { x: *const Ts }
-struct T<'t,Ts:'t> { x: &'t Ts }
-struct U;
-struct V<Tva, Tvb> { x: *const Tva, y: *const Tvb }
-struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
-
-enum Enum<T> { Variant(T) }
-struct TupleStruct<T>(T);
-union Union<T: Copy> { f: T }
-
-impl<'al,'adds_bnd:'al> Drop for K<'al,'adds_bnd> { // REJECT
- //~^ ERROR `Drop` impl requires `'adds_bnd: 'al`
- fn drop(&mut self) { } }
-
-impl<'al,'adds_bnd> Drop for L<'al,'adds_bnd> where 'adds_bnd:'al { // REJECT
- //~^ ERROR `Drop` impl requires `'adds_bnd: 'al`
- fn drop(&mut self) { } }
-
-impl<'ml> Drop for M<'ml> { fn drop(&mut self) { } } // ACCEPT
-
-impl Drop for N<'static> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR mismatched types
-//~| expected struct `N<'n>`
-//~| found struct `N<'static>`
-
-impl<COkNoBound> Drop for O<COkNoBound> { fn drop(&mut self) { } } // ACCEPT
-
-impl Drop for P<i8> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impls cannot be specialized
-
-impl<AddsBnd:Bound> Drop for Q<AddsBnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
-
-impl<'rbnd,AddsRBnd:'rbnd> Drop for R<AddsRBnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impl requires `AddsRBnd: 'rbnd`
-
-impl<Bs:Bound> Drop for S<Bs> { fn drop(&mut self) { } } // ACCEPT
-
-impl<'t,Bt:'t> Drop for T<'t,Bt> { fn drop(&mut self) { } } // ACCEPT
-
-impl Drop for U { fn drop(&mut self) { } } // ACCEPT
-
-impl<One> Drop for V<One,One> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impls cannot be specialized
-
-impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR cannot infer an appropriate lifetime for lifetime parameter `'lw`
-
-impl<AddsBnd:Bound> Drop for Enum<AddsBnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
-
-impl<AddsBnd:Bound> Drop for TupleStruct<AddsBnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
-
-impl<AddsBnd:Copy + Bound> Drop for Union<AddsBnd> { fn drop(&mut self) { } } // REJECT
-//~^ ERROR `Drop` impl requires `AddsBnd: Bound`
-
-pub fn main() { }
+++ /dev/null
-error[E0367]: `Drop` impl requires `'adds_bnd: 'al` but the struct it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:23:20
- |
-LL | impl<'al,'adds_bnd:'al> Drop for K<'al,'adds_bnd> { // REJECT
- | ^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:5:1
- |
-LL | struct K<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0367]: `Drop` impl requires `'adds_bnd: 'al` but the struct it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:27:67
- |
-LL | impl<'al,'adds_bnd> Drop for L<'al,'adds_bnd> where 'adds_bnd:'al { // REJECT
- | ^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:6:1
- |
-LL | struct L<'l1,'l2> { x: &'l1 i8, y: &'l2 u8 }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0308]: mismatched types
- --> $DIR/reject-specialized-drops-8142.rs:33:1
- |
-LL | impl Drop for N<'static> { fn drop(&mut self) { } } // REJECT
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetime mismatch
- |
- = note: expected struct `N<'n>`
- found struct `N<'static>`
-note: the lifetime `'n` as defined on the struct at 8:10...
- --> $DIR/reject-specialized-drops-8142.rs:8:10
- |
-LL | struct N<'n> { x: &'n i8 }
- | ^^
- = note: ...does not necessarily outlive the static lifetime
-
-error[E0366]: `Drop` impls cannot be specialized
- --> $DIR/reject-specialized-drops-8142.rs:40:1
- |
-LL | impl Drop for P<i8> { fn drop(&mut self) { } } // REJECT
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
-note: use the same sequence of generic type, lifetime and const parameters as the struct definition
- --> $DIR/reject-specialized-drops-8142.rs:10:1
- |
-LL | struct P<Tp> { x: *const Tp }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the struct it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:43:14
- |
-LL | impl<AddsBnd:Bound> Drop for Q<AddsBnd> { fn drop(&mut self) { } } // REJECT
- | ^^^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:11:1
- |
-LL | struct Q<Tq> { x: *const Tq }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0367]: `Drop` impl requires `AddsRBnd: 'rbnd` but the struct it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:46:21
- |
-LL | impl<'rbnd,AddsRBnd:'rbnd> Drop for R<AddsRBnd> { fn drop(&mut self) { } } // REJECT
- | ^^^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:12:1
- |
-LL | struct R<Tr> { x: *const Tr }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0366]: `Drop` impls cannot be specialized
- --> $DIR/reject-specialized-drops-8142.rs:55:1
- |
-LL | impl<One> Drop for V<One,One> { fn drop(&mut self) { } } // REJECT
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
-note: use the same sequence of generic type, lifetime and const parameters as the struct definition
- --> $DIR/reject-specialized-drops-8142.rs:16:1
- |
-LL | struct V<Tva, Tvb> { x: *const Tva, y: *const Tvb }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0495]: cannot infer an appropriate lifetime for lifetime parameter `'lw` due to conflicting requirements
- --> $DIR/reject-specialized-drops-8142.rs:58:1
- |
-LL | impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- |
-note: first, the lifetime cannot outlive the lifetime `'l1` as defined on the struct at 17:10...
- --> $DIR/reject-specialized-drops-8142.rs:17:10
- |
-LL | struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
- | ^^^
-note: ...but the lifetime must also be valid for the lifetime `'l2` as defined on the struct at 17:15...
- --> $DIR/reject-specialized-drops-8142.rs:17:15
- |
-LL | struct W<'l1, 'l2> { x: &'l1 i8, y: &'l2 u8 }
- | ^^^
-note: ...so that the types are compatible
- --> $DIR/reject-specialized-drops-8142.rs:58:1
- |
-LL | impl<'lw> Drop for W<'lw,'lw> { fn drop(&mut self) { } } // REJECT
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
- = note: expected `W<'l1, 'l2>`
- found `W<'_, '_>`
-
-error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the enum it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:61:14
- |
-LL | impl<AddsBnd:Bound> Drop for Enum<AddsBnd> { fn drop(&mut self) { } } // REJECT
- | ^^^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:19:1
- |
-LL | enum Enum<T> { Variant(T) }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the struct it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:64:14
- |
-LL | impl<AddsBnd:Bound> Drop for TupleStruct<AddsBnd> { fn drop(&mut self) { } } // REJECT
- | ^^^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:20:1
- |
-LL | struct TupleStruct<T>(T);
- | ^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error[E0367]: `Drop` impl requires `AddsBnd: Bound` but the union it is implemented for does not
- --> $DIR/reject-specialized-drops-8142.rs:67:21
- |
-LL | impl<AddsBnd:Copy + Bound> Drop for Union<AddsBnd> { fn drop(&mut self) { } } // REJECT
- | ^^^^^
- |
-note: the implementor must specify the same requirement
- --> $DIR/reject-specialized-drops-8142.rs:21:1
- |
-LL | union Union<T: Copy> { f: T }
- | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-error: aborting due to 11 previous errors
-
-Some errors have detailed explanations: E0308, E0366, E0367, E0495.
-For more information about an error, try `rustc --explain E0308`.
let foo = Some(0);
let bar = None;
if Some(x) = foo {} //~ ERROR cannot find value `x` in this scope
+ //~^ ERROR mismatched types
+ //~^^ ERROR destructuring assignments are unstable
if Some(foo) = bar {} //~ ERROR mismatched types
+ //~^ ERROR destructuring assignments are unstable
if 3 = foo {} //~ ERROR mismatched types
if Some(3) = foo {} //~ ERROR mismatched types
+ //~^ ERROR destructuring assignments are unstable
+ //~^^ ERROR invalid left-hand side of assignment
}
LL | if let Some(x) = foo {}
| ^^^
-error[E0308]: mismatched types
- --> $DIR/if-let-typo.rs:5:8
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/if-let-typo.rs:4:16
+ |
+LL | if Some(x) = foo {}
+ | ------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/if-let-typo.rs:7:18
|
LL | if Some(foo) = bar {}
- | ^^^^^^^^^^^^^^^ expected `bool`, found `()`
+ | --------- ^
+ | |
+ | cannot assign to this expression
|
-help: you might have meant to use pattern matching
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/if-let-typo.rs:10:16
|
-LL | if let Some(foo) = bar {}
- | ^^^
-help: you might have meant to compare for equality
+LL | if Some(3) = foo {}
+ | ------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
+error[E0308]: mismatched types
+ --> $DIR/if-let-typo.rs:4:8
+ |
+LL | if Some(x) = foo {}
+ | ^^^^^^^^^^^^^ expected `bool`, found `()`
+
+error[E0308]: mismatched types
+ --> $DIR/if-let-typo.rs:7:8
|
-LL | if Some(foo) == bar {}
- | ^^
+LL | if Some(foo) = bar {}
+ | ^^^^^^^^^^^^^^^ expected `bool`, found `()`
error[E0308]: mismatched types
- --> $DIR/if-let-typo.rs:6:8
+ --> $DIR/if-let-typo.rs:9:8
|
LL | if 3 = foo {}
| ^^^^^^^ expected `bool`, found `()`
LL | if let 3 = foo {}
| ^^^
+error[E0070]: invalid left-hand side of assignment
+ --> $DIR/if-let-typo.rs:10:16
+ |
+LL | if Some(3) = foo {}
+ | - ^
+ | |
+ | cannot assign to this expression
+
error[E0308]: mismatched types
- --> $DIR/if-let-typo.rs:7:8
+ --> $DIR/if-let-typo.rs:10:8
|
LL | if Some(3) = foo {}
| ^^^^^^^^^^^^^ expected `bool`, found `()`
- |
-help: you might have meant to use pattern matching
- |
-LL | if let Some(3) = foo {}
- | ^^^
-help: you might have meant to compare for equality
- |
-LL | if Some(3) == foo {}
- | ^^
-error: aborting due to 4 previous errors
+error: aborting due to 9 previous errors
-Some errors have detailed explanations: E0308, E0425.
-For more information about an error, try `rustc --explain E0308`.
+Some errors have detailed explanations: E0070, E0308, E0425, E0658.
+For more information about an error, try `rustc --explain E0070`.
-Subproject commit d5556aeb8405b1fe696adb6e297ad7a1f2989b62
+Subproject commit 2af662e22177a839763ac8fb70d245a680b15214
both(l, r, |l, r| eq_expr(l, r))
}
+pub fn eq_struct_rest(l: &StructRest, r: &StructRest) -> bool {
+ match (l, r) {
+ (StructRest::Base(lb), StructRest::Base(rb)) => eq_expr(lb, rb),
+ (StructRest::Rest(_), StructRest::Rest(_)) => true,
+ (StructRest::None, StructRest::None) => true,
+ _ => false,
+ }
+}
+
pub fn eq_expr(l: &Expr, r: &Expr) -> bool {
use ExprKind::*;
if !over(&l.attrs, &r.attrs, |l, r| eq_attr(l, r)) {
(Path(lq, lp), Path(rq, rp)) => both(lq, rq, |l, r| eq_qself(l, r)) && eq_path(lp, rp),
(MacCall(l), MacCall(r)) => eq_mac_call(l, r),
(Struct(lp, lfs, lb), Struct(rp, rfs, rb)) => {
- eq_path(lp, rp) && eq_expr_opt(lb, rb) && unordered_over(lfs, rfs, |l, r| eq_field(l, r))
+ eq_path(lp, rp) && eq_struct_rest(lb, rb) && unordered_over(lfs, rfs, |l, r| eq_field(l, r))
},
_ => false,
}
+error[E0658]: destructuring assignments are unstable
+ --> $DIR/ice-6250.rs:12:25
+ |
+LL | Some(reference) = cache.data.get(key) {
+ | --------------- ^
+ | |
+ | cannot assign to this expression
+ |
+ = note: see issue #71126 <https://github.com/rust-lang/rust/issues/71126> for more information
+ = help: add `#![feature(destructuring_assignment)]` to the crate attributes to enable
+
error[E0601]: `main` function not found in crate `ice_6250`
--> $DIR/ice-6250.rs:4:1
|
LL | | }
| |_^ consider adding a `main` function to `$DIR/ice-6250.rs`
+error[E0308]: mismatched types
+ --> $DIR/ice-6250.rs:12:14
+ |
+LL | Some(reference) = cache.data.get(key) {
+ | ^^^^^^^^^
+ | |
+ | expected integer, found `&i32`
+ | help: consider dereferencing the borrow: `*reference`
+
error[E0308]: mismatched types
--> $DIR/ice-6250.rs:12:9
|
LL | Some(reference) = cache.data.get(key) {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `bool`, found `()`
- |
-help: you might have meant to use pattern matching
- |
-LL | let Some(reference) = cache.data.get(key) {
- | ^^^
-error: aborting due to 2 previous errors
+error: aborting due to 4 previous errors
-Some errors have detailed explanations: E0308, E0601.
+Some errors have detailed explanations: E0308, E0601, E0658.
For more information about an error, try `rustc --explain E0308`.
-Subproject commit 30e0c303a019737cb0e22db464c774ac66b14e07
+Subproject commit df4109151b6870cdb6d170326d1c099746990ea8
-Subproject commit eb894d53708122a67762de9489881c11aa8ce257
+Subproject commit 0f29ff6da0c5ff622e739beb8fc3bbe77119b3c1