This PR cleans up the rest of the spelling mistakes in the compiler comments. It does not change any spelling issues that appear in string literals or in code itself.
// Convert the result having "2 * precision" significant-bits back to the one
// having "precision" significant-bits. First, move the radix point from
- // poision "2*precision - 1" to "precision - 1". The exponent need to be
+ // position "2*precision - 1" to "precision - 1". The exponent need to be
// adjusted by "2*precision - 1" - "precision - 1" = "precision".
*exp -= precision as ExpInt + 1;
///
/// For example, `#[cfg(FALSE)] struct Foo {}` would
/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
-/// and a `tokens` field storing the (unparesd) tokens `struct Foo {}`
+/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct AttributesData {
/// Attributes, both outer and inner.
.infcx
.handle_opaque_type(a, b, a_is_expected, &cause, param_env)?
.obligations,
- // These fields are filled in during exectuion of the operation
+ // These fields are filled in during execution of the operation
base_universe: None,
region_constraints: None,
},
//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
//! sig0 = (i64, i64, i64) system_v
//! sig1 = (i64, i64, i64) system_v
-//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
//!
//! block0(v0: i64, v1: i64, v2: i64):
//! v3 = stack_addr.i64 ss0
// we'll encounter later.
let is_allocator = module_kind == ModuleKind::Allocator;
- // We ignore a request for full crate grath LTO if the cate type
+ // We ignore a request for full crate graph LTO if the crate type
// is only an rlib, as there is no full crate graph to process,
// that'll happen later.
//
impl Frame {
/// Construct a new frame around the delimited set of tokens.
fn new(mut tts: Vec<mbe::TokenTree>) -> Frame {
- // Need to add empty delimeters.
+ // Need to add empty delimiters.
let open_tt = mbe::TokenTree::token(token::OpenDelim(token::NoDelim), DUMMY_SP);
let close_tt = mbe::TokenTree::token(token::CloseDelim(token::NoDelim), DUMMY_SP);
tts.insert(0, open_tt);
));
}
} else {
- // 0 is the initial counter (we have done 0 repretitions so far). `len`
+ // 0 is the initial counter (we have done 0 repetitions so far). `len`
// is the total number of repetitions we should generate.
repeats.push((0, len));
if let Some(Some(row)) = self.rows.get(row) { Some(row) } else { None }
}
- /// Interescts `row` with `set`. `set` can be either `BitSet` or
+ /// Intersects `row` with `set`. `set` can be either `BitSet` or
/// `HybridBitSet`. Has no effect if `row` does not exist.
///
/// Returns true if the row was changed.
//! in particular to extract out the resulting region obligations and
//! encode them therein.
//!
-//! For an overview of what canonicaliation is and how it fits into
+//! For an overview of what canonicalization is and how it fits into
//! rustc, check out the [chapter in the rustc dev guide][c].
//!
//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
let len = sub1.len() - common_default_params;
let consts_offset = len - sub1.consts().count();
- // Only draw `<...>` if there're lifetime/type arguments.
+ // Only draw `<...>` if there are lifetime/type arguments.
if len > 0 {
values.0.push_normal("<");
values.1.push_normal("<");
}
// Close the type argument bracket.
- // Only draw `<...>` if there're lifetime/type arguments.
+ // Only draw `<...>` if there are lifetime/type arguments.
if len > 0 {
values.0.push_normal(">");
values.1.push_normal(">");
})
.transpose();
if output.is_some() {
- // We don't account for multiple `Future::Output = Ty` contraints.
+ // We don't account for multiple `Future::Output = Ty` constraints.
return output;
}
}
// But if we did in reverse order, we would create a `v <:
// LHS` (or vice versa) constraint and then instantiate
// `v`. This would require further processing to achieve same
- // end-result; in partiular, this screws up some of the logic
+ // end-result; in particular, this screws up some of the logic
// in coercion, which expects LUB to figure out that the LHS
// is (e.g.) `Box<i32>`. A more obvious solution might be to
// iterate on the subtype obligations that are returned, but I
/// Track how many errors were reported when this infcx is created.
/// If the number of errors increases, that's also a sign (line
- /// `tained_by_errors`) to avoid reporting certain kinds of errors.
+ /// `tainted_by_errors`) to avoid reporting certain kinds of errors.
// FIXME(matthewjasper) Merge into `tainted_by_errors_flag`
err_count_on_creation: usize,
}
if a == b {
- // Subtle: if a or b has a bound variable that we are lazilly
+ // Subtle: if a or b has a bound variable that we are lazily
// substituting, then even if a == b, it could be that the values we
// will substitute for those bound variables are *not* the same, and
// hence returning `Ok(a)` is incorrect.
/// # Constrain regions, not the hidden concrete type
///
/// Note that generating constraints on each region `Rc` is *not*
- /// the same as generating an outlives constraint on `Tc` iself.
+ /// the same as generating an outlives constraint on `Tc` itself.
/// For example, if we had a function like this:
///
/// ```rust
// regionck more ways to prove that it holds. However,
// regionck is not (at least currently) prepared to deal with
// higher-ranked regions that may appear in the
- // trait-ref. Therefore, if we see any higher-ranke regions,
+ // trait-ref. Therefore, if we see any higher-ranked regions,
// we simply fallback to the most restrictive rule, which
// requires that `Pi: 'a` for all `i`.
ty::Projection(ref data) => {
&self.region_bound_pairs_map
}
- /// This is a hack to support the old-skool regionck, which
+ /// This is a hack to support the old-school regionck, which
/// processes region constraints from the main function and the
/// closure together. In that context, when we enter a closure, we
/// want to be able to "save" the state of the surrounding a
debug!("projection_must_outlive: approx_env_bounds={:?}", approx_env_bounds);
// Remove outlives bounds that we get from the environment but
- // which are also deducable from the trait. This arises (cc
+ // which are also deducible from the trait. This arises (cc
// #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
// 'a` in the environment but `trait Foo<'b> { type Item: 'b
// }` in the trait definition.
/// not entirely true. In particular, in the future, we may extend the
/// environment with implied bounds or other info about how placeholders
/// relate to regions in outer universes. In that case, `P1: R` for example
- /// might become solveable.
+ /// might become solvable.
///
/// # Summary of the implementation
///
// * `scc_placeholder[scc1]` stores the placeholder that `scc1` must
// be equal to (if any)
//
- // For each succssor `scc2` where `scc1: scc2`:
+ // For each successor `scc2` where `scc1: scc2`:
//
// * `scc_placeholder[scc2]` stores some placeholder `P` where
// `scc2: P` (if any)
// Update minimum universe of scc1.
self.scc_universes[scc1] = scc1_universe;
- // At this point, `scc_placholder[scc1]` stores the placeholder that
+ // At this point, `scc_placeholder[scc1]` stores the placeholder that
// `scc1` must be equal to, if any.
if let Some(scc1_placeholder) = self.scc_placeholders[scc1] {
debug!(
/// exist). This prevents us from making many such regions.
glbs: CombineMap<'tcx>,
- /// When we add a R1 == R2 constriant, we currently add (a) edges
+ /// When we add a R1 == R2 constraint, we currently add (a) edges
/// R1 <= R2 and R2 <= R1 and (b) we unify the two regions in this
/// table. You can then call `opportunistic_resolve_var` early
/// which will map R1 and R2 to some common region (i.e., either
/// The opportunistic region resolver opportunistically resolves regions
/// variables to the variable with the least variable id. It is used when
-/// normlizing projections to avoid hitting the recursion limit by creating
+/// normalizing projections to avoid hitting the recursion limit by creating
/// many versions of a predicate for types that in the end have to unify.
///
/// If you want to resolve type and const variables as well, call
});
},
{
- // We force these querie to run,
+ // We force these queries to run,
// since they might not otherwise get called.
// This marks the corresponding crate-level attributes
// as used, and ensures that their values are valid.
let substs = cx.typeck_results().node_substs(expr.hir_id);
if substs.needs_subst() {
// We can't resolve on types that require monomorphization, so we don't handle them if
- // we need to perfom substitution.
+ // we need to perform substitution.
return;
}
let param_env = cx.tcx.param_env(trait_id);
declare_lint! {
/// The `nontrivial_structural_match` lint detects constants that are used in patterns,
/// whose type is not structural-match and whose initializer body actually uses values
- /// that are not structural-match. So `Option<NotStruturalMatch>` is ok if the constant
+ /// that are not structural-match. So `Option<NotStructuralMatch>` is ok if the constant
/// is just `None`.
///
/// ### Example
///
/// ### Explanation
///
- /// Previous versions of Rust accepted constants in patterns, even if those constants's types
+ /// Previous versions of Rust accepted constants in patterns, even if those constants' types
/// did not have `PartialEq` derived. Thus the compiler falls back to runtime execution of
/// `PartialEq`, which can report that two constants are not equal even if they are
/// bit-equivalent.
/// The `deref_into_dyn_supertrait` lint is output whenever there is a use of the
/// `Deref` implementation with a `dyn SuperTrait` type as `Output`.
///
- /// These implementations will become shadowed when the `trait_upcasting` feature is stablized.
+ /// These implementations will become shadowed when the `trait_upcasting` feature is stabilized.
/// The `deref` functions will no longer be called implicitly, so there might be behavior change.
///
/// ### Example
//
// Suppose that we're currently compiling crate A, and start deserializing
// metadata from crate B. When we deserialize a Span from crate B's metadata,
- // there are two posibilites:
+ // there are two possibilities:
//
// 1. The span references a file from crate B. This makes it a 'local' span,
// which means that we can use crate B's serialized source map information.
// from. We use `TAG_VALID_SPAN_FOREIGN` to indicate that a `CrateNum` should
// be deserialized after the rest of the span data, which tells the deserializer
// which crate contains the source map information.
- // 2. This span comes from our own crate. No special hamdling is needed - we just
+ // 2. This span comes from our own crate. No special handling is needed - we just
// write `TAG_VALID_SPAN_LOCAL` to let the deserializer know that it should use
// our own source map information.
//
}
pub fn iter_local_def_id(self) -> impl Iterator<Item = LocalDefId> + 'hir {
- // Create a dependency to the crate to be sure we reexcute this when the amount of
+ // Create a dependency to the crate to be sure we re-execute this when the amount of
// definitions change.
self.tcx.ensure().hir_crate(());
self.tcx.untracked_resolutions.definitions.iter_local_def_id()
/// distinguish the two (e.g., due to our preference for where
/// clauses over impls).
///
- /// After some unifiations and things have been done, it makes
+ /// After some unifications and things have been done, it makes
/// sense to try and prove again -- of course, at that point, the
/// canonical form will be different, making this a distinct
/// query.
(Level::Expect(expect_id), _) => {
// This case is special as we actually allow the lint itself in this context, but
// we can't return early like in the case for `Level::Allow` because we still
- // need the lint diagnostic to be emitted to `rustc_error::HanderInner`.
+ // need the lint diagnostic to be emitted to `rustc_error::HandlerInner`.
//
// We can also not mark the lint expectation as fulfilled here right away, as it
// can still be cancelled in the decorate function. All of this means that we simply
},
/// Something was divided by 0 (x / 0).
DivisionByZero,
- /// Something was "remainded" by 0 (x % 0).
+ /// Something was "remainded" by 0 (x % 0).
RemainderByZero,
/// Signed division overflowed (INT_MIN / -1).
DivisionOverflow,
/// Encodes the effect of a user-supplied type annotation on the
/// subcomponents of a pattern. The effect is determined by applying the
-/// given list of proejctions to some underlying base type. Often,
+/// given list of projections to some underlying base type. Often,
/// the projection element list `projs` is empty, in which case this
/// directly encodes a type in `base`. But in the case of complex patterns with
/// subpatterns and bindings, we want to apply only a *part* of the type to a variable,
/// > ``… because it's nested under this `unsafe fn` ``
///
/// the second HirId here indicates the first usage of the `unsafe` block,
- /// which allows retrival of the LintLevelSource for why that operation would
+ /// which allows retrieval of the LintLevelSource for why that operation would
/// have been permitted without the block
InUnsafeFn(hir::HirId, hir::HirId),
}
/// Preorder traversal of a graph.
///
/// Preorder traversal is when each node is visited after at least one of its predecessors. If you
-/// are familar with some basic graph theory, then this performs a depth first search and returns
+/// are familiar with some basic graph theory, then this performs a depth first search and returns
/// nodes in order of discovery time.
///
/// ```text
/// One of the following:
/// * `&str`, which will be handled as a string pattern and thus exhaustiveness
/// checking will detect if you use the same string twice in different patterns.
- /// * integer, bool, char or float, which will be handled by exhaustivenes to cover exactly
+ /// * integer, bool, char or float, which will be handled by exhaustiveness to cover exactly
/// its own value, similar to `&str`, but these values are much simpler.
/// * Opaque constants, that must not be matched structurally. So anything that does not derive
/// `PartialEq` and `Eq`.
/// // Case A: ImplSource points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// // type parameters, ImplSource will carry resolutions for those as well:
-/// concrete.clone(); // ImpleSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
+/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
///
/// // Case A: ImplSource points at a specific impl. Only possible when
/// // type is concretely known. If the impl itself has bounded
/// Given a PolyTraitRef, get the PolyTraitRefs of the trait's (transitive) supertraits.
///
-/// A simplfied version of the same function at `rustc_infer::traits::util::supertraits`.
+/// A simplified version of the same function at `rustc_infer::traits::util::supertraits`.
pub fn supertraits<'tcx>(
tcx: TyCtxt<'tcx>,
trait_ref: PolyTraitRef<'tcx>,
(Some(_), "Self") => return false,
_ => {}
}
- // Suggest a where clause bound for a non-type paremeter.
+ // Suggest a where clause bound for a non-type parameter.
let (action, prefix) = if generics.where_clause.predicates.is_empty() {
("introducing a", " where ")
} else {
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
///
-/// Rust functions are classfied whether or not they can unwind based on the
+/// Rust functions are classified whether or not they can unwind based on the
/// active "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
// To fix this UB rustc would like to change in the future to catch unwinds
// from function calls that may unwind within a Rust-defined `extern "C"`
// function and forcibly abort the process, thereby respecting the
- // `nounwind` attribut emitted for `extern "C"`. This behavior change isn't
+ // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
// ready to roll out, so determining whether or not the `C` family of ABIs
// unwinds is conditional not only on their definition but also whether the
// `#![feature(c_unwind)]` feature gate is active.
//! - [`rustc_middle::ty::Ty`], used to represent the semantics of a type.
//! - [`rustc_middle::ty::TyCtxt`], the central data structure in the compiler.
//!
-//! For more information, see ["The `ty` module: representing types"] in the ructc-dev-guide.
+//! For more information, see ["The `ty` module: representing types"] in the rustc-dev-guide.
//!
//! ["The `ty` module: representing types"]: https://rustc-dev-guide.rust-lang.org/ty.html
/// aren't allowed to call that query: it is equal to `type_of(const_param)` which is
/// trivial to compute.
///
-/// If we now want to use that constant in a place which potentionally needs its type
+/// If we now want to use that constant in a place which potentially needs its type
/// we also pass the type of its `const_param`. This is the point of `WithOptConstParam`,
/// except that instead of a `Ty` we bundle the `DefId` of the const parameter.
/// Meaning that we need to use `type_of(const_param_did)` if `const_param_did` is `Some`
// For example, take `std::os::unix::process::CommandExt`, this trait is actually
// defined at `std::sys::unix::ext::process::CommandExt` (at time of writing).
//
- // `std::os::unix` rexports the contents of `std::sys::unix::ext`. `std::sys` is
+ // `std::os::unix` reexports the contents of `std::sys::unix::ext`. `std::sys` is
// private so the "true" path to `CommandExt` isn't accessible.
//
// In this case, the `visible_parent_map` will look something like this:
let mut resugared = false;
- // Special-case `Fn(...) -> ...` and resugar it.
+ // Special-case `Fn(...) -> ...` and re-sugar it.
let fn_trait_kind = cx.tcx().fn_trait_kind_from_lang_item(principal.def_id);
if !cx.tcx().sess.verbose() && fn_trait_kind.is_some() {
if let ty::Tuple(tys) = principal.substs.type_at(0).kind() {
define_scoped_cx!(self);
let mut region_index = self.region_index;
- // If we want to print verbosly, then print *all* binders, even if they
+ // If we want to print verbosely, then print *all* binders, even if they
// aren't named. Eventually, we might just want this as the default, but
// this is not *quite* right and changes the ordering of some output
// anyways.
/// - 'l0...'li and T0...Tj are the generic parameters
/// in scope on the function that defined the closure,
/// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This
-/// is rather hackily encoded via a scalar type. See
+/// is rather hackily encoded via a scalar type. See
/// `Ty::to_opt_closure_kind` for details.
/// - CS represents the *closure signature*, representing as a `fn()`
/// type. For example, `fn(u32, u32) -> u32` would mean that the closure
// we lower the guard.
let target_block = self.cfg.start_new_block();
let mut schedule_drops = true;
- // We keep a stack of all of the bindings and type asciptions
+ // We keep a stack of all of the bindings and type ascriptions
// from the parent candidates that we visit, that also need to
// be bound for each candidate.
traverse_candidate(
};
TerminatorKind::if_(self.tcx, Operand::Copy(place), true_bb, false_bb)
} else {
- // The switch may be inexhaustive so we have a catch all block
+ // The switch may be inexhaustive so we have a catch all block
debug_assert_eq!(options.len() + 1, target_blocks.len());
let otherwise_block = *target_blocks.last().unwrap();
let switch_targets = SwitchTargets::new(
let tcx = self.tcx;
if let LintLevel::Explicit(current_hir_id) = lint_level {
// Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
- // to avoid adding Hir dependences on our parents.
+ // to avoid adding Hir dependencies on our parents.
// We estimate the true lint roots here to avoid creating a lot of source scopes.
let parent_root = tcx.maybe_lint_level_root_bounded(
/// However, `_X` is still registered to be dropped, and so if we
/// do nothing else, we would generate a `DROP(_X)` that occurs
/// after the call. This will later be optimized out by the
- /// drop-elaboation code, but in the meantime it can lead to
+ /// drop-elaboration code, but in the meantime it can lead to
/// spurious borrow-check errors -- the problem, ironically, is
/// not the `DROP(_X)` itself, but the (spurious) unwind pathways
/// that it creates. See #64391 for an example.
{
// `usize`/`isize` are not allowed to be matched exhaustively unless the
// `precise_pointer_size_matching` feature is enabled. So we treat those types like
- // `#[non_exhaustive]` enums by returning a special unmatcheable constructor.
+ // `#[non_exhaustive]` enums by returning a special unmatchable constructor.
smallvec![NonExhaustive]
}
&ty::Int(ity) => {
}
// Without `cx`, we can't know which field corresponds to which, so we can't
- // get the names of the fields. Instead we just display everything as a suple
+ // get the names of the fields. Instead we just display everything as a tuple
// struct, which should be good enough.
write!(f, "(")?;
for p in self.iter_fields() {
//!
//! Note: we will often abbreviate "constructor" as "ctor".
//!
-//! The idea that powers everything that is done in this file is the following: a (matcheable)
+//! The idea that powers everything that is done in this file is the following: a (matchable)
//! value is made from a constructor applied to a number of subvalues. Examples of constructors are
//! `Some`, `None`, `(,)` (the 2-tuple constructor), `Foo {..}` (the constructor for a struct
//! `Foo`), and `2` (the constructor for the number `2`). This is natural when we think of
//! Some of the ctors listed above might feel weird: `None` and `2` don't take any arguments.
//! That's ok: those are ctors that take a list of 0 arguments; they are the simplest case of
//! ctors. We treat `2` as a ctor because `u64` and other number types behave exactly like a huge
-//! `enum`, with one variant for each number. This allows us to see any matcheable value as made up
+//! `enum`, with one variant for each number. This allows us to see any matchable value as made up
//! from a tree of ctors, each having a set number of children. For example: `Foo { bar: None,
//! baz: Ok(0) }` is made from 4 different ctors, namely `Foo{..}`, `None`, `Ok` and `0`.
//!
/// Whether the current pattern is the whole pattern as found in a match arm, or if it's a
/// subpattern.
pub(super) is_top_level: bool,
- /// Wether the current pattern is from a `non_exhaustive` enum.
+ /// Whether the current pattern is from a `non_exhaustive` enum.
pub(super) is_non_exhaustive: bool,
}
// and this is ok because `open_drop` here can only be reached
// within that own generator's resume function.
// This should only happen for the self argument on the resume function.
- // It effetively only contains upvars until the generator transformation runs.
+ // It effectively only contains upvars until the generator transformation runs.
// See librustc_body/transform/generator.rs for more details.
ty::Generator(_, substs, _) => {
let tys: Vec<_> = substs.as_generator().upvar_tys().collect();
///
/// This makes `MaybeLiveLocals` unsuitable for certain classes of optimization normally associated
/// with a live variables analysis, notably dead-store elimination. It's a dirty hack, but it works
-/// okay for the generator state transform (currently the main consumuer of this analysis).
+/// okay for the generator state transform (currently the main consumer of this analysis).
///
/// [`MaybeBorrowedLocals`]: super::MaybeBorrowedLocals
/// [flow-test]: https://github.com/rust-lang/rust/blob/a08c47310c7d49cbdc5d7afb38408ba519967ecd/src/test/ui/mir-dataflow/liveness-ptr.rs
// sets on_entry bits for Arg places
fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
- // set all bits to 1 (uninit) before gathering counterevidence
+ // set all bits to 1 (uninit) before gathering counter-evidence
state.insert_all();
drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
self.check_mut_borrowing_layout_constrained_field(*place, context.is_mutating_use());
}
- // Some checks below need the extra metainfo of the local declaration.
+ // Some checks below need the extra meta info of the local declaration.
let decl = &self.body.local_decls[place.local];
// Check the base local: it might be an unsafe-to-access static. We only check derefs of the
}
}
- // Attempt to use albegraic identities to eliminate constant expressions
+ // Attempt to use algebraic identities to eliminate constant expressions
fn eval_rvalue_with_identities(
&mut self,
rvalue: &Rvalue<'tcx>,
// The operand ID is outside the known range of counter IDs and also outside the
// known range of expression IDs. In either case, the result of a missing operand
// (if and when used in an expression) will be zero, so from a computation
- // perspective, it doesn't matter whether it is interepretted as a counter or an
+ // perspective, it doesn't matter whether it is interpreted as a counter or an
// expression.
//
// However, the `num_counters` and `num_expressions` query results are used to
/// If prev.span() was split off to the right of a closure, prev.span().lo() will be
/// greater than prev_original_span.lo(). The actual span of `prev_original_span` is
/// not as important as knowing that `prev()` **used to have the same span** as `curr(),
- /// which means their sort order is still meaningful for determinating the dominator
+ /// which means their sort order is still meaningful for determining the dominator
/// relationship.
///
/// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
self.prev()
);
self.cutoff_prev_at_overlapping_curr();
- // If one span dominates the other, assocate the span with the code from the dominated
+ // If one span dominates the other, associate the span with the code from the dominated
// block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
// that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
// be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
for (place, proj) in place.iter_projections() {
match proj {
// Dereferencing in the computation of `place` might cause issues from one of two
- // cateogires. First, the referrent might be invalid. We protect against this by
+ // categories. First, the referent might be invalid. We protect against this by
// dereferencing references only (not pointers). Second, the use of a reference may
// invalidate other references that are used later (for aliasing reasons). Consider
// where such an invalidated reference may appear:
// The `liveness` variable contains the liveness of MIR locals ignoring borrows.
// This is correct for movable generators since borrows cannot live across
// suspension points. However for immovable generators we need to account for
- // borrows, so we conseratively assume that all borrowed locals are live until
+ // borrows, so we conservatively assume that all borrowed locals are live until
// we find a StorageDead statement referencing the locals.
// To do this we just union our `liveness` result with `borrowed_locals`, which
// contains all the locals which has been borrowed before this suspension point.
// The `closure_ref` in our example above.
let closure_ref_arg = iter::once(self_);
- // The `tmp0`, `tmp1`, and `tmp2` in our example abonve.
+ // The `tmp0`, `tmp1`, and `tmp2` in our example above.
let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
// This is e.g., `tuple_tmp.0` in our example above.
let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));
use rustc_session::Limit;
// FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking
-// this query riddiculously often.
+// this query ridiculously often.
#[instrument(level = "debug", skip(tcx, root, target))]
crate fn mir_callgraph_reachable<'tcx>(
tcx: TyCtxt<'tcx>,
let (renamed_decl, ret_decl) =
body.local_decls.pick2_mut(returned_local, mir::RETURN_PLACE);
- // Sometimes, the return place is assigned a local of a different but coercable type, for
+ // Sometimes, the return place is assigned a local of a different but coercible type, for
// example `&mut T` instead of `&T`. Overwriting the `LocalInfo` for the return place means
// its type may no longer match the return type of its function. This doesn't cause a
// problem in codegen because these two types are layout-compatible, but may be unexpected.
| StatementKind::AscribeUserType(..)
| StatementKind::Coverage(..)
| StatementKind::Nop => {
- // These are all nops in a landing pad
+ // These are all nops in a landing pad
}
StatementKind::Assign(box (place, Rvalue::Use(_) | Rvalue::Discriminant(_))) => {
/// that point.
///
/// This is redundant with drop elaboration, but we need to do it prior to const-checking, and
-/// running const-checking after drop elaboration makes it opimization dependent, causing issues
+/// running const-checking after drop elaboration makes it optimization dependent, causing issues
/// like [#90770].
///
/// [#90770]: https://github.com/rust-lang/rust/issues/90770
//!
//! When the MIR is built, we check `needs_drop` before emitting a `Drop` for a place. This pass is
//! useful because (unlike MIR building) it runs after type checking, so it can make use of
-//! `Reveal::All` to provide more precies type information.
+//! `Reveal::All` to provide more precise type information.
use crate::MirPass;
use rustc_middle::mir::*;
/// (StorageLive index,, StorageDead index, Local)
storage_stmts: Vec<(usize, usize, Local)>,
- /// The statements that should be removed (turned into nops)
+ /// The statements that should be removed (turned into nops)
stmts_to_remove: Vec<usize>,
/// Indices of debug variables that need to be adjusted to point to
// * First is weak lang items. These are basically mechanisms for
// libcore to forward-reference symbols defined later in crates like
// the standard library or `#[panic_handler]` definitions. The
- // definition of these weak lang items needs to be referenceable by
+ // definition of these weak lang items needs to be referenceable by
// libcore, so we're no longer a candidate for internalization.
// Removal of these functions can't be done by LLVM but rather must be
// done by the linker as it's a non-local decision.
mark_used_by_default_parameters(tcx, def_id, generics, &mut unused_parameters);
debug!(?unused_parameters, "(after default)");
- // Visit MIR and accumululate used generic parameters.
+ // Visit MIR and accumulate used generic parameters.
let body = match tcx.hir().body_const_context(def_id.expect_local()) {
// Const functions are actually called and should thus be considered for polymorphization
// via their runtime MIR.
&self.sess.span_diagnostic
}
- /// Relace `self` with `snapshot.parser` and extend `unclosed_delims` with `snapshot.unclosed_delims`.
+ /// Replace `self` with `snapshot.parser` and extend `unclosed_delims` with `snapshot.unclosed_delims`.
/// This is to avoid losing unclosed delims errors `create_snapshot_for_diagnostic` clears.
pub(super) fn restore_snapshot(&mut self, snapshot: SnapshotParser<'a>) {
*self = snapshot.parser;
lhs = self.parse_assoc_op_ascribe(lhs, lhs_span)?;
continue;
} else if op == AssocOp::DotDot || op == AssocOp::DotDotEq {
- // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to
+ // If we did not have to handle `x..`/`x..=`, it would be pretty easy to
// generalise it to the Fixity::None code.
lhs = self.parse_range_expr(prec, lhs, op, cur_op_span)?;
break;
pub current_closure: Option<ClosureSpans>,
}
-/// Stores span informations about a closure.
+/// Stores span information about a closure.
#[derive(Clone)]
pub struct ClosureSpans {
pub whole_closure: Span,
/// Finds the indices of all characters that have been processed and differ between the actual
/// written code (code snippet) and the `InternedString` that gets processed in the `Parser`
-/// in order to properly synthethise the intra-string `Span`s for error diagnostics.
+/// in order to properly synthesise the intra-string `Span`s for error diagnostics.
fn find_skips_from_snippet(
snippet: Option<string::String>,
str_style: Option<usize>,
} = *trait_item
{
// we can ignore functions that do not have default bodies:
- // if those are unimplemented it will be catched by typeck.
+ // if those are unimplemented it will be caught by typeck.
if !defaultness.has_value()
|| self
.tcx
// like AddAssign is implemented).
// For primitive types (which, despite having a trait impl, don't actually
- // end up calling it), the evluation order is right-to-left. For example,
+ // end up calling it), the evaluation order is right-to-left. For example,
// the following code snippet:
//
// let y = &mut 0;
}
if !self.tcx.features().staged_api {
- // Propagate unstability. This can happen even for non-staged-api crates in case
+ // Propagate instability. This can happen even for non-staged-api crates in case
// -Zforce-unstable-if-unmarked is set.
if let Some(stab) = self.parent_stab {
if inherit_deprecation.yes() && stab.level.is_unstable() {
match i.kind {
// Inherent impls and foreign modules serve only as containers for other items,
// they don't have their own stability. They still can be annotated as unstable
- // and propagate this unstability to children, but this annotation is completely
+ // and propagate this instability to children, but this annotation is completely
// optional. They inherit stability from their parents when unannotated.
hir::ItemKind::Impl(hir::Impl { of_trait: None, .. })
| hir::ItemKind::ForeignMod { .. } => {
fn visit_item(&mut self, i: &'tcx Item<'tcx>) {
// Inherent impls and foreign modules serve only as containers for other items,
// they don't have their own stability. They still can be annotated as unstable
- // and propagate this unstability to children, but this annotation is completely
+ // and propagate this instability to children, but this annotation is completely
// optional. They inherit stability from their parents when unannotated.
if !matches!(
i.kind,
}
}
- // Hygine isn't really implemented for `macro_rules!` macros at the
+ // Hygiene isn't really implemented for `macro_rules!` macros at the
// moment. Accordingly, marking them as reachable is unwise. `macro` macros
- // have normal hygine, so we can treat them like other items without type
+ // have normal hygiene, so we can treat them like other items without type
// privacy and mark them reachable.
DefKind::Macro(_) => {
let item = self.tcx.hir().expect_item(def_id);
// 3. mentioned in the associated types of the impl
//
// Those in 1. can only occur if the trait is in
- // this crate and will've been warned about on the
+ // this crate and will have been warned about on the
// trait definition (there's no need to warn twice
// so we don't check the methods).
//
}
// - AST lowering may clone `use` items and the clones don't
// get their entries in the resolver's visibility table.
- // - AST lowering also creates opaque type items with inherited visibilies.
+ // - AST lowering also creates opaque type items with inherited visibilities.
// Visibility on them should have no effect, but to avoid the visibility
// query failing on some items, we provide it for opaque types as well.
Node::Item(hir::Item {
//! The serialisation is performed on-demand when each node is emitted. Using this
//! scheme, we do not need to keep the current graph in memory.
//!
-//! The deserisalisation is performed manually, in order to convert from the stored
+//! The deserialisation is performed manually, in order to convert from the stored
//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
//! node and edge count are stored at the end of the file, all the arrays can be
//! pre-allocated with the right length.
// Remove the `removal_span`.
corrections.push((removal_span, "".to_string()));
- // Find the span after the crate name and if it has nested imports immediatately
+ // Find the span after the crate name and if it has nested imports immediately
// after the crate name already.
// ie. `use a::b::{c, d};`
// ^^^^^^^^^
/// In the case where the `Import` was expanded from a "nested" use tree,
/// this id is the ID of the leaf tree. For example:
///
- /// ```ignore (pacify the mercilous tidy)
+ /// ```ignore (pacify the merciless tidy)
/// use foo::bar::{a, b}
/// ```
///
// but if we make a mistake elsewhere, mainly by keeping something in
// `missing_named_lifetime_spots` that we shouldn't, like associated
// `const`s or making a mistake in the AST lowering we would provide
- // non-sensical suggestions. Guard against that by skipping these.
+ // nonsensical suggestions. Guard against that by skipping these.
// (#74264)
continue;
}
s: ScopeRef<'a>,
},
- /// When we have nested trait refs, we concanetate late bound vars for inner
+ /// When we have nested trait refs, we concatenate late bound vars for inner
/// trait refs from outer ones. But we also need to include any HRTB
/// lifetimes encountered when identifying the trait that an associated type
/// is declared on.
/// Any non-concatenating binder scopes.
Normal,
/// Within a syntactic trait ref, there may be multiple poly trait refs that
- /// are nested (under the `associcated_type_bounds` feature). The binders of
- /// the innner poly trait refs are extended from the outer poly trait refs
+ /// are nested (under the `associated_type_bounds` feature). The binders of
+ /// the inner poly trait refs are extended from the outer poly trait refs
/// and don't increase the late bound depth. If you had
/// `T: for<'a> Foo<Bar: for<'b> Baz<'a, 'b>>`, then the `for<'b>` scope
/// would be `Concatenating`. This also used in trait refs in where clauses
path_span: Span,
generic_args: &'v hir::GenericArgs<'v>,
) {
- // parenthesized args enter a new elison scope
+ // parenthesized args enter a new elision scope
if generic_args.parenthesized {
return;
}
/// program) if all but one of them come from glob imports.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
struct BindingKey {
- /// The identifier for the binding, aways the `normalize_to_macros_2_0` version of the
+ /// The identifier for the binding, always the `normalize_to_macros_2_0` version of the
/// identifier.
ident: Ident,
ns: Namespace,
/// The reason is that we update scopes with value `MacroRulesScope::Invocation(invoc_id)`
/// in-place after `invoc_id` gets expanded.
/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
-/// which usually grow lineraly with the number of macro invocations
+/// which usually grow linearly with the number of macro invocations
/// in a module (including derives) and hurt performance.
pub(crate) type MacroRulesScopeRef<'a> = Interned<'a, Cell<MacroRulesScope<'a>>>;
self.process_macro_use(l.span);
self.process_var_decl(&l.pat);
- // Just walk the initialiser and type (don't want to walk the pattern again).
+ // Just walk the initializer and type (don't want to walk the pattern again).
walk_list!(self, visit_ty, &l.ty);
walk_list!(self, visit_expr, &l.init);
}
/// that we don't accidentally try to encode any more `SyntaxContexts`
serialized_ctxts: Lock<FxHashSet<SyntaxContext>>,
/// The `SyntaxContexts` that we have serialized (e.g. as a result of encoding `Spans`)
- /// in the most recent 'round' of serializnig. Serializing `SyntaxContextData`
+ /// in the most recent 'round' of serializing. Serializing `SyntaxContextData`
/// may cause us to serialize more `SyntaxContext`s, so serialize in a loop
/// until we reach a fixed point.
latest_ctxts: Lock<FxHashSet<SyntaxContext>>,
/// hygiene data, most importantly name of the crate it refers to.
/// As a result we print `$crate` as `crate` if it refers to the local crate
/// and as `::other_crate_name` if it refers to some other crate.
-/// Note, that this is only done if the ident token is printed from inside of AST pretty-pringing,
+/// Note, that this is only done if the ident token is printed from inside of AST pretty-printing,
/// but not otherwise. Pretty-printing is the only way for proc macros to discover token contents,
/// so we should not perform this lossy conversion if the top level call to the pretty-printer was
/// done for a token stream or a single token.
// revisited after further improvements to `indexmap`.
//
// This type is private to prevent accidentally constructing more than one
-// `Interner` on the same thread, which makes it easy to mixup `Symbol`s
+// `Interner` on the same thread, which makes it easy to mix up `Symbol`s
// between `Interner`s.
#[derive(Default)]
struct InternerInner {
// Unions and are always treated as a series of 64-bit integer chunks
}
abi::FieldsShape::Arbitrary { .. } => {
- // Stuctures with floating point numbers need special care.
+ // Structures with floating point numbers need special care.
let mut data = parse_structure(
cx,
// setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
// setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
// }
- // The compiler intrisics should be implemented by compiler-builtins.
+ // The compiler intrinsics should be implemented by compiler-builtins.
// Unfortunately, compiler-builtins has not provided those intrinsics yet. Such as:
// i386/divdi3.S
// i386/lshrdi3.S
// 2. Implement Intrinsics.
// We evaluated all options.
// #2 is hard because we need implement the intrinsics (_aulldiv) generated
- // from the other intrinscis (__udivdi3) implementation with the same
+ // from the other intrinsics (__udivdi3) implementation with the same
// functionality (udivmod_inner). If we let _aulldiv() call udivmod_inner!(),
// then we are in loop. We may have to find another way to implement udivmod_inner!().
// #1.2 may break the existing usage.
// It uses cdecl, EAX/ECX/EDX as volatile register, and EAX/EDX as return value.
// We also checked the LLVM X86TargetLowering, the differences between -gnu and -msvc
// is fmodf(f32), longjmp() and TLS. None of them impacts the UEFI code.
- // As a result, we choose -gnu for i686 version before those intrisics are implemented in
+ // As a result, we choose -gnu for i686 version before those intrinsics are implemented in
// compiler-builtins. After compiler-builtins implements all required intrinsics, we may
// remove -gnu and use the default one.
Target {
/// Linker arguments used in addition to `late_link_args` if at least one
/// Rust dependency is dynamically linked.
pub late_link_args_dynamic: LinkArgs,
- /// Linker arguments used in addition to `late_link_args` if aall Rust
+ /// Linker arguments used in addition to `late_link_args` if all Rust
/// dependencies are statically linked.
pub late_link_args_static: LinkArgs,
/// Linker arguments that are unconditionally passed *after* any
/// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak
/// symbol needs its own COMDAT section, weak linkage implies a large
/// number sections that easily exceeds the given limit for larger
/// codebases. Consequently we want a way to disallow weak linkage on some
/// platforms.
pub allows_weak_linkage: bool,
/// Whether the linker support rpaths or not. Defaults to false.
crt_objects_fallback: Some(CrtObjectsFallback::Wasm),
// This has no effect in LLVM 8 or prior, but in LLVM 9 and later when
- // PIC code is implemented this has quite a drastric effect if it stays
+ // PIC code is implemented this has quite a drastic effect if it stays
// at the default, `pic`. In an effort to keep wasm binaries as minimal
// as possible we're defaulting to `static` for now, but the hope is
// that eventually we can ship a `pic`-compatible standard library which
match *r {
// Ignore bound regions and `'static` regions that appear in the
// type, we only need to remap regions that reference lifetimes
- // from the function declaraion.
+ // from the function declaration.
// This would ignore `'r` in a type like `for<'r> fn(&'r u32)`.
ty::ReLateBound(..) | ty::ReStatic => return r,
Ok(Some(selection)) => selection,
Ok(None) => {
// Ambiguity can happen when monomorphizing during trans
- // expands to some humongo type that never occurred
- // statically -- this humongo type can then overflow,
+ // expands to some humongous type that never occurred
+ // statically -- this humongous type can then overflow,
// leading to an ambiguous result. So report this as an
// overflow bug, since I believe this is the only case
// where ambiguity can result.
}
// In the case where we detect an error, run the check again, but
- // this time tracking intercrate ambuiguity causes for better
+ // this time tracking intercrate ambiguity causes for better
// diagnostics. (These take time and can lead to false errors.)
tcx.infer_ctxt().enter(|infcx| {
let selcx = &mut SelectionContext::intercrate(&infcx);
ty::Foreign(did) => def_id_is_local(did, in_crate),
ty::Opaque(..) => {
// This merits some explanation.
- // Normally, opaque types are not involed when performing
+ // Normally, opaque types are not involved when performing
// coherence checking, since it is illegal to directly
// implement a trait on an opaque type. However, we might
// end up looking at an opaque type during coherence checking
}
/// Builds the abstract const by walking the thir and bailing out when
- /// encountering an unspported operation.
+ /// encountering an unsupported operation.
fn build(mut self) -> Result<&'tcx [Node<'tcx>], ErrorGuaranteed> {
debug!("Abstractconstbuilder::build: body={:?}", &*self.body);
self.recurse_build(self.body_id)?;
impl<'tcx> ConstUnifyCtxt<'tcx> {
// Substitutes generics repeatedly to allow AbstractConsts to unify where a
- // ConstKind::Unevalated could be turned into an AbstractConst that would unify e.g.
+ // ConstKind::Unevaluated could be turned into an AbstractConst that would unify e.g.
// Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])]
#[inline]
#[instrument(skip(self), level = "debug")]
{
// We know we have an `impl Trait` that doesn't satisfy a required projection.
- // Find all of the ocurrences of `impl Trait` for `Trait` in the function arguments'
+ // Find all of the occurrences of `impl Trait` for `Trait` in the function arguments'
// types. There should be at least one, but there might be *more* than one. In that
// case we could just ignore it and try to identify which one needs the restriction,
// but instead we choose to suggest replacing all instances of `impl Trait` with `T`
// Should this fulfillment context register type-lives-for-region
// obligations on its parent infcx? In some cases, region
// obligations are either already known to hold (normalization) or
- // hopefully verifed elsewhere (type-impls-bound), and therefore
+ // hopefully verified elsewhere (type-impls-bound), and therefore
// should not be checked.
//
// Note that if we are normalizing a type that we already
// the main traversal loop:
// basically we want to cut the inheritance directed graph into a few non-overlapping slices of nodes
- // that each node is emitted after all its descendents have been emitted.
+ // that each node is emitted after all its descendants have been emitted.
- // so we convert the directed graph into a tree by skipping all previously visted nodes using a visited set.
+ // so we convert the directed graph into a tree by skipping all previously visited nodes using a visited set.
// this is done on the fly.
// Each loop run emits a slice - it starts by find a "childless" unvisited node, backtracking upwards, and it
// stops after it finds a node that has a next-sibling node.
// fn bar<T:SomeTrait<Foo=usize>>(...) { ... }
// ```
//
- // Doesn't the `T : Sometrait<Foo=usize>` predicate help
+ // Doesn't the `T : SomeTrait<Foo=usize>` predicate help
// resolve `T::Foo`? And of course it does, but in fact
// that single predicate is desugared into two predicates
// in the compiler: a trait predicate (`T : SomeTrait`) and a
// Get obligations corresponding to the predicates from the where-clause of the
// associated type itself.
// Note: `feature(generic_associated_types)` is required to write such
-// predicates, even for non-generic associcated types.
+// predicates, even for non-generic associated types.
fn assoc_ty_own_obligations<'cx, 'tcx>(
selcx: &mut SelectionContext<'cx, 'tcx>,
obligation: &ProjectionTyObligation<'tcx>,
return InferOk { value: kinds, obligations };
}
- // Errors and ambiuity in dropck occur in two cases:
+ // Errors and ambiguity in dropck occur in two cases:
// - unresolved inference variables at the end of typeck
// - non well-formed types where projections cannot be resolved
// Either of these should have created an error before.
..tpred.trait_ref
};
- // Then contstruct a new obligation with Self = () added
+ // Then construct a new obligation with Self = () added
// to the ParamEnv, and see if it holds.
let o = rustc_infer::traits::Obligation::new(
ObligationCause::dummy(),
{
// If the result is something that we can cache, then mark this
// entry as 'complete'. This will allow us to skip evaluating the
- // suboligations at all the next time we evaluate the projection
+ // subobligations at all the next time we evaluate the projection
// predicate.
self.infcx
.inner
// provisional caches entries and inserting them into the evaluation cache
//
// This ensures that when a query reads this entry from the evaluation cache,
- // it will end up (transitively) dependening on all of the incr-comp dependencies
+ // it will end up (transitively) depending on all of the incr-comp dependencies
// created during the evaluation of this trait. For example, evaluating a trait
// will usually require us to invoke `type_of(field_def_id)` to determine the
// constituent types, and we want any queries reading from this evaluation
/// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT,
/// and applying this env_predicate constrains any of the obligation's GAT substitutions.
///
- /// This behavior is a somewhat of a hack to prevent overconstraining inference variables
+ /// This behavior is a somewhat of a hack to prevent over-constraining inference variables
/// in cases like #91762.
pub(super) fn match_projection_projections(
&mut self,
// marker trait impls.
//
// Without this restriction, we could end up accidentally
- // constrainting inference variables based on an arbitrarily
+ // constraining inference variables based on an arbitrarily
// chosen trait impl.
//
// Imagine we have the following code:
// some other means (e.g. type-checking of a function). We will
// then be in a position to drop marker trait candidates
// without constraining inference variables (since there are
- // none left to constrin)
+ // none left to constrain)
// 2) Be left with some unconstrained inference variables. We
// will then correctly report an inference error, since the
// existence of multiple marker trait impls tells us nothing
/// - `A B C` and we add a cache for the result of C (DFN 2)
/// - Then we have a stack `A B D` where `D` has DFN 3
/// - We try to solve D by evaluating E: `A B D E` (DFN 4)
- /// - `E` generates various cache entries which have cyclic dependices on `B`
+ /// - `E` generates various cache entries which have cyclic dependencies on `B`
/// - `A B D E F` and so forth
/// - the DFN of `F` for example would be 5
/// - then we determine that `E` is in error -- we will then clear
if potential_assoc_types.len() == assoc_items.len() {
// Only suggest when the amount of missing associated types equals the number of
// extra type arguments present, as that gives us a relatively high confidence
- // that the user forgot to give the associtated type's name. The canonical
+ // that the user forgot to give the associated type's name. The canonical
// example would be trying to use `Iterator<isize>` instead of
// `Iterator<Item = isize>`.
for (potential, item) in iter::zip(&potential_assoc_types, assoc_items) {
/// Given the type/lifetime/const arguments provided to some path (along with
/// an implicit `Self`, if this is a trait reference), returns the complete
/// set of substitutions. This may involve applying defaulted type parameters.
- /// Constraints on associated typess are created from `create_assoc_bindings_for_generic_args`.
+ /// Constraints on associated types are created from `create_assoc_bindings_for_generic_args`.
///
/// Example:
///
// If the projection output contains `Self`, force the user to
// elaborate it explicitly to avoid a lot of complexity.
//
- // The "classicaly useful" case is the following:
+ // The "classically useful" case is the following:
// ```
// trait MyTrait: FnMut() -> <Self as MyTrait>::MyOutput {
// type MyOutput;
// Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty`/`qself_def` are not a type
// parameter or `Self`.
// NOTE: When this function starts resolving `Trait::AssocTy` successfully
- // it should also start reportint the `BARE_TRAIT_OBJECTS` lint.
+ // it should also start reporting the `BARE_TRAIT_OBJECTS` lint.
pub fn associated_path_to_ty(
&self,
hir_ref_id: hir::HirId,
return None;
}
- // The `Future` trait has only one associted item, `Output`,
+ // The `Future` trait has only one associated item, `Output`,
// so check that this is what we see.
let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0];
if output_assoc_item != predicate.projection_ty.item_def_id {
//!
//! ## Subtle note
//!
-//! When infering the generic arguments of functions, the argument
+//! When inferring the generic arguments of functions, the argument
//! order is relevant, which can lead to the following edge case:
//!
//! ```rust
let opt_coerce_to = {
// We should release `enclosing_breakables` before the `check_expr_with_hint`
// below, so can't move this block of code to the enclosing scope and share
- // `ctxt` with the second `encloding_breakables` borrow below.
+ // `ctxt` with the second `enclosing_breakables` borrow below.
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
match enclosing_breakables.opt_find_breakable(target_id) {
Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()),
self.tcx.types.never
}
- /// `explicit_return` is `true` if we're checkng an explicit `return expr`,
+ /// `explicit_return` is `true` if we're checking an explicit `return expr`,
/// and `false` if we're checking a trailing expression.
pub(super) fn check_return_expr(
&self,
let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) {
Ok(method) => {
// We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
- // trigger this codepath causing `structuraly_resolved_type` to emit an error.
+ // trigger this codepath causing `structurally_resolved_type` to emit an error.
self.write_method_call(expr.hir_id, method);
Ok(method)
}
_ => {
// Otherwise, there's a mismatch, so clear out what we're expecting, and set
- // our input typs to err_args so we don't blow up the error messages
+ // our input types to err_args so we don't blow up the error messages
struct_span_err!(
tcx.sess,
call_span,
/// NodeInfo struct for more details, but this information includes things
/// such as the set of control-flow successors, which variables are dropped
/// or reinitialized, and whether each variable has been inferred to be
- /// known-dropped or potentially reintiialized at each point.
+ /// known-dropped or potentially reinitialized at each point.
nodes: IndexVec<PostOrderId, NodeInfo>,
/// We refer to values whose drop state we are tracking by the HirId of
/// where they are defined. Within a NodeInfo, however, we store the
///
/// 1. Moving a variable `a` counts as a move of the whole variable.
/// 2. Moving a partial path like `a.b.c` is ignored.
-/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counds as a reinitialization of all of
+/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counts as a reinitialization of all of
/// `a`.
///
/// Some examples:
// FIXME Postponing the normalization of the return type likely only hides a deeper bug,
// which might be caused by the `param_env` itself. The clauses of the `param_env`
// maybe shouldn't include `Param`s, but rather fresh variables or be canonicalized,
- // see isssue #89650
+ // see issue #89650
let cause = traits::ObligationCause::misc(self.span, self.body_id);
let selcx = &mut traits::SelectionContext::new(self.fcx);
let traits::Normalized { value: xform_self_ty, obligations } =
}
/// Similarly to `probe_for_return_type`, this method attempts to find the best matching
- /// candidate method where the method name may have been misspelt. Similarly to other
+ /// candidate method where the method name may have been misspelled. Similarly to other
/// Levenshtein based suggestions, we provide at most one such suggestion.
fn probe_for_lev_candidate(&mut self) -> Result<Option<ty::AssocItem>, MethodError<'tcx>> {
debug!("probing for method names similar to {:?}", self.method_name);
(None, true) => "variant",
}
};
- // FIXME(eddyb) this intendation is probably unnecessary.
+ // FIXME(eddyb) this indentation is probably unnecessary.
let mut err = {
// Suggest clamping down the type if the method that is being attempted to
// be used exists at all, and the type is an ambiguous numeric type
.into_iter()
.filter_map(|info| self.associated_value(info.def_id, item_name));
// There are methods that are defined on the primitive types and won't be
- // found when exploring `all_traits`, but we also need them to be acurate on
+ // found when exploring `all_traits`, but we also need them to be accurate on
// our suggestions (#47759).
let found_assoc = |ty: Ty<'tcx>| {
simplify_type(tcx, ty, TreatParams::AsPlaceholders)
err.emit();
}
-/// Resugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
+/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
fn bounds_from_generic_predicates<'tcx>(
tcx: TyCtxt<'tcx>,
predicates: ty::GenericPredicates<'tcx>,
}
// Binary operator categories. These categories summarize the behavior
-// with respect to the builtin operationrs supported.
+// with respect to the builtin operations supported.
enum BinOpCategory {
/// &&, || -- cannot be overridden
Shortcircuit,
// In the `ValueNS`, we have `SelfCtor(..) | Ctor(_, Const), _)` remaining which
// could successfully compile. The former being `Self` requires a unit struct.
// In either case, and unlike constants, the pattern itself cannot be
- // a reference type wherefore peeling doesn't give up any expressivity.
+ // a reference type wherefore peeling doesn't give up any expressiveness.
_ => AdjustMode::Peel,
},
// When encountering a `& mut? pat` pattern, reset to "by value".
// The early check here is not for correctness, but rather better
// diagnostics (e.g. when `&str` is being matched, `expected` will
// be peeled to `str` while ty here is still `&str`, if we don't
- // err ealy here, a rather confusing unification error will be
+ // err early here, a rather confusing unification error will be
// emitted instead).
let fail =
!(ty.is_numeric() || ty.is_char() || ty.is_ty_var() || ty.references_error());
match (inexistent_fields_err, unmentioned_err) {
(Some(mut i), Some(mut u)) => {
if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
- // We don't want to show the inexistent fields error when this was
+ // We don't want to show the nonexistent fields error when this was
// `Foo { a, b }` when it should have been `Foo(a, b)`.
i.delay_as_bug();
u.delay_as_bug();
);
// Build a tuple (U0..Un) of the final upvar types U0..Un
- // and unify the upvar tupe type in the closure with it:
+ // and unify the upvar tuple type in the closure with it:
let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter());
self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
// capture information.
//
// - if descendant is found, remove it from the list, and update the current place's
- // capture information to account for the descendants's capture kind.
+ // capture information to account for the descendant's capture kind.
//
// We can never be in a case where the list contains both an ancestor and a descendant
// Also there can only be ancestor but in case of descendants there might be
// Now that we have the minimized list of captures, sort the captures by field id.
// This causes the closure to capture the upvars in the same order as the fields are
// declared which is also the drop order. Thus, in situations where we capture all the
- // fields of some type, the obserable drop order will remain the same as it previously
+ // fields of some type, the observable drop order will remain the same as it previously
// was even though we're dropping each capture individually.
// See https://github.com/rust-lang/project-rfc-2229/issues/42 and
// `src/test/ui/closures/2229_closure_analysis/preserve_field_drop_order.rs`.
// Observations:
// - `captured_by_move_projs` is not empty. Therefore we can call
// `captured_by_move_projs.first().unwrap()` safely.
- // - All entries in `captured_by_move_projs` have atleast one projection.
+ // - All entries in `captured_by_move_projs` have at least one projection.
// Therefore we can call `captured_by_move_projs.first().unwrap().first().unwrap()` safely.
// We don't capture derefs in case of move captures, which would have be applied to
// We don't capture derefs of raw ptrs
ty::RawPtr(_) => unreachable!(),
- // Derefencing a mut-ref allows us to mut the Place if we don't deref
+ // Dereferencing a mut-ref allows us to mut the Place if we don't deref
// an immut-ref after on top of this.
ty::Ref(.., hir::Mutability::Mut) => is_mutbl = hir::Mutability::Mut,
}
if proj.ty.is_union() {
- // Don't capture preicse fields of a union.
+ // Don't capture precise fields of a union.
truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1);
break;
}
/// Truncate projections so that following rules are obeyed by the captured `place`:
/// - No Index projections are captured, since arrays are captured completely.
/// - No unsafe block is required to capture `place`
-/// Returns the truncated place and updated cature mode.
+/// Returns the truncated place and updated capture mode.
fn restrict_capture_precision<'tcx>(
place: Place<'tcx>,
curr_mode: ty::UpvarCapture,
/// It is the caller's duty to figure out which path_expr_id to use.
///
/// If both the CaptureKind and Expression are considered to be equivalent,
-/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to priortize
+/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to prioritize
/// expressions reported back to the user as part of diagnostics based on which appears earlier
/// in the closure. This can be achieved simply by calling
/// `determine_capture_info(existing_info, current_info)`. This works out because the
for (region_a, region_a_idx) in ®ions {
// Ignore `'static` lifetimes for the purpose of this lint: it's
- // because we know it outlives everything and so doesn't give meaninful
+ // because we know it outlives everything and so doesn't give meaningful
// clues
if let ty::ReStatic = **region_a {
continue;
struct GATSubstCollector<'tcx> {
tcx: TyCtxt<'tcx>,
gat: DefId,
- // Which region appears and which parameter index its subsituted for
+ // Which region appears and which parameter index it's substituted for
regions: FxHashSet<(ty::Region<'tcx>, usize)>,
- // Which params appears and which parameter index its subsituted for
+ // Which params appear and which parameter index it's substituted for
types: FxHashSet<(Ty<'tcx>, usize)>,
}
if let Some(ty::Ref(_, base_ty, _)) = base_ty {
let index_ty = typeck_results.expr_ty_adjusted_opt(index).unwrap_or_else(|| {
// When encountering `return [0][0]` outside of a `fn` body we would attempt
- // to access an unexistend index. We assume that more relevant errors will
+ // to access a nonexistent index. We assume that more relevant errors will
// already have been emitted, so we only gate on this with an ICE if no
// error has been emitted. (#64638)
self.fcx.tcx.ty_error_with_message(
// The `#[target_feature]` attribute is allowed on
// WebAssembly targets on all functions, including safe
// ones. Other targets require that `#[target_feature]` is
- // only applied to unsafe funtions (pending the
+ // only applied to unsafe functions (pending the
// `target_feature_11` feature) because on most targets
// execution of instructions that are not supported is
// considered undefined behavior. For WebAssembly which is a
// In the above code we would call this query with the def_id of 3 and
// the parent_node we match on would be the hir node for Self::Assoc<3>
//
- // `Self::Assoc<3>` cant be resolved without typchecking here as we
+ // `Self::Assoc<3>` can't be resolved without typechecking here as we
// didn't write <Self as Foo>::Assoc<3>. If we did then another match
// arm would handle this.
//
// `ItemCtxt::to_ty`. To make things simpler, we just erase all
// of them, regardless of depth. At worst, this will give
// us an inaccurate span for an error message, but cannot
-// lead to unsoundess (we call `delay_span_bug` at the start
+// lead to unsoundness (we call `delay_span_bug` at the start
// of `diagnostic_hir_wf_check`).
impl<'tcx> TypeFolder<'tcx> for EraseAllBoundRegions<'tcx> {
fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
// (*) This is a horrible concession to reality. I think it'd be
// better to just ban unconstrained lifetimes outright, but in
- // practice people do non-hygenic macros like:
+ // practice people do non-hygienic macros like:
//
// ```
// macro_rules! __impl_slice_eq1 {
let msg = format!("add missing {} argument{}", self.kind(), pluralize!(num_missing_args));
// we first try to get lifetime name suggestions from scope or elision information. If none is
- // available we use the parameter defintions
+ // available we use the parameter definitions
let suggested_args = if let Some(hir_id) = self.path_segment.hir_id {
if let Some(lifetimes_in_scope) = self.tcx.lifetime_scope(hir_id) {
match lifetimes_in_scope {
(gen_args_span.shrink_to_lo(), true)
} else {
let arg_span = self.gen_args.args[sugg_offset - 1].span();
- // If we came here then inferred lifetimes's spans can only point
+ // If we came here then the inferred lifetimes' spans can only point
// to either the opening bracket or to the space right after.
// Both of these spans have an `hi` lower than or equal to the span
// of the generics excluding the brackets.