#![feature(generators)]
#![feature(generator_trait)]
#![feature(fn_traits)]
+#![feature(int_bits_const)]
#![feature(min_specialization)]
#![feature(optin_builtin_traits)]
#![feature(nll)]
P: Pointer,
T: Tag,
{
- const TAG_BIT_SHIFT: usize = (8 * std::mem::size_of::<usize>()) - T::BITS;
+ const TAG_BIT_SHIFT: usize = usize::BITS as usize - T::BITS;
const ASSERTION: () = {
assert!(T::BITS <= P::BITS);
// Used for the transmute_copy's below
//! `late_lint_methods!` invocation in `lib.rs`.
use crate::{
- types::CItemKind, EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
+ types::{transparent_newtype_field, CItemKind},
+ EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
};
use rustc_ast::attr::{self, HasAttrs};
use rustc_ast::tokenstream::{TokenStream, TokenTree};
if is_transparent && !is_non_null {
debug_assert!(def.variants.len() == 1);
let v = &def.variants[VariantIdx::new(0)];
- ty = v
- .transparent_newtype_field(tcx)
+ ty = transparent_newtype_field(tcx, v)
.expect(
"single-variant transparent structure with zero-sized field",
)
.any(|a| tcx.sess.check_name(a, sym::rustc_nonnull_optimization_guaranteed))
}
+/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
+/// field.
+///
+/// Returns `None` when every field of `variant` is zero-sized (or when it has no fields).
+pub fn transparent_newtype_field<'a, 'tcx>(
+    tcx: TyCtxt<'tcx>,
+    variant: &'a ty::VariantDef,
+) -> Option<&'a ty::FieldDef> {
+    let param_env = tcx.param_env(variant.def_id);
+    for field in &variant.fields {
+        let field_ty = tcx.type_of(field.did);
+        // Zero-sizedness is judged from the computed layout; if layout computation
+        // fails, the field is conservatively treated as non-ZST (`unwrap_or(false)`).
+        let is_zst =
+            tcx.layout_of(param_env.and(field_ty)).map(|layout| layout.is_zst()).unwrap_or(false);
+
+        if !is_zst {
+            // First non-ZST field wins; per the doc comment above, a well-formed
+            // `repr(transparent)` type has at most one such field.
+            return Some(field);
+        }
+    }
+
+    None
+}
+
/// Is type known to be non-null?
crate fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
let tcx = cx.tcx;
}
for variant in &def.variants {
- if let Some(field) = variant.transparent_newtype_field(tcx) {
+ if let Some(field) = transparent_newtype_field(cx.tcx, variant) {
if ty_is_known_nonnull(cx, field.ty(tcx, substs), mode) {
return true;
}
ty::Adt(field_def, field_substs) => {
let inner_field_ty = {
let first_non_zst_ty =
- field_def.variants.iter().filter_map(|v| v.transparent_newtype_field(tcx));
+ field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v));
debug_assert_eq!(
first_non_zst_ty.clone().count(),
1,
if def.repr.transparent() {
// Can assume that only one field is not a ZST, so only check
// that field's type for FFI-safety.
- if let Some(field) = variant.transparent_newtype_field(self.cx.tcx) {
+ if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
self.check_field_type_for_ffi(cache, field, substs)
} else {
bug!("malformed transparent type");
Uninit,
}
+#[cfg(target_arch = "x86_64")]
+static_assert_size!(ScalarMaybeUninit, 24);
+
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
#[inline(always)]
fn from(s: Scalar<Tag>) -> Self {
flags: VariantFlags,
}
-impl<'tcx> VariantDef {
+impl VariantDef {
/// Creates a new `VariantDef`.
///
/// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef`
pub fn is_recovered(&self) -> bool {
self.flags.intersects(VariantFlags::IS_RECOVERED)
}
-
- /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
- /// field.
- pub fn transparent_newtype_field(&self, tcx: TyCtxt<'tcx>) -> Option<&FieldDef> {
- for field in &self.fields {
- let field_ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, self.def_id));
- if !field_ty.is_zst(tcx, self.def_id) {
- return Some(field);
- }
- }
-
- None
- }
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
}
}
}
-
- /// Is this a zero-sized type?
- pub fn is_zst(&'tcx self, tcx: TyCtxt<'tcx>, did: DefId) -> bool {
- tcx.layout_of(tcx.param_env(did).and(self)).map(|layout| layout.is_zst()).unwrap_or(false)
- }
}
use rustc_middle::ty::{self, TyCtxt};
use smallvec::{smallvec, SmallVec};
-use std::convert::TryInto;
use std::mem;
use super::abs_domain::Lift;
};
let base_ty = base_place.ty(self.builder.body, self.builder.tcx).ty;
let len: u64 = match base_ty.kind() {
- ty::Array(_, size) => {
- let length = size.eval_usize(self.builder.tcx, self.builder.param_env);
- length
- .try_into()
- .expect("slice pattern of array with more than u32::MAX elements")
- }
+ ty::Array(_, size) => size.eval_usize(self.builder.tcx, self.builder.param_env),
_ => bug!("from_end: false slice pattern of non-array type"),
};
for offset in from..to {
let n = base.len(self)?;
if n < min_length {
// This can only be reached in ConstProp and non-rustc-MIR.
- throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n });
+ throw_ub!(BoundsCheckFailed { len: min_length, index: n });
}
let index = if from_end {
self.mplace_index(base, index)?
}
- Subslice { from, to, from_end } => {
- self.mplace_subslice(base, u64::from(from), u64::from(to), from_end)?
- }
+ Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
})
}
) -> Result<(), Unpromotable> {
let fn_ty = callee.ty(self.body, self.tcx);
- if !self.explicit && self.const_kind.is_none() {
+ // `const` and `static` use the explicit rules for promotion regardless of the `Candidate`,
+ // meaning calls to `const fn` can be promoted.
+ let context_uses_explicit_promotion_rules = matches!(
+ self.const_kind,
+ Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+ );
+
+ if !self.explicit && !context_uses_explicit_promotion_rules {
if let ty::FnDef(def_id, _) = *fn_ty.kind() {
// Never promote runtime `const fn` calls of
// functions without `#[rustc_promotable]`.
let tcx = self.hir.tcx();
let (min_length, exact_size) = match place.ty(&self.local_decls, tcx).ty.kind() {
ty::Array(_, length) => {
- (length.eval_usize(tcx, self.hir.param_env).try_into().unwrap(), true)
+ (length.eval_usize(tcx, self.hir.param_env), true)
}
_ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
};
use crate::check::FnCtxt;
use rustc_ast as ast;
+
use rustc_ast::util::lev_distance::find_best_match_for_name;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder};
pat_ty
}
+    /// If `opt_def_id` resolves to a *local* `const` item whose body is a range literal,
+    /// suggest moving that range directly into the match block and return `true`.
+    /// Returns `false` (emitting nothing) in every other case, including when the
+    /// constant's source snippet cannot be retrieved.
+    fn maybe_suggest_range_literal(
+        &self,
+        e: &mut DiagnosticBuilder<'_>,
+        opt_def_id: Option<hir::def_id::DefId>,
+        ident: Ident,
+    ) -> bool {
+        match opt_def_id {
+            Some(def_id) => match self.tcx.hir().get_if_local(def_id) {
+                Some(hir::Node::Item(hir::Item {
+                    kind: hir::ItemKind::Const(_, body_id), ..
+                })) => match self.tcx.hir().get(body_id.hir_id) {
+                    hir::Node::Expr(expr) => {
+                        if hir::is_range_literal(expr) {
+                            let span = self.tcx.hir().span(body_id.hir_id);
+                            // Only suggest when the constant's body can actually be
+                            // rendered back as source text.
+                            if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) {
+                                e.span_suggestion_verbose(
+                                    ident.span,
+                                    "you may want to move the range into the match block",
+                                    snip,
+                                    Applicability::MachineApplicable,
+                                );
+                                return true;
+                            }
+                        }
+                    }
+                    _ => (),
+                },
+                _ => (),
+            },
+            _ => (),
+        }
+        false
+    }
+
fn emit_bad_pat_path(
&self,
mut e: DiagnosticBuilder<'_>,
);
}
_ => {
- let const_def_id = match pat_ty.kind() {
+ let (type_def_id, item_def_id) = match pat_ty.kind() {
Adt(def, _) => match res {
- Res::Def(DefKind::Const, _) => Some(def.did),
- _ => None,
+ Res::Def(DefKind::Const, def_id) => (Some(def.did), Some(def_id)),
+ _ => (None, None),
},
- _ => None,
+ _ => (None, None),
};
let ranges = &[
self.tcx.lang_items().range_inclusive_struct(),
self.tcx.lang_items().range_to_inclusive_struct(),
];
- if const_def_id != None && ranges.contains(&const_def_id) {
- let msg = "constants only support matching by type, \
- if you meant to match against a range of values, \
- consider using a range pattern like `min ..= max` in the match block";
- e.note(msg);
+ if type_def_id != None && ranges.contains(&type_def_id) {
+ if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) {
+ let msg = "constants only support matching by type, \
+ if you meant to match against a range of values, \
+ consider using a range pattern like `min ..= max` in the match block";
+ e.note(msg);
+ }
} else {
let msg = "introduce a new binding instead";
let sugg = format!("other_{}", ident.as_str().to_lowercase());
}
macro_rules! bench_in_place {
- (
- $($fname:ident, $type:ty , $count:expr, $init: expr);*
- ) => {
+ ($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
$(
#[bench]
fn $fname(b: &mut Bencher) {
let src: Vec<$type> = black_box(vec![$init; $count]);
let mut sink = src.into_iter()
.enumerate()
- .map(|(idx, e)| { (idx as $type) ^ e }).collect::<Vec<$type>>();
+ .map(|(idx, e)| idx as $type ^ e)
+ .collect::<Vec<$type>>();
black_box(sink.as_mut_ptr())
});
}
}
bench_in_place![
- bench_in_place_xxu8_i0_0010, u8, 10, 0;
- bench_in_place_xxu8_i0_0100, u8, 100, 0;
- bench_in_place_xxu8_i0_1000, u8, 1000, 0;
- bench_in_place_xxu8_i1_0010, u8, 10, 1;
- bench_in_place_xxu8_i1_0100, u8, 100, 1;
- bench_in_place_xxu8_i1_1000, u8, 1000, 1;
- bench_in_place_xu32_i0_0010, u32, 10, 0;
- bench_in_place_xu32_i0_0100, u32, 100, 0;
- bench_in_place_xu32_i0_1000, u32, 1000, 0;
- bench_in_place_xu32_i1_0010, u32, 10, 1;
- bench_in_place_xu32_i1_0100, u32, 100, 1;
- bench_in_place_xu32_i1_1000, u32, 1000, 1;
- bench_in_place_u128_i0_0010, u128, 10, 0;
- bench_in_place_u128_i0_0100, u128, 100, 0;
- bench_in_place_u128_i0_1000, u128, 1000, 0;
- bench_in_place_u128_i1_0010, u128, 10, 1;
- bench_in_place_u128_i1_0100, u128, 100, 1;
- bench_in_place_u128_i1_1000, u128, 1000, 1
+ bench_in_place_xxu8_0010_i0, u8, 10, 0;
+ bench_in_place_xxu8_0100_i0, u8, 100, 0;
+ bench_in_place_xxu8_1000_i0, u8, 1000, 0;
+ bench_in_place_xxu8_0010_i1, u8, 10, 1;
+ bench_in_place_xxu8_0100_i1, u8, 100, 1;
+ bench_in_place_xxu8_1000_i1, u8, 1000, 1;
+ bench_in_place_xu32_0010_i0, u32, 10, 0;
+ bench_in_place_xu32_0100_i0, u32, 100, 0;
+ bench_in_place_xu32_1000_i0, u32, 1000, 0;
+ bench_in_place_xu32_0010_i1, u32, 10, 1;
+ bench_in_place_xu32_0100_i1, u32, 100, 1;
+ bench_in_place_xu32_1000_i1, u32, 1000, 1;
+ bench_in_place_u128_0010_i0, u128, 10, 0;
+ bench_in_place_u128_0100_i0, u128, 100, 0;
+ bench_in_place_u128_1000_i0, u128, 1000, 0;
+ bench_in_place_u128_0010_i1, u128, 10, 1;
+ bench_in_place_u128_0100_i1, u128, 100, 1;
+ bench_in_place_u128_1000_i1, u128, 1000, 1
];
#[bench]
use core::fmt;
use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
-use core::mem::{self, size_of, swap, ManuallyDrop};
+use core::mem::{self, swap, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr;
#[inline(always)]
fn log2_fast(x: usize) -> usize {
- 8 * size_of::<usize>() - (x.leading_zeros() as usize) - 1
+ (usize::BITS - x.leading_zeros() - 1) as usize
}
// `rebuild` takes O(len1 + len2) operations
#![feature(fn_traits)]
#![feature(fundamental)]
#![feature(inplace_iteration)]
+#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(layout_for_ptr)]
#![feature(libc)]
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
- if mem::size_of::<usize>() < 8 && alloc_size > isize::MAX as usize {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow)
} else {
Ok(())
#[stable(feature = "collection_debug", since = "1.17.0")]
impl fmt::Debug for Drain<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.pad("Drain { .. }")
+ f.debug_tuple("Drain").field(&self.as_str()).finish()
}
}
}
}
+impl<'a> Drain<'a> {
+    /// Returns the remaining (sub)string of this iterator as a slice.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(string_drain_as_str)]
+    /// let mut s = String::from("abc");
+    /// let mut drain = s.drain(..);
+    /// assert_eq!(drain.as_str(), "abc");
+    /// let _ = drain.next().unwrap();
+    /// assert_eq!(drain.as_str(), "bc");
+    /// ```
+    #[unstable(feature = "string_drain_as_str", issue = "76905")] // Note: uncomment AsRef impls below when stabilizing.
+    pub fn as_str(&self) -> &str {
+        // Delegates to the inner char iterator's `as_str`, which yields the
+        // not-yet-drained remainder.
+        self.iter.as_str()
+    }
+}
+
+// Uncomment when stabilizing `string_drain_as_str`.
+// #[unstable(feature = "string_drain_as_str", issue = "76905")]
+// impl<'a> AsRef<str> for Drain<'a> {
+// fn as_ref(&self) -> &str {
+// self.as_str()
+// }
+// }
+//
+// #[unstable(feature = "string_drain_as_str", issue = "76905")]
+// impl<'a> AsRef<[u8]> for Drain<'a> {
+// fn as_ref(&self) -> &[u8] {
+// self.as_str().as_bytes()
+// }
+// }
+
#[stable(feature = "drain", since = "1.6.0")]
impl Iterator for Drain<'_> {
type Item = char;
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::{self, Ordering};
+use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::{arith_offset, assume};
}
}
+#[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, const N: usize> TryFrom<Vec<T>> for [T; N] {
+    type Error = Vec<T>;
+
+    /// Gets the entire contents of the `Vec<T>` as an array,
+    /// if its size exactly matches that of the requested array.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::convert::TryInto;
+    /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+    /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+    /// ```
+    ///
+    /// If the length doesn't match, the input comes back in `Err`:
+    /// ```
+    /// use std::convert::TryInto;
+    /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+    /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+    /// ```
+    ///
+    /// If you're fine with just getting a prefix of the `Vec<T>`,
+    /// you can call [`.truncate(N)`](Vec::truncate) first.
+    /// ```
+    /// use std::convert::TryInto;
+    /// let mut v = String::from("hello world").into_bytes();
+    /// v.sort();
+    /// v.truncate(2);
+    /// let [a, b]: [_; 2] = v.try_into().unwrap();
+    /// assert_eq!(a, b' ');
+    /// assert_eq!(b, b'd');
+    /// ```
+    fn try_from(mut vec: Vec<T>) -> Result<[T; N], Vec<T>> {
+        // Exact-length contract: on mismatch the `Vec` is handed back untouched
+        // so the caller keeps ownership of its elements.
+        if vec.len() != N {
+            return Err(vec);
+        }
+
+        // SAFETY: `.set_len(0)` is always sound.
+        unsafe { vec.set_len(0) };
+
+        // Note: the length is cleared *before* the read below; the elements remain
+        // in the buffer and are moved out exactly once by `ptr::read`.
+        // SAFETY: A `Vec`'s pointer is always aligned properly, and
+        // the alignment the array needs is the same as the items.
+        // We checked earlier that we have sufficient items.
+        // The items will not double-drop as the `set_len`
+        // tells the `Vec` not to also drop them.
+        let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+        Ok(array)
+    }
+}
+
////////////////////////////////////////////////////////////////////////////////
// Clone-on-write
////////////////////////////////////////////////////////////////////////////////
#![feature(deque_range)]
#![feature(inplace_iteration)]
#![feature(iter_map_while)]
+#![feature(int_bits_const)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::borrow::Cow;
use std::collections::TryReserveError::*;
-use std::mem::size_of;
use std::ops::Bound::*;
pub trait IntoCow<'a, B: ?Sized>
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
let mut empty_string: String = String::new();
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve
f.flags |= 1 << (FlagV1::SignAwareZeroPad as u32);
if f.width.is_none() {
- f.width = Some(((mem::size_of::<usize>() * 8) / 4) + 2);
+ f.width = Some((usize::BITS / 4) as usize + 2);
}
}
f.flags |= 1 << (FlagV1::Alternate as u32);
/// Note however, that `black_box` is only (and can only be) provided on a "best-effort" basis. The
/// extent to which it can block optimisations may vary depending upon the platform and code-gen
/// backend used. Programs cannot rely on `black_box` for *correctness* in any way.
-#[inline]
+#[cfg_attr(not(miri), inline)]
+#[cfg_attr(miri, inline(never))]
#[unstable(feature = "test", issue = "50297")]
#[allow(unreachable_code)] // this makes #[cfg] a bit easier below.
pub fn black_box<T>(mut dummy: T) -> T {
#![macro_use]
use crate::intrinsics;
-use crate::mem;
/// Arithmetic operations required by bignums.
pub trait FullOps: Sized {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
// FIXME: will LLVM optimize this into ADC or similar?
- let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (carry as $bigty);
- ((v >> nbits) as $ty, v as $ty)
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
- let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
(carry as $bigty);
- ((v >> nbits) as $ty, v as $ty)
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
- let nbits = mem::size_of::<$ty>() * 8;
- let lhs = ((borrow as $bigty) << nbits) | (self as $bigty);
+ let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
/// Makes a bignum from `u64` value.
pub fn from_u64(mut v: u64) -> $name {
- use crate::mem;
-
let mut base = [0; $n];
let mut sz = 0;
while v > 0 {
base[sz] = v as $ty;
- v >>= mem::size_of::<$ty>() * 8;
+ v >>= <$ty>::BITS;
sz += 1;
}
$name { size: sz, base: base }
/// Returns the `i`-th bit where bit 0 is the least significant one.
/// In other words, the bit with weight `2^i`.
pub fn get_bit(&self, i: usize) -> u8 {
- use crate::mem;
-
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let d = i / digitbits;
let b = i % digitbits;
((self.base[d] >> b) & 1) as u8
/// Returns the number of bits necessary to represent this value. Note that zero
/// is considered to need 0 bits.
pub fn bit_length(&self) -> usize {
- use crate::mem;
-
// Skip over the most significant digits which are zero.
let digits = self.digits();
let zeros = digits.iter().rev().take_while(|&&x| x == 0).count();
}
// This could be optimized with leading_zeros() and bit shifts, but that's
// probably not worth the hassle.
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let mut i = nonzero.len() * digitbits - 1;
while self.get_bit(i) == 0 {
i -= 1;
/// Multiplies itself by `2^bits` and returns its own mutable reference.
pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
- use crate::mem;
-
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let digits = bits / digitbits;
let bits = bits % digitbits;
/// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
/// remainder.
pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
- use crate::mem;
-
// Stupid slow base-2 long division taken from
// https://en.wikipedia.org/wiki/Division_algorithm
// FIXME use a greater base ($ty) for the long division.
assert!(!d.is_zero());
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
for digit in &mut q.base[..] {
*digit = 0;
}
impl crate::fmt::Debug for $name {
fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
- use crate::mem;
-
let sz = if self.size < 1 { 1 } else { self.size };
- let digitlen = mem::size_of::<$ty>() * 2;
+ let digitlen = <$ty>::BITS as usize / 4;
write!(f, "{:#x}", self.base[sz - 1])?;
for &v in self.base[..sz - 1].iter().rev() {
pub const MAX: Self = !Self::MIN;
}
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
doc_comment! {
concat!("Converts a string slice in a given base to an integer.
pub const MAX: Self = !0;
}
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
doc_comment! {
concat!("Converts a string slice in a given base to an integer.
random
};
let mut gen_usize = || {
- if mem::size_of::<usize>() <= 4 {
+ if usize::BITS <= 32 {
gen_u32() as usize
} else {
(((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
-fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
where
F: FnMut(&T, &T) -> bool,
{
}
// Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
- let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
+ let limit = usize::BITS - v.len().leading_zeros();
recurse(v, &mut is_less, None, limit);
}
}
let mut it = Test(0);
- let root = usize::MAX >> (::std::mem::size_of::<usize>() * 8 / 2);
+ let root = usize::MAX >> (usize::BITS / 2);
let n = root + 20;
(&mut it).step_by(n).nth(n);
assert_eq!(it.0, n as Bigger * n as Bigger);
#![feature(partition_point)]
#![feature(once_cell)]
#![feature(unsafe_block_in_unsafe_fn)]
+#![feature(int_bits_const)]
#![deny(unsafe_op_in_unsafe_fn)]
extern crate test;
($T:ident, $T_i:ident) => {
#[cfg(test)]
mod tests {
- use core::mem;
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
#[test]
fn test_count_zeros() {
- let bits = mem::size_of::<$T>() * 8;
- assert_eq!(A.count_zeros(), bits as u32 - 3);
- assert_eq!(B.count_zeros(), bits as u32 - 2);
- assert_eq!(C.count_zeros(), bits as u32 - 5);
+ assert_eq!(A.count_zeros(), $T::BITS - 3);
+ assert_eq!(B.count_zeros(), $T::BITS - 2);
+ assert_eq!(C.count_zeros(), $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
- let bits = (mem::size_of::<$T>() * 8) as u32;
-
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
- assert_eq!((!a).leading_ones(), bits - 7);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
- assert_eq!(_1.leading_ones(), bits);
- assert_eq!(_1.trailing_ones(), bits);
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!(MAX.leading_ones(), 0);
- assert_eq!((_1 << 1).leading_ones(), bits - 1);
- assert_eq!(MAX.trailing_ones(), bits - 1);
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!(MAX.trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);
mod tests {
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
- use std::mem;
use std::str::FromStr;
use crate::num;
#[test]
fn test_count_zeros() {
- let bits = mem::size_of::<$T>() * 8;
- assert!(A.count_zeros() == bits as u32 - 3);
- assert!(B.count_zeros() == bits as u32 - 2);
- assert!(C.count_zeros() == bits as u32 - 5);
+ assert!(A.count_zeros() == $T::BITS - 3);
+ assert!(B.count_zeros() == $T::BITS - 2);
+ assert!(C.count_zeros() == $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
- let bits = (mem::size_of::<$T>() * 8) as u32;
-
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
- assert_eq!((!a).leading_ones(), bits - 7);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
- assert_eq!(_1.leading_ones(), bits);
- assert_eq!(_1.trailing_ones(), bits);
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!((_1 >> 1).leading_ones(), 0);
- assert_eq!((_1 << 1).leading_ones(), bits - 1);
- assert_eq!((_1 >> 1).trailing_ones(), bits - 1);
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!((_1 >> 1).trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);
}
pub unsafe fn read_sleb128(&mut self) -> i64 {
- let mut shift: usize = 0;
+ let mut shift: u32 = 0;
let mut result: u64 = 0;
let mut byte: u8;
loop {
}
}
// sign-extend
- if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
+ if shift < u64::BITS && (byte & 0x40) != 0 {
result |= (!0 as u64) << shift;
}
result as i64
#[cfg(target_arch = "hexagon")]
const UNWIND_DATA_REG: (i32, i32) = (0, 1); // R0, R1
-#[cfg(target_arch = "riscv64")]
+#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
const UNWIND_DATA_REG: (i32, i32) = (10, 11); // x10, x11
// The following code is based on GCC's C and C++ personality routines. For reference, see:
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/"
)]
#![feature(core_intrinsics)]
+#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(libc)]
#![feature(nll)]
target_arch = "mips64",
target_arch = "s390x",
target_arch = "sparc64",
- target_arch = "riscv64"
+ target_arch = "riscv64",
+ target_arch = "riscv32"
))]
mod arch {
pub use libc::{blkcnt_t, blksize_t, ino_t, nlink_t, off_t, stat, time_t};
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
- target_arch = "riscv64"
+ target_arch = "riscv64",
+ target_arch = "riscv32"
)
),
all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),
target_arch = "powerpc",
target_arch = "powerpc64",
target_arch = "s390x",
- target_arch = "riscv64"
+ target_arch = "riscv64",
+ target_arch = "riscv32"
)
),
all(target_os = "android", any(target_arch = "aarch64", target_arch = "arm")),
//! [`push`]: PathBuf::push
#![stable(feature = "rust1", since = "1.0.0")]
+#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(test)]
mod tests;
unsafe { &*(s as *const OsStr as *const [u8]) }
}
unsafe fn u8_slice_as_os_str(s: &[u8]) -> &OsStr {
- &*(s as *const [u8] as *const OsStr)
+ // SAFETY: see the comment of `os_str_as_u8_slice`
+ unsafe { &*(s as *const [u8] as *const OsStr) }
}
// Detect scheme on Redox
// basic workhorse for splitting stem and extension
fn split_file_at_dot(file: &OsStr) -> (Option<&OsStr>, Option<&OsStr>) {
- unsafe {
- if os_str_as_u8_slice(file) == b".." {
- return (Some(file), None);
- }
-
- // The unsafety here stems from converting between &OsStr and &[u8]
- // and back. This is safe to do because (1) we only look at ASCII
- // contents of the encoding and (2) new &OsStr values are produced
- // only from ASCII-bounded slices of existing &OsStr values.
+ if os_str_as_u8_slice(file) == b".." {
+ return (Some(file), None);
+ }
- let mut iter = os_str_as_u8_slice(file).rsplitn(2, |b| *b == b'.');
- let after = iter.next();
- let before = iter.next();
- if before == Some(b"") {
- (Some(file), None)
- } else {
- (before.map(|s| u8_slice_as_os_str(s)), after.map(|s| u8_slice_as_os_str(s)))
- }
+ // The unsafety here stems from converting between &OsStr and &[u8]
+ // and back. This is safe to do because (1) we only look at ASCII
+ // contents of the encoding and (2) new &OsStr values are produced
+ // only from ASCII-bounded slices of existing &OsStr values.
+ let mut iter = os_str_as_u8_slice(file).rsplitn(2, |b| *b == b'.');
+ let after = iter.next();
+ let before = iter.next();
+ if before == Some(b"") {
+ (Some(file), None)
+ } else {
+ unsafe { (before.map(|s| u8_slice_as_os_str(s)), after.map(|s| u8_slice_as_os_str(s))) }
}
}
// The following (private!) function allows construction of a path from a u8
// slice, which is only safe when it is known to follow the OsStr encoding.
unsafe fn from_u8_slice(s: &[u8]) -> &Path {
- Path::new(u8_slice_as_os_str(s))
+ unsafe { Path::new(u8_slice_as_os_str(s)) }
}
// The following (private!) function reveals the byte encoding used for OsStr.
fn as_u8_slice(&self) -> &[u8] {
target_arch = "powerpc64",
target_arch = "asmjs",
target_arch = "wasm32",
- target_arch = "hexagon"
+ target_arch = "hexagon",
+ target_arch = "riscv32"
)))]
pub const MIN_ALIGN: usize = 8;
#[cfg(all(any(
#[cfg(target_arch = "sparc64")]
pub const unwinder_private_data_size: usize = 2;
-#[cfg(target_arch = "riscv64")]
+#[cfg(any(target_arch = "riscv64", target_arch = "riscv32"))]
pub const unwinder_private_data_size: usize = 2;
#[cfg(target_os = "emscripten")]
| Subcommand::Build { .. }
| Subcommand::Bench { .. }
| Subcommand::Dist { .. }
- | Subcommand::Install { .. } => assert_eq!(config.stage, 2),
+ | Subcommand::Install { .. } => {
+ assert_eq!(
+ config.stage, 2,
+ "x.py should be run with `--stage 2` on CI, but was run with `--stage {}`",
+ config.stage,
+ );
+ }
Subcommand::Clean { .. }
| Subcommand::Check { .. }
| Subcommand::Clippy { .. }
--- /dev/null
+// EMIT_MIR issue_75439.foo.MatchBranchSimplification.diff
+
+#![feature(const_fn_transmute)]
+#![feature(or_patterns)]
+
+use std::mem::transmute;
+
+// Regression test for issue 75439: an or-pattern match whose MIR is run through the
+// MatchBranchSimplification pass (see the EMIT_MIR annotation above).
+// Reinterprets 16 bytes as four big-endian `u32` words and returns the last word's
+// bytes when the first three words match `[0, 0, 0]` or `[0, 0, 0x0000_ffff.to_be()]`
+// (NOTE(review): the latter resembles the IPv4-mapped IPv6 prefix -- intent inferred).
+pub fn foo(bytes: [u8; 16]) -> Option<[u8; 4]> {
+    // big endian `u32`s
+    let dwords: [u32; 4] = unsafe { transmute(bytes) };
+    const FF: u32 = 0x0000_ffff_u32.to_be();
+    // `0 | FF` is the or-pattern this test exercises.
+    if let [0, 0, 0 | FF, ip] = dwords {
+        Some(unsafe { transmute(ip) })
+    } else {
+        None
+    }
+}
+
+fn main() {
+    // Exercise `foo` so the function is not optimized away; result is irrelevant.
+    let _ = foo([0; 16]);
+}
--- /dev/null
+- // MIR for `foo` before MatchBranchSimplification
++ // MIR for `foo` after MatchBranchSimplification
+
+ fn foo(_1: [u8; 16]) -> Option<[u8; 4]> {
+ debug bytes => _1; // in scope 0 at $DIR/issue-75439.rs:8:12: 8:17
+ let mut _0: std::option::Option<[u8; 4]>; // return place in scope 0 at $DIR/issue-75439.rs:8:32: 8:47
+ let _2: [u32; 4]; // in scope 0 at $DIR/issue-75439.rs:10:9: 10:15
+ let mut _3: [u8; 16]; // in scope 0 at $DIR/issue-75439.rs:10:47: 10:52
+ let mut _5: [u8; 4]; // in scope 0 at $DIR/issue-75439.rs:13:14: 13:38
+ let mut _6: u32; // in scope 0 at $DIR/issue-75439.rs:13:33: 13:35
+ scope 1 {
+ debug dwords => _2; // in scope 1 at $DIR/issue-75439.rs:10:9: 10:15
+ let _4: u32; // in scope 1 at $DIR/issue-75439.rs:12:27: 12:29
+ scope 3 {
+ debug ip => _4; // in scope 3 at $DIR/issue-75439.rs:12:27: 12:29
+ scope 4 {
+ }
+ }
+ }
+ scope 2 {
+ }
+
+ bb0: {
+ StorageLive(_2); // scope 0 at $DIR/issue-75439.rs:10:9: 10:15
+ StorageLive(_3); // scope 2 at $DIR/issue-75439.rs:10:47: 10:52
+ _3 = _1; // scope 2 at $DIR/issue-75439.rs:10:47: 10:52
+ _2 = transmute::<[u8; 16], [u32; 4]>(move _3) -> bb1; // scope 2 at $DIR/issue-75439.rs:10:37: 10:53
+ // mir::Constant
+ // + span: $DIR/issue-75439.rs:10:37: 10:46
+ // + literal: Const { ty: unsafe extern "rust-intrinsic" fn([u8; 16]) -> [u32; 4] {std::intrinsics::transmute::<[u8; 16], [u32; 4]>}, val: Value(Scalar(<ZST>)) }
+ }
+
+ bb1: {
+ StorageDead(_3); // scope 2 at $DIR/issue-75439.rs:10:52: 10:53
+ switchInt(_2[0 of 4]) -> [0_u32: bb2, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:13: 12:14
+ }
+
+ bb2: {
+ switchInt(_2[1 of 4]) -> [0_u32: bb3, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:16: 12:17
+ }
+
+ bb3: {
+ switchInt(_2[2 of 4]) -> [0_u32: bb6, 4294901760_u32: bb7, otherwise: bb4]; // scope 1 at $DIR/issue-75439.rs:12:19: 12:20
+ }
+
+ bb4: {
+ discriminant(_0) = 0; // scope 1 at $DIR/issue-75439.rs:15:9: 15:13
+ goto -> bb9; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
+ }
+
+ bb5: {
+ StorageLive(_5); // scope 3 at $DIR/issue-75439.rs:13:14: 13:38
+ StorageLive(_6); // scope 4 at $DIR/issue-75439.rs:13:33: 13:35
+ _6 = _4; // scope 4 at $DIR/issue-75439.rs:13:33: 13:35
+ _5 = transmute::<u32, [u8; 4]>(move _6) -> bb8; // scope 4 at $DIR/issue-75439.rs:13:23: 13:36
+ // mir::Constant
+ // + span: $DIR/issue-75439.rs:13:23: 13:32
+ // + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32) -> [u8; 4] {std::intrinsics::transmute::<u32, [u8; 4]>}, val: Value(Scalar(<ZST>)) }
+ }
+
+ bb6: {
+ StorageLive(_4); // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
+ _4 = _2[3 of 4]; // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
+ goto -> bb5; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
+ }
+
+ bb7: {
+ StorageLive(_4); // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
+ _4 = _2[3 of 4]; // scope 1 at $DIR/issue-75439.rs:12:27: 12:29
+ goto -> bb5; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
+ }
+
+ bb8: {
+ StorageDead(_6); // scope 4 at $DIR/issue-75439.rs:13:35: 13:36
+ ((_0 as Some).0: [u8; 4]) = move _5; // scope 3 at $DIR/issue-75439.rs:13:9: 13:39
+ discriminant(_0) = 1; // scope 3 at $DIR/issue-75439.rs:13:9: 13:39
+ StorageDead(_5); // scope 3 at $DIR/issue-75439.rs:13:38: 13:39
+ StorageDead(_4); // scope 1 at $DIR/issue-75439.rs:14:5: 14:6
+ goto -> bb9; // scope 1 at $DIR/issue-75439.rs:12:5: 16:6
+ }
+
+ bb9: {
+ StorageDead(_2); // scope 0 at $DIR/issue-75439.rs:17:1: 17:2
+ return; // scope 0 at $DIR/issue-75439.rs:17:2: 17:2
+ }
+ }
+
// run-pass
// Test a ZST enum whose discriminant is ~0i128. This caused an ICE when casting to a i32.
+#![feature(test)]
+use std::hint::black_box;
#[derive(Copy, Clone)]
enum Nums {
const NEG_ONE_I64: i64 = Nums::NegOne as i64;
const NEG_ONE_I128: i128 = Nums::NegOne as i128;
-#[inline(never)]
-fn identity<T>(t: T) -> T { t }
-
fn test_as_arg(n: Nums) {
assert_eq!(-1i8, n as i8);
assert_eq!(-1i16, n as i16);
assert_eq!(-1i64, kind as i64);
assert_eq!(-1i128, kind as i128);
- assert_eq!(-1i8, identity(kind) as i8);
- assert_eq!(-1i16, identity(kind) as i16);
- assert_eq!(-1i32, identity(kind) as i32);
- assert_eq!(-1i64, identity(kind) as i64);
- assert_eq!(-1i128, identity(kind) as i128);
+ assert_eq!(-1i8, black_box(kind) as i8);
+ assert_eq!(-1i16, black_box(kind) as i16);
+ assert_eq!(-1i32, black_box(kind) as i32);
+ assert_eq!(-1i64, black_box(kind) as i64);
+ assert_eq!(-1i128, black_box(kind) as i128);
test_as_arg(Nums::NegOne);
// run-pass
#![feature(const_discriminant)]
+#![feature(test)]
#![allow(dead_code)]
use std::mem::{discriminant, Discriminant};
-
-// `discriminant(const_expr)` may get const-propagated.
-// As we want to check that const-eval is equal to ordinary exection,
-// we wrap `const_expr` with a function which is not const to prevent this.
-#[inline(never)]
-fn identity<T>(x: T) -> T { x }
+use std::hint::black_box;
enum Test {
A(u8),
fn main() {
assert_eq!(TEST_A, TEST_A_OTHER);
- assert_eq!(TEST_A, discriminant(identity(&Test::A(17))));
- assert_eq!(TEST_B, discriminant(identity(&Test::B)));
+ assert_eq!(TEST_A, discriminant(black_box(&Test::A(17))));
+ assert_eq!(TEST_B, discriminant(black_box(&Test::B)));
assert_ne!(TEST_A, TEST_B);
- assert_ne!(TEST_B, discriminant(identity(&Test::C { a: 42, b: 7 })));
+ assert_ne!(TEST_B, discriminant(black_box(&Test::C { a: 42, b: 7 })));
- assert_eq!(TEST_V, discriminant(identity(&SingleVariant::V)));
+ assert_eq!(TEST_V, discriminant(black_box(&SingleVariant::V)));
}
#![allow(non_snake_case)]
use std::ops::RangeInclusive;
+
const RANGE: RangeInclusive<i32> = 0..=255;
+const RANGE2: RangeInclusive<i32> = panic!();
+
fn main() {
let n: i32 = 1;
match n {
RANGE => {}
//~^ ERROR mismatched types
+ RANGE2 => {}
+ //~^ ERROR mismatched types
_ => {}
}
}
error[E0308]: mismatched types
- --> $DIR/issue-76191.rs:10:9
+ --> $DIR/issue-76191.rs:13:9
|
LL | const RANGE: RangeInclusive<i32> = 0..=255;
| ------------------------------------------- constant defined here
|
= note: expected type `i32`
found struct `RangeInclusive<i32>`
+help: you may want to move the range into the match block
+ |
+LL | 0..=255 => {}
+ | ^^^^^^^
+
+error[E0308]: mismatched types
+ --> $DIR/issue-76191.rs:15:9
+ |
+LL | const RANGE2: RangeInclusive<i32> = panic!();
+ | --------------------------------------------- constant defined here
+...
+LL | match n {
+ | - this expression has type `i32`
+...
+LL | RANGE2 => {}
+ | ^^^^^^
+ | |
+ | expected `i32`, found struct `RangeInclusive`
+ | `RANGE2` is interpreted as a constant, not a new binding
+ |
+ = note: expected type `i32`
+ found struct `RangeInclusive<i32>`
= note: constants only support matching by type, if you meant to match against a range of values, consider using a range pattern like `min ..= max` in the match block
-error: aborting due to previous error
+error: aborting due to 2 previous errors
For more information about this error, try `rustc --explain E0308`.
"riscv32i-unknown-none-elf",
"riscv32imc-unknown-none-elf",
"riscv32imac-unknown-none-elf",
+ "riscv32gc-unknown-linux-gnu",
"riscv64imac-unknown-none-elf",
"riscv64gc-unknown-none-elf",
"riscv64gc-unknown-linux-gnu",