#![feature(generators)]
#![feature(generator_trait)]
#![feature(fn_traits)]
+#![feature(int_bits_const)]
#![feature(min_specialization)]
#![feature(optin_builtin_traits)]
#![feature(nll)]
P: Pointer,
T: Tag,
{
- const TAG_BIT_SHIFT: usize = (8 * std::mem::size_of::<usize>()) - T::BITS;
+ const TAG_BIT_SHIFT: usize = usize::BITS as usize - T::BITS;
const ASSERTION: () = {
assert!(T::BITS <= P::BITS);
// Used for the transmute_copy's below
use core::fmt;
use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
-use core::mem::{self, size_of, swap, ManuallyDrop};
+use core::mem::{self, swap, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr;
#[inline(always)]
/// Returns `floor(log2(x))` for `x > 0`.
///
/// Computed from the position of the most significant set bit:
/// `usize::BITS - leading_zeros(x) - 1`.
///
/// Precondition: `x != 0` — for `x == 0` the subtraction underflows
/// (panics in debug builds, wraps in release builds).
fn log2_fast(x: usize) -> usize {
    (usize::BITS - x.leading_zeros() - 1) as usize
}
// `rebuild` takes O(len1 + len2) operations
#![feature(fn_traits)]
#![feature(fundamental)]
#![feature(inplace_iteration)]
+#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(layout_for_ptr)]
#![feature(libc)]
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
- if mem::size_of::<usize>() < 8 && alloc_size > isize::MAX as usize {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
Err(CapacityOverflow)
} else {
Ok(())
#![feature(deque_range)]
#![feature(inplace_iteration)]
#![feature(iter_map_while)]
+#![feature(int_bits_const)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::borrow::Cow;
use std::collections::TryReserveError::*;
-use std::mem::size_of;
use std::ops::Bound::*;
pub trait IntoCow<'a, B: ?Sized>
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
let mut empty_string: String = String::new();
// on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
// Any platform that succeeds for these requests is technically broken with
// ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
+ let guards_against_isize = usize::BITS < 64;
{
// Note: basic stuff is checked by test_reserve
f.flags |= 1 << (FlagV1::SignAwareZeroPad as u32);
if f.width.is_none() {
- f.width = Some(((mem::size_of::<usize>() * 8) / 4) + 2);
+ f.width = Some((usize::BITS / 4) as usize + 2);
}
}
f.flags |= 1 << (FlagV1::Alternate as u32);
#![macro_use]
use crate::intrinsics;
-use crate::mem;
/// Arithmetic operations required by bignums.
pub trait FullOps: Sized {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
// FIXME: will LLVM optimize this into ADC or similar?
- let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (carry as $bigty);
- ((v >> nbits) as $ty, v as $ty)
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
- let nbits = mem::size_of::<$ty>() * 8;
let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) +
(carry as $bigty);
- ((v >> nbits) as $ty, v as $ty)
+ ((v >> <$ty>::BITS) as $ty, v as $ty)
}
fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
- let nbits = mem::size_of::<$ty>() * 8;
- let lhs = ((borrow as $bigty) << nbits) | (self as $bigty);
+ let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
/// Makes a bignum from `u64` value.
pub fn from_u64(mut v: u64) -> $name {
- use crate::mem;
-
let mut base = [0; $n];
let mut sz = 0;
while v > 0 {
base[sz] = v as $ty;
- v >>= mem::size_of::<$ty>() * 8;
+ v >>= <$ty>::BITS;
sz += 1;
}
$name { size: sz, base: base }
/// Returns the `i`-th bit where bit 0 is the least significant one.
/// In other words, the bit with weight `2^i`.
pub fn get_bit(&self, i: usize) -> u8 {
- use crate::mem;
-
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let d = i / digitbits;
let b = i % digitbits;
((self.base[d] >> b) & 1) as u8
/// Returns the number of bits necessary to represent this value. Note that zero
/// is considered to need 0 bits.
pub fn bit_length(&self) -> usize {
- use crate::mem;
-
// Skip over the most significant digits which are zero.
let digits = self.digits();
let zeros = digits.iter().rev().take_while(|&&x| x == 0).count();
}
// This could be optimized with leading_zeros() and bit shifts, but that's
// probably not worth the hassle.
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let mut i = nonzero.len() * digitbits - 1;
while self.get_bit(i) == 0 {
i -= 1;
/// Multiplies itself by `2^bits` and returns its own mutable reference.
pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
- use crate::mem;
-
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
let digits = bits / digitbits;
let bits = bits % digitbits;
/// Divide self by another bignum, overwriting `q` with the quotient and `r` with the
/// remainder.
pub fn div_rem(&self, d: &$name, q: &mut $name, r: &mut $name) {
- use crate::mem;
-
// Stupid slow base-2 long division taken from
// https://en.wikipedia.org/wiki/Division_algorithm
// FIXME use a greater base ($ty) for the long division.
assert!(!d.is_zero());
- let digitbits = mem::size_of::<$ty>() * 8;
+ let digitbits = <$ty>::BITS as usize;
for digit in &mut q.base[..] {
*digit = 0;
}
impl crate::fmt::Debug for $name {
fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
- use crate::mem;
-
let sz = if self.size < 1 { 1 } else { self.size };
- let digitlen = mem::size_of::<$ty>() * 2;
+ let digitlen = <$ty>::BITS as usize / 4;
write!(f, "{:#x}", self.base[sz - 1])?;
for &v in self.base[..sz - 1].iter().rev() {
pub const MAX: Self = !Self::MIN;
}
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
doc_comment! {
concat!("Converts a string slice in a given base to an integer.
pub const MAX: Self = !0;
}
+ doc_comment! {
+ concat!("The size of this integer type in bits.
+
+# Examples
+
+```
+", $Feature, "#![feature(int_bits_const)]
+assert_eq!(", stringify!($SelfT), "::BITS, ", stringify!($BITS), ");",
+$EndFeature, "
+```"),
+ #[unstable(feature = "int_bits_const", issue = "76904")]
+ pub const BITS: u32 = $BITS;
+ }
+
doc_comment! {
concat!("Converts a string slice in a given base to an integer.
random
};
let mut gen_usize = || {
- if mem::size_of::<usize>() <= 4 {
+ if usize::BITS <= 32 {
gen_u32() as usize
} else {
(((gen_u32() as u64) << 32) | (gen_u32() as u64)) as usize
///
/// `limit` is the number of allowed imbalanced partitions before switching to `heapsort`. If zero,
/// this function will immediately switch to heapsort.
-fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: usize)
+fn recurse<'a, T, F>(mut v: &'a mut [T], is_less: &mut F, mut pred: Option<&'a T>, mut limit: u32)
where
F: FnMut(&T, &T) -> bool,
{
}
// Limit the number of imbalanced partitions to `floor(log2(len)) + 1`.
- let limit = mem::size_of::<usize>() * 8 - v.len().leading_zeros() as usize;
+ let limit = usize::BITS - v.len().leading_zeros();
recurse(v, &mut is_less, None, limit);
}
}
let mut it = Test(0);
- let root = usize::MAX >> (::std::mem::size_of::<usize>() * 8 / 2);
+ let root = usize::MAX >> (usize::BITS / 2);
let n = root + 20;
(&mut it).step_by(n).nth(n);
assert_eq!(it.0, n as Bigger * n as Bigger);
#![feature(partition_point)]
#![feature(once_cell)]
#![feature(unsafe_block_in_unsafe_fn)]
+#![feature(int_bits_const)]
#![deny(unsafe_op_in_unsafe_fn)]
extern crate test;
($T:ident, $T_i:ident) => {
#[cfg(test)]
mod tests {
- use core::mem;
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
#[test]
fn test_count_zeros() {
- let bits = mem::size_of::<$T>() * 8;
- assert_eq!(A.count_zeros(), bits as u32 - 3);
- assert_eq!(B.count_zeros(), bits as u32 - 2);
- assert_eq!(C.count_zeros(), bits as u32 - 5);
+ assert_eq!(A.count_zeros(), $T::BITS - 3);
+ assert_eq!(B.count_zeros(), $T::BITS - 2);
+ assert_eq!(C.count_zeros(), $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
- let bits = (mem::size_of::<$T>() * 8) as u32;
-
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
- assert_eq!((!a).leading_ones(), bits - 7);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
- assert_eq!(_1.leading_ones(), bits);
- assert_eq!(_1.trailing_ones(), bits);
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!(MAX.leading_ones(), 0);
- assert_eq!((_1 << 1).leading_ones(), bits - 1);
- assert_eq!(MAX.trailing_ones(), bits - 1);
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!(MAX.trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);
mod tests {
use core::ops::{BitAnd, BitOr, BitXor, Not, Shl, Shr};
use core::$T_i::*;
- use std::mem;
use std::str::FromStr;
use crate::num;
#[test]
fn test_count_zeros() {
- let bits = mem::size_of::<$T>() * 8;
- assert!(A.count_zeros() == bits as u32 - 3);
- assert!(B.count_zeros() == bits as u32 - 2);
- assert!(C.count_zeros() == bits as u32 - 5);
+ assert!(A.count_zeros() == $T::BITS - 3);
+ assert!(B.count_zeros() == $T::BITS - 2);
+ assert!(C.count_zeros() == $T::BITS - 5);
}
#[test]
fn test_leading_trailing_ones() {
- let bits = (mem::size_of::<$T>() * 8) as u32;
-
let a: $T = 0b0101_1111;
assert_eq!(a.trailing_ones(), 5);
- assert_eq!((!a).leading_ones(), bits - 7);
+ assert_eq!((!a).leading_ones(), $T::BITS - 7);
assert_eq!(a.reverse_bits().leading_ones(), 5);
- assert_eq!(_1.leading_ones(), bits);
- assert_eq!(_1.trailing_ones(), bits);
+ assert_eq!(_1.leading_ones(), $T::BITS);
+ assert_eq!(_1.trailing_ones(), $T::BITS);
assert_eq!((_1 << 1).trailing_ones(), 0);
assert_eq!((_1 >> 1).leading_ones(), 0);
- assert_eq!((_1 << 1).leading_ones(), bits - 1);
- assert_eq!((_1 >> 1).trailing_ones(), bits - 1);
+ assert_eq!((_1 << 1).leading_ones(), $T::BITS - 1);
+ assert_eq!((_1 >> 1).trailing_ones(), $T::BITS - 1);
assert_eq!(_0.leading_ones(), 0);
assert_eq!(_0.trailing_ones(), 0);
}
pub unsafe fn read_sleb128(&mut self) -> i64 {
- let mut shift: usize = 0;
+ let mut shift: u32 = 0;
let mut result: u64 = 0;
let mut byte: u8;
loop {
}
}
// sign-extend
- if shift < 8 * mem::size_of::<u64>() && (byte & 0x40) != 0 {
+ if shift < u64::BITS && (byte & 0x40) != 0 {
result |= (!0 as u64) << shift;
}
result as i64
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/"
)]
#![feature(core_intrinsics)]
+#![feature(int_bits_const)]
#![feature(lang_items)]
#![feature(libc)]
#![feature(nll)]