// except according to those terms.
use fmt::{Formatter, Result, LowerExp, UpperExp, Display, Debug};
-use mem;
+use mem::MaybeUninit;
use num::flt2dec;
// Don't inline this so callers don't use the stack space this function
// requires.
where T: flt2dec::DecodableFloat
{
unsafe {
- let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64
- let mut parts: [flt2dec::Part; 4] = mem::uninitialized();
+ let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64
+ let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized();
let formatted = flt2dec::to_exact_fixed_str(flt2dec::strategy::grisu::format_exact,
*num, sign, precision,
- false, &mut buf, &mut parts);
+ false, buf.get_mut(), parts.get_mut());
fmt.pad_formatted_parts(&formatted)
}
}
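// [sketch, not part of the patch] The same pattern repeats through all four
// formatting hunks here: reserve uninitialized stack space, let the flt2dec
// callee fill it, and hand back only the initialized parts. A minimal
// self-contained version using the later-stabilized MaybeUninit names
// (`uninit`/`as_mut_ptr`/`assume_init`; this patch predates them and uses
// `uninitialized`/`get_mut`/`get_ref`/`into_inner`):
use core::mem::MaybeUninit;

fn buffer_pattern_demo() -> u8 {
    // Reserve stack space without claiming it is initialized.
    let mut buf = MaybeUninit::<[u8; 1024]>::uninit();
    unsafe {
        // Initialize through a raw pointer so no reference to
        // uninitialized bytes is ever created.
        let p = buf.as_mut_ptr() as *mut u8;
        p.write_bytes(b'0', 1024);
        // Only after the bytes are written is this assertion sound.
        buf.assume_init()[0]
    }
}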
{
unsafe {
// enough for f32 and f64
- let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized();
- let mut parts: [flt2dec::Part; 4] = mem::uninitialized();
+ let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized();
+ let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized();
let formatted = flt2dec::to_shortest_str(flt2dec::strategy::grisu::format_shortest, *num,
- sign, precision, false, &mut buf, &mut parts);
+ sign, precision, false, buf.get_mut(),
+ parts.get_mut());
fmt.pad_formatted_parts(&formatted)
}
}
where T: flt2dec::DecodableFloat
{
unsafe {
- let mut buf: [u8; 1024] = mem::uninitialized(); // enough for f32 and f64
- let mut parts: [flt2dec::Part; 6] = mem::uninitialized();
+ let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64
+ let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized();
let formatted = flt2dec::to_exact_exp_str(flt2dec::strategy::grisu::format_exact,
*num, sign, precision,
- upper, &mut buf, &mut parts);
+ upper, buf.get_mut(), parts.get_mut());
fmt.pad_formatted_parts(&formatted)
}
}
{
unsafe {
// enough for f32 and f64
- let mut buf: [u8; flt2dec::MAX_SIG_DIGITS] = mem::uninitialized();
- let mut parts: [flt2dec::Part; 6] = mem::uninitialized();
+ let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized();
+ let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized();
let formatted = flt2dec::to_shortest_exp_str(flt2dec::strategy::grisu::format_shortest,
*num, sign, (0, 0), upper,
- &mut buf, &mut parts);
+ buf.get_mut(), parts.get_mut());
fmt.pad_formatted_parts(&formatted)
}
}
#[allow(unused_macros)]
macro_rules! vector_impl { ($([$f:ident, $($args:tt)*]),*) => { $($f!($($args)*);)* } }
#[path = "../stdsimd/coresimd/mod.rs"]
+// Replacing uses of mem::{uninitialized,zeroed} with MaybeUninit has to happen in the stdsimd repo.
+#[allow(deprecated)]
#[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)]
#[unstable(feature = "stdsimd", issue = "48556")]
#[cfg(not(stage0))] // allow changes to how stdsimd works in stage0
use fmt;
use hash;
use marker::{PhantomData, Unsize};
-use mem;
+use mem::{self, MaybeUninit};
use nonzero::NonZero;
use cmp::Ordering::{self, Less, Equal, Greater};
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with
- let mut tmp: T = mem::uninitialized();
+ let mut tmp = MaybeUninit::<T>::uninitialized();
// Perform the swap
- copy_nonoverlapping(x, &mut tmp, 1);
+ copy_nonoverlapping(x, tmp.get_mut(), 1);
copy(y, x, 1); // `x` and `y` may overlap
- copy_nonoverlapping(&tmp, y, 1);
-
- // y and t now point to the same thing, but we need to completely forget `tmp`
- // because it's no longer relevant.
- mem::forget(tmp);
+ copy_nonoverlapping(tmp.get_ref(), y, 1);
}
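// [sketch, not part of the patch] Why the `mem::forget` disappears: a
// MaybeUninit<T> never runs T's destructor, so the stale bitwise copy left in
// `tmp` after the swap is simply abandoned when `tmp` goes out of scope.
// Equivalent code under the stabilized names:
use core::mem::MaybeUninit;
use core::ptr::{copy, copy_nonoverlapping};

pub unsafe fn swap_sketch<T>(x: *mut T, y: *mut T) {
    let mut tmp = MaybeUninit::<T>::uninit();
    copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); // tmp = *x
    copy(y, x, 1);                               // *x = *y (`x` and `y` may overlap)
    copy_nonoverlapping(tmp.as_ptr(), y, 1);     // *y = tmp
    // No mem::forget: dropping a MaybeUninit runs no destructor for T.
}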
/// Swaps a sequence of values at two mutable locations of the same type.
while i + block_size <= len {
// Create some uninitialized memory as scratch space
// Declaring `t` here avoids aligning the stack when this loop is unused
- let mut t: Block = mem::uninitialized();
- let t = &mut t as *mut _ as *mut u8;
+ let mut t = MaybeUninit::<Block>::uninitialized();
+ let t = t.as_mut_ptr() as *mut u8;
let x = x.add(i);
let y = y.add(i);
if i < len {
// Swap any remaining bytes
- let mut t: UnalignedBlock = mem::uninitialized();
+ let mut t = MaybeUninit::<UnalignedBlock>::uninitialized();
let rem = len - i;
- let t = &mut t as *mut _ as *mut u8;
+ let t = t.as_mut_ptr() as *mut u8;
let x = x.add(i);
let y = y.add(i);
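// [sketch, not part of the patch] The `as_mut_ptr() as *mut u8` above replaces
// `&mut t as *mut _ as *mut u8`: it yields a byte view of the scratch block
// without ever materializing a reference to uninitialized memory. Stabilized
// names; `Block` here is a hypothetical stand-in, not the type in ptr.rs:
use core::mem::{size_of, MaybeUninit};
use core::ptr::copy_nonoverlapping;

type Block = [u64; 4]; // stand-in scratch type for illustration

unsafe fn scratch_demo(src: *const u8) {
    let mut t = MaybeUninit::<Block>::uninit();
    let tp = t.as_mut_ptr() as *mut u8; // byte view; no reference created
    copy_nonoverlapping(src, tp, size_of::<Block>());
}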
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn read<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
- copy_nonoverlapping(src, &mut tmp, 1);
- tmp
+ let mut tmp = MaybeUninit::<T>::uninitialized();
+ copy_nonoverlapping(src, tmp.get_mut(), 1);
+ tmp.into_inner()
}
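// [sketch, not part of the patch] `read` keeps its old shape, but the claim
// "this is now initialized" moves into `into_inner` (today's `assume_init`).
// The stabilized equivalent:
use core::mem::MaybeUninit;
use core::ptr::copy_nonoverlapping;

pub unsafe fn read_sketch<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
    tmp.assume_init() // sound: the copy fully initialized `tmp`
}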
/// Reads the value from `src` without moving it. This leaves the
/// memory in `src` unchanged.
#[inline]
#[stable(feature = "ptr_unaligned", since = "1.17.0")]
pub unsafe fn read_unaligned<T>(src: *const T) -> T {
- let mut tmp: T = mem::uninitialized();
+ let mut tmp = MaybeUninit::<T>::uninitialized();
copy_nonoverlapping(src as *const u8,
- &mut tmp as *mut T as *mut u8,
+ tmp.as_mut_ptr() as *mut u8,
mem::size_of::<T>());
- tmp
+ tmp.into_inner()
}
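// [sketch, not part of the patch] `read_unaligned` must fill the temporary
// with a byte-wise copy because `src` may not satisfy T's alignment; a typed
// copy would be undefined behavior. A small usage example of the stable
// `core::ptr::read_unaligned`:
use core::ptr::read_unaligned;

/// Load a u32 stored little-endian at an arbitrary, possibly unaligned offset.
fn load_u32_le(bytes: &[u8], at: usize) -> u32 {
    assert!(at + 4 <= bytes.len());
    let v = unsafe { read_unaligned(bytes.as_ptr().add(at) as *const u32) };
    u32::from_le(v)
}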
/// Overwrites a memory location with the given value without reading or
/// dropping the old value.
// except according to those terms.
use cmp;
-use mem;
+use mem::{self, MaybeUninit};
use ptr;
/// Rotation is much faster if it has access to a little bit of memory. This
}
impl<T> RawArray<T> {
- fn new() -> Self {
- unsafe { mem::uninitialized() }
- }
fn ptr(&self) -> *mut T {
unsafe { &self.typed as *const T as *mut T }
}
}
}
- let rawarray = RawArray::new();
- let buf = rawarray.ptr();
+ let rawarray = MaybeUninit::<RawArray<T>>::uninitialized();
+ let buf = rawarray.get_ref().ptr();
let dim = mid.sub(left).add(right);
if left <= right {
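// [sketch, not part of the patch] RawArray::new() used to conjure an entirely
// uninitialized value out of thin air; now the caller owns a MaybeUninit and
// only ever takes a raw pointer into it. Under the stabilized API the helper
// can disappear altogether, along these lines:
use core::mem::MaybeUninit;

fn with_rotate_scratch<T>(f: impl FnOnce(*mut T)) {
    // 24 is an arbitrary scratch size chosen for illustration only.
    let mut scratch = MaybeUninit::<[T; 24]>::uninit();
    f(scratch.as_mut_ptr() as *mut T); // `scratch` outlives the call
}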
//! stable sorting implementation.
use cmp;
-use mem;
+use mem::{self, MaybeUninit};
use ptr;
/// When dropped, copies from `src` into `dest`.
let mut block_l = BLOCK;
let mut start_l = ptr::null_mut();
let mut end_l = ptr::null_mut();
- let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
+ let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized();
// The current block on the right side (from `r.sub(block_r)` to `r`).
let mut r = unsafe { l.add(v.len()) };
let mut block_r = BLOCK;
let mut start_r = ptr::null_mut();
let mut end_r = ptr::null_mut();
- let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
+ let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized();
// FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
// than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
if start_l == end_l {
// Trace `block_l` elements from the left side.
- start_l = offsets_l.as_mut_ptr();
- end_l = offsets_l.as_mut_ptr();
+ start_l = unsafe { offsets_l.get_mut().as_mut_ptr() };
+ end_l = unsafe { offsets_l.get_mut().as_mut_ptr() };
let mut elem = l;
for i in 0..block_l {
if start_r == end_r {
// Trace `block_r` elements from the right side.
- start_r = offsets_r.as_mut_ptr();
- end_r = offsets_r.as_mut_ptr();
+ start_r = unsafe { offsets_r.get_mut().as_mut_ptr() };
+ end_r = unsafe { offsets_r.get_mut().as_mut_ptr() };
let mut elem = r;
for i in 0..block_r {
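// [sketch, not part of the patch] Note where the `unsafe` moved: constructing
// a MaybeUninit is safe, and unsafety now sits exactly where the possibly
// uninitialized storage is exposed. With the stabilized API, taking the
// pointer is safe too, and only the first write needs an unsafe block:
use core::mem::MaybeUninit;

const BLOCK: usize = 128;

fn offsets_demo() {
    let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninit(); // safe to create
    let start_l = offsets_l.as_mut_ptr() as *mut u8;          // safe: raw pointer only
    unsafe { start_l.write(0) };                              // unsafe: first actual write
}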