dependencies = [
"gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "libz-sys 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl-sys 0.9.14 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
"libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "libz-sys 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl-sys 0.9.14 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
dependencies = [
"cmake 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)",
- "libz-sys 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)",
"openssl-sys 0.9.14 (registry+https://github.com/rust-lang/crates.io-index)",
"pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "libz-sys"
-version = "1.0.14"
+version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
- "wincolor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
[[package]]
name = "wincolor"
-version = "0.1.3"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"checksum libc 0.2.24 (registry+https://github.com/rust-lang/crates.io-index)" = "38f5c2b18a287cf78b4097db62e20f43cace381dc76ae5c0a3073067f78b7ddc"
"checksum libgit2-sys 0.6.12 (registry+https://github.com/rust-lang/crates.io-index)" = "df18a822100352d9863b302faf6f8f25c0e77f0e60feb40e5dbe1238b7f13b1d"
"checksum libssh2-sys 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0db4ec23611747ef772db1c4d650f8bd762f07b461727ec998f953c614024b75"
-"checksum libz-sys 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "e70195f655a44af531ad7135b1ec2a0a82522b451fe09730fbb25674a85996e7"
+"checksum libz-sys 1.0.16 (registry+https://github.com/rust-lang/crates.io-index)" = "3fdd64ef8ee652185674455c1d450b83cbc8ad895625d543b5324d923f82e4d8"
"checksum log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "880f77541efa6e5cc74e76910c9884d9859683118839d6a1dc3b11e63512565b"
"checksum lzma-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "013fa6506eb7d26040c46dab9ecb7ccb4e2896b5bf24a9d65932501ea9f67af8"
"checksum matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "100aabe6b8ff4e4a7e32c1c13523379802df0772b82466207ac25b013f193376"
"checksum walkdir 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "bb08f9e670fab86099470b97cd2b252d6527f0b3cc1401acdb595ffc9dd288ff"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
-"checksum wincolor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "99c2af1426e2166e6f66d88b09b2a4d63afce06875f149174e386f2f1ee9779b"
+"checksum wincolor 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a39ee4464208f6430992ff20154216ab2357772ac871d994c51628d60e58b8b0"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xattr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "5f04de8a1346489a2f9e9bd8526b73d135ec554227b17568456e86aa35b6f3fc"
"checksum xz2 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e9510bdf100731599107c61f77daf46713a69a568f75458999c1f9dbf6ba25b0"
cmd.arg("-Cprefer-dynamic");
}
- // Pass the `rustbuild` feature flag to crates which rustbuild is
- // building. See the comment in bootstrap/lib.rs where this env var is
- // set for more details.
- if env::var_os("RUSTBUILD_UNSTABLE").is_some() {
- cmd.arg("--cfg").arg("rustbuild");
- }
-
// Help the libc crate compile by assisting it in finding the MUSL
// native libraries.
if let Some(s) = env::var_os("MUSL_ROOT") {
// do that we pass a weird flag to the compiler to get it to do
// so. Note that this is definitely a hack, and we should likely
// flesh out rpath support more fully in the future.
- //
- // FIXME: remove condition after next stage0
- if stage != "0" {
- cmd.arg("-Z").arg("osx-rpath-install-name");
- }
+ cmd.arg("-Z").arg("osx-rpath-install-name");
Some("-Wl,-rpath,@loader_path/../lib")
} else if !target.contains("windows") {
Some("-Wl,-rpath,$ORIGIN/../lib")
// Force all crates compiled by this compiler to (a) be unstable and (b)
// allow the `rustc_private` feature to link to other unstable crates
// also in the sysroot.
- //
- // FIXME: remove condition after next stage0
if env::var_os("RUSTC_FORCE_UNSTABLE").is_some() {
- if stage != "0" {
- cmd.arg("-Z").arg("force-unstable-if-unmarked");
- }
+ cmd.arg("-Z").arg("force-unstable-if-unmarked");
}
}
if self.clean and os.path.exists(build_dir):
shutil.rmtree(build_dir)
env = os.environ.copy()
+ env["RUSTC_BOOTSTRAP"] = '1'
env["CARGO_TARGET_DIR"] = build_dir
env["RUSTC"] = self.rustc()
env["LD_LIBRARY_PATH"] = os.path.join(self.bin_root(), "lib") + \
use Build;
// The version number
-pub const CFG_RELEASE_NUM: &'static str = "1.19.0";
+pub const CFG_RELEASE_NUM: &'static str = "1.20.0";
// An optional number to put after the label, e.g. '.2' -> '-beta.2'
// Be sure to make this starts with a dot to conform to semver pre-release
cmd.arg("test")
.current_dir(build.src.join("src/bootstrap"))
.env("CARGO_TARGET_DIR", build.out.join("bootstrap"))
+ .env("RUSTC_BOOTSTRAP", "1")
.env("RUSTC", &build.rustc);
if build.flags.cmd.no_fail_fast() {
cmd.arg("--no-fail-fast");
pub llvm_static_stdcpp: bool,
pub llvm_link_shared: bool,
pub llvm_targets: Option<String>,
+ pub llvm_experimental_targets: Option<String>,
pub llvm_link_jobs: Option<u32>,
pub llvm_clean_rebuild: bool,
version_check: Option<bool>,
static_libstdcpp: Option<bool>,
targets: Option<String>,
+ experimental_targets: Option<String>,
link_jobs: Option<u32>,
clean_rebuild: Option<bool>,
}
set(&mut config.llvm_static_stdcpp, llvm.static_libstdcpp);
set(&mut config.llvm_clean_rebuild, llvm.clean_rebuild);
config.llvm_targets = llvm.targets.clone();
+ config.llvm_experimental_targets = llvm.experimental_targets.clone();
config.llvm_link_jobs = llvm.link_jobs;
}
# Rust team and file an issue if you need assistance in porting!
#targets = "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon"
+# LLVM experimental targets to build support for. These targets are specified in
+# the same format as above, but since these targets are experimental, they are
+# not built by default and the experimental Rust compilation targets that depend
+# on them will not work unless the user opts in to building them. Possible
+# experimental LLVM targets include WebAssembly for the
+# wasm32-experimental-emscripten Rust target.
+#experimental-targets = ""
+
# Cap the number of parallel linker invocations when compiling LLVM.
# This can be useful when building LLVM with debug info, which significantly
# increases the size of binaries and consequently the memory required by
.env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_libdir(compiler));
}
- // There are two invariants we must maintain:
- // * stable crates cannot depend on unstable crates (general Rust rule),
- // * crates that end up in the sysroot must be unstable (rustbuild rule).
- //
- // In order to do enforce the latter, we pass the env var
- // `RUSTBUILD_UNSTABLE` down the line for any crates which will end up
- // in the sysroot. We read this in bootstrap/bin/rustc.rs and if it is
- // set, then we pass the `rustbuild` feature to rustc when building the
- // the crate.
- //
- // In turn, crates that can be used here should recognise the `rustbuild`
- // feature and opt-in to `rustc_private`.
- //
- // We can't always pass `rustbuild` because crates which are outside of
- // the compiler, libs, and tests are stable and we don't want to make
- // their deps unstable (since this would break the first invariant
- // above).
- //
- // FIXME: remove this after next stage0
- if mode != Mode::Tool && stage == 0 {
- cargo.env("RUSTBUILD_UNSTABLE", "1");
- }
-
// Ignore incremental modes except for stage0, since we're
// not guaranteeing correctness across builds if the compiler
// is changing under your feet.`
None => "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon",
};
+ let llvm_exp_targets = match build.config.llvm_experimental_targets {
+ Some(ref s) => s,
+ None => "",
+ };
+
let assertions = if build.config.llvm_assertions {"ON"} else {"OFF"};
cfg.target(target)
.profile(profile)
.define("LLVM_ENABLE_ASSERTIONS", assertions)
.define("LLVM_TARGETS_TO_BUILD", llvm_targets)
+ .define("LLVM_EXPERIMENTAL_TARGETS_TO_BUILD", llvm_exp_targets)
.define("LLVM_INCLUDE_EXAMPLES", "OFF")
.define("LLVM_INCLUDE_TESTS", "OFF")
.define("LLVM_INCLUDE_DOCS", "OFF")
--- /dev/null
+# `allocator_api`
+
+The tracking issue for this feature is [#32838]
+
+[#32838]: https://github.com/rust-lang/rust/issues/32838
+
+------------------------
+
+Sometimes you want the memory for one collection to use a different
+allocator than the memory for another collection. In this case,
+replacing the global allocator is not a workable option. Instead,
+you need to pass in an instance of an `Alloc` to each collection
+for which you want a custom allocator.
+
+TBD
--- /dev/null
+# `char_error_internals`
+
+This feature is internal to the Rust compiler and is not intended for general use.
+
+------------------------
+++ /dev/null
-# `question_mark_carrier`
-
-The tracking issue for this feature is: [#31436]
-
-[#31436]: https://github.com/rust-lang/rust/issues/31436
-
-------------------------
-
-This feature has been superseded by [`try_trait`][try_trait].
-
-It exists only in stage0 for bootstrapping.
-
-[try_trait]: library-features/try-trait.html
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![unstable(feature = "allocator_api",
+ reason = "the precise API and guarantees it provides may be tweaked \
+ slightly, especially to possibly take into account the \
+ types being stored to make room for a future \
+ tracing garbage collector",
+ issue = "27700")]
+
+use core::cmp;
+use core::fmt;
+use core::mem;
+use core::usize;
+use core::ptr::{self, Unique};
+
+/// Represents the combination of a starting address and
+/// a total capacity of the returned block.
+#[derive(Debug)]
+pub struct Excess(pub *mut u8, pub usize);
+
+fn size_align<T>() -> (usize, usize) {
+ (mem::size_of::<T>(), mem::align_of::<T>())
+}
+
+/// Layout of a block of memory.
+///
+/// An instance of `Layout` describes a particular layout of memory.
+/// You build a `Layout` up as an input to give to an allocator.
+///
+/// All layouts have an associated non-negative size and a
+/// power-of-two alignment.
+///
+/// (Note however that layouts are *not* required to have positive
+/// size, even though many allocators require that all memory
+/// requests have positive size. A caller to the `Alloc::alloc`
+/// method must either ensure that conditions like this are met, or
+/// use specific allocators with looser requirements.)
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Layout {
+ // size of the requested block of memory, measured in bytes.
+ size: usize,
+
+ // alignment of the requested block of memory, measured in bytes.
+    // we ensure that this is always a power-of-two, because APIs
+ // like `posix_memalign` require it and it is a reasonable
+ // constraint to impose on Layout constructors.
+ //
+ // (However, we do not analogously require `align >= sizeof(void*)`,
+ // even though that is *also* a requirement of `posix_memalign`.)
+ align: usize,
+}
+
+
+// FIXME: audit default implementations for overflow errors,
+// (potentially switching to overflowing_add and
+// overflowing_mul as necessary).
+
+impl Layout {
+ /// Constructs a `Layout` from a given `size` and `align`,
+ /// or returns `None` if either of the following conditions
+ /// are not met:
+ ///
+ /// * `align` must be a power of two,
+ ///
+ /// * `size`, when rounded up to the nearest multiple of `align`,
+ /// must not overflow (i.e. the rounded value must be less than
+ /// `usize::MAX`).
+ pub fn from_size_align(size: usize, align: usize) -> Option<Layout> {
+ if !align.is_power_of_two() {
+ return None;
+ }
+
+ // (power-of-two implies align != 0.)
+
+ // Rounded up size is:
+ // size_rounded_up = (size + align - 1) & !(align - 1);
+ //
+ // We know from above that align != 0. If adding (align - 1)
+ // does not overflow, then rounding up will be fine.
+ //
+ // Conversely, &-masking with !(align - 1) will subtract off
+ // only low-order-bits. Thus if overflow occurs with the sum,
+ // the &-mask cannot subtract enough to undo that overflow.
+ //
+ // Above implies that checking for summation overflow is both
+ // necessary and sufficient.
+ if size > usize::MAX - (align - 1) {
+ return None;
+ }
+
+ Some(Layout { size: size, align: align })
+ }
+
+ /// The minimum size in bytes for a memory block of this layout.
+ pub fn size(&self) -> usize { self.size }
+
+ /// The minimum byte alignment for a memory block of this layout.
+ pub fn align(&self) -> usize { self.align }
+
+ /// Constructs a `Layout` suitable for holding a value of type `T`.
+ pub fn new<T>() -> Self {
+ let (size, align) = size_align::<T>();
+ Layout::from_size_align(size, align).unwrap()
+ }
+
+ /// Produces layout describing a record that could be used to
+ /// allocate backing structure for `T` (which could be a trait
+ /// or other unsized type like a slice).
+ pub fn for_value<T: ?Sized>(t: &T) -> Self {
+ let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+ Layout::from_size_align(size, align).unwrap()
+ }
+
+ /// Creates a layout describing the record that can hold a value
+ /// of the same layout as `self`, but that also is aligned to
+ /// alignment `align` (measured in bytes).
+ ///
+ /// If `self` already meets the prescribed alignment, then returns
+ /// `self`.
+ ///
+ /// Note that this method does not add any padding to the overall
+ /// size, regardless of whether the returned layout has a different
+ /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+ /// will *still* have size 16.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the combination of `self.size` and the given `align`
+ /// violates the conditions listed in `from_size_align`.
+ pub fn align_to(&self, align: usize) -> Self {
+ Layout::from_size_align(self.size, cmp::max(self.align, align)).unwrap()
+ }
+
+ /// Returns the amount of padding we must insert after `self`
+ /// to ensure that the following address will satisfy `align`
+ /// (measured in bytes).
+ ///
+ /// E.g. if `self.size` is 9, then `self.padding_needed_for(4)`
+ /// returns 3, because that is the minimum number of bytes of
+ /// padding required to get a 4-aligned address (assuming that the
+ /// corresponding memory block starts at a 4-aligned address).
+ ///
+ /// The return value of this function has no meaning if `align` is
+ /// not a power-of-two.
+ ///
+ /// Note that the utility of the returned value requires `align`
+ /// to be less than or equal to the alignment of the starting
+ /// address for the whole allocated block of memory. One way to
+ /// satisfy this constraint is to ensure `align <= self.align`.
+ pub fn padding_needed_for(&self, align: usize) -> usize {
+ let len = self.size();
+
+ // Rounded up value is:
+ // len_rounded_up = (len + align - 1) & !(align - 1);
+ // and then we return the padding difference: `len_rounded_up - len`.
+ //
+ // We use modular arithmetic throughout:
+ //
+ // 1. align is guaranteed to be > 0, so align - 1 is always
+ // valid.
+ //
+ // 2. `len + align - 1` can overflow by at most `align - 1`,
+    //       so the &-mask with `!(align - 1)` will ensure that in the
+ // case of overflow, `len_rounded_up` will itself be 0.
+ // Thus the returned padding, when added to `len`, yields 0,
+ // which trivially satisfies the alignment `align`.
+ //
+ // (Of course, attempts to allocate blocks of memory whose
+ // size and padding overflow in the above manner should cause
+ // the allocator to yield an error anyway.)
+
+ let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+ return len_rounded_up.wrapping_sub(len);
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with a suitable amount of padding between each to
+ /// ensure that each instance is given its requested size and
+ /// alignment. On success, returns `(k, offs)` where `k` is the
+ /// layout of the array and `offs` is the distance between the start
+ /// of each element in the array.
+ ///
+ /// On arithmetic overflow, returns `None`.
+ pub fn repeat(&self, n: usize) -> Option<(Self, usize)> {
+ let padded_size = match self.size.checked_add(self.padding_needed_for(self.align)) {
+ None => return None,
+ Some(padded_size) => padded_size,
+ };
+ let alloc_size = match padded_size.checked_mul(n) {
+ None => return None,
+ Some(alloc_size) => alloc_size,
+ };
+
+ // We can assume that `self.align` is a power-of-two.
+    // Furthermore, `alloc_size` has already been rounded up
+ // to a multiple of `self.align`; therefore, the call
+ // to `Layout::from_size_align` below should never panic.
+ Some((Layout::from_size_align(alloc_size, self.align).unwrap(), padded_size))
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next`, including any necessary padding to ensure that `next`
+ /// will be properly aligned. Note that the result layout will
+ /// satisfy the alignment properties of both `self` and `next`.
+ ///
+ /// Returns `Some((k, offset))`, where `k` is layout of the concatenated
+ /// record and `offset` is the relative location, in bytes, of the
+    /// start of the `next` embedded within the concatenated record
+ /// (assuming that the record itself starts at offset 0).
+ ///
+ /// On arithmetic overflow, returns `None`.
+ pub fn extend(&self, next: Self) -> Option<(Self, usize)> {
+ let new_align = cmp::max(self.align, next.align);
+ let realigned = match Layout::from_size_align(self.size, new_align) {
+ None => return None,
+ Some(l) => l,
+ };
+
+ let pad = realigned.padding_needed_for(next.align);
+
+ let offset = match self.size.checked_add(pad) {
+ None => return None,
+ Some(offset) => offset,
+ };
+ let new_size = match offset.checked_add(next.size) {
+ None => return None,
+ Some(new_size) => new_size,
+ };
+
+ let layout = match Layout::from_size_align(new_size, new_align) {
+ None => return None,
+ Some(l) => l,
+ };
+ Some((layout, offset))
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with no padding between each instance.
+ ///
+ /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+ /// that the repeated instances of `self` will be properly
+ /// aligned, even if a given instance of `self` is properly
+ /// aligned. In other words, if the layout returned by
+ /// `repeat_packed` is used to allocate an array, it is not
+ /// guaranteed that all elements in the array will be properly
+ /// aligned.
+ ///
+ /// On arithmetic overflow, returns `None`.
+ pub fn repeat_packed(&self, n: usize) -> Option<Self> {
+ let size = match self.size().checked_mul(n) {
+ None => return None,
+ Some(scaled) => scaled,
+ };
+
+ Layout::from_size_align(size, self.align)
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next` with no additional padding between the two. Since no
+ /// padding is inserted, the alignment of `next` is irrelevant,
+    /// and is not incorporated *at all* into the resulting layout.
+ ///
+ /// Returns `(k, offset)`, where `k` is layout of the concatenated
+ /// record and `offset` is the relative location, in bytes, of the
+    /// start of the `next` embedded within the concatenated record
+ /// (assuming that the record itself starts at offset 0).
+ ///
+ /// (The `offset` is always the same as `self.size()`; we use this
+ /// signature out of convenience in matching the signature of
+ /// `extend`.)
+ ///
+ /// On arithmetic overflow, returns `None`.
+ pub fn extend_packed(&self, next: Self) -> Option<(Self, usize)> {
+ let new_size = match self.size().checked_add(next.size()) {
+ None => return None,
+ Some(new_size) => new_size,
+ };
+ let layout = match Layout::from_size_align(new_size, self.align) {
+ None => return None,
+ Some(l) => l,
+ };
+ Some((layout, self.size()))
+ }
+
+ /// Creates a layout describing the record for a `[T; n]`.
+ ///
+ /// On arithmetic overflow, returns `None`.
+ pub fn array<T>(n: usize) -> Option<Self> {
+ Layout::new::<T>()
+ .repeat(n)
+ .map(|(k, offs)| {
+ debug_assert!(offs == mem::size_of::<T>());
+ k
+ })
+ }
+}
+
+/// The `AllocErr` error specifies whether an allocation failure is
+/// specifically due to resource exhaustion or if it is due to
+/// something wrong when combining the given input arguments with this
+/// allocator.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum AllocErr {
+ /// Error due to hitting some resource limit or otherwise running
+ /// out of memory. This condition strongly implies that *some*
+ /// series of deallocations would allow a subsequent reissuing of
+ /// the original allocation request to succeed.
+ Exhausted { request: Layout },
+
+ /// Error due to allocator being fundamentally incapable of
+ /// satisfying the original request. This condition implies that
+ /// such an allocation request will never succeed on the given
+ /// allocator, regardless of environment, memory pressure, or
+ /// other contextual conditions.
+ ///
+ /// For example, an allocator that does not support requests for
+ /// large memory blocks might return this error variant.
+ Unsupported { details: &'static str },
+}
+
+impl AllocErr {
+ pub fn invalid_input(details: &'static str) -> Self {
+ AllocErr::Unsupported { details: details }
+ }
+ pub fn is_memory_exhausted(&self) -> bool {
+ if let AllocErr::Exhausted { .. } = *self { true } else { false }
+ }
+ pub fn is_request_unsupported(&self) -> bool {
+ if let AllocErr::Unsupported { .. } = *self { true } else { false }
+ }
+ pub fn description(&self) -> &str {
+ match *self {
+ AllocErr::Exhausted { .. } => "allocator memory exhausted",
+ AllocErr::Unsupported { .. } => "unsupported allocator request",
+ }
+ }
+}
+
+// (we need this for downstream impl of trait Error)
+impl fmt::Display for AllocErr {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.description())
+ }
+}
+
+/// The `CannotReallocInPlace` error is used when `grow_in_place` or
+/// `shrink_in_place` were unable to reuse the given memory block for
+/// a requested layout.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct CannotReallocInPlace;
+
+impl CannotReallocInPlace {
+ pub fn description(&self) -> &str {
+ "cannot reallocate allocator's memory in place"
+ }
+}
+
+// (we need this for downstream impl of trait Error)
+impl fmt::Display for CannotReallocInPlace {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.description())
+ }
+}
+
+/// An implementation of `Alloc` can allocate, reallocate, and
+/// deallocate arbitrary blocks of data described via `Layout`.
+///
+/// Some of the methods require that a memory block be *currently
+/// allocated* via an allocator. This means that:
+///
+/// * the starting address for that memory block was previously
+/// returned by a previous call to an allocation method (`alloc`,
+/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or
+/// reallocation method (`realloc`, `realloc_excess`, or
+/// `realloc_array`), and
+///
+/// * the memory block has not been subsequently deallocated, where
+/// blocks are deallocated either by being passed to a deallocation
+/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being
+/// passed to a reallocation method (see above) that returns `Ok`.
+///
+/// A note regarding zero-sized types and zero-sized layouts: many
+/// methods in the `Alloc` trait state that allocation requests
+/// must be non-zero size, or else undefined behavior can result.
+///
+/// * However, some higher-level allocation methods (`alloc_one`,
+/// `alloc_array`) are well-defined on zero-sized types and can
+/// optionally support them: it is left up to the implementor
+/// whether to return `Err`, or to return `Ok` with some pointer.
+///
+/// * If an `Alloc` implementation chooses to return `Ok` in this
+/// case (i.e. the pointer denotes a zero-sized inaccessible block)
+/// then that returned pointer must be considered "currently
+/// allocated". On such an allocator, *all* methods that take
+/// currently-allocated pointers as inputs must accept these
+/// zero-sized pointers, *without* causing undefined behavior.
+///
+/// * In other words, if a zero-sized pointer can flow out of an
+/// allocator, then that allocator must likewise accept that pointer
+/// flowing back into its deallocation and reallocation methods.
+///
+/// Some of the methods require that a layout *fit* a memory block.
+/// What it means for a layout to "fit" a memory block means (or
+/// equivalently, for a memory block to "fit" a layout) is that the
+/// following two conditions must hold:
+///
+/// 1. The block's starting address must be aligned to `layout.align()`.
+///
+/// 2. The block's size must fall in the range `[use_min, use_max]`, where:
+///
+/// * `use_min` is `self.usable_size(layout).0`, and
+///
+/// * `use_max` is the capacity that was (or would have been)
+/// returned when (if) the block was allocated via a call to
+/// `alloc_excess` or `realloc_excess`.
+///
+/// Note that:
+///
+/// * the size of the layout most recently used to allocate the block
+/// is guaranteed to be in the range `[use_min, use_max]`, and
+///
+/// * a lower-bound on `use_max` can be safely approximated by a call to
+/// `usable_size`.
+///
+/// * if a layout `k` fits a memory block (denoted by `ptr`)
+/// currently allocated via an allocator `a`, then it is legal to
+/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`.
+pub unsafe trait Alloc {
+
+ // (Note: existing allocators have unspecified but well-defined
+    // behavior in response to a zero size allocation request;
+ // e.g. in C, `malloc` of 0 will either return a null pointer or a
+ // unique pointer, but will not have arbitrary undefined
+ // behavior. Rust should consider revising the alloc::heap crate
+ // to reflect this reality.)
+
+ /// Returns a pointer meeting the size and alignment guarantees of
+ /// `layout`.
+ ///
+ /// If this method returns an `Ok(addr)`, then the `addr` returned
+ /// will be non-null address pointing to a block of storage
+ /// suitable for holding an instance of `layout`.
+ ///
+ /// The returned block of storage may or may not have its contents
+ /// initialized. (Extension subtraits might restrict this
+ /// behavior, e.g. to ensure initialization to particular sets of
+ /// bit patterns.)
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure that `layout` has non-zero size.
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g. guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `layout` does not meet allocator's size or alignment
+ /// constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory
+ /// exhaustion rather than panicking or aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr>;
+
+ /// Deallocate the memory referenced by `ptr`.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must denote a block of memory currently allocated via
+ /// this allocator,
+ ///
+ /// * `layout` must *fit* that block of memory,
+ ///
+ /// * In addition to fitting the block of memory `layout`, the
+ /// alignment of the `layout` must match the alignment used
+ /// to allocate that block of memory.
+ unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout);
+
+ /// Allocator-specific method for signalling an out-of-memory
+ /// condition.
+ ///
+ /// `oom` aborts the thread or process, optionally performing
+ /// cleanup or logging diagnostic information before panicking or
+ /// aborting.
+ ///
+ /// `oom` is meant to be used by clients unable to cope with an
+ /// unsatisfied allocation request (signaled by an error such as
+ /// `AllocErr::Exhausted`), and wish to abandon computation rather
+ /// than attempt to recover locally. Such clients should pass the
+ /// signalling error value back into `oom`, where the allocator
+ /// may incorporate that error value into its diagnostic report
+ /// before aborting.
+ ///
+ /// Implementations of the `oom` method are discouraged from
+ /// infinitely regressing in nested calls to `oom`. In
+ /// practice this means implementors should eschew allocating,
+ /// especially from `self` (directly or indirectly).
+ ///
+    /// Implementations of the allocation and reallocation methods
+ /// (e.g. `alloc`, `alloc_one`, `realloc`) are discouraged from
+ /// panicking (or aborting) in the event of memory exhaustion;
+ /// instead they should return an appropriate error from the
+ /// invoked method, and let the client decide whether to invoke
+ /// this `oom` method in response.
+ fn oom(&mut self, _: AllocErr) -> ! {
+ unsafe { ::core::intrinsics::abort() }
+ }
+
+ // == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS ==
+ // usable_size
+
+ /// Returns bounds on the guaranteed usable size of a successful
+ /// allocation created with the specified `layout`.
+ ///
+ /// In particular, if one has a memory block allocated via a given
+ /// allocator `a` and layout `k` where `a.usable_size(k)` returns
+ /// `(l, u)`, then one can pass that block to `a.dealloc()` with a
+ /// layout in the size range [l, u].
+ ///
+ /// (All implementors of `usable_size` must ensure that
+ /// `l <= k.size() <= u`)
+ ///
+ /// Both the lower- and upper-bounds (`l` and `u` respectively)
+ /// are provided, because an allocator based on size classes could
+ /// misbehave if one attempts to deallocate a block without
+ /// providing a correct value for its size (i.e., one within the
+ /// range `[l, u]`).
+ ///
+ /// Clients who wish to make use of excess capacity are encouraged
+ /// to use the `alloc_excess` and `realloc_excess` instead, as
+ /// this method is constrained to report conservative values that
+ /// serve as valid bounds for *all possible* allocation method
+ /// calls.
+ ///
+ /// However, for clients that do not wish to track the capacity
+ /// returned by `alloc_excess` locally, this method is likely to
+ /// produce useful results.
+ fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+ (layout.size(), layout.size())
+ }
+
+ // == METHODS FOR MEMORY REUSE ==
+ // realloc. alloc_excess, realloc_excess
+
+ /// Returns a pointer suitable for holding data described by
+ /// `new_layout`, meeting its size and alignment guarantees. To
+ /// accomplish this, this may extend or shrink the allocation
+ /// referenced by `ptr` to fit `new_layout`.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block
+ /// referenced by `ptr` has been transferred to this
+ /// allocator. The memory may or may not have been freed, and
+ /// should be considered unusable (unless of course it was
+ /// transferred back to the caller again via the return value of
+ /// this method).
+ ///
+ /// If this method returns `Err`, then ownership of the memory
+ /// block has not been transferred to this allocator, and the
+ /// contents of the memory block are unaltered.
+ ///
+ /// For best results, `new_layout` should not impose a different
+ /// alignment constraint than `layout`. (In other words,
+ /// `new_layout.align()` should equal `layout.align()`.) However,
+ /// behavior is well-defined (though underspecified) when this
+ /// constraint is violated; further discussion below.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * `layout` must *fit* the `ptr` (see above). (The `new_layout`
+ /// argument need not fit it.)
+ ///
+ /// * `new_layout` must have size greater than zero.
+ ///
+ /// * the alignment of `new_layout` is non-zero.
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g. guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` only if `new_layout` does not match the
+ /// alignment of `layout`, or does not meet the allocator's size
+ /// and alignment constraints of the allocator, or if reallocation
+ /// otherwise fails.
+ ///
+ /// (Note the previous sentence did not say "if and only if" -- in
+ /// particular, an implementation of this method *can* return `Ok`
+ /// if `new_layout.align() != old_layout.align()`; or it can
+ /// return `Err` in that scenario, depending on whether this
+ /// allocator can dynamically adjust the alignment constraint for
+ /// the block.)
+ ///
+ /// Implementations are encouraged to return `Err` on memory
+ /// exhaustion rather than panicking or aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to a
+ /// reallocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn realloc(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout) -> Result<*mut u8, AllocErr> {
+ let new_size = new_layout.size();
+ let old_size = layout.size();
+ let aligns_match = layout.align == new_layout.align;
+
+ if new_size >= old_size && aligns_match {
+ if let Ok(()) = self.grow_in_place(ptr, layout.clone(), new_layout.clone()) {
+ return Ok(ptr);
+ }
+ } else if new_size < old_size && aligns_match {
+ if let Ok(()) = self.shrink_in_place(ptr, layout.clone(), new_layout.clone()) {
+ return Ok(ptr);
+ }
+ }
+
+ // otherwise, fall back on alloc + copy + dealloc.
+ let result = self.alloc(new_layout);
+ if let Ok(new_ptr) = result {
+ ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
+ self.dealloc(ptr, layout);
+ }
+ result
+ }
+
+ /// Behaves like `alloc`, but also ensures that the contents
+ /// are set to zero before being returned.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for the same reasons that `alloc` is.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `layout` does not meet allocator's size or alignment
+ /// constraints, just as in `alloc`.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+ let size = layout.size();
+ let p = self.alloc(layout);
+ if let Ok(p) = p {
+ ptr::write_bytes(p, 0, size);
+ }
+ p
+ }
+
+ /// Behaves like `alloc`, but also returns the whole size of
+ /// the returned block. For some `layout` inputs, like arrays, this
+ /// may include extra storage usable for additional data.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for the same reasons that `alloc` is.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `layout` does not meet allocator's size or alignment
+ /// constraints, just as in `alloc`.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
+ let usable_size = self.usable_size(&layout);
+ self.alloc(layout).map(|p| Excess(p, usable_size.1))
+ }
+
+ /// Behaves like `realloc`, but also returns the whole size of
+ /// the returned block. For some `layout` inputs, like arrays, this
+ /// may include extra storage usable for additional data.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe for the same reasons that `realloc` is.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `layout` does not meet allocator's size or alignment
+ /// constraints, just as in `realloc`.
+ ///
+ /// Clients wishing to abort computation in response to a
+ /// reallocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn realloc_excess(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout) -> Result<Excess, AllocErr> {
+ let usable_size = self.usable_size(&new_layout);
+ self.realloc(ptr, layout, new_layout)
+ .map(|p| Excess(p, usable_size.1))
+ }
+
+ /// Attempts to extend the allocation referenced by `ptr` to fit `new_layout`.
+ ///
+ /// If this returns `Ok`, then the allocator has asserted that the
+ /// memory block referenced by `ptr` now fits `new_layout`, and thus can
+ /// be used to carry data of that layout. (The allocator is allowed to
+ /// expend effort to accomplish this, such as extending the memory block to
+ /// include successor blocks, or virtual memory tricks.)
+ ///
+ /// Regardless of what this method returns, ownership of the
+ /// memory block referenced by `ptr` has not been transferred, and
+ /// the contents of the memory block are unaltered.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * `layout` must *fit* the `ptr` (see above); note the
+ /// `new_layout` argument need not fit it,
+ ///
+ /// * `new_layout.size()` must not be less than `layout.size()`,
+ ///
+ /// * `new_layout.align()` must equal `layout.align()`.
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err(CannotReallocInPlace)` when the allocator is
+ /// unable to assert that the memory block referenced by `ptr`
+ /// could fit `layout`.
+ ///
+ /// Note that one cannot pass `CannotReallocInPlace` to the `oom`
+ /// method; clients are expected either to be able to recover from
+ /// `grow_in_place` failures without aborting, or to fall back on
+ /// another reallocation method before resorting to an abort.
+ unsafe fn grow_in_place(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout) -> Result<(), CannotReallocInPlace> {
+ let _ = ptr; // this default implementation doesn't care about the actual address.
+ debug_assert!(new_layout.size >= layout.size);
+ debug_assert!(new_layout.align == layout.align);
+ let (_l, u) = self.usable_size(&layout);
+ // _l <= layout.size() [guaranteed by usable_size()]
+ // layout.size() <= new_layout.size() [required by this method]
+ if new_layout.size <= u {
+ return Ok(());
+ } else {
+ return Err(CannotReallocInPlace);
+ }
+ }
+
+ /// Attempts to shrink the allocation referenced by `ptr` to fit `new_layout`.
+ ///
+ /// If this returns `Ok`, then the allocator has asserted that the
+ /// memory block referenced by `ptr` now fits `new_layout`, and
+ /// thus can only be used to carry data of that smaller
+ /// layout. (The allocator is allowed to take advantage of this,
+ /// carving off portions of the block for reuse elsewhere.) The
+ /// truncated contents of the block within the smaller layout are
+ /// unaltered, and ownership of block has not been transferred.
+ ///
+ /// If this returns `Err`, then the memory block is considered to
+ /// still represent the original (larger) `layout`. None of the
+ /// block has been carved off for reuse elsewhere, ownership of
+ /// the memory block has not been transferred, and the contents of
+ /// the memory block are unaltered.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * `layout` must *fit* the `ptr` (see above); note the
+ /// `new_layout` argument need not fit it,
+ ///
+ /// * `new_layout.size()` must not be greater than `layout.size()`
+ /// (and must be greater than zero),
+ ///
+ /// * `new_layout.align()` must equal `layout.align()`.
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err(CannotReallocInPlace)` when the allocator is
+ /// unable to assert that the memory block referenced by `ptr`
+ /// could fit `layout`.
+ ///
+ /// Note that one cannot pass `CannotReallocInPlace` to the `oom`
+ /// method; clients are expected either to be able to recover from
+ /// `shrink_in_place` failures without aborting, or to fall back
+ /// on another reallocation method before resorting to an abort.
+ unsafe fn shrink_in_place(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout) -> Result<(), CannotReallocInPlace> {
+ let _ = ptr; // this default implementation doesn't care about the actual address.
+ debug_assert!(new_layout.size <= layout.size);
+ debug_assert!(new_layout.align == layout.align);
+ let (l, _u) = self.usable_size(&layout);
+ // layout.size() <= _u [guaranteed by usable_size()]
+ // new_layout.size() <= layout.size() [required by this method]
+ if l <= new_layout.size {
+ return Ok(());
+ } else {
+ return Err(CannotReallocInPlace);
+ }
+ }
+
+
+ // == COMMON USAGE PATTERNS ==
+ // alloc_one, dealloc_one, alloc_array, realloc_array. dealloc_array
+
+ /// Allocates a block suitable for holding an instance of `T`.
+ ///
+ /// Captures a common usage pattern for allocators.
+ ///
+ /// The returned block is suitable for passing to the
+ /// `alloc`/`realloc` methods of this allocator.
+ ///
+ /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+ /// must be considered "currently allocated" and must be
+ /// acceptable input to methods such as `realloc` or `dealloc`,
+ /// *even if* `T` is a zero-sized type. In other words, if your
+ /// `Alloc` implementation overrides this method in a manner
+ /// that can return a zero-sized `ptr`, then all reallocation and
+ /// deallocation methods need to be similarly overridden to accept
+ /// such values as input.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `T` does not meet allocator's size or alignment constraints.
+ ///
+ /// For zero-sized `T`, may return either of `Ok` or `Err`, but
+ /// will *not* yield undefined behavior.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ fn alloc_one<T>(&mut self) -> Result<Unique<T>, AllocErr>
+ where Self: Sized
+ {
+ let k = Layout::new::<T>();
+ if k.size() > 0 {
+ unsafe { self.alloc(k).map(|p|Unique::new(p as *mut T)) }
+ } else {
+ Err(AllocErr::invalid_input("zero-sized type invalid for alloc_one"))
+ }
+ }
+
+ /// Deallocates a block suitable for holding an instance of `T`.
+ ///
+ /// The given block must have been produced by this allocator,
+ /// and must be suitable for storing a `T` (in terms of alignment
+ /// as well as minimum and maximum size); otherwise yields
+ /// undefined behavior.
+ ///
+ /// Captures a common usage pattern for allocators.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure both:
+ ///
+ /// * `ptr` must denote a block of memory currently allocated via this allocator
+ ///
+ /// * the layout of `T` must *fit* that block of memory.
+ unsafe fn dealloc_one<T>(&mut self, ptr: Unique<T>)
+ where Self: Sized
+ {
+ let raw_ptr = ptr.as_ptr() as *mut u8;
+ let k = Layout::new::<T>();
+ if k.size() > 0 {
+ self.dealloc(raw_ptr, k);
+ }
+ }
+
+ /// Allocates a block suitable for holding `n` instances of `T`.
+ ///
+ /// Captures a common usage pattern for allocators.
+ ///
+ /// The returned block is suitable for passing to the
+ /// `alloc`/`realloc` methods of this allocator.
+ ///
+ /// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
+ /// must be considered "currently allocated" and must be
+ /// acceptable input to methods such as `realloc` or `dealloc`,
+ /// *even if* `T` is a zero-sized type. In other words, if your
+ /// `Alloc` implementation overrides this method in a manner
+ /// that can return a zero-sized `ptr`, then all reallocation and
+ /// deallocation methods need to be similarly overridden to accept
+ /// such values as input.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `[T; n]` does not meet allocator's size or alignment
+ /// constraints.
+ ///
+ /// For zero-sized `T` or `n == 0`, may return either of `Ok` or
+ /// `Err`, but will *not* yield undefined behavior.
+ ///
+ /// Always returns `Err` on arithmetic overflow.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ fn alloc_array<T>(&mut self, n: usize) -> Result<Unique<T>, AllocErr>
+ where Self: Sized
+ {
+ match Layout::array::<T>(n) {
+ Some(ref layout) if layout.size() > 0 => {
+ unsafe {
+ self.alloc(layout.clone())
+ .map(|p| {
+ Unique::new(p as *mut T)
+ })
+ }
+ }
+ _ => Err(AllocErr::invalid_input("invalid layout for alloc_array")),
+ }
+ }
+
+ /// Reallocates a block previously suitable for holding `n_old`
+ /// instances of `T`, returning a block suitable for holding
+ /// `n_new` instances of `T`.
+ ///
+ /// Captures a common usage pattern for allocators.
+ ///
+ /// The returned block is suitable for passing to the
+ /// `alloc`/`realloc` methods of this allocator.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * the layout of `[T; n_old]` must *fit* that block of memory.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or
+ /// `[T; n_new]` does not meet allocator's size or alignment
+ /// constraints.
+ ///
+ /// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or
+ /// `Err`, but will *not* yield undefined behavior.
+ ///
+ /// Always returns `Err` on arithmetic overflow.
+ ///
+ /// Clients wishing to abort computation in response to a
+ /// reallocation error are encouraged to call the allocator's `oom`
+ /// method, rather than directly invoking `panic!` or similar.
+ unsafe fn realloc_array<T>(&mut self,
+ ptr: Unique<T>,
+ n_old: usize,
+ n_new: usize) -> Result<Unique<T>, AllocErr>
+ where Self: Sized
+ {
+ match (Layout::array::<T>(n_old), Layout::array::<T>(n_new), ptr.as_ptr()) {
+ (Some(ref k_old), Some(ref k_new), ptr) if k_old.size() > 0 && k_new.size() > 0 => {
+ self.realloc(ptr as *mut u8, k_old.clone(), k_new.clone())
+ .map(|p|Unique::new(p as *mut T))
+ }
+ _ => {
+ Err(AllocErr::invalid_input("invalid layout for realloc_array"))
+ }
+ }
+ }
+
+ /// Deallocates a block suitable for holding `n` instances of `T`.
+ ///
+ /// Captures a common usage pattern for allocators.
+ ///
+ /// # Unsafety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure both:
+ ///
+ /// * `ptr` must denote a block of memory currently allocated via this allocator
+ ///
+ /// * the layout of `[T; n]` must *fit* that block of memory.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either `[T; n]` or the given
+ /// memory block does not meet allocator's size or alignment
+ /// constraints.
+ ///
+ /// Always returns `Err` on arithmetic overflow.
+ unsafe fn dealloc_array<T>(&mut self, ptr: Unique<T>, n: usize) -> Result<(), AllocErr>
+ where Self: Sized
+ {
+ let raw_ptr = ptr.as_ptr() as *mut u8;
+ match Layout::array::<T>(n) {
+ Some(ref k) if k.size() > 0 => {
+ Ok(self.dealloc(raw_ptr, k.clone()))
+ }
+ _ => {
+ Err(AllocErr::invalid_input("invalid layout for dealloc_array"))
+ }
+ }
+ }
+}
tracing garbage collector",
issue = "27700")]
-use core::{isize, usize};
+use allocator::{Alloc, AllocErr, CannotReallocInPlace, Layout};
+use core::{isize, usize, cmp, ptr};
use core::intrinsics::{min_align_of_val, size_of_val};
#[allow(improper_ctypes)]
align);
}
+#[derive(Copy, Clone, Default, Debug)]
+pub struct HeapAlloc;
+
+unsafe impl Alloc for HeapAlloc {
+ unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+ let addr = allocate(layout.size(), layout.align());
+ if addr.is_null() {
+ Err(AllocErr::Exhausted { request: layout })
+ } else {
+ Ok(addr)
+ }
+ }
+
+ unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+ let addr = allocate_zeroed(layout.size(), layout.align());
+ if addr.is_null() {
+ Err(AllocErr::Exhausted { request: layout })
+ } else {
+ Ok(addr)
+ }
+ }
+
+ unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+ deallocate(ptr, layout.size(), layout.align());
+ }
+
+ fn usable_size(&self, layout: &Layout) -> (usize, usize) {
+ (layout.size(), usable_size(layout.size(), layout.align()))
+ }
+
+ unsafe fn realloc(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout)
+ -> Result<*mut u8, AllocErr>
+ {
+ let old_size = layout.size();
+ let new_size = new_layout.size();
+ if layout.align() == new_layout.align() {
+ let new_ptr = reallocate(ptr, old_size, new_size, layout.align());
+ if new_ptr.is_null() {
+ // We assume `reallocate` already tried alloc + copy +
+ // dealloc fallback; thus pointless to repeat effort
+ Err(AllocErr::Exhausted { request: new_layout })
+ } else {
+ Ok(new_ptr)
+ }
+ } else {
+ // if alignments don't match, fall back on alloc + copy + dealloc
+ let result = self.alloc(new_layout);
+ if let Ok(new_ptr) = result {
+ ptr::copy_nonoverlapping(ptr as *const u8, new_ptr, cmp::min(old_size, new_size));
+ self.dealloc(ptr, layout);
+ }
+ result
+ }
+ }
+
+ unsafe fn grow_in_place(&mut self,
+ ptr: *mut u8,
+ layout: Layout,
+ new_layout: Layout)
+ -> Result<(), CannotReallocInPlace>
+ {
+ // grow_in_place spec requires this, and the spec for reallocate_inplace
+ // makes it hard to detect failure if it does not hold.
+ debug_assert!(new_layout.size() >= layout.size());
+
+ if layout.align() != new_layout.align() { // reallocate_inplace requires this.
+ return Err(CannotReallocInPlace);
+ }
+ let usable = reallocate_inplace(ptr, layout.size(), new_layout.size(), layout.align());
+ if usable >= new_layout.size() { Ok(()) } else { Err(CannotReallocInPlace) }
+ }
+}
+
// FIXME: #13996: mark the `allocate` and `reallocate` return value as `noalias`
/// Return a pointer to `size` bytes of memory aligned to `align`.
#[macro_use]
mod macros;
+// Allocator trait and helper struct definitions
+
+pub mod allocator;
+
// Heaps provided for low-level allocation strategies
pub mod heap;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use core::ptr::Unique;
+use allocator::{Alloc, Layout};
+use core::ptr::{self, Unique};
use core::mem;
use core::slice;
-use heap;
-use super::oom;
+use heap::{HeapAlloc};
use super::boxed::Box;
use core::ops::Drop;
use core::cmp;
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
-pub struct RawVec<T> {
+pub struct RawVec<T, A: Alloc = HeapAlloc> {
ptr: Unique<T>,
cap: usize,
+ a: A,
}
-impl<T> RawVec<T> {
- /// Creates the biggest possible RawVec without allocating. If T has positive
- /// size, then this makes a RawVec with capacity 0. If T has 0 size, then it
- /// it makes a RawVec with capacity `usize::MAX`. Useful for implementing
- /// delayed allocation.
- pub fn new() -> Self {
+impl<T, A: Alloc> RawVec<T, A> {
+ /// Like `new` but parameterized over the choice of allocator for
+ /// the returned RawVec.
+ pub fn new_in(a: A) -> Self {
// !0 is usize::MAX. This branch should be stripped at compile time.
let cap = if mem::size_of::<T>() == 0 { !0 } else { 0 };
RawVec {
ptr: Unique::empty(),
cap: cap,
+ a: a,
}
}
- /// Creates a RawVec with exactly the capacity and alignment requirements
- /// for a `[T; cap]`. This is equivalent to calling RawVec::new when `cap` is 0
- /// or T is zero-sized. Note that if `T` is zero-sized this means you will *not*
- /// get a RawVec with the requested capacity!
- ///
- /// # Panics
- ///
- /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
- /// * Panics on 32-bit platforms if the requested capacity exceeds
- /// `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM
+ /// Like `with_capacity` but parameterized over the choice of
+ /// allocator for the returned RawVec.
#[inline]
- pub fn with_capacity(cap: usize) -> Self {
- RawVec::allocate(cap, false)
+ pub fn with_capacity_in(cap: usize, a: A) -> Self {
+ RawVec::allocate_in(cap, false, a)
}
- /// Like `with_capacity` but guarantees the buffer is zeroed.
+ /// Like `with_capacity_zeroed` but parameterized over the choice
+ /// of allocator for the returned RawVec.
#[inline]
- pub fn with_capacity_zeroed(cap: usize) -> Self {
- RawVec::allocate(cap, true)
+ pub fn with_capacity_zeroed_in(cap: usize, a: A) -> Self {
+ RawVec::allocate_in(cap, true, a)
}
- fn allocate(cap: usize, zeroed: bool) -> Self {
+ fn allocate_in(cap: usize, zeroed: bool, mut a: A) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
mem::align_of::<T>() as *mut u8
} else {
let align = mem::align_of::<T>();
- let ptr = if zeroed {
- heap::allocate_zeroed(alloc_size, align)
+ let result = if zeroed {
+ a.alloc_zeroed(Layout::from_size_align(alloc_size, align).unwrap())
} else {
- heap::allocate(alloc_size, align)
+ a.alloc(Layout::from_size_align(alloc_size, align).unwrap())
};
- if ptr.is_null() {
- oom()
+ match result {
+ Ok(ptr) => ptr,
+ Err(err) => a.oom(err),
}
- ptr
};
RawVec {
ptr: Unique::new(ptr as *mut _),
cap: cap,
+ a: a,
}
}
}
+}
+
+impl<T> RawVec<T, HeapAlloc> {
+ /// Creates the biggest possible RawVec (on the system heap)
+ /// without allocating. If T has positive size, then this makes a
+ /// RawVec with capacity 0. If T has 0 size, then it it makes a
+ /// RawVec with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ pub fn new() -> Self {
+ Self::new_in(HeapAlloc)
+ }
+
+ /// Creates a RawVec (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; cap]`. This is
+ /// equivalent to calling RawVec::new when `cap` is 0 or T is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a RawVec with the requested capacity!
+ ///
+ /// # Panics
+ ///
+ /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+ /// * Panics on 32-bit platforms if the requested capacity exceeds
+ /// `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM
+ #[inline]
+ pub fn with_capacity(cap: usize) -> Self {
+ RawVec::allocate_in(cap, false, HeapAlloc)
+ }
+
+ /// Like `with_capacity` but guarantees the buffer is zeroed.
+ #[inline]
+ pub fn with_capacity_zeroed(cap: usize) -> Self {
+ RawVec::allocate_in(cap, true, HeapAlloc)
+ }
+}
+
+impl<T, A: Alloc> RawVec<T, A> {
+ /// Reconstitutes a RawVec from a pointer, capacity, and allocator.
+ ///
+ /// # Undefined Behavior
+ ///
+ /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The
+ /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
+ /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed.
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: A) -> Self {
+ RawVec {
+ ptr: Unique::new(ptr),
+ cap: cap,
+ a: a,
+ }
+ }
+}
- /// Reconstitutes a RawVec from a pointer and capacity.
+impl<T> RawVec<T, HeapAlloc> {
+ /// Reconstitutes a RawVec from a pointer, capacity.
///
/// # Undefined Behavior
///
- /// The ptr must be allocated, and with the given capacity. The
+ /// The ptr must be allocated (on the system heap), and with the given capacity. The
/// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems).
/// If the ptr and capacity come from a RawVec, then this is guaranteed.
pub unsafe fn from_raw_parts(ptr: *mut T, cap: usize) -> Self {
RawVec {
ptr: Unique::new(ptr),
cap: cap,
+ a: HeapAlloc,
}
}
}
}
-impl<T> RawVec<T> {
+impl<T, A: Alloc> RawVec<T, A> {
/// Gets a raw pointer to the start of the allocation. Note that this is
/// Unique::empty() if `cap = 0` or T is zero-sized. In the former case, you must
/// be careful.
}
}
+ /// Returns a shared reference to the allocator backing this RawVec.
+ pub fn alloc(&self) -> &A {
+ &self.a
+ }
+
+ /// Returns a mutable reference to the allocator backing this RawVec.
+ pub fn alloc_mut(&mut self) -> &mut A {
+ &mut self.a
+ }
+
/// Doubles the size of the type's backing allocation. This is common enough
/// to want to do that it's easiest to just have a dedicated method. Slightly
/// more efficient logic can be provided for this than the general case.
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
- let align = mem::align_of::<T>();
-
- let (new_cap, ptr) = if self.cap == 0 {
+ let (new_cap, ptr_res) = if self.cap == 0 {
// skip to 4 because tiny Vec's are dumb; but not if that would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
- let ptr = heap::allocate(new_cap * elem_size, align);
- (new_cap, ptr)
+ let ptr_res = self.a.alloc_array::<T>(new_cap);
+ (new_cap, ptr_res)
} else {
// Since we guarantee that we never allocate more than isize::MAX bytes,
// `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow
let new_cap = 2 * self.cap;
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
- let ptr = heap::reallocate(self.ptr() as *mut _,
- self.cap * elem_size,
- new_alloc_size,
- align);
- (new_cap, ptr)
+ let ptr_res = self.a.realloc_array(self.ptr, self.cap, new_cap);
+ (new_cap, ptr_res)
};
// If allocate or reallocate fail, we'll get `null` back
- if ptr.is_null() {
- oom()
- }
+ let uniq = match ptr_res {
+ Err(err) => self.a.oom(err),
+ Ok(uniq) => uniq,
+ };
- self.ptr = Unique::new(ptr as *mut _);
+ self.ptr = uniq;
self.cap = new_cap;
}
}
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
- let align = mem::align_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
let new_alloc_size = new_cap * elem_size;
alloc_guard(new_alloc_size);
- let size = heap::reallocate_inplace(self.ptr() as *mut _,
- self.cap * elem_size,
- new_alloc_size,
- align);
- if size >= new_alloc_size {
- // We can't directly divide `size`.
- self.cap = new_cap;
+
+ let ptr = self.ptr() as *mut _;
+ let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+ let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
+ match self.a.grow_in_place(ptr, old_layout, new_layout) {
+ Ok(_) => {
+ // We can't directly divide `size`.
+ self.cap = new_cap;
+ true
+ }
+ Err(_) => {
+ false
+ }
}
- size >= new_alloc_size
}
}
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
let elem_size = mem::size_of::<T>();
- let align = mem::align_of::<T>();
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
let new_alloc_size = new_cap.checked_mul(elem_size).expect("capacity overflow");
alloc_guard(new_alloc_size);
- let ptr = if self.cap == 0 {
- heap::allocate(new_alloc_size, align)
+ let result = if self.cap == 0 {
+ self.a.alloc_array::<T>(new_cap)
} else {
- heap::reallocate(self.ptr() as *mut _,
- self.cap * elem_size,
- new_alloc_size,
- align)
+ self.a.realloc_array(self.ptr, self.cap, new_cap)
};
// If allocate or reallocate fail, we'll get `null` back
- if ptr.is_null() {
- oom()
- }
+ let uniq = match result {
+ Err(err) => self.a.oom(err),
+ Ok(uniq) => uniq,
+ };
- self.ptr = Unique::new(ptr as *mut _);
+ self.ptr = uniq;
self.cap = new_cap;
}
}
/// ```
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
unsafe {
- let elem_size = mem::size_of::<T>();
- let align = mem::align_of::<T>();
-
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
- let ptr = if self.cap == 0 {
- heap::allocate(new_alloc_size, align)
+ let result = if self.cap == 0 {
+ self.a.alloc_array::<T>(new_cap)
} else {
- heap::reallocate(self.ptr() as *mut _,
- self.cap * elem_size,
- new_alloc_size,
- align)
+ self.a.realloc_array(self.ptr, self.cap, new_cap)
};
- // If allocate or reallocate fail, we'll get `null` back
- if ptr.is_null() {
- oom()
- }
+ let uniq = match result {
+ Err(err) => self.a.oom(err),
+ Ok(uniq) => uniq,
+ };
- self.ptr = Unique::new(ptr as *mut _);
+ self.ptr = uniq;
self.cap = new_cap;
}
}
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
unsafe {
- let elem_size = mem::size_of::<T>();
- let align = mem::align_of::<T>();
-
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
return false;
}
- let (_, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
+ let (new_cap, new_alloc_size) = self.amortized_new_size(used_cap, needed_extra_cap);
// FIXME: may crash and burn on over-reserve
alloc_guard(new_alloc_size);
- let size = heap::reallocate_inplace(self.ptr() as *mut _,
- self.cap * elem_size,
- new_alloc_size,
- align);
- if size >= new_alloc_size {
- self.cap = new_alloc_size / elem_size;
+ // Here, `cap < used_cap + needed_extra_cap <= new_cap`
+ // (regardless of whether `self.cap - used_cap` wrapped).
+ // Therefore we can safely call grow_in_place.
+
+ let ptr = self.ptr() as *mut _;
+ let old_layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+ let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
+ match self.a.grow_in_place(ptr, old_layout, new_layout) {
+ Ok(_) => {
+ self.cap = new_cap;
+ true
+ }
+ Err(_) => {
+ false
+ }
}
- size >= new_alloc_size
}
}
/// Aborts on OOM.
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
- let align = mem::align_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
- mem::replace(self, RawVec::new());
+ // We want to create a new zero-length vector within the
+ // same allocator. We use ptr::write to avoid an
+ // erroneous attempt to drop the contents, and we use
+ // ptr::read to sidestep the restriction against destructuring
+ // types that implement Drop.
+
+ unsafe {
+ let a = ptr::read(&self.a as *const A);
+ self.dealloc_buffer();
+ ptr::write(self, RawVec::new_in(a));
+ }
} else if self.cap != amount {
unsafe {
- // Overflow check is unnecessary as the vector is already at
- // least this large.
- let ptr = heap::reallocate(self.ptr() as *mut _,
- self.cap * elem_size,
- amount * elem_size,
- align);
- if ptr.is_null() {
- oom()
+ match self.a.realloc_array(self.ptr, self.cap, amount) {
+ Err(err) => self.a.oom(err),
+ Ok(uniq) => self.ptr = uniq,
}
- self.ptr = Unique::new(ptr as *mut _);
}
self.cap = amount;
}
}
+}
+impl<T> RawVec<T, HeapAlloc> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// While it is not *strictly* Undefined Behavior to call
}
}
-unsafe impl<#[may_dangle] T> Drop for RawVec<T> {
+impl<T, A: Alloc> RawVec<T, A> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
- fn drop(&mut self) {
+ pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 && self.cap != 0 {
- let align = mem::align_of::<T>();
-
- let num_bytes = elem_size * self.cap;
- unsafe {
- heap::deallocate(self.ptr() as *mut u8, num_bytes, align);
- }
+ let ptr = self.ptr() as *mut u8;
+ let layout = Layout::new::<T>().repeat(self.cap).unwrap().0;
+ self.a.dealloc(ptr, layout);
}
}
}
+unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec<T, A> {
+ /// Frees the memory owned by the RawVec *without* trying to Drop its contents.
+ fn drop(&mut self) {
+ unsafe { self.dealloc_buffer(); }
+ }
+}
+
// We need to guarantee the following:
mod tests {
use super::*;
+ #[test]
+ fn allocator_param() {
+ use allocator::{Alloc, AllocErr};
+
+ // Writing a test of integration between third-party
+ // allocators and RawVec is a little tricky because the RawVec
+ // API does not expose fallible allocation methods, so we
+ // cannot check what happens when allocator is exhausted
+ // (beyond detecting a panic).
+ //
+ // Instead, this just checks that the RawVec methods do at
+ // least go through the Allocator API when it reserves
+ // storage.
+
+ // A dumb allocator that consumes a fixed amount of fuel
+ // before allocation attempts start failing.
+ struct BoundedAlloc { fuel: usize }
+ unsafe impl Alloc for BoundedAlloc {
+ unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
+ let size = layout.size();
+ if size > self.fuel {
+ return Err(AllocErr::Unsupported { details: "fuel exhausted" });
+ }
+ match HeapAlloc.alloc(layout) {
+ ok @ Ok(_) => { self.fuel -= size; ok }
+ err @ Err(_) => err,
+ }
+ }
+ unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
+ HeapAlloc.dealloc(ptr, layout)
+ }
+ }
+
+ let a = BoundedAlloc { fuel: 500 };
+ let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
+ assert_eq!(v.a.fuel, 450);
+ v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
+ assert_eq!(v.a.fuel, 250);
+ }
+
#[test]
fn reserve_does_not_overallocate() {
{
}
}
+
}
}
}
-#[stable(feature = "box_from_vec", since = "1.18.0")]
-impl<T> Into<Box<[T]>> for Vec<T> {
- fn into(self) -> Box<[T]> {
- self.into_boxed_slice()
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "box_from_vec", since = "1.20.0")]
+impl<T> From<Vec<T>> for Box<[T]> {
+ fn from(v: Vec<T>) -> Box<[T]> {
+ v.into_boxed_slice()
}
}
make.current_dir(&native.out_dir)
.arg("build_lib_static");
+ // These are intended for mingw32-make which we don't use
+ if cfg!(windows) {
+ make.env_remove("MAKEFLAGS").env_remove("MFLAGS");
+ }
+
// mingw make seems... buggy? unclear...
if !host.contains("windows") {
make.arg("-j")
//! objects of a single type.
#![crate_name = "arena"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
#![feature(dropck_eyepatch)]
#![feature(generic_param_attrs)]
#![feature(needs_drop)]
-#![cfg_attr(stage0, feature(staged_api))]
#![cfg_attr(test, feature(test))]
#![allow(deprecated)]
use convert::TryFrom;
use fmt::{self, Write};
use slice;
-use str::from_utf8_unchecked_mut;
+use str::{from_utf8_unchecked_mut, FromStr};
use iter::FusedIterator;
use mem::transmute;
}
}
+
+/// An error which can be returned when parsing a char.
+#[stable(feature = "char_from_str", since = "1.19.0")]
+#[derive(Clone, Debug)]
+pub struct ParseCharError {
+ kind: CharErrorKind,
+}
+
+impl ParseCharError {
+ #[unstable(feature = "char_error_internals",
+ reason = "this method should not be available publicly",
+ issue = "0")]
+ #[doc(hidden)]
+ pub fn __description(&self) -> &str {
+ match self.kind {
+ CharErrorKind::EmptyString => {
+ "cannot parse char from empty string"
+ },
+ CharErrorKind::TooManyChars => "too many characters in string"
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum CharErrorKind {
+ EmptyString,
+ TooManyChars,
+}
+
+#[stable(feature = "char_from_str", since = "1.19.0")]
+impl fmt::Display for ParseCharError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ self.__description().fmt(f)
+ }
+}
+
+
+#[stable(feature = "char_from_str", since = "1.19.0")]
+impl FromStr for char {
+ type Err = ParseCharError;
+
+ #[inline]
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ let mut chars = s.chars();
+ match (chars.next(), chars.next()) {
+ (None, _) => {
+ Err(ParseCharError { kind: CharErrorKind::EmptyString })
+ },
+ (Some(c), None) => Ok(c),
+ _ => {
+ Err(ParseCharError { kind: CharErrorKind::TooManyChars })
+ }
+ }
+ }
+}
+
+
#[unstable(feature = "try_from", issue = "33417")]
impl TryFrom<u32> for char {
type Error = CharTryFromError;
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache
- #[cfg(not(stage0))]
pub fn prefetch_read_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a noop.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache
- #[cfg(not(stage0))]
pub fn prefetch_write_data<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a noop.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache
- #[cfg(not(stage0))]
pub fn prefetch_read_instruction<T>(data: *const T, locality: i32);
/// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction
/// if supported; otherwise, it is a noop.
///
/// The `locality` argument must be a constant integer and is a temporal locality specifier
/// ranging from (0) - no locality, to (3) - extremely local keep in cache
- #[cfg(not(stage0))]
pub fn prefetch_write_instruction<T>(data: *const T, locality: i32);
}
-// Empty bootstrap implementations for stage0 compilation
-#[cfg(stage0)]
-pub fn prefetch_read_data<T>(_data: *const T, _locality: i32) { /* EMPTY */ }
-#[cfg(stage0)]
-pub fn prefetch_write_data<T>(_data: *const T, _locality: i32) { /* EMPTY */ }
-#[cfg(stage0)]
-pub fn prefetch_read_instruction<T>(_data: *const T, _locality: i32) { /* EMPTY */ }
-#[cfg(stage0)]
-pub fn prefetch_write_instruction<T>(_data: *const T, _locality: i32) { /* EMPTY */ }
-
extern "rust-intrinsic" {
pub fn atomic_fence();
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
pub use self::range::{RangeInclusive, RangeToInclusive};
-#[unstable(feature = "question_mark_carrier", issue = "31436")]
-#[cfg(stage0)]
-pub use self::try::Carrier;
#[unstable(feature = "try_trait", issue = "42327")]
pub use self::try::Try;
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-/// This trait has been superseded by the `Try` trait, but must remain
-/// here as `?` is still lowered to it in stage0 .
-#[cfg(stage0)]
-#[unstable(feature = "question_mark_carrier", issue = "31436")]
-pub trait Carrier {
- /// The type of the value when computation succeeds.
- type Success;
- /// The type of the value when computation errors out.
- type Error;
-
- /// Create a `Carrier` from a success value.
- fn from_success(_: Self::Success) -> Self;
-
- /// Create a `Carrier` from an error value.
- fn from_error(_: Self::Error) -> Self;
-
- /// Translate this `Carrier` to another implementation of `Carrier` with the
- /// same associated types.
- fn translate<T>(self) -> T where T: Carrier<Success=Self::Success, Error=Self::Error>;
-}
-
-#[cfg(stage0)]
-#[unstable(feature = "question_mark_carrier", issue = "31436")]
-impl<U, V> Carrier for Result<U, V> {
- type Success = U;
- type Error = V;
-
- fn from_success(u: U) -> Result<U, V> {
- Ok(u)
- }
-
- fn from_error(e: V) -> Result<U, V> {
- Err(e)
- }
-
- fn translate<T>(self) -> T
- where T: Carrier<Success=U, Error=V>
- {
- match self {
- Ok(u) => T::from_success(u),
- Err(e) => T::from_error(e),
- }
- }
-}
-
-struct _DummyErrorType;
-
-impl Try for _DummyErrorType {
- type Ok = ();
- type Error = ();
-
- fn into_result(self) -> Result<Self::Ok, Self::Error> {
- Ok(())
- }
-
- fn from_ok(_: ()) -> _DummyErrorType {
- _DummyErrorType
- }
-
- fn from_error(_: ()) -> _DummyErrorType {
- _DummyErrorType
- }
-}
-
/// A trait for customizing the behaviour of the `?` operator.
///
/// A type implementing `Try` is one that has a canonical way to view it
}
}
-#[cfg(stage0)] // The bootstrap compiler has a different `...` desugar
-fn inclusive(start: usize, end: usize) -> ops::RangeInclusive<usize> {
- ops::RangeInclusive { start, end }
-}
-#[cfg(not(stage0))]
-fn inclusive(start: usize, end: usize) -> ops::RangeInclusive<usize> {
- start...end
-}
-
#[unstable(feature = "inclusive_range", reason = "recently added, follows RFC", issue = "28237")]
impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
type Output = [T];
#[inline]
fn get(self, slice: &[T]) -> Option<&[T]> {
- inclusive(0, self.end).get(slice)
+ (0...self.end).get(slice)
}
#[inline]
fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
- inclusive(0, self.end).get_mut(slice)
+ (0...self.end).get_mut(slice)
}
#[inline]
unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
- inclusive(0, self.end).get_unchecked(slice)
+ (0...self.end).get_unchecked(slice)
}
#[inline]
unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
- inclusive(0, self.end).get_unchecked_mut(slice)
+ (0...self.end).get_unchecked_mut(slice)
}
#[inline]
fn index(self, slice: &[T]) -> &[T] {
- inclusive(0, self.end).index(slice)
+ (0...self.end).index(slice)
}
#[inline]
fn index_mut(self, slice: &mut [T]) -> &mut [T] {
- inclusive(0, self.end).index_mut(slice)
+ (0...self.end).index_mut(slice)
}
}
use std::{char,str};
use std::convert::TryFrom;
+use std::str::FromStr;
#[test]
fn test_convert() {
assert!(char::try_from(0xFFFF_FFFF_u32).is_err());
}
+#[test]
+fn test_from_str() {
+ assert_eq!(char::from_str("a").unwrap(), 'a');
+ assert_eq!(char::try_from("a").unwrap(), 'a');
+ assert_eq!(char::from_str("\0").unwrap(), '\0');
+ assert_eq!(char::from_str("\u{D7FF}").unwrap(), '\u{d7FF}');
+ assert!(char::from_str("").is_err());
+ assert!(char::from_str("abc").is_err());
+}
+
#[test]
fn test_is_lowercase() {
assert!('a'.is_lowercase());
//! [mz]: https://code.google.com/p/miniz/
#![crate_name = "flate"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
#![deny(warnings)]
#![feature(libc)]
-#![cfg_attr(stage0, feature(staged_api))]
#![feature(unique)]
#![cfg_attr(test, feature(rand))]
//! generated instead.
#![crate_name = "fmt_macros"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
test(attr(deny(warnings))))]
#![deny(warnings)]
-#![cfg_attr(stage0, feature(staged_api))]
-#![feature(rustc_private)]
-
pub use self::Piece::*;
pub use self::Position::*;
pub use self::Alignment::*;
//! ```
#![crate_name = "getopts"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private",
- reason = "use the crates.io `getopts` library instead",
- issue = "27812"))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
#![deny(missing_docs)]
#![deny(warnings)]
-#![cfg_attr(stage0, feature(staged_api))]
use self::Name::*;
use self::HasArg::*;
//! * [DOT language](http://www.graphviz.org/doc/info/lang.html)
#![crate_name = "graphviz"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(staged_api))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
//! }
//! ```
#![crate_name = "proc_macro_plugin"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
#![feature(plugin_registrar)]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
html_root_url = "https://doc.rust-lang.org/nightly/")]
#![deny(warnings)]
-#![cfg_attr(stage0, feature(staged_api))]
#![feature(rustc_diagnostic_macros)]
-#![cfg_attr(stage0, feature(rustc_private))]
extern crate rustc_plugin;
extern crate syntax;
#![feature(sort_unstable)]
#![feature(trace_macros)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-#![cfg_attr(stage0, feature(loop_break_value))]
-
#![recursion_limit="256"]
extern crate arena;
#![feature(rand)]
#![cfg_attr(test, feature(rand))]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate syntax;
extern crate libc;
extern crate serialize;
("le32-unknown-nacl", le32_unknown_nacl),
("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
+ ("wasm32-experimental-emscripten", wasm32_experimental_emscripten),
("thumbv6m-none-eabi", thumbv6m_none_eabi),
("thumbv7m-none-eabi", thumbv7m_none_eabi),
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use LinkerFlavor;
+use super::{LinkArgs, Target, TargetOptions};
+use super::emscripten_base::{cmd};
+
+pub fn target() -> Result<Target, String> {
+ let mut post_link_args = LinkArgs::new();
+ post_link_args.insert(LinkerFlavor::Em,
+ vec!["-s".to_string(),
+ "WASM=1".to_string(),
+ "-s".to_string(),
+ "ERROR_ON_UNDEFINED_SYMBOLS=1".to_string()]);
+
+ let opts = TargetOptions {
+ linker: cmd("emcc"),
+ ar: cmd("emar"),
+
+ dynamic_linking: false,
+ executables: true,
+ // Today emcc emits two files - a .js file to bootstrap and
+ // possibly interpret the wasm, and a .wasm file
+ exe_suffix: ".js".to_string(),
+ linker_is_gnu: true,
+ allow_asm: false,
+ obj_is_bitcode: true,
+ is_like_emscripten: true,
+ max_atomic_width: Some(32),
+ post_link_args: post_link_args,
+ target_family: Some("unix".to_string()),
+ .. Default::default()
+ };
+ Ok(Target {
+ llvm_target: "wasm32-unknown-unknown".to_string(),
+ target_endian: "little".to_string(),
+ target_pointer_width: "32".to_string(),
+ target_os: "emscripten".to_string(),
+ target_env: "".to_string(),
+ target_vendor: "unknown".to_string(),
+ data_layout: "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(),
+ arch: "wasm32".to_string(),
+ linker_flavor: LinkerFlavor::Em,
+ options: opts,
+ })
+}
#![crate_type = "rlib"]
#![no_std]
#![deny(warnings)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(staged_api))]
//! A typesafe bitmask flag generator.
#![feature(associated_consts)]
#![feature(nonzero)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
#![feature(const_fn)]
#![feature(i128_type)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate arena;
#[macro_use] extern crate syntax;
#[macro_use] extern crate log;
#![feature(i128)]
#![feature(i128_type)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate syntax;
extern crate serialize as rustc_serialize; // used by deriving
#![feature(specialization)]
#![feature(manually_drop)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
#![cfg_attr(stage0, feature(struct_field_attributes))]
#![cfg_attr(unix, feature(libc))]
#![feature(rustc_diagnostic_macros)]
#![feature(set_stdio)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-#![cfg_attr(stage0, feature(loop_break_value))]
-
extern crate arena;
extern crate getopts;
extern crate graphviz;
#![feature(libc)]
#![feature(conservative_impl_trait)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate term;
extern crate libc;
extern crate serialize as rustc_serialize;
#![feature(conservative_impl_trait)]
#![feature(sort_unstable)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate graphviz;
#[macro_use] extern crate rustc;
extern crate rustc_data_structures;
#![feature(rustc_diagnostic_macros)]
#![feature(slice_patterns)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use]
extern crate syntax;
#[macro_use]
let mut optional_components =
vec!["x86", "arm", "aarch64", "mips", "powerpc", "pnacl",
- "systemz", "jsbackend", "msp430", "sparc", "nvptx"];
+ "systemz", "jsbackend", "webassembly", "msp430", "sparc", "nvptx"];
let mut version_cmd = Command::new(&llvm_config);
version_cmd.arg("--version");
#![feature(link_args)]
#![feature(static_nobundle)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate libc;
#[macro_use]
#[no_link]
LLVMInitializeHexagonTargetMC,
LLVMInitializeHexagonAsmPrinter,
LLVMInitializeHexagonAsmParser);
+ init_target!(llvm_component = "webassembly",
+ LLVMInitializeWebAssemblyTargetInfo,
+ LLVMInitializeWebAssemblyTarget,
+ LLVMInitializeWebAssemblyTargetMC,
+ LLVMInitializeWebAssemblyAsmPrinter);
}
pub fn last_error() -> Option<String> {
#![feature(specialization)]
#![feature(discriminant_value)]
#![feature(rustc_private)]
-
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(staged_api))]
#![feature(sort_unstable)]
#[macro_use]
#![feature(placement_in_syntax)]
#![feature(collection_placement)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use] extern crate log;
extern crate graphviz as dot;
#[macro_use]
#![feature(rustc_diagnostic_macros)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use]
extern crate rustc;
extern crate rustc_const_eval;
// except according to those terms.
#![crate_name = "rustc_platform_intrinsics"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
#![crate_type = "dylib"]
#![crate_type = "rlib"]
-#![cfg_attr(stage0, feature(staged_api))]
#![deny(warnings)]
#![allow(bad_style)]
#![feature(rustc_diagnostic_macros)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use] extern crate syntax;
extern crate rustc;
#![feature(rustc_diagnostic_macros)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate rustc;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
};
let kind = ModuleKind::Def(Def::Mod(def_id), name);
- self.arenas.alloc_module(ModuleData::new(parent, kind, def_id, Mark::root(), DUMMY_SP))
+ let module =
+ self.arenas.alloc_module(ModuleData::new(parent, kind, def_id, Mark::root(), DUMMY_SP));
+ self.extern_module_map.insert((def_id, macros_only), module);
+ module
}
pub fn macro_def_scope(&mut self, expansion: Mark) -> Module<'a> {
#![feature(associated_consts)]
#![feature(rustc_diagnostic_macros)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use]
extern crate log;
#[macro_use]
#![feature(custom_attribute)]
#![allow(unused_attributes)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
#[macro_use] extern crate rustc;
#[macro_use] extern crate log;
#![feature(conservative_impl_trait)]
#![feature(command_envs)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
use rustc::dep_graph::WorkProduct;
use syntax_pos::symbol::Symbol;
#![feature(rustc_diagnostic_macros)]
#![feature(slice_patterns)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-#![cfg_attr(stage0, feature(loop_break_value))]
-
#[macro_use] extern crate log;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
#![feature(unicode)]
#![feature(vec_remove_item)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate arena;
extern crate getopts;
extern crate env_logger;
*/
#![crate_name = "serialize"]
-#![cfg_attr(stage0, unstable(feature = "rustc_private",
- reason = "deprecated in favor of rustc-serialize on crates.io",
- issue = "27812"))]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
#![feature(core_intrinsics)]
#![feature(i128_type)]
#![feature(specialization)]
-#![cfg_attr(stage0, feature(staged_api))]
#![cfg_attr(test, feature(test))]
pub use self::serialize::{Decoder, Encoder, Decodable, Encodable};
///
/// # Examples
///
+/// Joining paths on a Unix-like platform:
+///
+/// ```
+/// # if cfg!(unix) {
+/// use std::env;
+/// use std::ffi::OsString;
+/// use std::path::Path;
+///
+/// let paths = [Path::new("/bin"), Path::new("/usr/bin")];
+/// let path_os_string = env::join_paths(paths.iter()).unwrap();
+/// assert_eq!(path_os_string, OsString::from("/bin:/usr/bin"));
+/// # }
+/// ```
+///
+/// Joining a path containing a colon on a Unix-like platform results in an error:
+///
+/// ```
+/// # if cfg!(unix) {
+/// use std::env;
+/// use std::path::Path;
+///
+/// let paths = [Path::new("/bin"), Path::new("/usr/bi:n")];
+/// assert!(env::join_paths(paths.iter()).is_err());
+/// # }
+/// ```
+///
+/// Using `env::join_paths` with `env::split_paths` to append an item to the `PATH` environment
+/// variable:
+///
/// ```
/// use std::env;
/// use std::path::PathBuf;
// coherence challenge (e.g., specialization, neg impls, etc) we can
// reconsider what crate these items belong in.
+use alloc::allocator;
use any::TypeId;
use cell;
use char;
fn description(&self) -> &str { *self }
}
+#[unstable(feature = "allocator_api",
+ reason = "the precise API and guarantees it provides may be tweaked.",
+ issue = "27700")]
+impl Error for allocator::AllocErr {
+ fn description(&self) -> &str {
+ allocator::AllocErr::description(self)
+ }
+}
+
+#[unstable(feature = "allocator_api",
+ reason = "the precise API and guarantees it provides may be tweaked.",
+ issue = "27700")]
+impl Error for allocator::CannotReallocInPlace {
+ fn description(&self) -> &str {
+ allocator::CannotReallocInPlace::description(self)
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl Error for str::ParseBoolError {
fn description(&self) -> &str { "failed to parse bool" }
}
}
+#[stable(feature = "char_from_str", since = "1.19.0")]
+impl Error for char::ParseCharError {
+ fn description(&self) -> &str {
+ self.__description()
+ }
+}
+
+
// copied from any.rs
impl Error + 'static {
/// Returns true if the boxed type is the same as `T`
/// in the vector provided.
///
/// [`CString::new`]: struct.CString.html#method.new
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CString, NulError};
+///
+/// let _: NulError = CString::new(b"f\0oo".to_vec()).unwrap_err();
+/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct NulError(usize, Vec<u8>);
/// byte was found too early in the slice provided or one wasn't found at all.
///
/// [`CStr::from_bytes_with_nul`]: struct.CStr.html#method.from_bytes_with_nul
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CStr, FromBytesWithNulError};
+///
+/// let _: FromBytesWithNulError = CStr::from_bytes_with_nul(b"f\0oo").unwrap_err();
+/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
pub struct FromBytesWithNulError {
/// to undefined behavior or allocator corruption.
///
/// [`into_raw`]: #method.into_raw
+ ///
+ /// # Examples
+ ///
+ /// Create a `CString`, pass ownership to an `extern` function (via raw pointer), then retake
+ /// ownership with `from_raw`:
+ ///
+ /// ```no_run
+ /// use std::ffi::CString;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern {
+ /// fn some_extern_function(s: *mut c_char);
+ /// }
+ ///
+ /// let c_string = CString::new("Hello!").unwrap();
+ /// let raw = c_string.into_raw();
+ /// unsafe {
+ /// some_extern_function(raw);
+ /// let c_string = CString::from_raw(raw);
+ /// }
+ /// ```
#[stable(feature = "cstr_memory", since = "1.4.0")]
pub unsafe fn from_raw(ptr: *mut c_char) -> CString {
let len = libc::strlen(ptr) + 1; // Including the NUL byte
/// let _ = CString::from_raw(ptr);
/// }
/// ```
+ #[inline]
#[stable(feature = "cstr_memory", since = "1.4.0")]
pub fn into_raw(self) -> *mut c_char {
Box::into_raw(self.into_inner()) as *mut c_char
/// let bytes = c_string.as_bytes();
/// assert_eq!(bytes, &[b'f', b'o', b'o']);
/// ```
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes(&self) -> &[u8] {
&self.inner[..self.inner.len() - 1]
/// let bytes = c_string.as_bytes_with_nul();
/// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
/// ```
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_bytes_with_nul(&self) -> &[u8] {
&self.inner
/// Extracts a [`CStr`] slice containing the entire string.
///
/// [`CStr`]: struct.CStr.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(as_c_str)]
+ ///
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).unwrap();
+ /// let c_str = c_string.as_c_str();
+ /// assert_eq!(c_str, CStr::from_bytes_with_nul(b"foo\0").unwrap());
+ /// ```
+ #[inline]
#[unstable(feature = "as_c_str", issue = "40380")]
pub fn as_c_str(&self) -> &CStr {
&*self
/// Converts this `CString` into a boxed [`CStr`].
///
/// [`CStr`]: struct.CStr.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(into_boxed_c_str)]
+ ///
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).unwrap();
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(&*boxed, CStr::from_bytes_with_nul(b"foo\0").unwrap());
+ /// ```
#[unstable(feature = "into_boxed_c_str", issue = "40380")]
pub fn into_boxed_c_str(self) -> Box<CStr> {
unsafe { mem::transmute(self.into_inner()) }
impl ops::Deref for CString {
type Target = CStr;
+ #[inline]
fn deref(&self) -> &CStr {
unsafe { CStr::from_bytes_with_nul_unchecked(self.as_bytes_with_nul()) }
}
#[stable(feature = "cstring_into", since = "1.7.0")]
impl From<CString> for Vec<u8> {
+ #[inline]
fn from(s: CString) -> Vec<u8> {
s.into_bytes()
}
#[stable(feature = "cstr_borrow", since = "1.3.0")]
impl Borrow<CStr> for CString {
+ #[inline]
fn borrow(&self) -> &CStr { self }
}
#[stable(feature = "c_string_from_box", since = "1.18.0")]
impl From<Box<CStr>> for CString {
+ #[inline]
fn from(s: Box<CStr>) -> CString {
s.into_c_string()
}
#[stable(feature = "box_from_c_string", since = "1.18.0")]
impl Into<Box<CStr>> for CString {
+ #[inline]
fn into(self) -> Box<CStr> {
self.into_boxed_c_str()
}
/// let cstr = CStr::from_bytes_with_nul(b"hello\0");
/// assert!(cstr.is_ok());
/// ```
+ ///
+ /// Creating a `CStr` without a trailing nul byte is an error:
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"hello");
+ /// assert!(c_str.is_err());
+ /// ```
+ ///
+ /// Creating a `CStr` with an interior nul byte is an error:
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"he\0llo\0");
+ /// assert!(c_str.is_err());
+ /// ```
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
pub fn from_bytes_with_nul(bytes: &[u8])
-> Result<&CStr, FromBytesWithNulError> {
/// assert_eq!(cstr, &*cstring);
/// }
/// ```
+ #[inline]
#[stable(feature = "cstr_from_bytes", since = "1.10.0")]
pub unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
mem::transmute(bytes)
/// *ptr;
/// }
/// ```
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn as_ptr(&self) -> *const c_char {
self.inner.as_ptr()
/// > **Note**: This method is currently implemented as a 0-cost cast, but
/// > it is planned to alter its definition in the future to perform the
/// > length calculation whenever this method is called.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(c_str.to_bytes(), b"foo");
+ /// ```
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
/// > length calculation whenever this method is called.
///
/// [`to_bytes`]: #method.to_bytes
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(c_str.to_bytes_with_nul(), b"foo\0");
+ /// ```
+ #[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn to_bytes_with_nul(&self) -> &[u8] {
unsafe { mem::transmute(&self.inner) }
/// > check whenever this method is called.
///
/// [`&str`]: ../primitive.str.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(c_str.to_str(), Ok("foo"));
+ /// ```
#[stable(feature = "cstr_to_str", since = "1.4.0")]
pub fn to_str(&self) -> Result<&str, str::Utf8Error> {
// NB: When CStr is changed to perform the length check in .to_bytes()
///
/// [`Cow`]: ../borrow/enum.Cow.html
/// [`str`]: ../primitive.str.html
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing valid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"Hello World\0").unwrap();
+ /// assert_eq!(c_str.to_string_lossy(), Cow::Borrowed("Hello World"));
+ /// ```
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing invalid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let c_str = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0").unwrap();
+ /// assert_eq!(
+ /// c_str.to_string_lossy(),
+ /// Cow::Owned(String::from("Hello �World")) as Cow<str>
+ /// );
+ /// ```
#[stable(feature = "cstr_to_str", since = "1.4.0")]
pub fn to_string_lossy(&self) -> Cow<str> {
String::from_utf8_lossy(self.to_bytes())
///
/// [`Box`]: ../boxed/struct.Box.html
/// [`CString`]: struct.CString.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(into_boxed_c_str)]
+ ///
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).unwrap();
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(boxed.into_c_string(), CString::new("foo").unwrap());
+ /// ```
#[unstable(feature = "into_boxed_c_str", issue = "40380")]
pub fn into_c_string(self: Box<CStr>) -> CString {
unsafe { mem::transmute(self) }
#[stable(feature = "cstring_asref", since = "1.7.0")]
impl AsRef<CStr> for CStr {
+ #[inline]
fn as_ref(&self) -> &CStr {
self
}
#[stable(feature = "cstring_asref", since = "1.7.0")]
impl AsRef<CStr> for CString {
+ #[inline]
fn as_ref(&self) -> &CStr {
self
}
// std is implemented with unstable features, many of which are internal
// compiler details that will never be stable
#![feature(alloc)]
+#![feature(allocator_api)]
#![feature(allow_internal_unstable)]
#![feature(asm)]
#![feature(associated_consts)]
#![feature(cfg_target_thread_local)]
#![feature(cfg_target_vendor)]
#![feature(char_escape_debug)]
+#![feature(char_error_internals)]
#![feature(char_internals)]
#![feature(collections_range)]
#![feature(compiler_builtins_lib)]
pub use core::char::{MAX, from_digit, from_u32, from_u32_unchecked};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::char::{EscapeDebug, EscapeDefault, EscapeUnicode};
+#[stable(feature = "char_from_str", since = "1.19.0")]
+pub use core::char::ParseCharError;
// unstable reexports
#[unstable(feature = "try_from", issue = "33417")]
#![feature(rustc_diagnostic_macros)]
#![feature(i128_type)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate serialize;
#[macro_use] extern crate log;
#[macro_use] extern crate bitflags;
#![feature(proc_macro_internals)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
extern crate fmt_macros;
extern crate log;
#[macro_use]
#![allow(unused_attributes)]
#![feature(specialization)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(rustc_private))]
-#![cfg_attr(stage0, feature(staged_api))]
-
use std::borrow::Cow;
use std::cell::{Cell, RefCell};
-use std::ops::{Add, Sub};
-use std::rc::Rc;
use std::cmp;
use std::fmt;
use std::hash::Hasher;
+use std::ops::{Add, Sub};
+use std::rc::Rc;
use rustc_data_structures::stable_hasher::StableHasher;
#![feature(custom_attribute)]
#![allow(unused_attributes)]
-#![cfg_attr(stage0, unstable(feature = "rustc_private", issue = "27812"))]
-#![cfg_attr(stage0, feature(staged_api))]
-
use std::io::prelude::*;
pub use terminfo::TerminfoTerminal;
# source tarball for a stable release you'll likely see `1.x.0` for rustc and
# `0.x.0` for Cargo where they were released on `date`.
-date: 2017-04-25
+date: 2017-06-15
rustc: beta
cargo: beta
// except according to those terms.
// this used to cause exponential code-size blowup during LLVM passes.
+// ignore-test FIXME #41696
// min-llvm-version 3.9
#![feature(test)]
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+// ignore-emscripten missing rust_begin_unwind
+
#![feature(lang_items, start, libc, alloc)]
#![no_std]