Avoid the Box in `TyCtxt::associated_items`.
This reduces instruction counts on `packed_simd` by 2%.
r? @nikomatsakis
"rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
-[[package]]
-name = "alloc_system"
-version = "0.0.0"
-dependencies = [
- "compiler_builtins 0.0.0",
- "core 0.0.0",
- "dlmalloc 0.0.0",
- "libc 0.0.0",
-]
-
[[package]]
name = "ammonia"
version = "1.1.0"
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)",
"compiler_builtins 0.0.0",
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)",
"compiler_builtins 0.0.0",
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)",
"compiler_builtins 0.0.0",
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_system 0.0.0",
"build_helper 0.1.0",
"cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)",
"compiler_builtins 0.0.0",
version = "0.0.0"
dependencies = [
"alloc 0.0.0",
- "alloc_system 0.0.0",
"build_helper 0.1.0",
"cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
"compiler_builtins 0.0.0",
"core 0.0.0",
+ "dlmalloc 0.0.0",
"libc 0.0.0",
"panic_abort 0.0.0",
"panic_unwind 0.0.0",
"src/build_helper",
"src/dlmalloc",
"src/liballoc",
- "src/liballoc_system",
"src/libbacktrace",
"src/libcompiler_builtins",
"src/libcore",
-FROM ubuntu:16.04
+FROM ubuntu:18.04
RUN apt-get update && apt-get install -y --no-install-recommends \
g++ \
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use alloc_system::System;
-use std::alloc::{Global, Alloc, Layout};
+use std::alloc::{Global, Alloc, Layout, System};
/// https://github.com/rust-lang/rust/issues/45955
#[test]
// except according to those terms.
#![feature(allocator_api)]
-#![feature(alloc_system)]
#![feature(box_syntax)]
#![feature(drain_filter)]
#![feature(exact_size_is_empty)]
#![feature(unboxed_closures)]
#![feature(repeat_generic_slice)]
-extern crate alloc_system;
extern crate core;
extern crate rand;
+++ /dev/null
-[package]
-authors = ["The Rust Project Developers"]
-name = "alloc_system"
-version = "0.0.0"
-
-[lib]
-name = "alloc_system"
-path = "lib.rs"
-test = false
-doc = false
-
-[dependencies]
-core = { path = "../libcore" }
-libc = { path = "../rustc/libc_shim" }
-compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
-
-# See comments in the source for what this dependency is
-[target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies]
-dlmalloc = { path = "../rustc/dlmalloc_shim" }
+++ /dev/null
-// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![no_std]
-#![allow(unused_attributes)]
-#![unstable(feature = "alloc_system",
- reason = "this library is unlikely to be stabilized in its current \
- form or name",
- issue = "32838")]
-
-#![feature(allocator_api)]
-#![feature(core_intrinsics)]
-#![feature(nll)]
-#![feature(staged_api)]
-#![feature(rustc_attrs)]
-#![cfg_attr(
- all(target_arch = "wasm32", not(target_os = "emscripten")),
- feature(integer_atomics, stdsimd)
-)]
-#![cfg_attr(any(unix, target_os = "cloudabi", target_os = "redox"), feature(libc))]
-#![rustc_alloc_kind = "lib"]
-
-// The minimum alignment guaranteed by the architecture. This value is used to
-// add fast paths for low alignment values.
-#[cfg(all(any(target_arch = "x86",
- target_arch = "arm",
- target_arch = "mips",
- target_arch = "powerpc",
- target_arch = "powerpc64",
- target_arch = "asmjs",
- target_arch = "wasm32")))]
-#[allow(dead_code)]
-const MIN_ALIGN: usize = 8;
-#[cfg(all(any(target_arch = "x86_64",
- target_arch = "aarch64",
- target_arch = "mips64",
- target_arch = "s390x",
- target_arch = "sparc64")))]
-#[allow(dead_code)]
-const MIN_ALIGN: usize = 16;
-
-use core::alloc::{Alloc, GlobalAlloc, AllocErr, Layout};
-use core::ptr::NonNull;
-
-/// The default memory allocator provided by the operating system.
-///
-/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
-/// plus related functions.
-///
-/// This type can be used in a `static` item
-/// with the `#[global_allocator]` attribute
-/// to force the global allocator to be the system’s one.
-/// (The default is jemalloc for executables, on some platforms.)
-///
-/// ```rust
-/// use std::alloc::System;
-///
-/// #[global_allocator]
-/// static A: System = System;
-///
-/// fn main() {
-/// let a = Box::new(4); // Allocates from the system allocator.
-/// println!("{}", a);
-/// }
-/// ```
-///
-/// It can also be used directly to allocate memory
-/// independently of the standard library’s global allocator.
-#[stable(feature = "alloc_system_type", since = "1.28.0")]
-pub struct System;
-
-#[unstable(feature = "allocator_api", issue = "32838")]
-unsafe impl Alloc for System {
- #[inline]
- unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
- NonNull::new(GlobalAlloc::alloc(self, layout)).ok_or(AllocErr)
- }
-
- #[inline]
- unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
- NonNull::new(GlobalAlloc::alloc_zeroed(self, layout)).ok_or(AllocErr)
- }
-
- #[inline]
- unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
- GlobalAlloc::dealloc(self, ptr.as_ptr(), layout)
- }
-
- #[inline]
- unsafe fn realloc(&mut self,
- ptr: NonNull<u8>,
- layout: Layout,
- new_size: usize) -> Result<NonNull<u8>, AllocErr> {
- NonNull::new(GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size)).ok_or(AllocErr)
- }
-}
-
-#[cfg(any(windows, unix, target_os = "cloudabi", target_os = "redox"))]
-mod realloc_fallback {
- use core::alloc::{GlobalAlloc, Layout};
- use core::cmp;
- use core::ptr;
-
- impl super::System {
- pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
- new_size: usize) -> *mut u8 {
- // Docs for GlobalAlloc::realloc require this to be valid:
- let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
-
- let new_ptr = GlobalAlloc::alloc(self, new_layout);
- if !new_ptr.is_null() {
- let size = cmp::min(old_layout.size(), new_size);
- ptr::copy_nonoverlapping(ptr, new_ptr, size);
- GlobalAlloc::dealloc(self, ptr, old_layout);
- }
- new_ptr
- }
- }
-}
-
-#[cfg(any(unix, target_os = "cloudabi", target_os = "redox"))]
-mod platform {
- extern crate libc;
-
- use core::ptr;
-
- use MIN_ALIGN;
- use System;
- use core::alloc::{GlobalAlloc, Layout};
-
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
- unsafe impl GlobalAlloc for System {
- #[inline]
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
- libc::malloc(layout.size()) as *mut u8
- } else {
- #[cfg(target_os = "macos")]
- {
- if layout.align() > (1 << 31) {
- return ptr::null_mut()
- }
- }
- aligned_malloc(&layout)
- }
- }
-
- #[inline]
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
- libc::calloc(layout.size(), 1) as *mut u8
- } else {
- let ptr = self.alloc(layout.clone());
- if !ptr.is_null() {
- ptr::write_bytes(ptr, 0, layout.size());
- }
- ptr
- }
- }
-
- #[inline]
- unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
- libc::free(ptr as *mut libc::c_void)
- }
-
- #[inline]
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
- libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
- } else {
- self.realloc_fallback(ptr, layout, new_size)
- }
- }
- }
-
- #[cfg(any(target_os = "android",
- target_os = "hermit",
- target_os = "redox",
- target_os = "solaris"))]
- #[inline]
- unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
- // On android we currently target API level 9 which unfortunately
- // doesn't have the `posix_memalign` API used below. Instead we use
- // `memalign`, but this unfortunately has the property on some systems
- // where the memory returned cannot be deallocated by `free`!
- //
- // Upon closer inspection, however, this appears to work just fine with
- // Android, so for this platform we should be fine to call `memalign`
- // (which is present in API level 9). Some helpful references could
- // possibly be chromium using memalign [1], attempts at documenting that
- // memalign + free is ok [2] [3], or the current source of chromium
- // which still uses memalign on android [4].
- //
- // [1]: https://codereview.chromium.org/10796020/
- // [2]: https://code.google.com/p/android/issues/detail?id=35391
- // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
- // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
- // /memory/aligned_memory.cc
- libc::memalign(layout.align(), layout.size()) as *mut u8
- }
-
- #[cfg(not(any(target_os = "android",
- target_os = "hermit",
- target_os = "redox",
- target_os = "solaris")))]
- #[inline]
- unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
- let mut out = ptr::null_mut();
- let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
- if ret != 0 {
- ptr::null_mut()
- } else {
- out as *mut u8
- }
- }
-}
-
-#[cfg(windows)]
-#[allow(nonstandard_style)]
-mod platform {
- use MIN_ALIGN;
- use System;
- use core::alloc::{GlobalAlloc, Layout};
-
- type LPVOID = *mut u8;
- type HANDLE = LPVOID;
- type SIZE_T = usize;
- type DWORD = u32;
- type BOOL = i32;
-
- extern "system" {
- fn GetProcessHeap() -> HANDLE;
- fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
- fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
- fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
- fn GetLastError() -> DWORD;
- }
-
- #[repr(C)]
- struct Header(*mut u8);
-
- const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
-
- unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
- &mut *(ptr as *mut Header).offset(-1)
- }
-
- unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
- let aligned = ptr.add(align - (ptr as usize & (align - 1)));
- *get_header(aligned) = Header(ptr);
- aligned
- }
-
- #[inline]
- unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
- let ptr = if layout.align() <= MIN_ALIGN {
- HeapAlloc(GetProcessHeap(), flags, layout.size())
- } else {
- let size = layout.size() + layout.align();
- let ptr = HeapAlloc(GetProcessHeap(), flags, size);
- if ptr.is_null() {
- ptr
- } else {
- align_ptr(ptr, layout.align())
- }
- };
- ptr as *mut u8
- }
-
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
- unsafe impl GlobalAlloc for System {
- #[inline]
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- allocate_with_flags(layout, 0)
- }
-
- #[inline]
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- allocate_with_flags(layout, HEAP_ZERO_MEMORY)
- }
-
- #[inline]
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- if layout.align() <= MIN_ALIGN {
- let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
- debug_assert!(err != 0, "Failed to free heap memory: {}",
- GetLastError());
- } else {
- let header = get_header(ptr);
- let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
- debug_assert!(err != 0, "Failed to free heap memory: {}",
- GetLastError());
- }
- }
-
- #[inline]
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- if layout.align() <= MIN_ALIGN {
- HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
- } else {
- self.realloc_fallback(ptr, layout, new_size)
- }
- }
- }
-}
-
-// This is an implementation of a global allocator on the wasm32 platform when
-// emscripten is not in use. In that situation there's no actual runtime for us
-// to lean on for allocation, so instead we provide our own!
-//
-// The wasm32 instruction set has two instructions for getting the current
-// amount of memory and growing the amount of memory. These instructions are the
-// foundation on which we're able to build an allocator, so we do so! Note that
-// the instructions are also pretty "global" and this is the "global" allocator
-// after all!
-//
-// The current allocator here is the `dlmalloc` crate which we've got included
-// in the rust-lang/rust repository as a submodule. The crate is a port of
-// dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
-// for now which is currently technically required (can't link with C yet).
-//
-// The crate itself provides a global allocator which on wasm has no
-// synchronization as there are no threads!
-#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
-mod platform {
- extern crate dlmalloc;
-
- use core::alloc::{GlobalAlloc, Layout};
- use System;
-
- static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT;
-
- #[stable(feature = "alloc_system_type", since = "1.28.0")]
- unsafe impl GlobalAlloc for System {
- #[inline]
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.malloc(layout.size(), layout.align())
- }
-
- #[inline]
- unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.calloc(layout.size(), layout.align())
- }
-
- #[inline]
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- let _lock = lock::lock();
- DLMALLOC.free(ptr, layout.size(), layout.align())
- }
-
- #[inline]
- unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
- let _lock = lock::lock();
- DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size)
- }
- }
-
- #[cfg(target_feature = "atomics")]
- mod lock {
- use core::arch::wasm32;
- use core::sync::atomic::{AtomicI32, Ordering::SeqCst};
-
- static LOCKED: AtomicI32 = AtomicI32::new(0);
-
- pub struct DropLock;
-
- pub fn lock() -> DropLock {
- loop {
- if LOCKED.swap(1, SeqCst) == 0 {
- return DropLock
- }
- unsafe {
- let r = wasm32::atomic::wait_i32(
- &LOCKED as *const AtomicI32 as *mut i32,
- 1, // expected value
- -1, // timeout
- );
- debug_assert!(r == 0 || r == 1);
- }
- }
- }
-
- impl Drop for DropLock {
- fn drop(&mut self) {
- let r = LOCKED.swap(0, SeqCst);
- debug_assert_eq!(r, 1);
- unsafe {
- wasm32::atomic::wake(
- &LOCKED as *const AtomicI32 as *mut i32,
- 1, // only one thread
- );
- }
- }
- }
- }
-
- #[cfg(not(target_feature = "atomics"))]
- mod lock {
- #[inline]
- pub fn lock() {} // no atomics, no threads, that's easy!
- }
-}
use hir::def_id::CrateNum;
-use session;
use session::config;
use ty::TyCtxt;
use middle::cstore::{self, DepKind};
// quite yet, so do so here.
activate_injected_dep(*sess.injected_panic_runtime.get(), &mut ret,
&|cnum| tcx.is_panic_runtime(cnum));
- activate_injected_allocator(sess, &mut ret);
// When dylib B links to dylib A, then when using B we must also link to A.
// It could be the case, however, that the rlib for A is present (hence we
// that here and activate them.
activate_injected_dep(*sess.injected_panic_runtime.get(), &mut ret,
&|cnum| tcx.is_panic_runtime(cnum));
- activate_injected_allocator(sess, &mut ret);
Some(ret)
}
}
}
-fn activate_injected_allocator(sess: &session::Session,
- list: &mut DependencyList) {
- let cnum = match sess.injected_allocator.get() {
- Some(cnum) => cnum,
- None => return,
- };
- let idx = cnum.as_usize() - 1;
- if list[idx] == Linkage::NotLinked {
- list[idx] = Linkage::Static;
- }
-}
-
// After the linkage for a crate has been determined we need to verify that
// there's only going to be one allocator in the output.
fn verify_ok<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, list: &[Linkage]) {
/// The metadata::creader module may inject an allocator/panic_runtime
/// dependency if it didn't already find one, and this tracks what was
/// injected.
- pub injected_allocator: Once<Option<CrateNum>>,
pub allocator_kind: Once<Option<AllocatorKind>>,
pub injected_panic_runtime: Once<Option<CrateNum>>,
type_length_limit: Once::new(),
const_eval_stack_frame_limit: 100,
next_node_id: OneThread::new(Cell::new(NodeId::new(1))),
- injected_allocator: Once::new(),
allocator_kind: Once::new(),
injected_panic_runtime: Once::new(),
imported_macro_spans: OneThread::new(RefCell::new(FxHashMap::default())),
[dependencies]
alloc = { path = "../liballoc" }
-alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }
compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
// except according to those terms.
#![sanitizer_runtime]
-#![feature(alloc_system)]
#![feature(nll)]
#![feature(sanitizer_runtime)]
#![feature(staged_api)]
#![unstable(feature = "sanitizer_runtime_lib",
reason = "internal implementation detail of sanitizers",
issue = "0")]
-
-extern crate alloc_system;
-
-use alloc_system::System;
-
-#[global_allocator]
-static ALLOC: System = System;
};
with_llvm_pmb(llmod, config, opt_level, false, &mut |b| {
if thin {
- if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) {
- panic!("this version of LLVM does not support ThinLTO");
- }
+ llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
} else {
llvm::LLVMPassManagerBuilderPopulateLTOPassManager(b, pm,
/* Internalize = */ False,
// FIXME: add a non-fast math version once
// https://bugs.llvm.org/show_bug.cgi?id=36732
// is fixed.
- let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src)
- .expect("LLVMRustBuildVectorReduceFAdd is not available in LLVM version < 5.0");
+ let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
// FIXME: add a non-fast math version once
// https://bugs.llvm.org/show_bug.cgi?id=36732
// is fixed.
- let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src)
- .expect("LLVMRustBuildVectorReduceFMul is not available in LLVM version < 5.0");
+ let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.add");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src);
- instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
}
pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.mul");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src);
- instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
}
pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.and");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src);
- instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
}
pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.or");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src);
- instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
}
pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.xor");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src);
- instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
}
pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmin");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false);
- instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) }
}
pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmax");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false);
- instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) }
}
pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmin_fast");
unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true)
- .expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0");
+ let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value {
self.count_insn("vector.reduce.fmax_fast");
unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true)
- .expect("LLVMRustBuildVectorReduceFMax is not available in LLVM version < 5.0");
+ let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}
}
pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
self.count_insn("vector.reduce.min");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed);
- instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
}
pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value {
self.count_insn("vector.reduce.max");
- unsafe {
- let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed);
- instr.expect("LLVMRustBuildVectorReduceMax is not available in LLVM version < 5.0")
- }
+ unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
}
pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
niche_start,
ref variants,
dataful_variant,
- ..
+ ref niche,
} => {
if fallback {
let variant = self.layout.for_variant(cx, dataful_variant);
let niche_value = if i == dataful_variant {
None
} else {
- let niche = (i as u128)
+ let value = (i as u128)
.wrapping_sub(*niche_variants.start() as u128)
.wrapping_add(niche_start);
- assert_eq!(niche as u64 as u128, niche);
- Some(niche as u64)
+ let value = value & ((1u128 << niche.value.size(cx).bits()) - 1);
+ Some(value as u64)
};
MemberDescription {
pub fn LLVMRustBuildVectorReduceFAdd(B: &Builder<'a>,
Acc: &'a Value,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceFMul(B: &Builder<'a>,
Acc: &'a Value,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceAdd(B: &Builder<'a>,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceMul(B: &Builder<'a>,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceAnd(B: &Builder<'a>,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceOr(B: &Builder<'a>,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceXor(B: &Builder<'a>,
Src: &'a Value)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceMin(B: &Builder<'a>,
Src: &'a Value,
IsSigned: bool)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceMax(B: &Builder<'a>,
Src: &'a Value,
IsSigned: bool)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceFMin(B: &Builder<'a>,
Src: &'a Value,
IsNaN: bool)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildVectorReduceFMax(B: &Builder<'a>,
Src: &'a Value,
IsNaN: bool)
- -> Option<&'a Value>;
+ -> &'a Value;
pub fn LLVMRustBuildMinNum(
B: &Builder<'a>,
RunInliner: Bool);
pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
PMB: &PassManagerBuilder,
- PM: &PassManager) -> bool;
+ PM: &PassManager);
// Stuff that's in rustllvm/ because it's not upstream yet.
[dependencies]
alloc = { path = "../liballoc" }
-alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }
compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
// except according to those terms.
#![sanitizer_runtime]
-#![feature(alloc_system)]
#![feature(nll)]
#![feature(sanitizer_runtime)]
#![feature(staged_api)]
#![unstable(feature = "sanitizer_runtime_lib",
reason = "internal implementation detail of sanitizers",
issue = "0")]
-
-extern crate alloc_system;
-
-use alloc_system::System;
-
-#[global_allocator]
-static ALLOC: System = System;
use schema::CrateRoot;
use rustc_data_structures::sync::{Lrc, RwLock, Lock};
-use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX};
+use rustc::hir::def_id::CrateNum;
use rustc_data_structures::svh::Svh;
use rustc::middle::allocator::AllocatorKind;
use rustc::middle::cstore::DepKind;
needs_allocator = needs_allocator || data.root.needs_allocator;
});
if !needs_allocator {
- self.sess.injected_allocator.set(None);
self.sess.allocator_kind.set(None);
return
}
// At this point we've determined that we need an allocator. Let's see
// if our compilation session actually needs an allocator based on what
// we're emitting.
- let mut need_lib_alloc = false;
- let mut need_exe_alloc = false;
- for ct in self.sess.crate_types.borrow().iter() {
- match *ct {
- config::CrateType::Executable => need_exe_alloc = true,
- config::CrateType::Dylib |
- config::CrateType::ProcMacro |
- config::CrateType::Cdylib |
- config::CrateType::Staticlib => need_lib_alloc = true,
- config::CrateType::Rlib => {}
- }
- }
- if !need_lib_alloc && !need_exe_alloc {
- self.sess.injected_allocator.set(None);
+ let all_rlib = self.sess.crate_types.borrow()
+ .iter()
+ .all(|ct| {
+ match *ct {
+ config::CrateType::Rlib => true,
+ _ => false,
+ }
+ });
+ if all_rlib {
self.sess.allocator_kind.set(None);
return
}
});
if global_allocator.is_some() {
self.sess.allocator_kind.set(Some(AllocatorKind::Global));
- self.sess.injected_allocator.set(None);
return
}
// Ok we haven't found a global allocator but we still need an
- // allocator. At this point we'll either fall back to the "library
- // allocator" or the "exe allocator" depending on a few variables. Let's
- // figure out which one.
- //
- // Note that here we favor linking to the "library allocator" as much as
- // possible. If we're not creating rustc's version of libstd
- // (need_lib_alloc and prefer_dynamic) then we select `None`, and if the
- // exe allocation crate doesn't exist for this target then we also
- // select `None`.
- let exe_allocation_crate_data =
- if need_lib_alloc && !self.sess.opts.cg.prefer_dynamic {
- None
- } else {
- self.sess
- .target
- .target
- .options
- .exe_allocation_crate
- .as_ref()
- .map(|name| {
- // We've determined that we're injecting an "exe allocator" which means
- // that we're going to load up a whole new crate. An example of this is
- // that we're producing a normal binary on Linux which means we need to
- // load the `alloc_jemalloc` crate to link as an allocator.
- let name = Symbol::intern(name);
- let (cnum, data) = self.resolve_crate(&None,
- name,
- name,
- None,
- None,
- DUMMY_SP,
- PathKind::Crate,
- DepKind::Implicit)
- .unwrap_or_else(|err| err.report());
- self.sess.injected_allocator.set(Some(cnum));
- data
- })
- };
-
- let allocation_crate_data = exe_allocation_crate_data.or_else(|| {
- // No allocator was injected
- self.sess.injected_allocator.set(None);
-
- if attr::contains_name(&krate.attrs, "default_lib_allocator") {
- // Prefer self as the allocator if there's a collision
- return None;
+ // allocator. At this point our allocator request is typically fulfilled
+ // by the standard library, denoted by the `#![default_lib_allocator]`
+ // attribute.
+ let mut has_default = attr::contains_name(&krate.attrs, "default_lib_allocator");
+ self.cstore.iter_crate_data(|_, data| {
+ if data.root.has_default_lib_allocator {
+ has_default = true;
}
- // We're not actually going to inject an allocator, we're going to
- // require that something in our crate graph is the default lib
- // allocator. This is typically libstd, so this'll rarely be an
- // error.
- let mut allocator = None;
- self.cstore.iter_crate_data(|_, data| {
- if allocator.is_none() && data.root.has_default_lib_allocator {
- allocator = Some(data.clone());
- }
- });
- allocator
});
- match allocation_crate_data {
- Some(data) => {
- // We have an allocator. We detect separately what kind it is, to allow for some
- // flexibility in misconfiguration.
- let attrs = data.get_item_attrs(CRATE_DEF_INDEX, self.sess);
- let kind_interned = attr::first_attr_value_str_by_name(&attrs, "rustc_alloc_kind")
- .map(Symbol::as_str);
- let kind_str = kind_interned
- .as_ref()
- .map(|s| s as &str);
- let alloc_kind = match kind_str {
- None |
- Some("lib") => AllocatorKind::DefaultLib,
- Some("exe") => AllocatorKind::DefaultExe,
- Some(other) => {
- self.sess.err(&format!("Allocator kind {} not known", other));
- return;
- }
- };
- self.sess.allocator_kind.set(Some(alloc_kind));
- },
- None => {
- if !attr::contains_name(&krate.attrs, "default_lib_allocator") {
- self.sess.err("no global memory allocator found but one is \
- required; link to std or \
- add #[global_allocator] to a static item \
- that implements the GlobalAlloc trait.");
- return;
- }
- self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib));
- }
+ if !has_default {
+ self.sess.err("no global memory allocator found but one is \
+ required; link to std or \
+ add #[global_allocator] to a static item \
+ that implements the GlobalAlloc trait.");
}
+ self.sess.allocator_kind.set(Some(AllocatorKind::DefaultLib));
fn has_global_allocator(krate: &ast::Crate) -> bool {
struct Finder(bool);
impl<'a> CrateLoader<'a> {
pub fn postprocess(&mut self, krate: &ast::Crate) {
- // inject the sanitizer runtime before the allocator runtime because all
- // sanitizers force the use of the `alloc_system` allocator
self.inject_sanitizer_runtime();
self.inject_profiler_runtime();
self.inject_allocator_crate(krate);
[dependencies]
alloc = { path = "../liballoc" }
-alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }
compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
// except according to those terms.
#![sanitizer_runtime]
-#![feature(alloc_system)]
#![feature(nll)]
#![feature(sanitizer_runtime)]
#![feature(staged_api)]
#![unstable(feature = "sanitizer_runtime_lib",
reason = "internal implementation detail of sanitizers",
issue = "0")]
-
-extern crate alloc_system;
-
-use alloc_system::System;
-
-#[global_allocator]
-static ALLOC: System = System;
let mut base = super::freebsd_base::opts();
base.max_atomic_width = Some(128);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "aarch64-unknown-freebsd".to_string(),
target_endian: "little".to_string(),
let mut base = super::linux_base::opts();
base.max_atomic_width = Some(128);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "aarch64-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
let mut base = super::linux_musl_base::opts();
base.max_atomic_width = Some(128);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "aarch64-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
]);
TargetOptions {
- exe_allocation_crate: None,
executables: true,
has_elf_tls: true,
linker_is_gnu: true,
TargetOptions {
executables: true,
has_elf_tls: false,
- exe_allocation_crate: None,
panic_strategy: PanicStrategy::Abort,
linker: Some("ld".to_string()),
pre_link_args: args,
features: "+mips64r2".to_string(),
max_atomic_width: Some(64),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
features: "+mips64r2".to_string(),
max_atomic_width: Some(64),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
features: "+mips32r2,+fpxx,+nooddspreg".to_string(),
max_atomic_width: Some(32),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
base.cpu = "mips32r2".to_string();
base.features = "+mips32r2,+soft-float".to_string();
base.max_atomic_width = Some(32);
- // see #36994
- base.exe_allocation_crate = None;
base.crt_static_default = false;
Ok(Target {
llvm_target: "mips-unknown-linux-musl".to_string(),
features: "+mips32r2,+soft-float".to_string(),
max_atomic_width: Some(32),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
features: "+mips32r2,+fpxx,+nooddspreg".to_string(),
max_atomic_width: Some(32),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
base.cpu = "mips32r2".to_string();
base.features = "+mips32r2,+soft-float".to_string();
base.max_atomic_width = Some(32);
- // see #36994
- base.exe_allocation_crate = None;
base.crt_static_default = false;
Ok(Target {
llvm_target: "mipsel-unknown-linux-musl".to_string(),
features: "+mips32r2,+soft-float".to_string(),
max_atomic_width: Some(32),
- // see #36994
- exe_allocation_crate: None,
-
..super::linux_base::opts()
},
})
/// `eh_unwind_resume` lang item.
pub custom_unwind_resume: bool,
- /// If necessary, a different crate to link exe allocators by default
- pub exe_allocation_crate: Option<String>,
-
/// Flag indicating whether ELF TLS (e.g. #[thread_local]) is available for
/// this target.
pub has_elf_tls: bool,
link_env: Vec::new(),
archive_format: "gnu".to_string(),
custom_unwind_resume: false,
- exe_allocation_crate: None,
allow_asm: true,
has_elf_tls: false,
obj_is_bitcode: false,
key!(archive_format);
key!(allow_asm, bool);
key!(custom_unwind_resume, bool);
- key!(exe_allocation_crate, optional);
key!(has_elf_tls, bool);
key!(obj_is_bitcode, bool);
key!(no_integrated_as, bool);
target_option_val!(archive_format);
target_option_val!(allow_asm);
target_option_val!(custom_unwind_resume);
- target_option_val!(exe_allocation_crate);
target_option_val!(has_elf_tls);
target_option_val!(obj_is_bitcode);
target_option_val!(no_integrated_as);
// for now. https://github.com/rust-lang/rust/pull/43170#issuecomment-315411474
base.relro_level = RelroLevel::Partial;
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc64-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.max_atomic_width = Some(64);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-gnu".to_string(),
target_endian: "little".to_string(),
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m64".to_string());
base.max_atomic_width = Some(64);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc64le-unknown-linux-musl".to_string(),
target_endian: "little".to_string(),
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
base.max_atomic_width = Some(32);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc-unknown-linux-gnu".to_string(),
target_endian: "big".to_string(),
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mspe".to_string());
base.max_atomic_width = Some(32);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc-unknown-linux-gnuspe".to_string(),
target_endian: "big".to_string(),
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-m32".to_string());
base.max_atomic_width = Some(32);
- // see #36994
- base.exe_allocation_crate = None;
-
Ok(Target {
llvm_target: "powerpc-unknown-netbsd".to_string(),
target_endian: "big".to_string(),
// Pass the -vector feature string to LLVM to respect this assumption.
base.features = "-vector".to_string();
base.max_atomic_width = Some(64);
- // see #36994
- base.exe_allocation_crate = None;
base.min_global_align = Some(16);
Ok(Target {
let mut base = super::linux_base::opts();
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
- base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "sparc64-unknown-linux-gnu".to_string(),
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-mv8plus".to_string());
- base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "sparc-unknown-linux-gnu".to_string(),
// llvm calls this "v9"
base.cpu = "v9".to_string();
base.max_atomic_width = Some(64);
- base.exe_allocation_crate = None;
Ok(Target {
llvm_target: "sparcv9-sun-solaris".to_string(),
base.has_rpath = false;
base.position_independent_executables = false;
base.disable_redzone = true;
- base.exe_allocation_crate = None;
base.stack_probes = true;
Ok(Target {
[dependencies]
alloc = { path = "../liballoc" }
-alloc_system = { path = "../liballoc_system" }
core = { path = "../libcore" }
compiler_builtins = { path = "../rustc/compiler_builtins_shim" }
// except according to those terms.
#![sanitizer_runtime]
-#![feature(alloc_system)]
#![feature(nll)]
#![feature(sanitizer_runtime)]
#![feature(staged_api)]
#![unstable(feature = "sanitizer_runtime_lib",
reason = "internal implementation detail of sanitizers",
issue = "0")]
-
-extern crate alloc_system;
-
-use alloc_system::System;
-
-#[global_allocator]
-static ALLOC: System = System;
[dependencies]
alloc = { path = "../liballoc" }
-alloc_system = { path = "../liballoc_system" }
panic_unwind = { path = "../libpanic_unwind", optional = true }
panic_abort = { path = "../libpanic_abort" }
core = { path = "../libcore" }
rustc_msan = { path = "../librustc_msan" }
rustc_tsan = { path = "../librustc_tsan" }
+[target.'cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))'.dependencies]
+dlmalloc = { path = '../rustc/dlmalloc_shim' }
+
[build-dependencies]
cc = "1.0"
build_helper = { path = "../build_helper" }
use core::sync::atomic::{AtomicPtr, Ordering};
use core::{mem, ptr};
+use core::ptr::NonNull;
use sys_common::util::dumb_print;
#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use alloc_crate::alloc::*;
+/// The default memory allocator provided by the operating system.
+///
+/// This is based on `malloc` on Unix platforms and `HeapAlloc` on Windows,
+/// plus related functions.
+///
+/// This type implements the `GlobalAlloc` trait and Rust programs by default
+/// work as if they had this definition:
+///
+/// ```rust
+/// use std::alloc::System;
+///
+/// #[global_allocator]
+/// static A: System = System;
+///
+/// fn main() {
+/// let a = Box::new(4); // Allocates from the system allocator.
+/// println!("{}", a);
+/// }
+/// ```
+///
+/// You can also define your own wrapper around `System` if you'd like, such as
+/// keeping track of the number of all bytes allocated:
+///
+/// ```rust
+/// use std::alloc::{System, GlobalAlloc, Layout};
+/// use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering::SeqCst};
+///
+/// struct Counter;
+///
+/// static ALLOCATED: AtomicUsize = ATOMIC_USIZE_INIT;
+///
+/// unsafe impl GlobalAlloc for Counter {
+/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+/// let ret = System.alloc(layout);
+/// if !ret.is_null() {
+/// ALLOCATED.fetch_add(layout.size(), SeqCst);
+/// }
+/// return ret
+/// }
+///
+/// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+/// System.dealloc(ptr, layout);
+/// ALLOCATED.fetch_sub(layout.size(), SeqCst);
+/// }
+/// }
+///
+/// #[global_allocator]
+/// static A: Counter = Counter;
+///
+/// fn main() {
+/// println!("allocated bytes before main: {}", ALLOCATED.load(SeqCst));
+/// }
+/// ```
+///
+/// It can also be used directly to allocate memory independently of whatever
+/// global allocator has been selected for a Rust program. For example if a Rust
+/// program opts in to using jemalloc as the global allocator, `System` will
+/// still allocate memory using `malloc` and `HeapAlloc`.
#[stable(feature = "alloc_system_type", since = "1.28.0")]
-#[doc(inline)]
-pub use alloc_system::System;
+#[derive(Debug, Copy, Clone)]
+pub struct System;
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+unsafe impl Alloc for System {
+ #[inline]
+ unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+ NonNull::new(GlobalAlloc::alloc(self, layout)).ok_or(AllocErr)
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
+ NonNull::new(GlobalAlloc::alloc_zeroed(self, layout)).ok_or(AllocErr)
+ }
+
+ #[inline]
+ unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
+ GlobalAlloc::dealloc(self, ptr.as_ptr(), layout)
+ }
+
+ #[inline]
+ unsafe fn realloc(&mut self,
+ ptr: NonNull<u8>,
+ layout: Layout,
+ new_size: usize) -> Result<NonNull<u8>, AllocErr> {
+ NonNull::new(GlobalAlloc::realloc(self, ptr.as_ptr(), layout, new_size)).ok_or(AllocErr)
+ }
+}
static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut());
#![cfg_attr(test, feature(test, update_panic_count))]
#![feature(alloc)]
#![feature(alloc_error_handler)]
-#![feature(alloc_system)]
#![feature(allocator_api)]
#![feature(allocator_internals)]
#![feature(allow_internal_unsafe)]
#[cfg(stage0)]
#[global_allocator]
-static ALLOC: alloc_system::System = alloc_system::System;
+static ALLOC: alloc::System = alloc::System;
// Explicitly import the prelude. The compiler uses this same unstable attribute
// to import the prelude implicitly when building crates that depend on std.
#[allow(unused_imports)] // macros from `alloc` are not used on all platforms
#[macro_use]
extern crate alloc as alloc_crate;
-extern crate alloc_system;
#[doc(masked)]
extern crate libc;
use libc;
use mem;
+#[path = "../unix/alloc.rs"]
+pub mod alloc;
pub mod args;
#[cfg(feature = "backtrace")]
pub mod backtrace;
pub use libc::strlen;
pub use self::rand::hashmap_random_keys;
+#[path = "../unix/alloc.rs"]
+pub mod alloc;
pub mod args;
#[cfg(feature = "backtrace")]
pub mod backtrace;
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use ptr;
+use libc;
+use sys_common::alloc::{MIN_ALIGN, realloc_fallback};
+use alloc::{GlobalAlloc, Layout, System};
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::malloc(layout.size()) as *mut u8
+ } else {
+ #[cfg(target_os = "macos")]
+ {
+ if layout.align() > (1 << 31) {
+ return ptr::null_mut()
+ }
+ }
+ aligned_malloc(&layout)
+ }
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::calloc(layout.size(), 1) as *mut u8
+ } else {
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ libc::free(ptr as *mut libc::c_void)
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ realloc_fallback(self, ptr, layout, new_size)
+ }
+ }
+}
+
+#[cfg(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris"))]
+#[inline]
+unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ // On android we currently target API level 9 which unfortunately
+ // doesn't have the `posix_memalign` API used below. Instead we use
+ // `memalign`, but this unfortunately has the property on some systems
+ // where the memory returned cannot be deallocated by `free`!
+ //
+ // Upon closer inspection, however, this appears to work just fine with
+ // Android, so for this platform we should be fine to call `memalign`
+ // (which is present in API level 9). Some helpful references could
+ // possibly be chromium using memalign [1], attempts at documenting that
+ // memalign + free is ok [2] [3], or the current source of chromium
+ // which still uses memalign on android [4].
+ //
+ // [1]: https://codereview.chromium.org/10796020/
+ // [2]: https://code.google.com/p/android/issues/detail?id=35391
+ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+ // /memory/aligned_memory.cc
+ libc::memalign(layout.align(), layout.size()) as *mut u8
+}
+
+#[cfg(not(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris")))]
+#[inline]
+unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 {
+ ptr::null_mut()
+ } else {
+ out as *mut u8
+ }
+}
#[macro_use]
pub mod weak;
+pub mod alloc;
pub mod args;
pub mod android;
#[cfg(feature = "backtrace")]
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This is an implementation of a global allocator on the wasm32 platform when
+//! emscripten is not in use. In that situation there's no actual runtime for us
+//! to lean on for allocation, so instead we provide our own!
+//!
+//! The wasm32 instruction set has two instructions for getting the current
+//! amount of memory and growing the amount of memory. These instructions are the
+//! foundation on which we're able to build an allocator, so we do so! Note that
+//! the instructions are also pretty "global" and this is the "global" allocator
+//! after all!
+//!
+//! The current allocator here is the `dlmalloc` crate which we've got included
+//! in the rust-lang/rust repository as a submodule. The crate is a port of
+//! dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
+//! for now which is currently technically required (can't link with C yet).
+//!
+//! The crate itself provides a global allocator which on wasm has no
+//! synchronization as there are no threads!
+
+extern crate dlmalloc;
+
+use alloc::{GlobalAlloc, Layout, System};
+
+static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::DLMALLOC_INIT;
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ let _lock = lock::lock();
+ DLMALLOC.malloc(layout.size(), layout.align())
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let _lock = lock::lock();
+ DLMALLOC.calloc(layout.size(), layout.align())
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let _lock = lock::lock();
+ DLMALLOC.free(ptr, layout.size(), layout.align())
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ let _lock = lock::lock();
+ DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size)
+ }
+}
+
+#[cfg(target_feature = "atomics")]
+mod lock {
+ use arch::wasm32;
+ use sync::atomic::{AtomicI32, Ordering::SeqCst};
+
+ static LOCKED: AtomicI32 = AtomicI32::new(0);
+
+ pub struct DropLock;
+
+ pub fn lock() -> DropLock {
+ loop {
+ if LOCKED.swap(1, SeqCst) == 0 {
+ return DropLock
+ }
+ unsafe {
+ let r = wasm32::atomic::wait_i32(
+ &LOCKED as *const AtomicI32 as *mut i32,
+ 1, // expected value
+ -1, // timeout
+ );
+ debug_assert!(r == 0 || r == 1);
+ }
+ }
+ }
+
+ impl Drop for DropLock {
+ fn drop(&mut self) {
+ let r = LOCKED.swap(0, SeqCst);
+ debug_assert_eq!(r, 1);
+ unsafe {
+ wasm32::atomic::wake(
+ &LOCKED as *const AtomicI32 as *mut i32,
+ 1, // only one thread
+ );
+ }
+ }
+ }
+}
+
+#[cfg(not(target_feature = "atomics"))]
+mod lock {
+ #[inline]
+ pub fn lock() {} // no atomics, no threads, that's easy!
+}
use ffi::{OsString, OsStr};
use time::Duration;
+pub mod alloc;
pub mod args;
#[cfg(feature = "backtrace")]
pub mod backtrace;
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use alloc::{GlobalAlloc, Layout, System};
+use sys::c;
+use sys_common::alloc::{MIN_ALIGN, realloc_fallback};
+
+#[repr(C)]
+struct Header(*mut u8);
+
+unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+ &mut *(ptr as *mut Header).offset(-1)
+}
+
+unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+ *get_header(aligned) = Header(ptr);
+ aligned
+}
+
+#[inline]
+unsafe fn allocate_with_flags(layout: Layout, flags: c::DWORD) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN {
+ return c::HeapAlloc(c::GetProcessHeap(), flags, layout.size()) as *mut u8
+ }
+
+ let size = layout.size() + layout.align();
+ let ptr = c::HeapAlloc(c::GetProcessHeap(), flags, size);
+ if ptr.is_null() {
+ ptr as *mut u8
+ } else {
+ align_ptr(ptr as *mut u8, layout.align())
+ }
+}
+
+#[stable(feature = "alloc_system_type", since = "1.28.0")]
+unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, 0)
+ }
+
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, c::HEAP_ZERO_MEMORY)
+ }
+
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ if layout.align() <= MIN_ALIGN {
+ let err = c::HeapFree(c::GetProcessHeap(), 0, ptr as c::LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ c::GetLastError());
+ } else {
+ let header = get_header(ptr);
+ let err = c::HeapFree(c::GetProcessHeap(), 0, header.0 as c::LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ c::GetLastError());
+ }
+ }
+
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN {
+ c::HeapReAlloc(c::GetProcessHeap(), 0, ptr as c::LPVOID, new_size) as *mut u8
+ } else {
+ realloc_fallback(self, ptr, layout, new_size)
+ }
+ }
+}
pub const STACK_SIZE_PARAM_IS_A_RESERVATION: DWORD = 0x00010000;
+pub const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+
#[repr(C)]
#[cfg(not(target_pointer_width = "64"))]
pub struct WSADATA {
#[link_name = "SystemFunction036"]
pub fn RtlGenRandom(RandomBuffer: *mut u8, RandomBufferLength: ULONG) -> BOOLEAN;
+
+ pub fn GetProcessHeap() -> HANDLE;
+ pub fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+ pub fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+ pub fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
}
// Functions that aren't available on every version of Windows that we support,
#[macro_use] pub mod compat;
+pub mod alloc;
pub mod args;
#[cfg(feature = "backtrace")]
pub mod backtrace;
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(dead_code)]
+
+use alloc::{GlobalAlloc, Layout, System};
+use cmp;
+use ptr;
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "powerpc",
+ target_arch = "powerpc64",
+ target_arch = "asmjs",
+ target_arch = "wasm32")))]
+pub const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64")))]
+pub const MIN_ALIGN: usize = 16;
+
+pub unsafe fn realloc_fallback(
+ alloc: &System,
+ ptr: *mut u8,
+ old_layout: Layout,
+ new_size: usize,
+) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+
+ let new_ptr = GlobalAlloc::alloc(alloc, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(alloc, ptr, old_layout);
+ }
+ new_ptr
+}
})
}
+pub mod alloc;
pub mod at_exit_imp;
#[cfg(feature = "backtrace")]
pub mod backtrace;
/// Either a sequence of token trees or a single one. This is used as the representation of the
/// sequence of tokens that make up a matcher.
#[derive(Clone)]
-enum TokenTreeOrTokenTreeSlice<'a> {
+enum TokenTreeOrTokenTreeSlice<'tt> {
Tt(TokenTree),
- TtSeq(&'a [TokenTree]),
+ TtSeq(&'tt [TokenTree]),
}
-impl<'a> TokenTreeOrTokenTreeSlice<'a> {
+impl<'tt> TokenTreeOrTokenTreeSlice<'tt> {
/// Returns the number of constituent top-level token trees of `self` (top-level in that it
/// will not recursively descend into subtrees).
fn len(&self) -> usize {
/// This is used by `inner_parse_loop` to keep track of delimited submatchers that we have
/// descended into.
#[derive(Clone)]
-struct MatcherTtFrame<'a> {
+struct MatcherTtFrame<'tt> {
/// The "parent" matcher that we are descending into.
- elts: TokenTreeOrTokenTreeSlice<'a>,
+ elts: TokenTreeOrTokenTreeSlice<'tt>,
/// The position of the "dot" in `elts` at the time we descended.
idx: usize,
}
type NamedMatchVec = SmallVec<[NamedMatch; 4]>;
-/// Represents a single "position" (aka "matcher position", aka "item"), as described in the module
-/// documentation.
+/// Represents a single "position" (aka "matcher position", aka "item"), as
+/// described in the module documentation.
+///
+/// Here:
+///
+/// - `'root` represents the lifetime of the stack slot that holds the root
+/// `MatcherPos`. As described in `MatcherPosHandle`, the root `MatcherPos`
+/// structure is stored on the stack, but subsequent instances are put into
+/// the heap.
+/// - `'tt` represents the lifetime of the token trees that this matcher
+/// position refers to.
+///
+/// It is important to distinguish these two lifetimes because we have a
+/// `SmallVec<TokenTreeOrTokenTreeSlice<'tt>>` below, and the destructor of
+/// that is considered to possibly access the data from its elements (it lacks
+/// a `#[may_dangle]` attribute). As a result, the compiler needs to know that
+/// all the elements in that `SmallVec` strictly outlive the root stack slot
+/// lifetime. By separating `'tt` from `'root`, we can show that.
#[derive(Clone)]
-struct MatcherPos<'a> {
+struct MatcherPos<'root, 'tt: 'root> {
/// The token or sequence of tokens that make up the matcher
- top_elts: TokenTreeOrTokenTreeSlice<'a>,
+ top_elts: TokenTreeOrTokenTreeSlice<'tt>,
+
/// The position of the "dot" in this matcher
idx: usize,
+
/// The first span of source source that the beginning of this matcher corresponds to. In other
/// words, the token in the source whose span is `sp_open` is matched against the first token of
/// the matcher.
/// in this matcher.
match_hi: usize,
- // Specifically used if we are matching a repetition. If we aren't both should be `None`.
+ // The following fields are used if we are matching a repetition. If we aren't, they should be
+ // `None`.
+
/// The KleeneOp of this sequence if we are in a repetition.
seq_op: Option<quoted::KleeneOp>,
- /// The separator if we are in a repetition
+
+ /// The separator if we are in a repetition.
sep: Option<Token>,
+
/// The "parent" matcher position if we are in a repetition. That is, the matcher position just
/// before we enter the sequence.
- up: Option<MatcherPosHandle<'a>>,
+ up: Option<MatcherPosHandle<'root, 'tt>>,
- // Specifically used to "unzip" token trees. By "unzip", we mean to unwrap the delimiters from
- // a delimited token tree (e.g. something wrapped in `(` `)`) or to get the contents of a doc
- // comment...
+ /// Specifically used to "unzip" token trees. By "unzip", we mean to unwrap the delimiters from
+ /// a delimited token tree (e.g. something wrapped in `(` `)`) or to get the contents of a doc
+ /// comment...
+ ///
/// When matching against matchers with nested delimited submatchers (e.g. `pat ( pat ( .. )
/// pat ) pat`), we need to keep track of the matchers we are descending into. This stack does
/// that where the bottom of the stack is the outermost matcher.
- // Also, throughout the comments, this "descent" is often referred to as "unzipping"...
- stack: Vec<MatcherTtFrame<'a>>,
+ /// Also, throughout the comments, this "descent" is often referred to as "unzipping"...
+ stack: SmallVec<[MatcherTtFrame<'tt>; 1]>,
}
-impl<'a> MatcherPos<'a> {
+impl<'root, 'tt> MatcherPos<'root, 'tt> {
/// Add `m` as a named match for the `idx`-th metavar.
fn push_match(&mut self, idx: usize, m: NamedMatch) {
let matches = Rc::make_mut(&mut self.matches[idx]);
// Therefore, the initial MatcherPos is always allocated on the stack,
// subsequent ones (of which there aren't that many) are allocated on the heap,
// and this type is used to encapsulate both cases.
-enum MatcherPosHandle<'a> {
- Ref(&'a mut MatcherPos<'a>),
- Box(Box<MatcherPos<'a>>),
+enum MatcherPosHandle<'root, 'tt: 'root> {
+ Ref(&'root mut MatcherPos<'root, 'tt>),
+ Box(Box<MatcherPos<'root, 'tt>>),
}
-impl<'a> Clone for MatcherPosHandle<'a> {
+impl<'root, 'tt> Clone for MatcherPosHandle<'root, 'tt> {
// This always produces a new Box.
fn clone(&self) -> Self {
MatcherPosHandle::Box(match *self {
}
}
-impl<'a> Deref for MatcherPosHandle<'a> {
- type Target = MatcherPos<'a>;
+impl<'root, 'tt> Deref for MatcherPosHandle<'root, 'tt> {
+ type Target = MatcherPos<'root, 'tt>;
fn deref(&self) -> &Self::Target {
match *self {
MatcherPosHandle::Ref(ref r) => r,
}
}
-impl<'a> DerefMut for MatcherPosHandle<'a> {
- fn deref_mut(&mut self) -> &mut MatcherPos<'a> {
+impl<'root, 'tt> DerefMut for MatcherPosHandle<'root, 'tt> {
+ fn deref_mut(&mut self) -> &mut MatcherPos<'root, 'tt> {
match *self {
MatcherPosHandle::Ref(ref mut r) => r,
MatcherPosHandle::Box(ref mut b) => b,
/// Generate the top-level matcher position in which the "dot" is before the first token of the
/// matcher `ms` and we are going to start matching at the span `open` in the source.
-fn initial_matcher_pos(ms: &[TokenTree], open: Span) -> MatcherPos {
+fn initial_matcher_pos<'root, 'tt>(ms: &'tt [TokenTree], open: Span) -> MatcherPos<'root, 'tt> {
let match_idx_hi = count_names(ms);
let matches = create_matches(match_idx_hi);
MatcherPos {
match_hi: match_idx_hi,
// Haven't descended into any delimiters, so empty stack
- stack: vec![],
+ stack: smallvec![],
// Haven't descended into any sequences, so both of these are `None`.
seq_op: None,
/// # Returns
///
/// A `ParseResult`. Note that matches are kept track of through the items generated.
-fn inner_parse_loop<'a>(
+fn inner_parse_loop<'root, 'tt>(
sess: &ParseSess,
- cur_items: &mut SmallVec<[MatcherPosHandle<'a>; 1]>,
- next_items: &mut Vec<MatcherPosHandle<'a>>,
- eof_items: &mut SmallVec<[MatcherPosHandle<'a>; 1]>,
- bb_items: &mut SmallVec<[MatcherPosHandle<'a>; 1]>,
+ cur_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
+ next_items: &mut Vec<MatcherPosHandle<'root, 'tt>>,
+ eof_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
+ bb_items: &mut SmallVec<[MatcherPosHandle<'root, 'tt>; 1]>,
token: &Token,
span: syntax_pos::Span,
) -> ParseResult<()> {
let matches = create_matches(item.matches.len());
cur_items.push(MatcherPosHandle::Box(Box::new(MatcherPos {
- stack: vec![],
+ stack: smallvec![],
sep: seq.separator.clone(),
seq_op: Some(seq.op),
idx: 0,
LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
return LLVMRustResult::Failure;
}
-#if LLVM_VERSION_GE(5, 0)
MOrErr->MemberName = sys::path::filename(MOrErr->MemberName);
-#endif
Members.push_back(std::move(*MOrErr));
} else {
Expected<NewArchiveMember> MOrErr =
#include "llvm/Transforms/IPO/FunctionImport.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#include "llvm/LTO/LTO.h"
-#if LLVM_VERSION_LE(4, 0)
-#include "llvm/Object/ModuleSummaryIndexObjectFile.h"
-#endif
#include "llvm-c/Transforms/PassManagerBuilder.h"
}
extern "C"
-bool LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
+void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
LLVMPassManagerBuilderRef PMBR,
LLVMPassManagerRef PMR
) {
unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
- return true;
}
#ifdef LLVM_COMPONENT_X86
Ret->ModuleMap[module->identifier] = mem_buffer;
-#if LLVM_VERSION_GE(5, 0)
if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
LLVMRustSetLastError(toString(std::move(Err)).c_str());
return nullptr;
}
-#else
- Expected<std::unique_ptr<object::ModuleSummaryIndexObjectFile>> ObjOrErr =
- object::ModuleSummaryIndexObjectFile::create(mem_buffer);
- if (!ObjOrErr) {
- LLVMRustSetLastError(toString(ObjOrErr.takeError()).c_str());
- return nullptr;
- }
- auto Index = (*ObjOrErr)->takeIndex();
- Ret->Index.mergeFrom(std::move(Index), i);
-#endif
}
// Collect for each module the list of function it defines (GUID -> Summary)
// combined index
//
// This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
-#if LLVM_VERSION_GE(5, 0)
#if LLVM_VERSION_GE(7, 0)
auto deadIsPrevailing = [&](GlobalValue::GUID G) {
return PrevailingType::Unknown;
Ret->ImportLists,
Ret->ExportLists
);
-#else
- auto DeadSymbols = computeDeadSymbols(Ret->Index, Ret->GUIDPreservedSymbols);
- ComputeCrossModuleImport(
- Ret->Index,
- Ret->ModuleToDefinedGVSummaries,
- Ret->ImportLists,
- Ret->ExportLists,
- &DeadSymbols
- );
-#endif
// Resolve LinkOnce/Weak symbols, this has to be computed early be cause it
// impacts the caching.
StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
for (auto &I : Ret->Index) {
-#if LLVM_VERSION_GE(5, 0)
if (I.second.SummaryList.size() > 1)
PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
-#else
- if (I.second.size() > 1)
- PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second);
-#endif
}
auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
const auto &Prevailing = PrevailingCopy.find(GUID);
// linkage will stay as external, and internal will stay as internal.
std::set<GlobalValue::GUID> ExportedGUIDs;
for (auto &List : Ret->Index) {
-#if LLVM_VERSION_GE(5, 0)
for (auto &GVS: List.second.SummaryList) {
-#else
- for (auto &GVS: List.second) {
-#endif
if (GlobalValue::isLocalLinkage(GVS->linkage()))
continue;
auto GUID = GVS->getOriginalName();
-#if LLVM_VERSION_GE(5, 0)
if (GVS->flags().Live)
-#else
- if (!DeadSymbols.count(GUID))
-#endif
ExportedGUIDs.insert(GUID);
}
}
// except according to those terms.
#include "rustllvm.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
#include "llvm/Support/Signals.h"
-
-#include "llvm/IR/CallSite.h"
-
-#if LLVM_VERSION_GE(5, 0)
#include "llvm/ADT/Optional.h"
-#else
-#include <cstdlib>
-#endif
#include <iostream>
LLVMRustAttribute RustAttr) {
CallSite Call = CallSite(unwrap<Instruction>(Instr));
Attribute Attr = Attribute::get(Call->getContext(), fromRust(RustAttr));
-#if LLVM_VERSION_GE(5, 0)
Call.addAttribute(Index, Attr);
-#else
- AttrBuilder B(Attr);
- Call.setAttributes(Call.getAttributes().addAttributes(
- Call->getContext(), Index,
- AttributeSet::get(Call->getContext(), Index, B)));
-#endif
}
extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr,
CallSite Call = CallSite(unwrap<Instruction>(Instr));
AttrBuilder B;
B.addAlignmentAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
Call.setAttributes(Call.getAttributes().addAttributes(
Call->getContext(), Index, B));
-#else
- Call.setAttributes(Call.getAttributes().addAttributes(
- Call->getContext(), Index,
- AttributeSet::get(Call->getContext(), Index, B)));
-#endif
}
extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
CallSite Call = CallSite(unwrap<Instruction>(Instr));
AttrBuilder B;
B.addDereferenceableAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
Call.setAttributes(Call.getAttributes().addAttributes(
Call->getContext(), Index, B));
-#else
- Call.setAttributes(Call.getAttributes().addAttributes(
- Call->getContext(), Index,
- AttributeSet::get(Call->getContext(), Index, B)));
-#endif
}
extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
CallSite Call = CallSite(unwrap<Instruction>(Instr));
AttrBuilder B;
B.addDereferenceableOrNullAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
Call.setAttributes(Call.getAttributes().addAttributes(
Call->getContext(), Index, B));
-#else
- Call.setAttributes(Call.getAttributes().addAttributes(
- Call->getContext(), Index,
- AttributeSet::get(Call->getContext(), Index, B)));
-#endif
}
extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
Function *A = unwrap<Function>(Fn);
Attribute Attr = Attribute::get(A->getContext(), fromRust(RustAttr));
AttrBuilder B(Attr);
-#if LLVM_VERSION_GE(5, 0)
A->addAttributes(Index, B);
-#else
- A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
}
extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn,
Function *A = unwrap<Function>(Fn);
AttrBuilder B;
B.addAlignmentAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
A->addAttributes(Index, B);
-#else
- A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
}
extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
Function *A = unwrap<Function>(Fn);
AttrBuilder B;
B.addDereferenceableAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
A->addAttributes(Index, B);
-#else
- A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
}
extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
Function *A = unwrap<Function>(Fn);
AttrBuilder B;
B.addDereferenceableOrNullAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
A->addAttributes(Index, B);
-#else
- A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
}
extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
Function *F = unwrap<Function>(Fn);
AttrBuilder B;
B.addAttribute(Name, Value);
-#if LLVM_VERSION_GE(5, 0)
F->addAttributes(Index, B);
-#else
- F->addAttributes(Index, AttributeSet::get(F->getContext(), Index, B));
-#endif
}
extern "C" void LLVMRustRemoveFunctionAttributes(LLVMValueRef Fn,
Attribute Attr = Attribute::get(F->getContext(), fromRust(RustAttr));
AttrBuilder B(Attr);
auto PAL = F->getAttributes();
-#if LLVM_VERSION_GE(5, 0)
auto PALNew = PAL.removeAttributes(F->getContext(), Index, B);
-#else
- const AttributeSet PALNew = PAL.removeAttributes(
- F->getContext(), Index, AttributeSet::get(F->getContext(), Index, B));
-#endif
F->setAttributes(PALNew);
}
CrossThread,
};
-#if LLVM_VERSION_GE(5, 0)
static SyncScope::ID fromRust(LLVMRustSynchronizationScope Scope) {
switch (Scope) {
case LLVMRustSynchronizationScope::SingleThread:
report_fatal_error("bad SynchronizationScope.");
}
}
-#else
-static SynchronizationScope fromRust(LLVMRustSynchronizationScope Scope) {
- switch (Scope) {
- case LLVMRustSynchronizationScope::SingleThread:
- return SingleThread;
- case LLVMRustSynchronizationScope::CrossThread:
- return CrossThread;
- default:
- report_fatal_error("bad SynchronizationScope.");
- }
-}
-#endif
extern "C" LLVMValueRef
LLVMRustBuildAtomicFence(LLVMBuilderRef B, LLVMAtomicOrdering Order,
typedef DIBuilder *LLVMRustDIBuilderRef;
-#if LLVM_VERSION_LT(5, 0)
-typedef struct LLVMOpaqueMetadata *LLVMMetadataRef;
-
-namespace llvm {
-DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef)
-
-inline Metadata **unwrap(LLVMMetadataRef *Vals) {
- return reinterpret_cast<Metadata **>(Vals);
-}
-}
-#endif
-
template <typename DIT> DIT *unwrapDIPtr(LLVMMetadataRef Ref) {
return (DIT *)(Ref ? unwrap<MDNode>(Ref) : nullptr);
}
if (isSet(Flags & LLVMRustDIFlags::FlagRValueReference)) {
Result |= DINode::DIFlags::FlagRValueReference;
}
-#if LLVM_VERSION_LE(4, 0)
- if (isSet(Flags & LLVMRustDIFlags::FlagExternalTypeRef)) {
- Result |= DINode::DIFlags::FlagExternalTypeRef;
- }
-#endif
if (isSet(Flags & LLVMRustDIFlags::FlagIntroducedVirtual)) {
Result |= DINode::DIFlags::FlagIntroducedVirtual;
}
uint64_t SizeInBits, uint32_t AlignInBits, const char *Name) {
return wrap(Builder->createPointerType(unwrapDI<DIType>(PointeeTy),
SizeInBits, AlignInBits,
-#if LLVM_VERSION_GE(5, 0)
/* DWARFAddressSpace */ None,
-#endif
Name));
}
LLVMMetadataRef Scope, const char *Name,
LLVMMetadataRef File, unsigned LineNo) {
return wrap(Builder->createNameSpace(
- unwrapDI<DIDescriptor>(Scope), Name
-#if LLVM_VERSION_LT(5, 0)
- ,
- unwrapDI<DIFile>(File), LineNo
-#endif
- ,
+ unwrapDI<DIDescriptor>(Scope), Name,
false // ExportSymbols (only relevant for C++ anonymous namespaces)
));
}
}
extern "C" int64_t LLVMRustDIBuilderCreateOpPlusUconst() {
-#if LLVM_VERSION_GE(5, 0)
return dwarf::DW_OP_plus_uconst;
-#else
- // older LLVM used `plus` to behave like `plus_uconst`.
- return dwarf::DW_OP_plus;
-#endif
}
extern "C" void LLVMRustWriteTypeToString(LLVMTypeRef Ty, RustStringRef Str) {
*FunctionOut = wrap(&Opt->getFunction());
RawRustStringOstream FilenameOS(FilenameOut);
-#if LLVM_VERSION_GE(5,0)
DiagnosticLocation loc = Opt->getLocation();
if (loc.isValid()) {
*Line = loc.getLine();
*Column = loc.getColumn();
FilenameOS << loc.getFilename();
}
-#else
- const DebugLoc &loc = Opt->getDebugLoc();
- if (loc) {
- *Line = loc.getLine();
- *Column = loc.getCol();
- FilenameOS << cast<DIScope>(loc.getScope())->getFilename();
- }
-#endif
RawRustStringOstream MessageOS(MessageOut);
MessageOS << Opt->getMsg();
}
// Vector reductions:
-#if LLVM_VERSION_GE(5, 0)
extern "C" LLVMValueRef
LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc),unwrap(Src)));
return wrap(unwrap(B)->CreateFPMaxReduce(unwrap(Src), NoNaN));
}
-#else
-
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef, LLVMValueRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceFMul(LLVMBuilderRef, LLVMValueRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceAdd(LLVMBuilderRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceMul(LLVMBuilderRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceAnd(LLVMBuilderRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceOr(LLVMBuilderRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceXor(LLVMBuilderRef, LLVMValueRef) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceMin(LLVMBuilderRef, LLVMValueRef, bool) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceMax(LLVMBuilderRef, LLVMValueRef, bool) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceFMin(LLVMBuilderRef, LLVMValueRef, bool) {
- return nullptr;
-}
-extern "C" LLVMValueRef
-LLVMRustBuildVectorReduceFMax(LLVMBuilderRef, LLVMValueRef, bool) {
- return nullptr;
-}
-#endif
-
#if LLVM_VERSION_GE(6, 0)
extern "C" LLVMValueRef
LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
--- /dev/null
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// This test depends on a patch that was committed to upstream LLVM
+// before 7.0, then backported to the Rust LLVM fork. It tests that
+// optimized enum debug info accurately reflects the enum layout.
+
+// ignore-tidy-linelength
+// ignore-windows
+// min-system-llvm-version 7.0
+
+// compile-flags: -g -C no-prepopulate-passes
+
+// The FileCheck patterns below pin the DWARF emitted for
+// `BaseKind<Declaration>`: a 32-bit DW_TAG_variant_part (presumably the
+// whole enum fits in the `NonZeroU32` niche of the `Named` variant --
+// confirm against the layout code), with the `Placeholder` variant keyed
+// by discriminant value 4294967295 and `Error` by 0.
+// CHECK: {{.*}}DICompositeType{{.*}}tag: DW_TAG_variant_part,{{.*}}size: 32,{{.*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "Placeholder",{{.*}}extraData: i64 4294967295{{[,)].*}}
+// CHECK: {{.*}}DIDerivedType{{.*}}tag: DW_TAG_member,{{.*}}name: "Error",{{.*}}extraData: i64 0{{[,)].*}}
+
+#![feature(never_type)]
+#![feature(nll)]
+
+// `NonZeroU32` has a niche (zero is never a valid bit pattern), which the
+// layout of `BaseKind` below can exploit.
+#[derive(Copy, Clone)]
+pub struct Entity {
+ private: std::num::NonZeroU32,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub struct Declaration;
+
+// For `Declaration`, the associated `Placeholder` type is `!`, so the
+// `BaseKind::Placeholder` variant is uninhabited in `BaseKind<Declaration>`.
+impl TypeFamily for Declaration {
+ type Base = Base;
+ type Placeholder = !;
+
+ fn intern_base_data(_: BaseKind<Self>) {}
+}
+
+#[derive(Copy, Clone)]
+pub struct Base;
+
+pub trait TypeFamily: Copy + 'static {
+ type Base: Copy;
+ type Placeholder: Copy;
+
+ fn intern_base_data(_: BaseKind<Self>);
+}
+
+// The generic enum whose optimized layout the CHECK lines above inspect.
+#[derive(Copy, Clone)]
+pub enum BaseKind<F: TypeFamily> {
+ Named(Entity),
+ Placeholder(F::Placeholder),
+ Error,
+}
+
+pub fn main() {
+ // Instantiate the enum so its debug info is actually emitted.
+ let x = BaseKind::Error::<Declaration>;
+ let y = 7;
+}
//!
//! - core may not have platform-specific code
//! - libcompiler_builtins may have platform-specific code
-//! - liballoc_system may have platform-specific code
//! - libpanic_abort may have platform-specific code
//! - libpanic_unwind may have platform-specific code
//! - libunwind may have platform-specific code
// Paths that may contain platform-specific code
const EXCEPTION_PATHS: &[&str] = &[
// std crates
- "src/liballoc_system",
"src/libcompiler_builtins",
"src/liblibc",
"src/libpanic_abort",