1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![cfg_attr(stage0, feature(custom_attribute))]
12 #![crate_name = "alloc_jemalloc"]
13 #![crate_type = "rlib"]
16 #![cfg_attr(not(stage0), allocator)]
17 #![unstable(feature = "alloc_jemalloc",
18 reason = "this library is unlikely to be stabilized in its current \
21 #![feature(allocator)]
24 #![feature(staged_api)]
28 use libc::{c_int, c_void, size_t};
30 // Linkage directives to pull in jemalloc and its dependencies.
32 // On some platforms we need to be sure to link in `pthread` which jemalloc
33 // depends on, and specifically on android we need to also link to libgcc.
34 // Currently jemalloc is compiled with gcc which will generate calls to
35 // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
36 // libcompiler-rt), so link that in to get that support.
37 #[link(name = "jemalloc", kind = "static")]
38 #[cfg_attr(target_os = "android", link(name = "gcc"))]
39 #[cfg_attr(all(not(windows),
40 not(target_os = "android"),
41 not(target_env = "musl")),
42 link(name = "pthread"))]
44 fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
45 fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
46 fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t;
47 fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
48 fn je_nallocx(size: size_t, flags: c_int) -> size_t;
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
//
// NOTE(review): `target_arch = "mips"` restored — the original list covered
// `mipsel` but not big-endian `mips`, which would leave MIN_ALIGN undefined
// (a compile error) on that target.
#[cfg(all(any(target_arch = "arm",
              target_arch = "mips",
              target_arch = "mipsel",
              target_arch = "powerpc")))]
const MIN_ALIGN: usize = 8;
#[cfg(all(any(target_arch = "x86",
              target_arch = "x86_64",
              target_arch = "aarch64")))]
const MIN_ALIGN: usize = 16;
// Equivalent of jemalloc's MALLOCX_ALIGN(a) macro: encodes a power-of-two
// alignment as its base-2 logarithm in the low bits of the flags word.
// Assumes `a` is a nonzero power of two (as alignments are); restored the
// closing brace that was missing from the truncated original.
fn mallocx_align(a: usize) -> c_int {
    a.trailing_zeros() as c_int
}
69 fn align_to_flags(align: usize) -> c_int {
70 if align <= MIN_ALIGN {
78 pub extern "C" fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
79 let flags = align_to_flags(align);
80 unsafe { je_mallocx(size as size_t, flags) as *mut u8 }
84 pub extern "C" fn __rust_reallocate(ptr: *mut u8,
89 let flags = align_to_flags(align);
90 unsafe { je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
94 pub extern "C" fn __rust_reallocate_inplace(ptr: *mut u8,
99 let flags = align_to_flags(align);
100 unsafe { je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
104 pub extern "C" fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
105 let flags = align_to_flags(align);
106 unsafe { je_sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
110 pub extern "C" fn __rust_usable_size(size: usize, align: usize) -> usize {
111 let flags = align_to_flags(align);
112 unsafe { je_nallocx(size as size_t, flags) as usize }