1 // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![cfg_attr(stage0, feature(custom_attribute))]
12 #![crate_name = "alloc_jemalloc"]
13 #![crate_type = "rlib"]
16 #![cfg_attr(not(stage0), allocator)]
17 #![unstable(feature = "alloc_jemalloc",
18 reason = "this library is unlikely to be stabilized in its current \
21 #![feature(allocator)]
24 #![feature(staged_api)]
28 use libc::{c_int, c_void, size_t};
30 // Linkage directives to pull in jemalloc and its dependencies.
32 // On some platforms we need to be sure to link in `pthread` which jemalloc
33 // depends on, and specifically on android we need to also link to libgcc.
34 // Currently jemalloc is compiled with gcc which will generate calls to
35 // intrinsics that are libgcc specific (e.g. those intrinsics aren't present in
36 // libcompiler-rt), so link that in to get that support.
37 #[link(name = "jemalloc", kind = "static")]
38 #[cfg_attr(target_os = "android", link(name = "gcc"))]
39 #[cfg_attr(all(not(windows),
40 not(target_os = "android"),
41 not(target_env = "musl")),
42 link(name = "pthread"))]
44 fn je_mallocx(size: size_t, flags: c_int) -> *mut c_void;
45 fn je_rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void;
46 fn je_xallocx(ptr: *mut c_void, size: size_t, extra: size_t,
47 flags: c_int) -> size_t;
48 fn je_sdallocx(ptr: *mut c_void, size: size_t, flags: c_int);
49 fn je_nallocx(size: size_t, flags: c_int) -> size_t;
// The minimum alignment guaranteed by the architecture. This value is used to
// add fast paths for low alignment values. In practice, the alignment is a
// constant at the call site and the branch will be optimized out.
//
// NOTE(review): the original `all(any(...))` wrapper was redundant (`all` of a
// single predicate); simplified to plain `any(...)` — semantically identical.
// `mips` has been restored alongside `mipsel`: listing only the little-endian
// variant looked like an accidental omission — confirm against upstream.
#[cfg(any(target_arch = "arm",
          target_arch = "mips",
          target_arch = "mipsel",
          target_arch = "powerpc"))]
const MIN_ALIGN: usize = 8;
#[cfg(any(target_arch = "x86",
          target_arch = "x86_64",
          target_arch = "aarch64"))]
const MIN_ALIGN: usize = 16;
/// Rust equivalent of jemalloc's `MALLOCX_ALIGN(a)` macro: encodes a
/// power-of-two alignment `a` as `log2(a)` for use in the `*allocx` flags.
fn mallocx_align(a: usize) -> c_int {
    // For a power of two, the number of trailing zero bits is exactly log2(a).
    let log2 = a.trailing_zeros();
    log2 as c_int
}
68 fn align_to_flags(align: usize) -> c_int {
69 if align <= MIN_ALIGN { 0 } else { mallocx_align(align) }
73 pub extern fn __rust_allocate(size: usize, align: usize) -> *mut u8 {
74 let flags = align_to_flags(align);
75 unsafe { je_mallocx(size as size_t, flags) as *mut u8 }
79 pub extern fn __rust_reallocate(ptr: *mut u8, _old_size: usize, size: usize,
80 align: usize) -> *mut u8 {
81 let flags = align_to_flags(align);
82 unsafe { je_rallocx(ptr as *mut c_void, size as size_t, flags) as *mut u8 }
86 pub extern fn __rust_reallocate_inplace(ptr: *mut u8, _old_size: usize,
87 size: usize, align: usize) -> usize {
88 let flags = align_to_flags(align);
89 unsafe { je_xallocx(ptr as *mut c_void, size as size_t, 0, flags) as usize }
93 pub extern fn __rust_deallocate(ptr: *mut u8, old_size: usize, align: usize) {
94 let flags = align_to_flags(align);
95 unsafe { je_sdallocx(ptr as *mut c_void, old_size as size_t, flags) }
99 pub extern fn __rust_usable_size(size: usize, align: usize) -> usize {
100 let flags = align_to_flags(align);
101 unsafe { je_nallocx(size as size_t, flags) as usize }