// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::ptr;
use std::sync::atomics;
use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
              MapNonStandardFlags, getenv};

use libc;

/// A task's stack. The name "Stack" is a vestige of segmented stacks.
pub struct Stack {
    buf: Option<MemoryMap>,
    min_size: uint,
    valgrind_id: libc::c_uint,
}

// Try to use MAP_STACK on platforms that support it (it's what we're doing
// anyway), but some platforms don't support it at all. For example, it appears
// that there's a bug in freebsd that MAP_STACK implies MAP_FIXED (so it always
// fails): http://lists.freebsd.org/pipermail/freebsd-bugs/2011-July/044840.html
//
// DragonFly BSD also seems to suffer from the same problem. When MAP_STACK is
// used, it returns the same `ptr` multiple times.
#[cfg(not(windows), not(target_os = "freebsd"), not(target_os = "dragonfly"))]
static STACK_FLAGS: libc::c_int = libc::MAP_STACK | libc::MAP_PRIVATE |
                                  libc::MAP_ANON;
#[cfg(target_os = "freebsd")]
#[cfg(target_os = "dragonfly")]
static STACK_FLAGS: libc::c_int = libc::MAP_PRIVATE | libc::MAP_ANON;
#[cfg(windows)]
static STACK_FLAGS: libc::c_int = 0;

impl Stack {
    /// Allocate a new stack of `size`. If size = 0, this will fail. Use
    /// `dummy_stack` if you want a zero-sized stack.
    pub fn new(size: uint) -> Stack {
        // Map in a stack. Eventually we might be able to handle stack
        // allocation failure, which would fail to spawn the task. But there's
        // not many sensible things to do on OOM. Failure seems fine (and is
        // what the old stack allocation did).
        let stack = match MemoryMap::new(size, [MapReadable, MapWritable,
                                                MapNonStandardFlags(STACK_FLAGS)]) {
            Ok(map) => map,
            Err(e) => fail!("mmap for stack of size {} failed: {}", size, e)
        };

        // Change the last page to be inaccessible. This is to provide safety;
        // when an FFI function overflows it will (hopefully) hit this guard
        // page. It isn't guaranteed, but that's why FFI is unsafe. buf.data is
        // guaranteed to be aligned properly.
        if !protect_last_page(&stack) {
            fail!("Could not memory-protect guard page. stack={}, errno={}",
                  stack.data(), errno());
        }

        let mut stk = Stack {
            buf: Some(stack),
            min_size: size,
            valgrind_id: 0
        };

        // FIXME: Using the FFI to call a C macro. Slow
        stk.valgrind_id = unsafe {
            rust_valgrind_stack_register(stk.start(), stk.end())
        };
        return stk;
    }

    /// Create a 0-length stack which starts (and ends) at 0.
    pub unsafe fn dummy_stack() -> Stack {
        Stack { buf: None, min_size: 0, valgrind_id: 0 }
    }

    /// Point to the low end of the allocated stack
    pub fn start(&self) -> *const uint {
        self.buf.as_ref().map(|m| m.data() as *const uint)
            .unwrap_or(ptr::null())
    }

    /// Point one uint beyond the high end of the allocated stack
    pub fn end(&self) -> *const uint {
        self.buf.as_ref().map(|buf| unsafe {
            buf.data().offset(buf.len() as int) as *const uint
        }).unwrap_or(ptr::null())
    }
}

#[cfg(unix)]
fn protect_last_page(stack: &MemoryMap) -> bool {
    unsafe {
        // This may seem backwards: the start of the segment is the last page?
        // Yes! The stack grows from higher addresses (the end of the allocated
        // block) to lower addresses (the start of the allocated block).
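        //
        // Rough picture of the mapping:
        //
        //   low address                                       high address
        //   +------------+-------------------------------------------+
        //   | guard page |  usable stack (grows toward low address)   |
        //   +------------+-------------------------------------------+
        //   ^ stack.data()                        stack.data() + len ^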
        let last_page = stack.data() as *mut libc::c_void;
        libc::mprotect(last_page, page_size() as libc::size_t,
                       libc::PROT_NONE) != -1
    }
}

#[cfg(windows)]
fn protect_last_page(stack: &MemoryMap) -> bool {
    unsafe {
        let last_page = stack.data() as *mut libc::c_void;
        let mut old_prot: libc::DWORD = 0;
        libc::VirtualProtect(last_page, page_size() as libc::SIZE_T,
                             libc::PAGE_NOACCESS,
                             &mut old_prot as libc::LPDWORD) != 0
    }
}

impl Drop for Stack {
    fn drop(&mut self) {
        // FIXME: Using the FFI to call a C macro. Slow
        unsafe { rust_valgrind_stack_deregister(self.valgrind_id); }
    }
}

pub struct StackPool {
    // Ideally this would be some data structure that preserved ordering on
    // Stack.min_size.
    stacks: Vec<Stack>,
}

impl StackPool {
    pub fn new() -> StackPool {
        StackPool { stacks: vec![] }
    }

    pub fn take_stack(&mut self, min_size: uint) -> Stack {
        // Ideally this would be a binary search
        match self.stacks.iter().position(|s| min_size <= s.min_size) {
            Some(idx) => self.stacks.swap_remove(idx).unwrap(),
            None => Stack::new(min_size)
        }
    }

    pub fn give_stack(&mut self, stack: Stack) {
        if self.stacks.len() <= max_cached_stacks() {
            self.stacks.push(stack)
        }
    }
}
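
/// Reads the stack-cache limit once from the `RUST_MAX_CACHED_STACKS`
/// environment variable (defaulting to 10) and memoizes it in a process-wide
/// atomic so later calls skip the environment lookup.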
fn max_cached_stacks() -> uint {
    static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
    match unsafe { AMT.load(atomics::SeqCst) } {
        0 => {}
        n => return n - 1,
    }
    let amt = getenv("RUST_MAX_CACHED_STACKS").and_then(|s| from_str(s.as_slice()));
    // This default corresponds to 20M of cache per scheduler (at the
    // default stack size).
    let amt = amt.unwrap_or(10);
    // 0 is our sentinel value, so ensure that we'll never see 0 after
    // initialization has run
    unsafe { AMT.store(amt + 1, atomics::SeqCst); }
    return amt;
}

extern {
    fn rust_valgrind_stack_register(start: *const libc::uintptr_t,
                                    end: *const libc::uintptr_t) -> libc::c_uint;
    fn rust_valgrind_stack_deregister(id: libc::c_uint);
}

#[cfg(test)]
mod tests {
    use super::StackPool;

    #[test]
    fn stack_pool_caches() {
        let mut p = StackPool::new();
        let s = p.take_stack(10);
        p.give_stack(s);
        let s = p.take_stack(4);
        assert_eq!(s.min_size, 10);
        p.give_stack(s);
        let s = p.take_stack(14);
        assert_eq!(s.min_size, 14);
        p.give_stack(s);
    }

    #[test]
    fn stack_pool_caches_exact() {
        let mut p = StackPool::new();
        let mut s = p.take_stack(10);
        s.valgrind_id = 100;
        p.give_stack(s);

        let s = p.take_stack(10);
        assert_eq!(s.min_size, 10);
        assert_eq!(s.valgrind_id, 100);
    }
}
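
// Illustrative usage sketch: allocates a stack directly and checks that the
// mapping spans at least the requested size. This assumes MemoryMap rounds
// the requested length up to page granularity, never down; the four-page
// size below is an arbitrary choice for the example.
#[cfg(test)]
mod usage_sketch {
    use super::Stack;
    use std::os::page_size;

    #[test]
    fn stack_spans_requested_size() {
        let size = page_size() * 4;
        let stack = Stack::new(size);
        // end() points one past the high end, start() at the low end, so the
        // difference is the byte length of the mapping.
        let len = stack.end() as uint - stack.start() as uint;
        assert!(len >= size);
    }
}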