// ptr::mut_offset: element-wise pointer arithmetic — returns
// `ptr + count * size_of::<T>()` as a raw mut pointer.
// NOTE(review): the old header said "positive ints only", but `count` is a
// signed `int`, so negative offsets look representable here — confirm intent.
#[inline]
pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
- use std::sys::size_of;
+ use mem::size_of;
(ptr as int + count * (size_of::<T>() as int)) as *mut T
}
+
+ /// Records the stack bounds (`stack_lo` = lowest address, `stack_hi` =
+ /// highest address) for the current task: the morestack limit is set via
+ /// `record_sp_limit`, and on win64 the TIB stack range is updated as well.
+ #[inline(always)]
+ pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+ // When the old runtime had segmented stacks, it used a calculation that was
+ // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
+ // symbol resolution, llvm function calls, etc. In theory this red zone
+ // value is 0, but it matters far less when we have gigantic stacks because
+ // we don't need to be so exact about our stack budget. The "fudge factor"
+ // was because LLVM doesn't emit a stack check for functions < 256 bytes in
+ // size. Again though, we have giant stacks, so we round all these
+ // calculations up to the nice round number of 20k.
+ record_sp_limit(stack_lo + RED_ZONE);
+
+ return target_record_stack_bounds(stack_lo, stack_hi);
+
+ #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
+ unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
+ #[cfg(windows, target_arch = "x86_64")] #[inline(always)]
+ unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+ // Windows compiles C functions which may check the stack bounds. This
+ // means that if we want to perform valid FFI on windows, then we need
+ // to ensure that the stack bounds are what they truly are for this
+ // task. More info can be found at:
+ // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
+ //
+ // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
+ // NB: TIB StackBase (%gs:0x08) is the HIGH end of the stack and
+ // StackLimit (%gs:0x10) is the LOW end, so stack_hi must go to 0x08
+ // and stack_lo to 0x10 (the previous code had these two swapped,
+ // contradicting the comment above and the Win64 TIB layout).
+ asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
+ asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
+ }
+ }
+
+ /// Records the current limit of the stack as specified by `limit`.
+ ///
+ /// This is stored in an OS-dependent location, likely inside of the thread
+ /// local storage. The location that the limit is stored is a pre-ordained
+ /// location because it's where LLVM has emitted code to check.
+ ///
+ /// Note that this cannot be called under normal circumstances. This function is
+ /// changing the stack limit, so upon returning any further function calls will
+ /// possibly be triggering the morestack logic if you're not careful.
+ ///
+ /// Also note that this and all of the inside functions are all flagged as
+ /// "inline(always)" because they're messing around with the stack limits. This
+ /// would be unfortunate for the functions themselves to trigger a morestack
+ /// invocation (if they were an actual function call).
+ #[inline(always)]
+ pub unsafe fn record_sp_limit(limit: uint) {
+ // Dispatch to the per-target implementation; on unsupported targets there
+ // is simply no matching cfg and this fails to compile, by design.
+ return target_record_sp_limit(limit);
+
+ // x86-64
+ #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // Address computed as %gs base + 0x60 + 90*8 — presumably pthread TLS
+ // slot 90 on Darwin; must match the offset LLVM's stack check reads.
+ asm!("movq $$0x60+90*8, %rsi
+ movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
+ }
+ #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // %fs:112 — the TCB word that __morestack-checked code compares against.
+ asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
+ }
+ #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
+ // store this inside of the "arbitrary data slot", but double the size
+ // because this is 64 bit instead of 32 bit
+ asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
+ }
+ #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
+ }
+
+ // x86
+ #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // 32-bit Darwin analogue of the x86-64 case: %gs base + 0x48 + 90*4.
+ asm!("movl $$0x48+90*4, %eax
+ movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
+ }
+ // Two cfg attributes on one item mean OR: linux x86 and freebsd x86 share
+ // the same TLS offset.
+ #[cfg(target_arch = "x86", target_os = "linux")]
+ #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
+ }
+ #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
+ // store this inside of the "arbitrary data slot"
+ asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
+ }
+
+ // mips, arm - Some brave soul can port these to inline asm, but it's over
+ // my head personally
+ #[cfg(target_arch = "mips")]
+ #[cfg(target_arch = "arm")] #[inline(always)]
+ unsafe fn target_record_sp_limit(limit: uint) {
+ // NOTE(review): the inner extern `record_sp_limit` shadows this module's
+ // public `record_sp_limit`; the call below resolves to the extern C
+ // symbol, not a recursive call — confirm the C runtime provides it.
+ return record_sp_limit(limit as *c_void);
+ extern {
+ #[rust_stack]
+ fn record_sp_limit(limit: *c_void);
+ }
+ }
+ }
+
+ /// The counterpart of the function above, this function will fetch the current
+ /// stack limit stored in TLS.
+ ///
+ /// Note that all of these functions are meant to be exact counterparts of their
+ /// brethren above, except that the operands are reversed.
+ ///
+ /// As with the setter, this function does not have a __morestack header and can
+ /// therefore be called in a "we're out of stack" situation.
+ #[inline(always)]
+ // NOTE: after the next snapshot, can remove the initialization before inline
+ // assembly due to an improvement in how it's handled, then this specific
+ // allow directive should get removed.
+ #[allow(dead_assignment)]
+ pub unsafe fn get_sp_limit() -> uint {
+ return target_get_sp_limit();
+
+ // x86-64
+ #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ // The `= 0` init is dead (overwritten by the asm output) — kept only
+ // for the snapshot-compiler reason in the NOTE above.
+ let mut limit: uint = 0;
+ // Same TLS address the setter writes: %gs base + 0x60 + 90*8.
+ asm!("movq $$0x60+90*8, %rsi
+ movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
+ return limit;
+ }
+ #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ let mut limit: uint = 0;
+ asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
+ return limit;
+ }
+ #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ // Reads the win64 "arbitrary data slot" written by the setter.
+ let mut limit: uint = 0;
+ asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
+ return limit;
+ }
+ #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ let mut limit: uint = 0;
+ asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
+ return limit;
+ }
+
+ // x86
+ #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ let mut limit: uint = 0;
+ asm!("movl $$0x48+90*4, %eax
+ movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
+ return limit;
+ }
+ // Two cfg attributes mean OR: linux x86 and freebsd x86 share the offset.
+ #[cfg(target_arch = "x86", target_os = "linux")]
+ #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ let mut limit: uint = 0;
+ asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
+ return limit;
+ }
+ #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ let mut limit: uint = 0;
+ asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
+ return limit;
+ }
+
+ // mips, arm - Some brave soul can port these to inline asm, but it's over
+ // my head personally
+ #[cfg(target_arch = "mips")]
+ #[cfg(target_arch = "arm")] #[inline(always)]
+ unsafe fn target_get_sp_limit() -> uint {
+ // NOTE(review): as in the setter, the call resolves to the shadowing
+ // extern C symbol below, not this module's `get_sp_limit` — confirm
+ // the C runtime provides it.
+ return get_sp_limit() as uint;
+ extern {
+ #[rust_stack]
+ fn get_sp_limit() -> *c_void;
+ }
+ }
+ }