1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 #![feature(compiler_builtins)]
14 #![unstable(feature = "compiler_builtins_lib",
15 reason = "internal implementation detail of rustc right now",
17 #![crate_name = "compiler_builtins"]
18 #![crate_type = "rlib"]
19 #![allow(unused_features)]
20 #![feature(staged_api, core_intrinsics, repr_simd,
21 i128_type, core_float, abi_unadjusted, associated_consts)]
22 #![allow(non_camel_case_types, unused_variables, unused_imports)]
24 #[cfg(any(target_pointer_width="32", target_pointer_width="16", target_os="windows",
25 target_arch="mips64"))]
28 #![allow(unused_comparisons)]
30 use core::intrinsics::unchecked_div;
31 use core::intrinsics::unchecked_rem;
// Fragment of the shift-left helper macro: builds a $ty (a two-half-word
// integer implementing LargeInt) shifted left by `b`, using only wrapping ops.
// NOTE(review): the `macro_rules!` header, the `else` keyword, and the closing
// braces fall on lines elided from this listing.
35 ($a:expr, $b:expr, $ty:ty) => {{
36 let (a, b) = ($a, $b);
// Total bit width of $ty, and the width of one half-word.
37 let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
38 let half_bits = bits >> 1;
// Shift amount >= half the width: the low half moves entirely into the
// high half; the new low half is zero.
39 if b & half_bits != 0 {
40 <$ty>::from_parts(0, a.low().wrapping_shl(
41 b.wrapping_sub(half_bits) as u32))
// Shift amount < half the width: shift both halves; the high half also
// receives the bits spilled out of the low half (the `|`-combine with
// `a.low()` sits on an elided line between these two).
45 <$ty>::from_parts(a.low().wrapping_shl(b as u32),
46 a.high().wrapping_shl(b as u32)
48 .wrapping_shr(half_bits.wrapping_sub(b) as u32))
// `__ashlti3`: the C-ABI symbol LLVM emits for 128-bit shift-left.
// The body is elided from this listing; presumably it expands the shift-left
// macro above for u128 — TODO confirm against the full source.
53 #[export_name="__ashlti3"]
54 pub extern "C" fn shl(a: u128, b: u128) -> u128 {
// Fragment of the arithmetic shift-right helper macro: sign-extending shift
// of a two-half-word signed $ty. NOTE(review): the `macro_rules!` header,
// the `else`, and the closing braces are on elided lines.
59 ($a: expr, $b: expr, $ty:ty) => {{
60 let (a, b) = ($a, $b);
61 let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
62 let half_bits = bits >> 1;
// Shift >= half width: low half becomes the (arithmetically) shifted high
// half; high half becomes pure sign fill (shift by half_bits - 1).
63 if b & half_bits != 0 {
64 <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32)
65 as <$ty as LargeInt>::LowHalf,
66 a.high().wrapping_shr(half_bits.wrapping_sub(1) as u32))
// Shift < half width: low half combines bits shifted down from the high
// half (reinterpreted unsigned so the shl is logical) with the shifted
// low half; high half is arithmetically shifted, preserving the sign.
70 let high_unsigned = a.high() as <$ty as LargeInt>::LowHalf;
71 <$ty>::from_parts(high_unsigned.wrapping_shl(half_bits.wrapping_sub(b) as u32)
72 | a.low().wrapping_shr(b as u32),
73 a.high().wrapping_shr(b as u32))
// `__ashrti3`: C-ABI symbol for 128-bit arithmetic (sign-extending)
// shift-right. Body elided; presumably expands the ashr macro above for
// i128 — TODO confirm against the full source.
78 #[export_name="__ashrti3"]
79 pub extern "C" fn shr(a: i128, b: i128) -> i128 {
// Fragment of the logical shift-right helper macro (zero-fill, for the
// unsigned $ty). NOTE(review): macro header, `else`, and closing braces are
// on elided lines.
84 ($a: expr, $b: expr, $ty:ty) => {{
85 let (a, b) = ($a, $b);
86 let bits = (::core::mem::size_of::<$ty>() * 8) as $ty;
87 let half_bits = bits >> 1;
// Shift >= half width: low half is the shifted-down high half; high half
// is zero (logical shift fills with zeros).
88 if b & half_bits != 0 {
89 <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32), 0)
// Shift < half width: low half combines spill from the high half with the
// shifted low half; high half is shifted down.
93 <$ty>::from_parts(a.high().wrapping_shl(half_bits.wrapping_sub(b) as u32)
94 | a.low().wrapping_shr(b as u32),
95 a.high().wrapping_shr(b as u32))
// `__lshrti3`: C-ABI symbol for 128-bit logical (zero-fill) shift-right.
// Body elided; presumably expands the lshr macro above for u128 — TODO
// confirm against the full source.
101 #[export_name="__lshrti3"]
102 pub extern "C" fn lshr(a: u128, b: u128) -> u128 {
// Unsigned 128-bit division: returns n / d and, when `rem` is non-null,
// writes n % d through it. Structured as a series of special cases followed
// by a bit-by-bit restoring long division. NOTE(review): this listing elides
// most of the guard `if`s, the null checks on `rem`, and the loop/brace
// structure; comments below describe only what the visible lines do.
106 pub extern "C" fn u128_div_mod(n: u128, d: u128, rem: *mut u128) -> u128 {
107 // Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide
109 // special cases, X is unknown, K != 0
// Case (guard elided): both operands fit in the low 64 bits — delegate to
// the native 64-bit unchecked div/rem.
116 *rem = u128::from(unchecked_rem(n.low(), d.low()));
118 return u128::from(unchecked_div(n.low(), d.low()));
// Case (guard elided): divide the high halves with a 64-bit divisor.
140 *rem = u128::from(unchecked_rem(n.high(), d.low()));
142 return u128::from(unchecked_div(n.high(), d.low()));
// Case (guard elided): high-half by high-half division; the remainder's
// low half is zero (from_parts(0, ...) puts the rem value in the low word
// position per LargeInt::from_parts(low, high) — the first argument is low).
150 *rem = u128::from_parts(0, unchecked_rem(n.high(), d.high()));
152 return u128::from(unchecked_div(n.high(), d.high()));
// Fast path: divisor's high half is a power of two — remainder by masking,
// quotient by shifting the dividend's high half.
159 if d.high().is_power_of_two() {
161 *rem = u128::from_parts(n.low(),
162 n.high() & (d.high().wrapping_sub(1)));
164 return u128::from(n.high().wrapping_shr(d.high().trailing_zeros()));
// Normalize: shift count from the difference in leading zeros; seed the
// quotient accumulator q with n's bits shifted up and r with n shifted down.
170 sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
180 sr = sr.wrapping_add(1);
182 // 1 <= sr <= u64::bits() - 1
183 q = n.wrapping_shl(64u32.wrapping_sub(sr));
184 r = n.wrapping_shr(sr);
// Fast path: divisor fits in 64 bits and is a power of two — mask and shift.
190 if d.low().is_power_of_two() {
192 *rem = u128::from(n.low() & (d.low().wrapping_sub(1)));
198 let sr = d.low().trailing_zeros();
199 return n.wrapping_shr(sr);
// General normalization for a 64-bit divisor (expression start elided).
204 .wrapping_add(d.low().leading_zeros())
205 .wrapping_sub(n.high().leading_zeros());
207 // 2 <= sr <= u64::bits() - 1
208 q = n.wrapping_shl(128u32.wrapping_sub(sr));
209 r = n.wrapping_shr(sr);
210 // FIXME the C compiler-rt implementation has something here
211 // that looks like a speed optimisation.
212 // It would be worth a try to port it to Rust too and
213 // compare the speed.
// Normalization for the full 128-bit divisor case.
218 sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
228 sr = sr.wrapping_add(1);
230 // 1 <= sr <= u32::bits()
231 q = n.wrapping_shl(128u32.wrapping_sub(sr));
232 r = n.wrapping_shr(sr);
236 // Not a special case
237 // q and r are initialized with
238 // q = n << (u64::bits() - sr)
240 // 1 <= sr <= u64::bits() - 1
243 // FIXME: replace this with a for loop
244 // (atm not doable as this generates call to
245 // eh_personality when optimisations are turned off,
246 // which in turn gives a linker error in later
247 // compilation steps)
// Restoring-division loop body (loop header elided): shift the 256-bit
// pair r:q left one bit, bringing the previous quotient bit in at the
// bottom of q.
249 // r:q = ((r:q) << 1) | carry
250 r = r.wrapping_shl(1) | q.wrapping_shr(128 - 1);
251 q = q.wrapping_shl(1) | carry as u128;
// Branch-free compare-and-subtract: s is all-ones when r >= d (the signed
// shift by 127 smears the sign bit), so `d & s` subtracts d exactly when
// it fits, and the quotient bit is the low bit of s.
258 let s = ((d.wrapping_sub(r).wrapping_sub(1)) as i128).wrapping_shr(128 - 1);
259 carry = (s & 1) as u64;
260 r = r.wrapping_sub(d & s as u128);
261 sr = sr.wrapping_sub(1);
// Final quotient: shift in the last pending carry bit.
267 (q.wrapping_shl(1)) | carry as u128
// Signed 128-bit remainder, implemented on top of the unsigned divider.
// NOTE(review): the lines computing `sa` (presumably the sign of `a`) and
// converting a/b to magnitudes are elided — TODO confirm; the remainder
// takes the sign of the dividend, per C semantics for `%`.
271 fn i128_mod(a: i128, b: i128) -> i128 {
// zeroed() is sound here only because u128 has no invalid bit patterns.
276 let mut r = ::core::mem::zeroed();
277 u128_div_mod(a, b, &mut r);
// Negate the unsigned remainder when the dividend was negative.
278 if sa == -1 { (r as i128).unchecked_neg() } else { r as i128 }
// Signed 128-bit division via the unsigned divider. NOTE(review): the lines
// computing `sa`/`sb` (operand signs) and taking magnitudes are elided —
// TODO confirm; the quotient sign is the product of the operand signs.
282 fn i128_div(a: i128, b: i128) -> i128 {
287 let sr = sa.wrapping_mul(sb); // sign of quotient
// Negative quotient: compute unsigned and negate; remainder discarded.
289 (u128_div_mod(a, b, ptr::null_mut()) as i128).unchecked_neg()
291 u128_div_mod(a, b, ptr::null_mut()) as i128
// Fragment of the overflow-detecting signed multiply macro (backing
// __muloti4): computes the wrapping product and sets *$o when the true
// product does not fit in $ty. NOTE(review): the macro header, the bodies of
// the overflow branches, early returns, and closing braces are elided.
296 ($a:expr, $b:expr, $o: expr, $ty: ty) => {{
297 let (a, b, overflow) = ($a, $b, $o);
298 let result = a.wrapping_mul(b);
// MIN * x overflows for every x other than 0 and 1 (MIN has no positive
// counterpart), so each operand gets a dedicated check.
300 if a == <$ty>::min_value() {
301 if b != 0 && b != 1 {
306 if b == <$ty>::min_value() {
307 if a != 0 && a != 1 {
// General case: compare magnitudes against MAX / |b| (and the negative
// bound) to detect overflow without computing a wider product.
314 let abs_a = a.iabs();
316 let abs_b = b.iabs();
// |a| < 2 or |b| < 2 cannot overflow (MIN handled above).
317 if abs_a < 2 || abs_b < 2 {
321 if abs_a > unchecked_div(<$ty>::max_value(), abs_b) {
325 if abs_a > unchecked_div(<$ty>::min_value(), abs_b.unchecked_neg()) {
// Fragment of the LargeInt trait: views a wide integer as two half-words.
// NOTE(review): the trait header and associated-type declarations
// (LowHalf/HighHalf) are on elided lines.
337 fn low(self) -> Self::LowHalf;
338 fn high(self) -> Self::HighHalf;
// Reassemble from halves; argument order is (low, high).
339 fn from_parts(low: Self::LowHalf, high: Self::HighHalf) -> Self;
// u128 as two u64 halves. NOTE(review): the associated-type lines, the body
// of low() (presumably `self as u64` — TODO confirm), and closing braces
// are elided from this listing.
341 impl LargeInt for u128 {
345 fn low(self) -> u64 {
// High half: shift the top 64 bits down, then truncate.
348 fn high(self) -> u64 {
349 self.wrapping_shr(64) as u64
// (high << 64) | low, all with wrapping/logical ops.
351 fn from_parts(low: u64, high: u64) -> u128 {
352 (high as u128).wrapping_shl(64) | low as u128
// i128 as an unsigned low half and a signed high half (the sign lives in
// the high word). NOTE(review): associated-type lines, the body of low(),
// and closing braces are elided.
355 impl LargeInt for i128 {
359 fn low(self) -> u64 {
// Arithmetic shift keeps the sign in the high half.
362 fn high(self) -> i64 {
363 self.wrapping_shr(64) as i64
// Reuse the u128 assembly and reinterpret the bits as signed.
365 fn from_parts(low: u64, high: i64) -> i128 {
366 u128::from_parts(low, high as u64) as i128
// Fragment of the widening-multiply macro (backing __multi3): schoolbook
// multiplication of the two low halves split into quarter-words, then the
// cross terms a.high*b.low and a.low*b.high folded into the high word.
// (a.high*b.high would overflow past 128 bits and is correctly dropped.)
// NOTE(review): the macro header and closing braces are elided, as is the
// start of the expression continued at line 391.
371 ($a:expr, $b:expr, $ty: ty, $tyh: ty) => {{
372 let (a, b) = ($a, $b);
// Quarter-word width (half of the half-word type $tyh) and its mask.
373 let half_bits = ((::core::mem::size_of::<$tyh>() * 8) / 2) as u32;
374 let lower_mask = (!0u64).wrapping_shr(half_bits);
// low*low partial product; t accumulates carries between quarter-words.
375 let mut low = (a.low() & lower_mask).wrapping_mul(b.low() & lower_mask);
376 let mut t = low.wrapping_shr(half_bits);
// Add a.low.hi * b.low.lo into the middle.
378 t = t.wrapping_add(a.low().wrapping_shr(half_bits)
379 .wrapping_mul(b.low() & lower_mask));
380 low = low.wrapping_add((t & lower_mask).wrapping_shl(half_bits));
381 let mut high = t.wrapping_shr(half_bits) as $tyh;
382 t = low.wrapping_shr(half_bits);
// Add b.low.hi * a.low.lo, symmetrically.
384 t = t.wrapping_add(b.low().wrapping_shr(half_bits)
385 .wrapping_mul(a.low() & lower_mask));
386 low = low.wrapping_add((t & lower_mask).wrapping_shl(half_bits));
387 high = high.wrapping_add(t.wrapping_shr(half_bits) as $tyh);
// a.low.hi * b.low.hi lands entirely in the high word.
388 high = high.wrapping_add(a.low().wrapping_shr(half_bits)
389 .wrapping_mul(b.low().wrapping_shr(half_bits)) as $tyh);
// Cross terms: a.high*b.low + a.low*b.high added to the high word
// (the `high = high` start of this statement is on an elided line).
391 .wrapping_add(a.high()
392 .wrapping_mul(b.low() as $tyh))
393 .wrapping_add((a.low() as $tyh)
394 .wrapping_mul(b.high()));
395 <$ty>::from_parts(low, high)
// `__multi3`: 128-bit multiply. Despite the function's name, the signature
// is i128 — low 128 bits of the product are identical for signed/unsigned,
// so one symbol serves both.
399 #[export_name="__multi3"]
400 pub extern "C" fn u128_mul(a: i128, b: i128) -> i128 {
401 mul!(a, b, i128, i64)
// Absolute-value helpers for i128. uabs has a provided body (elided here);
// presumably it returns |self| as u128 so that i128::MIN is representable —
// TODO confirm. Closing brace elided.
404 trait AbsExt: Sized {
405 fn uabs(self) -> u128 {
408 fn iabs(self) -> i128;
// Branchless |x| via the sign-mask identity (x ^ s) - s, where `s` (defined
// on an elided line) is presumably the arithmetic-shift sign smear
// x >> 127 — TODO confirm. Note iabs(i128::MIN) wraps to MIN itself.
411 impl AbsExt for i128 {
412 fn iabs(self) -> i128 {
414 ((self ^ s).wrapping_sub(s))
// Negation that never traps on overflow (i128::MIN wraps to itself), used
// where the core `Neg` would panic in debug builds. Closing brace elided.
418 trait NegExt: Sized {
419 fn unchecked_neg(self) -> i128;
// Two's-complement negation spelled as !x + 1 with a wrapping add, so no
// overflow check is ever emitted.
422 impl NegExt for i128 {
423 fn unchecked_neg(self) -> i128 {
424 (!self).wrapping_add(1)
// IEEE-754 layout constants and bit-level access for f32/f64, used by the
// float<->int128 conversions below. NOTE(review): the `ToBytes` associated
// type and MAX_EXP constant declarations are on elided lines.
428 trait FloatStuff: Sized {
431 const MANTISSA_BITS: u32;
433 const EXP_MASK: Self::ToBytes;
434 const MANTISSA_MASK: Self::ToBytes;
// The implicit leading 1 of a normal number's significand.
435 const MANTISSA_LEAD_BIT: Self::ToBytes;
// Raw bit pattern of the float.
437 fn to_bytes(self) -> Self::ToBytes;
// Unbiased exponent.
438 fn get_exponent(self) -> i32;
// f32: 1 sign + 8 exponent (bias 127) + 23 mantissa bits. NOTE(review): the
// `type ToBytes = u32;` line and closing braces are elided.
441 impl FloatStuff for f32 {
443 const MANTISSA_BITS: u32 = 23;
444 const MAX_EXP: i32 = 127;
445 const EXP_MASK: u32 = 0x7F80_0000;
446 const MANTISSA_MASK: u32 = 0x007F_FFFF;
447 const MANTISSA_LEAD_BIT: u32 = 0x0080_0000;
// Bit-for-bit reinterpretation; sound because u32 accepts any pattern.
449 fn to_bytes(self) -> u32 { unsafe { ::core::mem::transmute(self) } }
// Extract the exponent field and remove the bias.
450 fn get_exponent(self) -> i32 {
451 ((self.to_bytes() & Self::EXP_MASK).wrapping_shr(Self::MANTISSA_BITS) as i32)
452 .wrapping_sub(Self::MAX_EXP)
// f64: 1 sign + 11 exponent (bias 1023) + 52 mantissa bits. NOTE(review):
// the `type ToBytes = u64;` line and closing braces are elided.
456 impl FloatStuff for f64 {
458 const MANTISSA_BITS: u32 = 52;
459 const MAX_EXP: i32 = 1023;
460 const EXP_MASK: u64 = 0x7FF0_0000_0000_0000;
461 const MANTISSA_MASK: u64 = 0x000F_FFFF_FFFF_FFFF;
462 const MANTISSA_LEAD_BIT: u64 = 0x0010_0000_0000_0000;
// Bit-for-bit reinterpretation; sound because u64 accepts any pattern.
464 fn to_bytes(self) -> u64 { unsafe { ::core::mem::transmute(self) } }
// Extract the exponent field and remove the bias.
465 fn get_exponent(self) -> i32 {
466 ((self.to_bytes() & Self::EXP_MASK).wrapping_shr(Self::MANTISSA_BITS) as i32)
467 .wrapping_sub(Self::MAX_EXP)
// Float -> unsigned-integer conversion (backs __fixunsdfti/__fixunssfti):
// rebuild the integer from the significand, shifted per the exponent.
// Negative and sub-1.0 inputs become 0; the saturation value returned for
// over-range exponents sits on an elided line (presumably $outty::max_value()
// — TODO confirm). The `else` and closing braces are also elided.
471 macro_rules! float_as_unsigned {
472 ($from: expr, $fromty: ty, $outty: ty) => { {
473 use core::num::Float;
474 let repr = $from.to_bytes();
475 let sign = $from.signum();
476 let exponent = $from.get_exponent();
// Recover the full significand: stored fraction plus the implicit lead 1.
477 let mantissa_fraction = repr & <$fromty as FloatStuff>::MANTISSA_MASK;
478 let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
// Negative values and magnitudes < 1 truncate to zero.
479 if sign == -1.0 || exponent < 0 { return 0 as u128; }
// Exponent exceeds the output width: saturate (return on elided line).
480 if exponent > ::core::mem::size_of::<$outty>() as i32 * 8 {
// Exponent below the mantissa width: shift the significand down,
// discarding fractional bits; otherwise shift it up.
483 (if exponent < (<$fromty as FloatStuff>::MANTISSA_BITS) as i32 {
485 .wrapping_shr((<$fromty as FloatStuff>::MANTISSA_BITS as i32)
486 .wrapping_sub(exponent) as u32)
489 .wrapping_shl(exponent.wrapping_sub(
490 <$fromty as FloatStuff>::MANTISSA_BITS as i32) as u32)
// Float -> signed-integer conversion (backs __fixdfti/__fixsfti): same
// significand reconstruction as float_as_unsigned!, but saturates to
// max/min by sign and negates the magnitude for negative inputs.
// NOTE(review): several `else`/`return` lines and the closing braces are
// elided from this listing.
495 macro_rules! float_as_signed {
496 ($from: expr, $fromty: ty, $outty: ty) => {{
497 use core::num::Float;
498 let repr = $from.to_bytes();
499 let sign = $from.signum();
500 let exponent = $from.get_exponent();
// Full significand: stored fraction plus the implicit lead 1.
501 let mantissa_fraction = repr & <$fromty as FloatStuff>::MANTISSA_MASK;
502 let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
// Magnitudes < 1 truncate to zero regardless of sign.
504 if exponent < 0 { return 0 as i128; }
// Out-of-range exponent: saturate toward the sign's extreme.
505 if exponent > ::core::mem::size_of::<$outty>() as i32 * 8 {
506 let ret = if sign > 0.0 { <$outty>::max_value() } else { <$outty>::min_value() };
// Magnitude: shift the significand down (truncating fraction bits) or up,
// depending on whether the exponent clears the mantissa width.
509 let r = if exponent < (<$fromty as FloatStuff>::MANTISSA_BITS) as i32 {
511 .wrapping_shr((<$fromty as FloatStuff>::MANTISSA_BITS as i32)
512 .wrapping_sub(exponent) as u32)
515 .wrapping_shl(exponent.wrapping_sub(
516 <$fromty as FloatStuff>::MANTISSA_BITS as i32) as u32)
// Apply the sign; unchecked_neg avoids the debug overflow trap.
518 (if sign >= 0.0 { r } else { r.unchecked_neg() })
// Signed -> f64: convert the magnitude via the unsigned path and reapply
// the sign. The match scrutinee (presumably a sign value — TODO confirm)
// and the zero arm are on elided lines.
523 fn i128_as_f64(a: i128) -> f64 {
525 1 => u128_as_f64(a.uabs()),
527 _ => -u128_as_f64(a.uabs()),
// Signed -> f32: same shape as i128_as_f64 — unsigned conversion of the
// magnitude, negated for negative inputs. Match scrutinee and zero arm on
// elided lines.
531 fn i128_as_f32(a: i128) -> f32 {
533 1 => u128_as_f32(a.uabs()),
535 _ => -u128_as_f32(a.uabs()),
// u128 -> f64 with round-to-nearest-even. Counts significant bits, then
// either rounds the value down into 53 bits (with sticky/round bits) or
// shifts it up, and finally assembles the IEEE-754 bit pattern by hand.
// NOTE(review): the match on sd, some `else` branches, the `negn` binding,
// and closing braces are on elided lines.
539 fn u128_as_f64(mut a: u128) -> f64 {
540 use ::core::f64::MANTISSA_DIGITS;
541 if a == 0 { return 0.0; }
// sd = number of significant bits; e = unbiased exponent of the result.
542 let sd = 128u32.wrapping_sub(a.leading_zeros());
543 let mut e = sd.wrapping_sub(1);
// Named constants so the (elided) match on sd can use them as patterns.
544 const MD1 : u32 = MANTISSA_DIGITS + 1;
545 const MD2 : u32 = MANTISSA_DIGITS + 2;
// Too many bits to represent exactly: keep mantissa+2 bits, compressing
// everything below into a sticky bit for correct rounding.
549 if sd > MANTISSA_DIGITS {
551 MD1 => a.wrapping_shl(1),
553 _ => a.wrapping_shr(sd.wrapping_sub(MANTISSA_DIGITS + 2)) |
554 (if (a & (negn.wrapping_shr(128 + MANTISSA_DIGITS + 2)
555 .wrapping_sub(sd as u128))) == 0 { 0 } else { 1 })
// Round to nearest, ties to even: fold the round bit via the bit-2 check,
// add, then drop the two guard bits.
557 a |= if (a & 4) == 0 { 0 } else { 1 };
558 a = a.wrapping_add(1);
559 a = a.wrapping_shr(2);
// Rounding may have carried out of the mantissa: renormalize.
560 if a & (1 << MANTISSA_DIGITS) != 0 {
561 a = a.wrapping_shr(1);
562 e = e.wrapping_add(1);
// Value fits exactly: just left-align into the mantissa.
565 a = a.wrapping_shl(MANTISSA_DIGITS.wrapping_sub(sd));
// Assemble bits: biased exponent (bias 1023) in bits 52..63, mantissa
// below; the implicit lead bit is masked off. transmute builds the f64.
568 ::core::mem::transmute((e as u64).wrapping_add(1023).wrapping_shl(52)
569 | (a as u64 & 0x000f_ffff_ffff_ffff))
// u128 -> f32 with round-to-nearest-even; mirrors u128_as_f64 with f32's
// 24-bit significand, bias 127, and 23 mantissa bits. NOTE(review): the
// match on sd, some `else` branches, the `negn` binding, and closing braces
// are on elided lines.
573 fn u128_as_f32(mut a: u128) -> f32 {
574 use ::core::f32::MANTISSA_DIGITS;
575 if a == 0 { return 0.0; }
// sd = number of significant bits; e = unbiased exponent of the result.
576 let sd = 128u32.wrapping_sub(a.leading_zeros());
577 let mut e = sd.wrapping_sub(1);
578 const MD1 : u32 = MANTISSA_DIGITS + 1;
579 const MD2 : u32 = MANTISSA_DIGITS + 2;
// Too many bits: keep mantissa+2 bits plus a sticky bit for rounding.
583 if sd > MANTISSA_DIGITS {
585 MD1 => a.wrapping_shl(1),
587 _ => a.wrapping_shr(sd.wrapping_sub(MANTISSA_DIGITS + 2)) |
588 (if (a & (negn.wrapping_shr(128 + MANTISSA_DIGITS + 2)
589 .wrapping_sub(sd as u128))) == 0 { 0 } else { 1 })
// Round to nearest, ties to even, then drop the two guard bits.
591 a |= if (a & 4) == 0 { 0 } else { 1 };
592 a = a.wrapping_add(1);
593 a = a.wrapping_shr(2);
// Renormalize if rounding carried out of the mantissa.
594 if a & (1 << MANTISSA_DIGITS) != 0 {
595 a = a.wrapping_shr(1);
596 e = e.wrapping_add(1);
// Exactly representable: left-align into the mantissa.
599 a = a.wrapping_shl(MANTISSA_DIGITS.wrapping_sub(sd));
// Assemble: biased exponent (bias 127) in bits 23..30, mantissa below.
602 ::core::mem::transmute((e as u32).wrapping_add(127).wrapping_shl(23)
603 | (a as u32 & 0x007f_ffff))
// Fragment of the macro that stamps out the exported intrinsic symbols in a
// module, parameterized by platform ABI quirks: $cret is the return type for
// the division family (u128 normally, a u64x2 pair on Windows x64), $conv
// converts a u128 into $cret, and $unadj is the calling-convention string for
// the float-conversion entries. The macro takes the ABI string as a token
// tree ($unadj) because extern ABI strings are checked by the parser — hence
// the macro's name. NOTE(review): the `mod imp` wrapper, several function
// bodies, and closing braces are on elided lines.
608 macro_rules! why_are_abi_strings_checked_by_parser { ($cret:ty, $conv:expr, $unadj:tt) => {
// Pull the helpers defined above into the generated module.
610 use super::{LargeInt, FloatStuff, NegExt, AbsExt};
611 use super::{i128_as_f64, i128_as_f32, u128_as_f64, u128_as_f32,
612 i128_div, i128_mod, u128_div_mod, unchecked_div, ptr};
// Overflow-checked multiply; writes 1 through `o` on overflow (body elided).
614 // rdx:rcx, r9:r8, stack -> rdx:rax
616 // define i128 @__muloti4(i128, i128, i32*)
617 #[export_name="__muloti4"]
618 pub unsafe extern $unadj fn i128_mul_oflow(a: i128, b: i128, o: *mut i32) -> i128 {
// int128 -> float conversions (bodies elided; presumably thin forwards to
// the same-named super:: helpers — TODO confirm).
625 // define double @__muloti4(i128)
626 #[export_name="__floattidf"]
627 pub extern $unadj fn i128_as_f64_(a: i128) -> f64 {
630 #[export_name="__floattisf"]
631 pub extern $unadj fn i128_as_f32_(a: i128) -> f32 {
634 #[export_name="__floatuntidf"]
635 pub extern $unadj fn u128_as_f64_(a: u128) -> f64 {
638 #[export_name="__floatuntisf"]
639 pub extern $unadj fn u128_as_f32_(a: u128) -> f32 {
// float -> int128 conversions, delegating to the conversion macros above.
646 // define i128 @stuff(double)
647 #[export_name="__fixunsdfti"]
648 pub extern $unadj fn f64_as_u128(a: f64) -> u128 {
649 float_as_unsigned!(a, f64, u128)
652 #[export_name="__fixunssfti"]
653 pub extern $unadj fn f32_as_u128(a: f32) -> u128 {
654 float_as_unsigned!(a, f32, u128)
657 #[export_name="__fixdfti"]
658 pub extern $unadj fn f64_as_i128(a: f64) -> i128 {
659 float_as_signed!(a, f64, i128)
662 #[export_name="__fixsfti"]
663 pub extern $unadj fn f32_as_i128(a: f32) -> i128 {
664 float_as_signed!(a, f32, i128)
// Two-u64 pair used as the Windows x64 return type for the division family.
668 pub struct u64x2(u64, u64);
// Division/remainder entries: compute with the shared helpers, then convert
// the u128 result to $cret (the `$conv(x)` tails are on elided lines).
673 // define <2 x u64> @stuff(i128*, i128*, i128*)
675 // That almost matches the C ABI, so we simply use the C ABI
676 #[export_name="__udivmodti4"]
677 pub extern "C" fn u128_div_mod_(n: u128, d: u128, rem: *mut u128) -> $cret {
678 let x = u128_div_mod(n, d, rem);
682 #[export_name="__udivti3"]
683 pub extern "C" fn u128_div_(a: u128, b: u128) -> $cret {
684 let x = u128_div_mod(a, b, ptr::null_mut());
688 #[export_name="__umodti3"]
689 pub extern "C" fn u128_mod_(a: u128, b: u128) -> $cret {
// zeroed() is sound: u128 has no invalid bit patterns.
691 let mut r = ::core::mem::zeroed();
692 u128_div_mod(a, b, &mut r);
697 #[export_name="__divti3"]
698 pub extern "C" fn i128_div_(a: i128, b: i128) -> $cret {
699 let x = i128_div(a, b);
703 #[export_name="__modti3"]
704 pub extern "C" fn i128_mod_(a: i128, b: i128) -> $cret {
705 let x = i128_mod(a, b);
// Instantiate the intrinsic module for the current target. Windows x64
// returns the division results as a u64x2 pair (the third macro argument —
// the $unadj ABI string — is on an elided line); everywhere else a plain
// u128 return with the identity conversion and the "C" ABI suffices.
711 // LLVM expectations for ABI on windows x64 are pure madness.
712 #[cfg(all(windows, target_pointer_width="64"))]
713 why_are_abi_strings_checked_by_parser!(u64x2,
714 |i: u128| u64x2(i.low(), i.high()),
717 #[cfg(not(all(windows, target_pointer_width="64")))]
718 why_are_abi_strings_checked_by_parser!(u128, |i|{ i }, "C");
// Re-export the generated symbols from the macro's module at crate level.
719 pub use self::imp::*;