// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
11 #![feature(compiler_builtins)]
14 #![unstable(feature = "compiler_builtins_lib",
15 reason = "internal implementation detail of rustc right now",
17 #![crate_name = "compiler_builtins"]
18 #![crate_type = "rlib"]
19 #![allow(unused_features)]
20 #![feature(staged_api, core_intrinsics, repr_simd,
21 i128_type, core_float, abi_unadjusted, associated_consts)]
22 #![allow(non_camel_case_types, unused_variables, unused_imports)]
24 #[cfg(any(target_pointer_width="32", target_pointer_width="16", target_os="windows",
25 target_arch="mips64"))]
28 #![allow(unused_comparisons)]
30 use core::intrinsics::unchecked_div;
31 use core::intrinsics::unchecked_rem;
// Shift-left helper macro arm shared by the 128-bit `__ashlti3` intrinsic.
// NOTE(review): the `macro_rules!` header, the `else`/`else if` keywords and
// the closing braces of this arm are elided from this view (the original
// numbering jumps 41→45 and 48→53); the fragment is not compilable as shown.
($a:expr, $b:expr, $ty:ty) => {{
    let (a, b) = ($a, $b);
    // Bit width of $ty (size_of * 8); wrapping ops are used throughout the
    // crate so the intrinsics themselves never trigger overflow checks.
    let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
    let half_bits = bits.wrapping_shr(1);
    if b & half_bits != 0 {
        // Shift amount >= half width: the low half moves entirely into the
        // high half and the result's low half is zero.
        <$ty>::from_parts(0, a.low().wrapping_shl(
            b.wrapping_sub(half_bits) as u32))
        // NOTE(review): a `b == 0` fast path appears to be elided here; the
        // following expression handles 0 < b < half_bits.
        <$ty>::from_parts(a.low().wrapping_shl(b as u32),
            a.high().wrapping_shl(b as u32)
                // Bits shifted out of the low half are folded into the high
                // half — presumably an `| a.low()` precedes this shift on the
                // elided line 47. TODO confirm against upstream.
                .wrapping_shr(half_bits.wrapping_sub(b) as u32))
// `__ashlti3`: 128-bit shift left, the libcall LLVM emits for `u128 << n`.
// NOTE(review): the body (an invocation of the macro above) is elided.
#[export_name="__ashlti3"]
pub extern "C" fn shl(a: u128, b: u128) -> u128 {
// Arithmetic (sign-extending) shift-right helper arm for `__ashrti3`.
// NOTE(review): macro header, branch keywords and closing braces are elided
// from this view (numbering jumps 66→70 and 73→78).
($a: expr, $b: expr, $ty:ty) => {{
    let (a, b) = ($a, $b);
    let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
    let half_bits = bits.wrapping_shr(1);
    if b & half_bits != 0 {
        // Shift amount >= half width: the (signed) high half shifted right
        // becomes the new low half...
        <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32)
            as <$ty as LargeInt>::LowHalf,
            // ...and shifting the high half by half_bits-1 replicates its
            // sign bit across the result's entire high half.
            a.high().wrapping_shr(half_bits.wrapping_sub(1) as u32))
        // NOTE(review): the `b == 0` fast path / `else` is elided here.
        // The cast to the unsigned low-half type makes the left shift below
        // a logical shift, so no stray sign bits leak into the low half.
        let high_unsigned = a.high() as <$ty as LargeInt>::LowHalf;
        <$ty>::from_parts(high_unsigned.wrapping_shl(half_bits.wrapping_sub(b) as u32)
            | a.low().wrapping_shr(b as u32),
            a.high().wrapping_shr(b as u32))
// `__ashrti3`: 128-bit arithmetic shift right (`i128 >> n`).
// NOTE(review): the body is elided from this view.
#[export_name="__ashrti3"]
pub extern "C" fn shr(a: i128, b: i128) -> i128 {
// Logical (zero-filling) shift-right helper arm for `__lshrti3`.
// NOTE(review): macro header, branch keywords and closing braces elided
// (numbering jumps 89→93 and 95→101).
($a: expr, $b: expr, $ty:ty) => {{
    let (a, b) = ($a, $b);
    let bits = ::core::mem::size_of::<$ty>().wrapping_mul(8) as $ty;
    let half_bits = bits.wrapping_shr(1);
    if b & half_bits != 0 {
        // Shift amount >= half width: high half becomes the low half,
        // high half of the result is zero (logical shift).
        <$ty>::from_parts(a.high().wrapping_shr(b.wrapping_sub(half_bits) as u32), 0)
        // NOTE(review): `b == 0` fast path elided; this handles 0 < b < half_bits.
        <$ty>::from_parts(a.high().wrapping_shl(half_bits.wrapping_sub(b) as u32)
            | a.low().wrapping_shr(b as u32),
            a.high().wrapping_shr(b as u32))
// `__lshrti3`: 128-bit logical shift right (`u128 >> n`).
// NOTE(review): the body is elided from this view.
#[export_name="__lshrti3"]
pub extern "C" fn lshr(a: u128, b: u128) -> u128 {
// Unsigned 128-bit division returning the quotient and, via the `rem` out
// pointer, the remainder. This is the workhorse behind `__udivmodti4`,
// `__udivti3` and `__umodti3`.
//
// NOTE(review): large parts of this function are elided from this view —
// in particular the guards that select between the special cases below
// (null checks on `rem`, the `n.high() == 0` / `d.low() == 0` tests, the
// `sr` overflow checks, and the main shift-subtract loop header). Each
// special case is annotated with what the elided guard presumably tests;
// confirm against the upstream compiler-rt translation.
pub extern "C" fn u128_div_mod(n: u128, d: u128, rem: *mut u128) -> u128 {
    // Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide
    // special cases, X is unknown, K != 0
    // Case: both operands fit in 64 bits (presumably n.high()==0 && d.high()==0)
    // — fall back to a single native 64-bit div/rem pair.
    *rem = u128::from(unchecked_rem(n.low(), d.low()));
    return u128::from(unchecked_div(n.low(), d.low()));
    // Case: presumably d == 0 with n.high() != 0 — dividing the high words
    // deliberately reproduces the native divide-by-zero behavior.
    *rem = u128::from(unchecked_rem(n.high(), d.low()));
    return u128::from(unchecked_div(n.high(), d.low()));
    // Case: presumably n.low()==0 && d.low()==0 — divide the high words only;
    // the remainder's low word is zero by construction.
    *rem = u128::from_parts(0, unchecked_rem(n.high(), d.high()));
    return u128::from(unchecked_div(n.high(), d.high()));
    // Case: d.low()==0 and the high word of d is a power of two — division
    // reduces to a mask (remainder) and a shift (quotient).
    if d.high().is_power_of_two() {
        *rem = u128::from_parts(n.low(),
            n.high() & (d.high().wrapping_sub(1)));
        return u128::from(n.high().wrapping_shr(d.high().trailing_zeros()));
    // Normalize: shift count is the difference of leading-zero counts;
    // an elided guard presumably returns early when d > n (quotient 0).
    sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
    sr = sr.wrapping_add(1);
    // 1 <= sr <= u64::bits() - 1
    // Seed the shift-subtract loop: q holds the bits of n below the cut,
    // r the bits above it.
    q = n.wrapping_shl(128u32.wrapping_sub(sr));
    r = n.wrapping_shr(sr);
    // Case: d fits in 64 bits and is a power of two — mask and shift.
    if d.low().is_power_of_two() {
        *rem = u128::from(n.low() & (d.low().wrapping_sub(1)));
        // NOTE(review): an elided branch presumably handles d.low() == 1
        // (return n) before this point.
        let sr = d.low().trailing_zeros();
        return n.wrapping_shr(sr);
    // General 128/64 path: normalization count built from both halves.
    // NOTE(review): the start of this expression (`sr = ...`) is elided.
        .wrapping_add(d.low().leading_zeros())
        .wrapping_sub(n.high().leading_zeros());
    // 2 <= sr <= u64::bits() - 1
    q = n.wrapping_shl(128u32.wrapping_sub(sr));
    r = n.wrapping_shr(sr);
    // FIXME the C compiler-rt implementation has something here
    // that looks like a speed optimisation.
    // It would be worth a try to port it to Rust too and
    // compare the speed.
    // General 128/128 path (presumably d.high() != 0).
    sr = d.high().leading_zeros().wrapping_sub(n.high().leading_zeros());
    sr = sr.wrapping_add(1);
    // 1 <= sr <= u32::bits()
    q = n.wrapping_shl(128u32.wrapping_sub(sr));
    r = n.wrapping_shr(sr);
    // Not a special case
    // q and r are initialized with
    // q = n << (u64::bits() - sr)
    // 1 <= sr <= u64::bits() - 1
    // Main restoring-division loop (its `loop`/`while` header is elided):
    // FIXME: replace this with a for loop
    // (atm not doable as this generates call to
    // eh_personality when optimisations are turned off,
    // which in turn gives a linker error in later
    // compilation steps)
    // r:q = ((r:q) << 1) | carry
    r = r.wrapping_shl(1) | q.wrapping_shr(128 - 1);
    q = q.wrapping_shl(1) | carry as u128;
    // Branchless compare: s is all-ones when r >= d (i.e. d - r - 1 goes
    // negative under the arithmetic shift), otherwise all-zeros.
    let s = ((d.wrapping_sub(r).wrapping_sub(1)) as i128).wrapping_shr(128 - 1);
    carry = (s & 1) as u64;
    // Conditionally subtract d from r, selected by the mask s.
    r = r.wrapping_sub(d & s as u128);
    sr = sr.wrapping_sub(1);
    // Final quotient: shift in the last carry bit. (The elided epilogue
    // presumably stores r through `rem` when it is non-null.)
    (q.wrapping_shl(1)) | carry as u128
// Signed 128-bit remainder (`__modti3` backend): computed via the unsigned
// division routine, then sign-corrected. In Rust/C, the remainder takes the
// sign of the dividend.
// NOTE(review): the computation of `sa` (sign of `a`), the conversion of the
// operands to their absolute values, and the enclosing `unsafe` block are
// elided from this view (numbering jumps 271→276).
fn i128_mod(a: i128, b: i128) -> i128 {
    let mut r = ::core::mem::zeroed();
    u128_div_mod(a, b, &mut r);
    // Negate the remainder iff the dividend was negative.
    if sa == -1 { (r as i128).unchecked_neg() } else { r as i128 }
// Signed 128-bit division (`__divti3` backend): unsigned divide of the
// absolute values, then apply the sign of the quotient (sign(a) * sign(b)).
// NOTE(review): the computation of `sa`/`sb`, operand abs-conversion, the
// `if sr == -1` selector and the enclosing `unsafe` block are elided
// (numbering jumps 282→287→289→291).
fn i128_div(a: i128, b: i128) -> i128 {
    let sr = sa.wrapping_mul(sb); // sign of quotient
    // Quotient negative: negate the unsigned result.
    (u128_div_mod(a, b, ptr::null_mut()) as i128).unchecked_neg()
    // Quotient non-negative: reinterpret directly.
    u128_div_mod(a, b, ptr::null_mut()) as i128
// Overflow-detecting signed multiply — the macro arm behind `__muloti4`.
// It computes the wrapping product and sets `*$o` when the mathematical
// product does not fit in $ty.
// NOTE(review): the macro header, the `*overflow` store statements inside
// each `if`, the early returns, and closing braces are elided from this view
// (numbering jumps 301→306, 307→314, etc.).
($a:expr, $b:expr, $o: expr, $ty: ty) => {{
    let (a, b, overflow) = ($a, $b, $o);
    // The returned value is always the wrapping product; only the overflow
    // flag distinguishes the overflowing case.
    let result = a.wrapping_mul(b);
    // MIN * b overflows for any b other than 0 and 1 (|MIN| is not
    // representable), and symmetrically for a * MIN.
    if a == <$ty>::min_value() {
        if b != 0 && b != 1 {
    if b == <$ty>::min_value() {
        if a != 0 && a != 1 {
    // General case: compare |a| against MAX / |b| to predict overflow
    // without computing a widening multiply.
    let abs_a = a.iabs();
    let abs_b = b.iabs();
    if abs_a < 2 || abs_b < 2 {
    // Same-sign operands: product positive, bounded by MAX.
    if abs_a > unchecked_div(<$ty>::max_value(), abs_b) {
    // Mixed-sign operands: product negative, bounded by MIN.
    if abs_a > unchecked_div(<$ty>::min_value(), abs_b.unchecked_neg()) {
// `LargeInt` abstracts a double-wide integer as (low, high) halves so the
// shift/mul macros above can be written once for both u128 and i128.
// NOTE(review): the trait declaration line, the associated-type declarations
// (`LowHalf`/`HighHalf`), several method bodies and the closing braces are
// elided from this view (numbering jumps 341→345, 345→348, 359→362, ...).
fn low(self) -> Self::LowHalf;
fn high(self) -> Self::HighHalf;
fn from_parts(low: Self::LowHalf, high: Self::HighHalf) -> Self;
impl LargeInt for u128 {
    // Low 64 bits. NOTE(review): body (`self as u64`) elided.
    fn low(self) -> u64 {
    fn high(self) -> u64 {
        self.wrapping_shr(64) as u64
    fn from_parts(low: u64, high: u64) -> u128 {
        (high as u128).wrapping_shl(64) | low as u128
impl LargeInt for i128 {
    // Low half is unsigned even for the signed type: raw bits only.
    fn low(self) -> u64 {
    // Arithmetic shift: the high half keeps the sign.
    fn high(self) -> i64 {
        self.wrapping_shr(64) as i64
    fn from_parts(low: u64, high: i64) -> i128 {
        // Delegate to the unsigned assembly and reinterpret the bits.
        u128::from_parts(low, high as u64) as i128
// Schoolbook 128-bit multiply built from half-word (64/32-bit) products —
// the classic four-partial-products scheme with carry propagation through
// the temporary `t`. Backs the `__multi3` intrinsic.
// NOTE(review): the `macro_rules! mul` header, a `high = ...` assignment
// preceding line 391, and the closing braces are elided from this view.
($a:expr, $b:expr, $ty: ty, $tyh: ty) => {{
    let (a, b) = ($a, $b);
    // Half of a half-word in bits (size_of($tyh) * 8 / 2).
    let half_bits = ::core::mem::size_of::<$tyh>().wrapping_mul(4) as u32;
    let lower_mask = (!0u64).wrapping_shr(half_bits);
    // low(a) * low(b), quarter-word at a time.
    let mut low = (a.low() & lower_mask).wrapping_mul(b.low() & lower_mask);
    let mut t = low.wrapping_shr(half_bits);
    // NOTE(review): `low &= lower_mask;` appears to be elided before this
    // (numbering jumps 376→378) — and again before line 384.
    t = t.wrapping_add(a.low().wrapping_shr(half_bits)
        .wrapping_mul(b.low() & lower_mask));
    low = low.wrapping_add((t & lower_mask).wrapping_shl(half_bits));
    let mut high = t.wrapping_shr(half_bits) as $tyh;
    t = low.wrapping_shr(half_bits);
    t = t.wrapping_add(b.low().wrapping_shr(half_bits)
        .wrapping_mul(a.low() & lower_mask));
    low = low.wrapping_add((t & lower_mask).wrapping_shl(half_bits));
    high = high.wrapping_add(t.wrapping_shr(half_bits) as $tyh);
    high = high.wrapping_add(a.low().wrapping_shr(half_bits)
        .wrapping_mul(b.low().wrapping_shr(half_bits)) as $tyh);
    // Cross terms a.high*b.low and a.low*b.high only affect the high half
    // (their low contribution wraps away).
    // NOTE(review): the `high = high` receiver line is elided before the
    // chain below (numbering jumps 389→391).
        .wrapping_add(a.high()
            .wrapping_mul(b.low() as $tyh))
        .wrapping_add((a.low() as $tyh)
            .wrapping_mul(b.high()));
    <$ty>::from_parts(low, high)
// `__multi3`: 128-bit multiply. Despite the `u128_mul` name it is declared
// over i128 — presumably because wrapping two's-complement multiplication is
// bit-identical for signed and unsigned, so one symbol serves both.
#[export_name="__multi3"]
pub extern "C" fn u128_mul(a: i128, b: i128) -> i128 {
    mul!(a, b, i128, i64)
// Absolute-value helpers for i128: `iabs` keeps the signed type, `uabs`
// widens to u128 (so |i128::MIN| is representable).
// NOTE(review): trait/impl closing braces and the `uabs` body are elided
// from this view (numbering jumps 406→409, 410→413).
trait AbsExt: Sized {
    fn uabs(self) -> u128;
    fn iabs(self) -> i128;
impl AbsExt for i128 {
    fn uabs(self) -> u128 {
    fn iabs(self) -> i128 {
        // Branchless abs: s is 0 for non-negative values and -1 (all ones)
        // for negative ones (arithmetic shift by 127); (x ^ s) - s then
        // yields x or -x accordingly.
        let s = self.wrapping_shr(127);
        ((self ^ s).wrapping_sub(s))
// Negation without the overflow check that `-x` would carry in debug builds.
trait NegExt: Sized {
    fn unchecked_neg(self) -> i128;
impl NegExt for i128 {
    fn unchecked_neg(self) -> i128 {
        // Two's-complement identity: -x == !x + 1 (wraps for i128::MIN).
        (!self).wrapping_add(1)
// Bit-level access to IEEE-754 floats, used by the float<->i128 conversion
// macros: raw bits, exponent/mantissa masks, and the unbiased exponent.
// NOTE(review): the `type ToBytes` associated-type declarations, the
// `MAX_EXP` declaration in the trait, and closing braces are elided from
// this view (numbering jumps 429→432, 442→444, 457→459, ...).
trait FloatStuff: Sized {
    const MANTISSA_BITS: u32;
    const EXP_MASK: Self::ToBytes;
    const MANTISSA_MASK: Self::ToBytes;
    const MANTISSA_LEAD_BIT: Self::ToBytes;
    fn to_bytes(self) -> Self::ToBytes;
    fn get_exponent(self) -> i32;
impl FloatStuff for f32 {
    // IEEE-754 binary32: 23 mantissa bits, bias 127.
    const MANTISSA_BITS: u32 = 23;
    const MAX_EXP: i32 = 127;
    const EXP_MASK: u32 = 0x7F80_0000;
    const MANTISSA_MASK: u32 = 0x007F_FFFF;
    const MANTISSA_LEAD_BIT: u32 = 0x0080_0000;
    // Raw bit pattern (pre-dates f32::to_bits, hence the transmute).
    fn to_bytes(self) -> u32 { unsafe { ::core::mem::transmute(self) } }
    fn get_exponent(self) -> i32 {
        // Extract the biased exponent field and remove the bias.
        ((self.to_bytes() & Self::EXP_MASK).wrapping_shr(Self::MANTISSA_BITS) as i32)
            .wrapping_sub(Self::MAX_EXP)
impl FloatStuff for f64 {
    // IEEE-754 binary64: 52 mantissa bits, bias 1023.
    const MANTISSA_BITS: u32 = 52;
    const MAX_EXP: i32 = 1023;
    const EXP_MASK: u64 = 0x7FF0_0000_0000_0000;
    const MANTISSA_MASK: u64 = 0x000F_FFFF_FFFF_FFFF;
    const MANTISSA_LEAD_BIT: u64 = 0x0010_0000_0000_0000;
    fn to_bytes(self) -> u64 { unsafe { ::core::mem::transmute(self) } }
    fn get_exponent(self) -> i32 {
        ((self.to_bytes() & Self::EXP_MASK).wrapping_shr(Self::MANTISSA_BITS) as i32)
            .wrapping_sub(Self::MAX_EXP)
// Float -> unsigned 128-bit conversion (truncating toward zero), backing
// `__fixunsdfti` / `__fixunssfti`. Negative inputs and exponents < 0
// saturate to 0; the saturating branch for too-large exponents is elided.
// NOTE(review): the `return <$outty>::max_value()`-style saturation body,
// the mantissa cast lines, and the closing braces are elided from this view
// (numbering jumps 481→484, 484→486, 487→490).
macro_rules! float_as_unsigned {
    ($from: expr, $fromty: ty, $outty: ty) => { {
        use core::num::Float;
        let repr = $from.to_bytes();
        let sign = $from.signum();
        let exponent = $from.get_exponent();
        let mantissa_fraction = repr & <$fromty as FloatStuff>::MANTISSA_MASK;
        // Re-attach the implicit leading 1 of a normal float.
        let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
        // Negative values and magnitudes < 1 truncate to 0.
        if sign == -1.0 || exponent < 0 { return 0 as u128; }
        // Value too large for the target width — presumably saturates to
        // max_value() on the elided lines.
        if exponent > ::core::mem::size_of::<$outty>().wrapping_mul(8) as i32 {
        // Align the mantissa with the binary point: shift right when the
        // exponent is smaller than the mantissa width, left otherwise.
        (if exponent < (<$fromty as FloatStuff>::MANTISSA_BITS) as i32 {
            .wrapping_shr((<$fromty as FloatStuff>::MANTISSA_BITS as i32)
                .wrapping_sub(exponent) as u32)
            .wrapping_shl(exponent.wrapping_sub(
                <$fromty as FloatStuff>::MANTISSA_BITS as i32) as u32)
// Float -> signed 128-bit conversion (truncating toward zero), backing
// `__fixdfti` / `__fixsfti`. Saturates at the type bounds for too-large
// magnitudes; the sign is applied to the unsigned magnitude at the end.
// NOTE(review): the `return ret;`, the mantissa cast lines, and the closing
// braces are elided from this view (numbering jumps 507→510, 510→512, 513→516).
macro_rules! float_as_signed {
    ($from: expr, $fromty: ty, $outty: ty) => {{
        use core::num::Float;
        let repr = $from.to_bytes();
        let sign = $from.signum();
        let exponent = $from.get_exponent();
        let mantissa_fraction = repr & <$fromty as FloatStuff>::MANTISSA_MASK;
        let mantissa = mantissa_fraction | <$fromty as FloatStuff>::MANTISSA_LEAD_BIT;
        // Magnitude < 1 truncates to 0 regardless of sign.
        if exponent < 0 { return 0 as i128; }
        // Out of range: saturate to MAX for positive, MIN for negative.
        if exponent > ::core::mem::size_of::<$outty>().wrapping_mul(8) as i32 {
            let ret = if sign > 0.0 { <$outty>::max_value() } else { <$outty>::min_value() };
        // Align the mantissa with the binary point (see float_as_unsigned).
        let r = if exponent < (<$fromty as FloatStuff>::MANTISSA_BITS) as i32 {
            .wrapping_shr((<$fromty as FloatStuff>::MANTISSA_BITS as i32)
                .wrapping_sub(exponent) as u32)
            .wrapping_shl(exponent.wrapping_sub(
                <$fromty as FloatStuff>::MANTISSA_BITS as i32) as u32)
        // Apply the sign of the original float to the magnitude.
        (if sign >= 0.0 { r } else { r.unchecked_neg() })
// Signed -> float conversions, implemented as unsigned conversion of the
// absolute value with the sign re-applied.
// NOTE(review): the `match` headers (presumably over `a.signum()`, with a
// `0 => 0.0` arm) and closing braces are elided from this view
// (numbering jumps 524→526, 526→528, 532→534).
fn i128_as_f64(a: i128) -> f64 {
    1 => u128_as_f64(a.uabs()),
    _ => -u128_as_f64(a.uabs()),
fn i128_as_f32(a: i128) -> f32 {
    1 => u128_as_f32(a.uabs()),
    _ => -u128_as_f32(a.uabs()),
// u128 -> f64 conversion with IEEE round-to-nearest-even, backing
// `__floatuntidf`. Mirrors the compiler-rt algorithm: find the number of
// significant bits, shrink (with guard/round/sticky bits) or widen the
// mantissa to 52 bits, then assemble the exponent and mantissa fields.
// NOTE(review): the `match sd` header that uses MD1/MD2, the `negn` binding,
// the else branch header, and the enclosing unsafe/final braces are elided
// from this view (numbering jumps 546→550, 556→558, 566→569).
fn u128_as_f64(mut a: u128) -> f64 {
    use ::core::f64::MANTISSA_DIGITS;
    if a == 0 { return 0.0; }
    // Number of significant bits in a.
    let sd = 128u32.wrapping_sub(a.leading_zeros());
    // Unbiased exponent of the result.
    let mut e = sd.wrapping_sub(1);
    const MD1 : u32 = MANTISSA_DIGITS + 1;
    const MD2 : u32 = MANTISSA_DIGITS + 2;
    // Too many significant bits: round. The elided `match sd` dispatches on
    // MD1/MD2 to position guard, round and sticky bits.
    if sd > MANTISSA_DIGITS {
        MD1 => a.wrapping_shl(1),
        _ => a.wrapping_shr(sd.wrapping_sub(MANTISSA_DIGITS + 2)) |
            // Sticky bit: OR of all the bits shifted out below the round bit.
            (if (a & (negn.wrapping_shr(128 + MANTISSA_DIGITS + 2)
                .wrapping_sub(sd as u128))) == 0 { 0 } else { 1 })
        // Round to nearest, ties to even: fold the parity of the low result
        // bit into the sticky position, add one, drop guard+round bits.
        a |= if (a & 4) == 0 { 0 } else { 1 };
        a = a.wrapping_add(1);
        a = a.wrapping_shr(2);
        // Rounding may have carried into an extra bit: renormalize.
        if a & (1 << MANTISSA_DIGITS) != 0 {
            a = a.wrapping_shr(1);
            e = e.wrapping_add(1);
        // sd <= MANTISSA_DIGITS: the value is exact, just left-align it.
        a = a.wrapping_shl(MANTISSA_DIGITS.wrapping_sub(sd));
    // Assemble biased exponent (bias 1023) and 52-bit mantissa field.
    ::core::mem::transmute((e as u64).wrapping_add(1023).wrapping_shl(52)
        | (a as u64 & 0x000f_ffff_ffff_ffff))
// u128 -> f32 conversion with round-to-nearest-even, backing `__floatuntisf`.
// Same structure as u128_as_f64 above, with binary32 parameters
// (24 mantissa digits, bias 127, 23-bit mantissa field).
// NOTE(review): as in u128_as_f64, the `match sd` header, the `negn`
// binding, the else header and the closing braces are elided from this view.
fn u128_as_f32(mut a: u128) -> f32 {
    use ::core::f32::MANTISSA_DIGITS;
    if a == 0 { return 0.0; }
    let sd = 128u32.wrapping_sub(a.leading_zeros());
    let mut e = sd.wrapping_sub(1);
    const MD1 : u32 = MANTISSA_DIGITS + 1;
    const MD2 : u32 = MANTISSA_DIGITS + 2;
    if sd > MANTISSA_DIGITS {
        MD1 => a.wrapping_shl(1),
        _ => a.wrapping_shr(sd.wrapping_sub(MANTISSA_DIGITS + 2)) |
            // Sticky bit from everything shifted out below the round bit.
            (if (a & (negn.wrapping_shr(128 + MANTISSA_DIGITS + 2)
                .wrapping_sub(sd as u128))) == 0 { 0 } else { 1 })
        // Round to nearest, ties to even.
        a |= if (a & 4) == 0 { 0 } else { 1 };
        a = a.wrapping_add(1);
        a = a.wrapping_shr(2);
        // Renormalize if rounding carried out of the mantissa.
        if a & (1 << MANTISSA_DIGITS) != 0 {
            a = a.wrapping_shr(1);
            e = e.wrapping_add(1);
        // Exact case: left-align the significant bits.
        a = a.wrapping_shl(MANTISSA_DIGITS.wrapping_sub(sd));
    // Assemble biased exponent (bias 127) and 23-bit mantissa field.
    ::core::mem::transmute((e as u32).wrapping_add(127).wrapping_shl(23)
        | (a as u32 & 0x007f_ffff))
// Generates the exported intrinsic wrappers parameterized over the return
// ABI: `$cret` is the concrete return type (u128, or u64x2 on Windows x64),
// `$conv` converts a u128 result into `$cret`, and `$unadj` is the ABI
// string ("C" or "unadjusted"). A macro is needed because an ABI string
// must be a literal token — hence the exasperated name.
// NOTE(review): the `pub mod imp {` wrapper, the wrapper-function bodies,
// and many closing braces are elided from this view (numbering jumps
// 613→615, 619→626, 628→631, ...).
macro_rules! why_are_abi_strings_checked_by_parser { ($cret:ty, $conv:expr, $unadj:tt) => {
    use super::{LargeInt, FloatStuff, NegExt, AbsExt};
    use super::{i128_as_f64, i128_as_f32, u128_as_f64, u128_as_f32,
        i128_div, i128_mod, u128_div_mod, unchecked_div, ptr};
    // rdx:rcx, r9:r8, stack -> rdx:rax
    // define i128 @__muloti4(i128, i128, i32*)
    #[export_name="__muloti4"]
    pub unsafe extern $unadj fn i128_mul_oflow(a: i128, b: i128, o: *mut i32) -> i128 {
    // define double @__muloti4(i128)
    #[export_name="__floattidf"]
    pub extern $unadj fn i128_as_f64_(a: i128) -> f64 {
    #[export_name="__floattisf"]
    pub extern $unadj fn i128_as_f32_(a: i128) -> f32 {
    #[export_name="__floatuntidf"]
    pub extern $unadj fn u128_as_f64_(a: u128) -> f64 {
    #[export_name="__floatuntisf"]
    pub extern $unadj fn u128_as_f32_(a: u128) -> f32 {
    // define i128 @stuff(double)
    #[export_name="__fixunsdfti"]
    pub extern $unadj fn f64_as_u128(a: f64) -> u128 {
        float_as_unsigned!(a, f64, u128)
    #[export_name="__fixunssfti"]
    pub extern $unadj fn f32_as_u128(a: f32) -> u128 {
        float_as_unsigned!(a, f32, u128)
    #[export_name="__fixdfti"]
    pub extern $unadj fn f64_as_i128(a: f64) -> i128 {
        float_as_signed!(a, f64, i128)
    #[export_name="__fixsfti"]
    pub extern $unadj fn f32_as_i128(a: f32) -> i128 {
        float_as_signed!(a, f32, i128)
    // Two-u64 SIMD-ish pair used as the Windows x64 return type for i128.
    // NOTE(review): a #[repr(simd)]-style attribute on this struct is
    // presumably elided (numbering jumps 665→669).
    pub struct u64x2(u64, u64);
    // define <2 x u64> @stuff(i128*, i128*, i128*)
    // That almost matches the C ABI, so we simply use the C ABI
    #[export_name="__udivmodti4"]
    pub extern "C" fn u128_div_mod_(n: u128, d: u128, rem: *mut u128) -> $cret {
        // The `$conv(x)` application is presumably elided after each of
        // these bindings (e.g. numbering jumps 679→683).
        let x = u128_div_mod(n, d, rem);
    #[export_name="__udivti3"]
    pub extern "C" fn u128_div_(a: u128, b: u128) -> $cret {
        let x = u128_div_mod(a, b, ptr::null_mut());
    #[export_name="__umodti3"]
    pub extern "C" fn u128_mod_(a: u128, b: u128) -> $cret {
        let mut r = ::core::mem::zeroed();
        u128_div_mod(a, b, &mut r);
    #[export_name="__divti3"]
    pub extern "C" fn i128_div_(a: i128, b: i128) -> $cret {
        let x = i128_div(a, b);
    #[export_name="__modti3"]
    pub extern "C" fn i128_mod_(a: i128, b: i128) -> $cret {
        let x = i128_mod(a, b);
// Instantiate the intrinsic module for the current target. On Windows x64,
// 128-bit values are returned as a u64x2 pair with the "unadjusted" ABI;
// everywhere else the plain C ABI with u128 returns is used.
// NOTE(review): the Windows invocation is truncated in this view — its
// third argument (presumably the "unadjusted" ABI string) and closing
// parenthesis are elided (numbering jumps 715→718).
// LLVM expectations for ABI on windows x64 are pure madness.
#[cfg(all(windows, target_pointer_width="64"))]
why_are_abi_strings_checked_by_parser!(u64x2,
    |i: u128| u64x2(i.low(), i.high()),
#[cfg(not(all(windows, target_pointer_width="64")))]
why_are_abi_strings_checked_by_parser!(u128, |i|{ i }, "C");
// Re-export the generated intrinsics from the macro-created `imp` module.
pub use self::imp::*;