--- /dev/null
+#!/bin/bash
+set -e
+cd "$(dirname "$0")"
+
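+# Copy the standard library sources from the rust-src component into
+# sysroot_src and apply the local patches on top.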
+SRC_DIR=$(dirname "$(rustup which rustc)")"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e "$SRC_DIR" ]; then
+ echo "Please install rust-src component"
+ exit 1
+fi
+
+rm -rf "$DST_DIR"
+mkdir -p "$DST_DIR/library"
+cp -r "$SRC_DIR/library" "$DST_DIR/"
+
+pushd "$DST_DIR"
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+
+# This is needed on systems where nothing is configured.
+# git really needs something here, or it will fail.
+# Even using --author is not enough.
+git config user.email || git config user.email "none@example.com"
+git config user.name || git config user.name "None"
+
+git commit -m "Initial commit" -q
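+
+# Apply each patch from the patches directory as a separate commit.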
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
--- /dev/null
+set -e
+
+export CARGO_INCREMENTAL=0
+
+export GCC_PATH=$(cat gcc_path)
+
+unamestr=$(uname)
+if [[ "$unamestr" == 'Linux' ]]; then
+ dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+ dylib_ext='dylib'
+else
+ echo "Unsupported os"
+ exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="aarch64-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+ if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+ # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ linker='-Clinker=aarch64-linux-gnu-gcc'
+ RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+ else
+ echo "Unknown non-native platform"
+ fi
+fi
+
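+# Load librustc_codegen_gcc into rustc via -Zcodegen-backend and use the sysroot
+# built in build_sysroot instead of the default one.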
+export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
+
+# FIXME(antoyo): remove once the atomic shim is gone
+if [[ "$unamestr" == 'Darwin' ]]; then
+ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
+export RUSTC_LOG=warn # display metadata load errors
+
+export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
--- /dev/null
+// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
+
+#![feature(
+ no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage,
+ extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const u8) {
+ puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ *NUM_REF as i32
+ }
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const u8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const u8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const u8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
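+// With #![no_core] there is no libstd runtime to call `main`, so the `start`
+// lang item below provides the program entry point.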
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ if argc == 3 {
+ unsafe { puts(*argv); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ }
+
+ main().report();
+ 0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+struct Unique<T: ?Sized> {
+ pointer: *const T,
+ _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+fn main() {
+ take_unique(Unique {
+ pointer: 0 as *const (),
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ //call_return_u128_pair();
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const u8 = hello as *const [u8] as *const u8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const u8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+
+ Unique {
+ pointer: 0 as *const &str,
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ // TODO(antoyo): to make this work, support weak linkage.
+ //unsafe { assert_eq!(ABC as usize, 0); }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
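+ // Casts from float to int saturate at the bounds of the target type.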
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ #[cfg(not(jit))]
+ test_tls();
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const u8);
+ }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
--- /dev/null
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+extern {
+ pub fn printf(format: *const i8, ...) -> i32;
+}
+
+fn main() {
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
+ assert_eq!(32767i16, 32767i16.saturating_add(1));
+
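+ // 128-bit integer support: bit counting, checked division, shifts, multiplication and casts.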
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+ let hundred_u128 = 100u128;
+ let hundred_i128 = 100i128;
+ let hundred_f32 = 100.0f32;
+ let hundred_f64 = 100.0f64;
+ assert_eq!(hundred_u128 as f32, 100.0);
+ assert_eq!(hundred_u128 as f64, 100.0);
+ assert_eq!(hundred_f32 as u128, 100);
+ assert_eq!(hundred_f64 as u128, 100);
+ assert_eq!(hundred_i128 as f32, 100.0);
+ assert_eq!(hundred_i128 as f64, 100.0);
+ assert_eq!(hundred_f32 as i128, 100);
+ assert_eq!(hundred_f64 as i128, 100);
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ /*unsafe {
+ test_simd();
+ }*/
+
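+ // Resume a boxed generator once to check that generators compile and run.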
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ println!("End");
+}
+
+/*#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ /*assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ // FIXME(#666) implement `#[rustc_arg_required_const(..)]` support
+ //test_mm_extract_epi8();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);*/
+}*/
+
+/*#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, -1);
+ assert_eq_m128i(_mm_set1_epi8(0), r);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, -0x80000000);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 19);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}*/
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
--- /dev/null
+/home/bouanto/Ordinateur/Programmation/Projets/gcc-build/build/gcc
--- /dev/null
+use gccjit::{ToRValue, Type};
+use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::intrinsic::ArgAbiExt;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
+ // TODO(antoyo)
+ }
+
+ fn get_param(&self, index: usize) -> Self::Value {
+ self.cx.current_func.borrow().expect("current func")
+ .get_param(index as i32)
+ .to_rvalue()
+ }
+}
+
+impl GccType for CastTarget {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ let rest_gcc_unit = self.rest.unit.gcc_type(cx);
+ let (rest_count, rem_bytes) =
+ if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ }
+ else {
+ (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
+ };
+
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_gcc_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_gcc_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_kind| {
+ option_kind.map(|kind| Reg { kind, size: self.prefix_chunk_size }.gcc_type(cx))
+ })
+ .chain((0..rest_count).map(|_| rest_gcc_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait GccType {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
+}
+
+impl GccType for Reg {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => {
+ match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ }
+ },
+ RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+pub trait FnAbiGccExt<'gcc, 'tcx> {
+ // TODO(antoyo): return a function pointer type instead?
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool) {
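+ // Count the GCC parameters up front: a padded argument adds one extra
+ // parameter and a scalar pair is passed as two parameters.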
+ let args_capacity: usize = self.args.iter().map(|arg|
+ if arg.pad.is_some() {
+ 1
+ }
+ else {
+ 0
+ } +
+ if let PassMode::Pair(_, _) = arg.mode {
+ 2
+ } else {
+ 1
+ }
+ ).sum();
+ let mut argument_tys = Vec::with_capacity(
+ if let PassMode::Indirect { .. } = self.ret.mode {
+ 1
+ }
+ else {
+ 0
+ } + args_capacity,
+ );
+
+ let return_ty =
+ match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { .. } => {
+ argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in &self.args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ argument_tys.push(ty.gcc_type(cx));
+ }
+
+ let arg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
+ PassMode::Pair(..) => {
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ unimplemented!();
+ }
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ };
+ argument_tys.push(arg_ty);
+ }
+
+ (return_ty, argument_tys, self.c_variadic)
+ }
+
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ let (return_type, params, variadic) = self.gcc_type(cx);
+ cx.context.new_function_pointer_type(None, return_type, &params, variadic)
+ }
+}
--- /dev/null
+use gccjit::{FunctionType, ToRValue};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+use crate::GccContext;
+
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: AllocatorKind, has_alloc_error_handler: bool) {
+ let context = &mods.context;
+ let usize =
+ match tcx.sess.target.pointer_width {
+ 16 => context.new_type::<u16>(),
+ 32 => context.new_type::<u32>(),
+ 64 => context.new_type::<u64>(),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = context.new_type::<i8>();
+ let i8p = i8.make_pointer();
+ let void = context.new_type::<()>();
+
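+ // For each allocator method, emit an exported __rust_* function that forwards
+ // its arguments to the allocator implementation selected by `kind`.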
+ for method in ALLOCATOR_METHODS {
+ let mut types = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ types.push(usize);
+ types.push(usize);
+ }
+ AllocatorTy::Ptr => types.push(i8p),
+ AllocatorTy::Usize => types.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let name = format!("__rust_{}", method.name);
+
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+
+ if tcx.sess.target.options.default_hidden_visibility {
+ // TODO(antoyo): set visibility.
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ // TODO(antoyo): emit unwind tables.
+ }
+
+ let callee = kind.fn_name(method.name);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
+ // TODO(antoyo): set visibility.
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let ret = context.new_call(None, callee, &args);
+ // TODO(antoyo): set tail call.
+ if output.is_some() {
+ block.end_with_return(None, ret);
+ }
+ else {
+ block.end_with_void_return(None);
+ }
+ }
+
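+ // Emit __rust_alloc_error_handler, forwarding to the user-defined handler when
+ // one exists and to the default handler otherwise.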
+ let types = [usize, usize];
+ let name = "__rust_alloc_error_handler".to_string();
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
+
+ let kind =
+ if has_alloc_error_handler {
+ AllocatorKind::Global
+ }
+ else {
+ AllocatorKind::Default
+ };
+ let callee = kind.fn_name(sym::oom);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
+ // TODO(antoyo): set visibility.
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let _ret = context.new_call(None, callee, &args);
+ // TODO(antoyo): set tail call.
+ block.end_with_void_return(None);
+}
--- /dev/null
+use gccjit::{RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::LlvmInlineAsmInner;
+use rustc_middle::bug;
+use rustc_span::Span;
+use rustc_target::asm::*;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
+ // TODO(antoyo)
+ return true;
+ }
+
+ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ let intel_dialect =
+ match asm_arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 if !options.contains(InlineAsmOptions::ATT_SYNTAX) => true,
+ _ => false,
+ };
+
+ // Collect the types of output operands
+ // FIXME(antoyo): we do this here instead of later because of a bug in libgccjit where creating the
+ // variable after the extended asm expression causes a segfault:
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380
+ let mut output_vars = FxHashMap::default();
+ let mut operand_numbers = FxHashMap::default();
+ let mut current_number = 0;
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { place, .. } => {
+ let ty =
+ match place {
+ Some(place) => place.layout.gcc_type(self.cx, false),
+ None => {
+ // If the output is discarded, we don't really care what
+ // type is used. We're just using this to tell GCC to
+ // reserve the register.
+ //dummy_output_type(self.cx, reg.reg_class())
+
+ // NOTE: if no output value, we should not create one (it will be a
+ // clobber).
+ continue;
+ },
+ };
+ let var = self.current_func().new_local(None, ty, "output_register");
+ operand_numbers.insert(idx, current_number);
+ current_number += 1;
+ output_vars.insert(idx, var);
+ }
+ InlineAsmOperandRef::InOut { out_place, .. } => {
+ let ty =
+ match out_place {
+ Some(place) => place.layout.gcc_type(self.cx, false),
+ None => {
+ // NOTE: if no output value, we should not create one.
+ continue;
+ },
+ };
+ operand_numbers.insert(idx, current_number);
+ current_number += 1;
+ let var = self.current_func().new_local(None, ty, "output_register");
+ output_vars.insert(idx, var);
+ }
+ _ => {}
+ }
+ }
+
+ // All output operands must come before the input operands, hence the 2 loops.
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::In { .. } | InlineAsmOperandRef::InOut { .. } => {
+ operand_numbers.insert(idx, current_number);
+ current_number += 1;
+ },
+ _ => (),
+ }
+ }
+
+ // Build the template string
+ let mut template_str = String::new();
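+ // GCC extended asm uses '%' to introduce operands, so literal '%' characters
+ // in the template must be escaped as "%%".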
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ if string.contains('%') {
+ for c in string.chars() {
+ if c == '%' {
+ template_str.push_str("%%");
+ }
+ else {
+ template_str.push(c);
+ }
+ }
+ }
+ else {
+ template_str.push_str(string)
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ match operands[operand_idx] {
+ InlineAsmOperandRef::Out { reg, place: Some(_), .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ if let Some(modifier) = modifier {
+ template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx]));
+ } else {
+ template_str.push_str(&format!("%{}", operand_numbers[&operand_idx]));
+ }
+ },
+ InlineAsmOperandRef::Out { place: None, .. } => {
+ unimplemented!("Out None");
+ },
+ InlineAsmOperandRef::In { reg, .. }
+ | InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ if let Some(modifier) = modifier {
+ template_str.push_str(&format!("%{}{}", modifier, operand_numbers[&operand_idx]));
+ } else {
+ template_str.push_str(&format!("%{}", operand_numbers[&operand_idx]));
+ }
+ }
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ template_str.push_str(string);
+ }
+ InlineAsmOperandRef::SymFn { .. }
+ | InlineAsmOperandRef::SymStatic { .. } => {
+ unimplemented!();
+ // Only emit the raw symbol name
+ //template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
+ }
+ }
+ }
+ }
+ }
+
+ let block = self.llbb();
+ let template_str =
+ if intel_dialect {
+ template_str
+ }
+ else {
+ // FIXME(antoyo): this might break the "m" memory constraint:
+ // https://stackoverflow.com/a/9347957/389119
+ // TODO(antoyo): only set on x86 platforms.
+ format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str)
+ };
+ let extended_asm = block.add_extended_asm(None, &template_str);
+
+ // Collect the types of output operands
+ let mut output_types = vec![];
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ let ty =
+ match place {
+ Some(place) => place.layout.gcc_type(self.cx, false),
+ None => {
+ // If the output is discarded, we don't really care what
+ // type is used. We're just using this to tell GCC to
+ // reserve the register.
+ dummy_output_type(self.cx, reg.reg_class())
+ },
+ };
+ output_types.push(ty);
+ let prefix = if late { "=" } else { "=&" };
+ let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
+
+ if place.is_some() {
+ let var = output_vars[&idx];
+ extended_asm.add_output_operand(None, &constraint, var);
+ }
+ else {
+ // NOTE: reg.to_string() returns the register name with quotes around it so
+ // remove them.
+ extended_asm.add_clobber(reg.to_string().trim_matches('"'));
+ }
+ }
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let ty =
+ match out_place {
+ Some(out_place) => out_place.layout.gcc_type(self.cx, false),
+ None => dummy_output_type(self.cx, reg.reg_class())
+ };
+ output_types.push(ty);
+ // TODO(antoyo): prefix of "+" for reading and writing?
+ let prefix = if late { "=" } else { "=&" };
+ let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
+
+ if out_place.is_some() {
+ let var = output_vars[&idx];
+ // TODO(antoyo): also specify an output operand when out_place is none: that would
+ // be the clobber but clobbers do not support general constraint like reg;
+ // they only support named registers.
+ // Not sure how we can do this. And the LLVM backend does not seem to add a
+ // clobber.
+ extended_asm.add_output_operand(None, &constraint, var);
+ }
+
+ let constraint = reg_to_gcc(reg);
+ extended_asm.add_input_operand(None, &constraint, in_value.immediate());
+ }
+ InlineAsmOperandRef::In { reg, value } => {
+ let constraint = reg_to_gcc(reg);
+ extended_asm.add_input_operand(None, &constraint, value.immediate());
+ }
+ _ => {}
+ }
+ }
+
+ // Write results to outputs
+ for (idx, op) in operands.iter().enumerate() {
+ if let InlineAsmOperandRef::Out { place: Some(place), .. }
+ | InlineAsmOperandRef::InOut { out_place: Some(place), .. } = *op
+ {
+ OperandValue::Immediate(output_vars[&idx].to_rvalue()).store(self, place);
+ }
+ }
+ }
+}
+
+/// Converts a register class to a GCC constraint code.
+// TODO(antoyo): return &'static str instead?
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
+ match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ // TODO(antoyo): add support for vector register.
+ let constraint =
+ match reg.name() {
+ "ax" => "a",
+ "bx" => "b",
+ "cx" => "c",
+ "dx" => "d",
+ "si" => "S",
+ "di" => "D",
+ // TODO(antoyo): for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+ // TODO(antoyo): in this case though, it's a clobber, so it should work as r11.
+ // Recent nightly supports clobber() syntax, so update to it. It does not seem
+ // like it's implemented yet.
+ name => name, // FIXME(antoyo): probably wrong.
+ };
+ constraint.to_string()
+ },
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("GCC backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+ .to_string(),
+ }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+impl<'gcc, 'tcx> AsmMethods for CodegenCx<'gcc, 'tcx> {
+ fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef], options: InlineAsmOptions, _line_spans: &[Span]) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Default to Intel syntax on x86
+ let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+ && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ for line in string.lines() {
+ // NOTE: gcc does not allow inline comment, so remove them.
+ let line =
+ if let Some(index) = line.rfind("//") {
+ &line[..index]
+ }
+ else {
+ line
+ };
+ template_str.push_str(line);
+ template_str.push('\n');
+ }
+ },
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape $
+ // here unlike normal inline assembly.
+ template_str.push_str(string);
+ }
+ }
+ }
+ }
+ }
+
+ let template_str =
+ if intel_syntax {
+ format!("{}\n\t.intel_syntax noprefix", template_str)
+ }
+ else {
+ format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+ };
+ // NOTE: seems like gcc will put the asm in the wrong section, so set it to .text manually.
+ let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
+ self.context.add_top_level_asm(None, &template_str);
+ }
+}
+
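+/// Converts a modifier to the GCC equivalent for the given architecture and register class.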
+fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(_) => unimplemented!(),
+ InlineAsmRegClass::Mips(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(_) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(_) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None if arch == InlineAsmArch::X86_64 => Some('q'),
+ None => Some('k'),
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
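+        // For example, a Rust operand written as "{0:e}" on x86_64 maps to GCC's 'k'
+        // template modifier, which prints the 32-bit name of the register (e.g. eax
+        // instead of rax). Illustrative, assuming GCC's documented x86 modifiers.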
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
++ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
--- /dev/null
- //let llcx = &*module.module_llvm.llcx;
- //let tm = &*module.module_llvm.tm;
+use std::fs;
+
+use gccjit::OutputKind;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_errors::Handler;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::{GccCodegenBackend, GccContext};
+
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+ {
+ let context = &module.module_llvm.context;
+
- //let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
-
- /*if cgcx.msvc_imps_needed {
- create_msvc_imps(cgcx, llcx, llmod);
- }*/
-
- // A codegen-specific pass manager is used to generate object
- // files for an GCC module.
- //
- // Apparently each of these pass managers is a one-shot kind of
- // thing, so we create a new one for each type of output. The
- // pass manager passed to the closure should be ensured to not
- // escape the closure itself, and the manager should only be
- // used once.
- /*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R
- where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
- {
- let cpm = llvm::LLVMCreatePassManager();
- llvm::LLVMAddAnalysisPasses(tm, cpm);
- llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
- f(cpm)
- }*/
-
- // Two things to note:
- // - If object files are just LLVM bitcode we write bitcode, copy it to
- // the .o file, and delete the bitcode if it wasn't otherwise
- // requested.
- // - If we don't have the integrated assembler then we need to emit
- // asm from LLVM and use `gcc` to create the object file.
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
- // TODO
- /*let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
- let thin = ThinBuffer::new(llmod);
- let data = thin.data();
-
- if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_emit_bitcode",
- &module.name[..],
- );
- if let Err(e) = fs::write(&bc_out, data) {
- let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
- diag_handler.err(&msg);
- }
- }
-
- if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_embed_bitcode",
- &module.name[..],
- );
- embed_bitcode(cgcx, llcx, llmod, Some(data));
- }
-
- if config.emit_bc_compressed {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_emit_compressed_bitcode",
- &module.name[..],
- );
- let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
- let data = bytecode::encode(&module.name, data);
- if let Err(e) = fs::write(&dst, data) {
- let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
- diag_handler.err(&msg);
- }
- }*/
- } /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) {
- unimplemented!();
- //embed_bitcode(cgcx, llcx, llmod, None);
- }*/
+
+ let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
- /*let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
- let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
- let out_c = path_to_c_string(&out);
-
- extern "C" fn demangle_callback(
- input_ptr: *const c_char,
- input_len: size_t,
- output_ptr: *mut c_char,
- output_len: size_t,
- ) -> size_t {
- let input =
- unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
-
- let input = match str::from_utf8(input) {
- Ok(s) => s,
- Err(_) => return 0,
- };
-
- let output = unsafe {
- slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
- };
- let mut cursor = io::Cursor::new(output);
-
- let demangled = match rustc_demangle::try_demangle(input) {
- Ok(d) => d,
- Err(_) => return 0,
- };
-
- if write!(cursor, "{:#}", demangled).is_err() {
- // Possible only if provided buffer is not big enough
- return 0;
- }
-
- cursor.position() as size_t
- }
-
- let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
- result.into_result().map_err(|()| {
- let msg = format!("failed to write LLVM IR to {}", out.display());
- llvm_err(diag_handler, &msg)
- })?;*/
++ // TODO(antoyo)
++ }
+
+ if config.emit_ir {
+ unimplemented!();
-
- /*with_codegen(tm, llmod, config.no_builtins, |cpm| {
- write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
- })?;*/
+ }
+
+ if config.emit_asm {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+ context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
- //with_codegen(tm, llmod, config.no_builtins, |cpm| {
- //println!("1: {}", module.name);
- match &*module.name {
- "std_example.7rcbfp3g-cgu.15" => {
- println!("Dumping reproducer {}", module.name);
- let _ = fs::create_dir("/tmp/reproducers");
- // FIXME: segfault in dump_reproducer_to_file() might be caused by
- // transmuting an rvalue to an lvalue.
- // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
- context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
- println!("Dumped reproducer {}", module.name);
- },
- _ => (),
- }
- /*let _ = fs::create_dir("/tmp/dumps");
- context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true);
- println!("Dumped {}", module.name);*/
- //println!("Compile module {}", module.name);
- context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
- //})?;
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
- //unimplemented!();
- /*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
- if let Err(e) = link_or_copy(&bc_out, &obj_out) {
- diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
- }
-
- if !config.emit_bc {
- debug!("removing_bitcode {:?}", bc_out);
- if let Err(e) = fs::remove_file(&bc_out) {
- diag_handler.err(&format!("failed to remove bitcode: {}", e));
- }
- }*/
++ match &*module.name {
++ "std_example.7rcbfp3g-cgu.15" => {
++ println!("Dumping reproducer {}", module.name);
++ let _ = fs::create_dir("/tmp/reproducers");
++ // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
++ // transmuting an rvalue to an lvalue.
++ // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
++ context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
++ println!("Dumped reproducer {}", module.name);
++ },
++ _ => (),
++ }
++ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+ }
+
+ EmitObj::Bitcode => {
-
- //drop(handlers);
++ // TODO(antoyo)
+ }
+
+ EmitObj::None => {}
+ }
- /*use super::lto::{Linker, ModuleBuffer};
- // Sort the modules by name to ensure to ensure deterministic behavior.
- modules.sort_by(|a, b| a.name.cmp(&b.name));
- let (first, elements) =
- modules.split_first().expect("Bug! modules must contain at least one module.");
-
- let mut linker = Linker::new(first.module_llvm.llmod());
- for module in elements {
- let _timer =
- cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
- let buffer = ModuleBuffer::new(module.module_llvm.llmod());
- linker.add(&buffer.data()).map_err(|()| {
- let msg = format!("failed to serialize module {:?}", module.name);
- llvm_err(&diag_handler, &msg)
- })?;
- }
- drop(linker);
- Ok(modules.remove(0))*/
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
+pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+ unimplemented!();
+}
--- /dev/null
- Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage.
+use std::env;
+use std::sync::Once;
+use std::time::Instant;
+
+use gccjit::{
+ Context,
+ FunctionType,
+ GlobalKind,
+};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::mono::Linkage;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+
+use crate::{GccContext, create_function_calling_initializers};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+ match linkage {
+ Linkage::External => GlobalKind::Imported,
+ Linkage::AvailableExternally => GlobalKind::Imported,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => unimplemented!(),
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => GlobalKind::Internal,
+ Linkage::Private => GlobalKind::Internal,
- Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce.
++ Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+ match linkage {
+ Linkage::External => FunctionType::Exported,
+ Linkage::AvailableExternally => FunctionType::Extern,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
- // TODO: only set on x86 platforms.
++ Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => FunctionType::Internal,
+ Linkage::Private => FunctionType::Internal,
+ Linkage::ExternalWeak => unimplemented!(),
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<GccContext>, u64) {
+ let prof_timer = tcx.prof.generic_activity("codegen_module");
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
+ let time_to_codegen = start_time.elapsed();
+ drop(prof_timer);
+
+ // We assume that the cost to run GCC on a CGU is proportional to
+ // the time we needed for codegenning it.
+ let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
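+    // For instance, a CGU that took 1.5s to codegen gets a cost of
+    // 1 * 1_000_000_000 + 500_000_000 = 1_500_000_000, i.e. the duration in nanoseconds.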
+
+ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<GccContext> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ // Instantiate monomorphizations without filling out definitions yet...
+ //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+ let context = Context::default();
- //context.set_dump_code_on_compile(true);
++ // TODO(antoyo): only set on x86 platforms.
+ context.add_command_line_option("-masm=intel");
+ for arg in &tcx.sess.opts.cg.llvm_args {
+ context.add_command_line_option(arg);
+ }
+ context.add_command_line_option("-fno-semantic-interposition");
- //context.set_dump_everything(true);
- //context.set_keep_intermediates(true);
++ if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
++ context.set_dump_code_on_compile(true);
++ }
+ if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+ context.set_dump_initial_gimple(true);
+ }
+ context.set_debug_info(true);
- //println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _);
++ if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
++ context.set_dump_everything(true);
++ }
++ if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
++ context.set_keep_intermediates(true);
++ }
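++        // The CG_GCCJIT_* dump options above can be enabled when debugging codegen,
++        // e.g. (illustrative invocation):
++        //     CG_GCCJIT_DUMP_GIMPLE=1 CG_GCCJIT_DUMP_CODE=1 cargo build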
+
+ {
+ let cx = CodegenCx::new(&context, cgu, tcx);
+
+ static START: Once = Once::new();
+ START.call_once(|| {
+ let initializer_name = format!("__gccGlobalCrateInit{}", tcx.crate_name(LOCAL_CRATE));
+ let func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[], initializer_name, false);
+ let block = func.new_block("initial");
+ create_function_calling_initializers(tcx, &context, block);
+ block.end_with_void_return(None);
+ });
+
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+
+ cx.global_init_block.end_with_void_return(None);
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: GccContext {
+ context
+ },
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
+
+pub fn write_compressed_metadata<'tcx>(tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut GccContext) {
+ use snap::write::FrameEncoder;
+ use std::io::Write;
+
+ // Historical note:
+ //
+ // When using link.exe it was seen that the section name `.note.rustc`
+ // was getting shortened to `.note.ru`, and according to the PE and COFF
+ // specification:
+ //
+ // > Executable images do not use a string table and do not support
+ // > section names longer than 8 characters
+ //
+ // https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+ //
+ // As a result, we choose a slightly shorter name! As to why
+ // `.note.rustc` works on MinGW, see
+ // https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
+ let section_name = if tcx.sess.target.is_like_osx { "__DATA,.rustc" } else { ".rustc" };
+
+ let context = &gcc_module.context;
+ let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+ FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
+
+ let name = exported_symbols::metadata_symbol_name(tcx);
+ let typ = context.new_array_type(None, context.new_type::<u8>(), compressed.len() as i32);
+ let global = context.new_global(None, GlobalKind::Exported, typ, name);
+ global.global_set_initializer(&compressed);
+ global.set_link_section(section_name);
+
+ // Also generate a .section directive to force no
+ // flags, at least for ELF outputs, so that the
+ // metadata doesn't get loaded into memory.
+ let directive = format!(".section {}", section_name);
+ context.add_top_level_asm(None, &directive);
+}
--- /dev/null
- // TODO
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::{Deref, Range};
+
+use gccjit::FunctionType;
+use gccjit::{
+ BinaryOp,
+ Block,
+ ComparisonOp,
+ Function,
+ LValue,
+ RValue,
+ ToRValue,
+ Type,
+ UnaryOp,
+};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ BaseTypeMethods,
+ BuilderMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ LayoutTypeMethods,
+ HasCodegen,
+ OverflowOp,
+ StaticBuilderMethods,
+};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+ self,
+ Align,
+ HasDataLayout,
+ LayoutOf,
+ Size,
+ TargetDataLayout,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
- // TODO: remove this variable.
++// TODO(antoyo)
+type Funclet = ();
+
- // TODO: does this make sense?
++// TODO(antoyo): remove this variable.
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+ Max,
+ Min,
+}
+
+trait EnumClone {
+ fn clone(&self) -> Self;
+}
+
+impl EnumClone for AtomicOrdering {
+ fn clone(&self) -> Self {
+ match *self {
+ AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+ AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+ AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+ AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+ AtomicOrdering::Release => AtomicOrdering::Release,
+ AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
+ AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
+ }
+ }
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+ pub cx: &'a CodegenCx<'gcc, 'tcx>,
+ pub block: Option<Block<'gcc>>,
+ stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
+ Builder {
+ cx,
+ block: None,
+ stack_var_count: Cell::new(0),
+ }
+ }
+
+ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
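+        // Rough sketch of the loop built below (pseudocode; on failure, compare_exchange
+        // also refreshes previous_value through the expected pointer):
+        //     previous_value = atomic_load(dst);
+        //     while previous_value < src && !compare_exchange(dst, &previous_value, src) {}
+        //     return previous_value; // Max shown; Min uses > instead of <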
+ let size = self.cx.int_width(src.get_type()) / 8;
+
+ let func = self.current_func();
+
+ let load_ordering =
+ match order {
- //let mut fn_ty = self.cx.val_ty(func);
- // Strip off pointers
- /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
- fn_ty = self.cx.element_type(fn_ty);
- }*/
-
- /*assert!(
- self.cx.type_kind(fn_ty) == TypeKind::Function,
- "builder::{} not passed a function, but {:?}",
- typ,
- fn_ty
- );
-
- let param_tys = self.cx.func_params_types(fn_ty);
-
- let all_args_match = param_tys
- .iter()
- .zip(args.iter().map(|&v| self.val_ty(v)))
- .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
-
++ // TODO(antoyo): does this make sense?
+ AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+ _ => order.clone(),
+ };
+ let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
+ let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+ let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+ self.llbb().add_assignment(None, previous_var, previous_value);
+ self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+ let while_block = func.new_block("while");
+ let after_block = func.new_block("after_while");
+ self.llbb().end_with_jump(None, while_block);
+
+        // NOTE: since jumps were added, which compare_exchange does not expect, the current blocks in the
+        // state need to be updated.
+ self.block = Some(while_block);
+ *self.cx.current_block.borrow_mut() = Some(while_block);
+
+ let comparison_operator =
+ match operation {
+ ExtremumOperation::Max => ComparisonOp::LessThan,
+ ExtremumOperation::Min => ComparisonOp::GreaterThan,
+ };
+
+ let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+ let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+ let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+ let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+ while_block.end_with_conditional(None, cond, while_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+ // state need to be updated.
+ self.block = Some(after_block);
+ *self.cx.current_block.borrow_mut() = Some(after_block);
+
+ return_value.to_rvalue()
+ }
+
+ fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
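+        // This lowers to a builtin call of the form (illustrative, for a 4-byte operand):
+        //     __atomic_compare_exchange_4(dst, &expected, src, weak, order, failure_order)
+        // which returns whether the exchange actually happened.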
+ let size = self.cx.int_width(src.get_type());
+ let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+ let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+ let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+
+        // NOTE: not sure why, but src sometimes has the wrong type here, so cast it to the builtin's parameter type.
+ let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, int_type);
+ self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+ }
+
+ pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
+ self.llbb().add_assignment(None, lvalue, value);
+ }
+
+ fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
- /*debug!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
- /*println!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let param_count = func.get_param_count();
+ for (index, arg) in args.iter().enumerate().take(param_count) {
+ let param = func.get_param(index as i32);
+ let param = param.to_rvalue().get_type();
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .enumerate()
+ .map(|(_i, (expected_ty, &actual_val))| {
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
- //let mut fn_ty = self.cx.val_ty(func);
- // Strip off pointers
- /*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
- fn_ty = self.cx.element_type(fn_ty);
- }*/
-
- /*assert!(
- self.cx.type_kind(fn_ty) == TypeKind::Function,
- "builder::{} not passed a function, but {:?}",
- typ,
- fn_ty
- );
-
- let param_tys = self.cx.func_params_types(fn_ty);
-
- let all_args_match = param_tys
- .iter()
- .zip(args.iter().map(|&v| self.val_ty(v)))
- .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
-
+ self.bitcast(actual_val, expected_ty)
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
- /*debug!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
- /*println!(
- "type mismatch in function call of {:?}. \
- Expected {:?} for param {}, got {:?}; injecting bitcast",
- func, expected_ty, i, actual_ty
- );*/
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
+ for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
+ let param = gcc_func.get_param_type(index);
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .enumerate()
+ .map(|(_i, (expected_ty, &actual_val))| {
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
- let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
+ self.bitcast(actual_val, expected_ty)
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- //assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
++ let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
+ let stored_ty = self.cx.val_ty(val);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
- /*debug!(
- "type mismatch in store. \
- Expected {:?}, got {:?}; inserting bitcast",
- dest_ptr_ty, stored_ptr_ty
- );*/
- /*println!(
- "type mismatch in store. \
- Expected {:?}, got {:?}; inserting bitcast",
- dest_ptr_ty, stored_ptr_ty
- );*/
- //ptr
+ if dest_ptr_ty == stored_ptr_ty {
+ ptr
+ }
+ else {
- //debug!("call {:?} with args ({:?})", func, args);
-
- // TODO: remove when the API supports a different type for functions.
+ self.bitcast(ptr, stored_ptr_ty)
+ }
+ }
+
+ pub fn current_func(&self) -> Function<'gcc> {
+ self.block.expect("block").get_function()
+ }
+
+ fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
++ // TODO(antoyo): remove when the API supports a different type for functions.
+ let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+ let args = self.check_call("call", func, args);
- //debug!("func ptr call {:?} with args ({:?})", func, args);
-
+
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+ // That's why we assign the result to a local or call add_eval().
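+        // In other words, the call is emitted either as an assignment
+        // "returnValueN = func(args)" or through add_eval(), which evaluates the
+        // call and discards its result.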
+ let return_type = func.get_return_type();
+ let current_block = self.current_block.borrow().expect("block");
+ let void_type = self.context.new_type::<()>();
+ let current_func = current_block.get_function();
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+ else {
+ current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
+            // Return a dummy value when there is no return value.
+ self.context.new_rvalue_from_long(self.isize_type, 0)
+ }
+ }
+
+ fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
+ let args = self.check_ptr_call("call", func_ptr, args);
- // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+ // That's why we assign the result to a local or call add_eval().
+ let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
+ let mut return_type = gcc_func.get_return_type();
+ let current_block = self.current_block.borrow().expect("block");
+ let void_type = self.context.new_type::<()>();
+ let current_func = current_block.get_function();
+
- // FIXME: As a temporary workaround for unsupported LLVM intrinsics.
++ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+ if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
+ return_type = self.int_type;
+ }
+
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ result.to_rvalue()
+ }
+ else {
+ if gcc_func.get_param_count() == 0 {
- //debug!("overflow_call {:?} with args ({:?})", func, args);
-
- //let bundle = funclet.map(|funclet| funclet.bundle());
- //let bundle = bundle.as_ref().map(|b| &*b.raw);
-
++ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+ }
+ else {
+ current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ }
+            // Return a dummy value when there is no return value.
+ let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+ current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+ result.to_rvalue()
+ }
+ }
+
+ pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
- // TODO: return the new_call() directly? Since the overflow function has no side-effects.
+        // gccjit requires the result of a function call to be used, even when it is not needed.
+ // That's why we assign the result to a local.
+ let return_type = self.context.new_type::<bool>();
+ let current_block = self.current_block.borrow().expect("block");
+ let current_func = current_block.get_function();
- // TODO
- /*debug!("invoke {:?} with args ({:?})", func, args);
-
- let args = self.check_call("invoke", func, args);
- let bundle = funclet.map(|funclet| funclet.bundle());
- let bundle = bundle.as_ref().map(|b| &*b.raw);
-
- unsafe {
- llvm::LLVMRustBuildInvoke(
- self.llbuilder,
- func,
- args.as_ptr(),
- args.len() as c_uint,
- then,
- catch,
- bundle,
- UNNAMED,
- )
- }*/
++ // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+ type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx()
+ }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> LayoutOf for Builder<'_, '_, 'tcx> {
+ type Ty = Ty<'tcx>;
+ type TyAndLayout = TyAndLayout<'tcx>;
+
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+ self.cx.layout_of(ty)
+ }
+}
+
+impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
+ type Target = CodegenCx<'gcc, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+ type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ let mut bx = Builder::with_cx(cx);
+ *cx.current_block.borrow_mut() = Some(block);
+ bx.block = Some(block);
+ bx
+ }
+
+ fn build_sibling_block(&mut self, name: &str) -> Self {
+ let block = self.append_sibling_block(name);
+ Self::build(self.cx, block)
+ }
+
+ fn llbb(&self) -> Block<'gcc> {
+ self.block.expect("block")
+ }
+
+ fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+ let func = cx.rvalue_as_function(func);
+ func.new_block(name)
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+ let func = self.current_func();
+ func.new_block(name)
+ }
+
+ fn ret_void(&mut self) {
+ self.llbb().end_with_void_return(None)
+ }
+
+ fn ret(&mut self, value: RValue<'gcc>) {
+ let value =
+ if self.structs_as_pointer.borrow().contains(&value) {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ value.dereference(None).to_rvalue()
+ }
+ else {
+ value
+ };
+ self.llbb().end_with_return(None, value);
+ }
+
+ fn br(&mut self, dest: Block<'gcc>) {
+ self.llbb().end_with_jump(None, dest)
+ }
+
+ fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+ self.llbb().end_with_conditional(None, cond, then_block, else_block)
+ }
+
+ fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+ let mut gcc_cases = vec![];
+ let typ = self.val_ty(value);
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(typ, on_val);
+ gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+ }
+ self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
+ }
+
+ fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
+ self.llbb().end_with_conditional(None, condition, then, catch);
+ self.context.new_rvalue_from_int(self.int_type, 0)
+
- // FIXME: this should not be required.
++ // TODO(antoyo)
+ }
+
+ fn unreachable(&mut self) {
+ let func = self.context.get_builtin_function("__builtin_unreachable");
+ let block = self.block.expect("block");
+ block.add_eval(None, self.context.new_call(None, func, &[]));
+ let return_type = block.get_function().get_return_type();
+ let void_type = self.context.new_type::<()>();
+ if return_type == void_type {
+ block.end_with_void_return(None)
+ }
+ else {
+ let return_value = self.current_func()
+ .new_local(None, return_type, "unreachableReturn");
+ block.end_with_return(None, return_value)
+ }
+ }
+
+ fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to unsigned?
++ // FIXME(antoyo): this should not be required.
+ if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ a + b
+ }
+
+ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ if a.get_type() != b.get_type() {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ a - b
+ }
+
+ fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to unsigned?
- // TODO: poison if not exact.
++ // TODO(antoyo): convert the arguments to unsigned?
+ a / b
+ }
+
+ fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: convert the arguments to signed?
++ // TODO(antoyo): convert the arguments to unsigned?
++ // TODO(antoyo): poison if not exact.
+ a / b
+ }
+
+ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: posion if not exact.
- // FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
++ // TODO(antoyo): convert the arguments to signed?
+ a / b
+ }
+
+ fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: this seems to produce the wrong result.
++        // TODO(antoyo): poison if not exact.
++ // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+ // should be the same.
+ let typ = a.get_type().to_signed(self);
+ let a = self.context.new_cast(None, a, typ);
+ let b = self.context.new_cast(None, b, typ);
+ a / b
+ }
+
+ fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a / b
+ }
+
+ fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a % b
+ }
+
+ fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a % b
+ }
+
+ fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ if a.get_type() == self.cx.float_type {
+ let fmodf = self.context.get_builtin_function("fmodf");
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
++ // FIXME(antoyo): this seems to produce the wrong result.
+ return self.context.new_call(None, fmodf, &[a, b]);
+ }
+ assert_eq!(a.get_type(), self.cx.double_type);
+
+ let fmod = self.context.get_builtin_function("fmod");
+ return self.context.new_call(None, fmod, &[a, b]);
+ }
+
+ fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- //println!("shl: {:?} -> {:?}", a, b_type);
++ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("shl: {:?} -> {:?}", result, a_type);
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a << b;
- //println!("shl: {:?} -> {:?}", b, a_type);
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
- // TODO: cast to unsigned to do a logical shift if that does not work.
+ let b = self.context.new_cast(None, b, a_type);
+ a << b
+ }
+ else {
+ a << b
+ }
+ }
+
+ fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- //println!("lshl: {:?} -> {:?}", a, b_type);
++ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
++ // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("lshl: {:?} -> {:?}", result, a_type);
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a >> b;
- //println!("lshl: {:?} -> {:?}", b, a_type);
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- // TODO: check whether behavior is an arithmetic shift for >> .
- // FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ let b = self.context.new_cast(None, b, a_type);
+ a >> b
+ }
+ else {
+ a >> b
+ }
+ }
+
+ fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- //println!("ashl: {:?} -> {:?}", a, b_type);
++        // TODO(antoyo): check whether the behavior of >> is an arithmetic shift.
++ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
- //println!("ashl: {:?} -> {:?}", result, a_type);
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a >> b;
- //println!("ashl: {:?} -> {:?}", b, a_type);
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
- // FIXME: hack by putting the result in a variable to workaround this bug:
+ let b = self.context.new_cast(None, b, a_type);
+ a >> b
+ }
+ else {
+ a >> b
+ }
+ }
+
+ fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
- // FIXME: hack by putting the result in a variable to workaround this bug:
++ // FIXME(antoyo): hack by putting the result in a variable to workaround this bug:
+ // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
+ if a.get_type() != b.get_type() {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ let res = self.current_func().new_local(None, b.get_type(), "andResult");
+ self.llbb().add_assignment(None, res, a & b);
+ res.to_rvalue()
+ }
+
+ fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use new_unary_op()?
++ // FIXME(antoyo): hack by putting the result in a variable to workaround this bug:
+ // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
+ let res = self.current_func().new_local(None, b.get_type(), "orResult");
+ self.llbb().add_assignment(None, res, a | b);
+ res.to_rvalue()
+ }
+
+ fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a ^ b
+ }
+
+ fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: should generate poison value?
++ // TODO(antoyo): use new_unary_op()?
+ self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
+ }
+
+ fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+
+ fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let operation =
+ if a.get_type().is_bool() {
+ UnaryOp::LogicalNegate
+ }
+ else {
+ UnaryOp::BitwiseNegate
+ };
+ self.cx.context.new_unary_op(None, operation, a.get_type(), a)
+ }
+
+ fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
- /*unsafe {
- let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
++ // TODO(antoyo): should generate poison value?
+ a - b
+ }
+
+ fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
+ }
+
+ fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
+ }
+
+ fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
+ }
+
+ fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
- llvm::LLVMRustSetHasUnsafeAlgebra(instr);
- instr
- }*/
+ }
+
+ fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- // TODO: remove duplication with intrinsic?
+ }
+
+ fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+ use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
+
+ let new_kind =
+ match typ.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
- // TODO: is it correct to use rhs type instead of the parameter typ?
++ // TODO(antoyo): remove duplication with intrinsic?
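++        // All of these follow the GCC overflow-builtin convention, e.g.
++        //     bool __builtin_sadd_overflow(int a, int b, int *res);
++        // writes the wrapped sum to *res and returns true on overflow.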
+ let name =
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I8) => "__builtin_add_overflow",
+ Int(I16) => "__builtin_add_overflow",
+ Int(I32) => "__builtin_sadd_overflow",
+ Int(I64) => "__builtin_saddll_overflow",
+ Int(I128) => "__builtin_add_overflow",
+
+ Uint(U8) => "__builtin_add_overflow",
+ Uint(U16) => "__builtin_add_overflow",
+ Uint(U32) => "__builtin_uadd_overflow",
+ Uint(U64) => "__builtin_uaddll_overflow",
+ Uint(U128) => "__builtin_add_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I8) => "__builtin_sub_overflow",
+ Int(I16) => "__builtin_sub_overflow",
+ Int(I32) => "__builtin_ssub_overflow",
+ Int(I64) => "__builtin_ssubll_overflow",
+ Int(I128) => "__builtin_sub_overflow",
+
+ Uint(U8) => "__builtin_sub_overflow",
+ Uint(U16) => "__builtin_sub_overflow",
+ Uint(U32) => "__builtin_usub_overflow",
+ Uint(U64) => "__builtin_usubll_overflow",
+ Uint(U128) => "__builtin_sub_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I8) => "__builtin_mul_overflow",
+ Int(I16) => "__builtin_mul_overflow",
+ Int(I32) => "__builtin_smul_overflow",
+ Int(I64) => "__builtin_smulll_overflow",
+ Int(I128) => "__builtin_mul_overflow",
+
+ Uint(U8) => "__builtin_mul_overflow",
+ Uint(U16) => "__builtin_mul_overflow",
+ Uint(U32) => "__builtin_umul_overflow",
+ Uint(U64) => "__builtin_umulll_overflow",
+ Uint(U128) => "__builtin_mul_overflow",
+
+ _ => unreachable!(),
+ },
+ };
+
+ let intrinsic = self.context.get_builtin_function(&name);
+ let res = self.current_func()
- // FIXME: this check that we don't call get_aligned() a second time on a time.
++ // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+ .new_local(None, rhs.get_type(), "binopResult")
+ .get_address(None);
+ let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+ (res.dereference(None).to_rvalue(), overflow)
+ }
+
+ fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
- // TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial.
++        // FIXME(antoyo): this check ensures that we don't call get_aligned() a second time on a type.
+ // Ideally, we shouldn't need to do this check.
+ let aligned_type =
+ if ty == self.cx.u128_type || ty == self.cx.i128_type {
+ ty
+ }
+ else {
+ ty.get_aligned(align.bytes())
+ };
- /*unsafe {
- let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
- llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
- alloca
- }*/
++ // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
+ self.stack_var_count.set(self.stack_var_count.get() + 1);
+ self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+ }
+
+ fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
- llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
- alloca
- }*/
+ }
+
+ fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
- // TODO: use ty.
+ }
+
+ fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
- // TODO: handle align.
++ // TODO(antoyo): use ty.
+ let block = self.llbb();
+ let function = block.get_function();
+ // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+ // the current basic block. Otherwise, it could be used in another basic block, causing a
+ // dereference after a drop, for instance.
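+        // In other words, emit "loadedValueN = *ptr;" in this block rather than
+        // returning the bare "*ptr" expression to be evaluated later.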
- // TODO: use ty.
- //println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
++ // TODO(antoyo): handle align.
+ let deref = ptr.dereference(None).to_rvalue();
+ let value_type = deref.get_type();
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
+ block.add_assignment(None, loaded_value, deref);
+ loaded_value.to_rvalue()
+ }
+
+ fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
- //println!("6");
++ // TODO(antoyo): use ty.
+ let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
- // TODO: use ty.
- // TODO: handle alignment.
+ ptr.dereference(None).to_rvalue()
+ }
+
+ fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
- //debug!("PlaceRef::load: {:?}", place);
-
++ // TODO(antoyo): use ty.
++ // TODO(antoyo): handle alignment.
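++        // This lowers to a builtin call like (illustrative, for a 4-byte load):
++        //     loadedValue = __atomic_load_4(ptr, ordering);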
+ let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+ self.context.new_call(None, atomic_load, &[ptr, ordering])
+ }
+
+ fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
- let const_llval = None;
- /*unsafe {
- if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
- if llvm::LLVMIsGlobalConstant(global) == llvm::True {
- const_llval = llvm::LLVMGetInitializer(global);
- }
- }
- }*/
- let llval = const_llval.unwrap_or_else(|| {
- let load = self.load(place.llval.get_type(), place.llval, place.align);
- if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
- scalar_load_metadata(self, load, scalar);
- }
- load
- });
- OperandValue::Immediate(self.to_immediate(llval, place.layout))
+ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+ if place.layout.is_zst() {
+ return OperandRef::new_zst(self, place.layout);
+ }
+
+ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+ let vr = scalar.valid_range.clone();
+ match scalar.value {
+ abi::Int(..) => {
+ let range = scalar.valid_range_exclusive(bx);
+ if range.start != range.end {
+ bx.range_metadata(load, range);
+ }
+ }
+ abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
+ bx.nonnull_metadata(load);
+ }
+ _ => {}
+ }
+ }
+
+ let val =
+ if let Some(llextra) = place.llextra {
+ OperandValue::Ref(place.llval, Some(llextra), place.align)
+ }
+ else if place.layout.is_gcc_immediate() {
- // TODO
- /*if self.sess().target.target.arch == "amdgpu" {
- // amdgpu/LLVM does something weird and thinks a i64 value is
- // split into a v2i32, halving the bitwidth LLVM expects,
- // tripping an assertion. So, for now, just disable this
- // optimization.
- return;
- }
-
- unsafe {
- let llty = self.cx.val_ty(load);
- let v = [
- self.cx.const_uint_big(llty, range.start),
- self.cx.const_uint_big(llty, range.end),
- ];
-
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
- );
- }*/
++ let load = self.load(place.llval.get_type(), place.llval, place.align);
++ if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
++ scalar_load_metadata(self, load, scalar);
++ }
++ OperandValue::Immediate(self.to_immediate(load, place.layout))
+ }
+ else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+ let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+ let pair_type = place.layout.gcc_type(self, false);
+
+ let mut load = |i, scalar: &abi::Scalar, align| {
+ let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+ let load = self.load(llptr.get_type(), llptr, align);
+ scalar_load_metadata(self, load, scalar);
+ if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+ };
+
+ OperandValue::Pair(
+ load(0, a, place.align),
+ load(1, b, place.align.restrict_for_offset(b_offset)),
+ )
+ }
+ else {
+ OperandValue::Ref(place.llval, None, place.align)
+ };
+
+ OperandRef { val, layout: place.layout }
+ }
+
+ fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
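+        // Sketch of the generated loop (pseudocode):
+        //     current = start;
+        //     while current != end { *current = cg_elem; current += 1; }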
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let mut header_bx = self.build_sibling_block("repeat_loop_header");
+ let mut body_bx = self.build_sibling_block("repeat_loop_body");
+ let next_bx = self.build_sibling_block("repeat_loop_next");
+
+ let ptr_type = start.get_type();
+ let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+ let current_val = current.to_rvalue();
+ self.assign(current, start);
+
+ self.br(header_bx.llbb());
+
+ let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
+ header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
+
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+
+ let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
+ body_bx.llbb().add_assignment(None, current, next);
+ body_bx.br(header_bx.llbb());
+
+ next_bx
+ }
+
+ fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
- // TODO
- /*unsafe {
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_nonnull as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
- );
- }*/
++ // TODO(antoyo)
+ }
+
+ fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
- //debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
++ // TODO(antoyo)
+ }
+
+ fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+ self.store_with_flags(val, ptr, align, MemFlags::empty())
+ }
+
+ fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
- /*let align =
- if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
- llvm::LLVMSetAlignment(store, align);
- if flags.contains(MemFlags::VOLATILE) {
- llvm::LLVMSetVolatile(store, llvm::True);
- }
- if flags.contains(MemFlags::NONTEMPORAL) {
- // According to LLVM [1] building a nontemporal store must
- // *always* point to a metadata value of the integer 1.
- //
- // [1]: http://llvm.org/docs/LangRef.html#store-instruction
- let one = self.cx.const_i32(1);
- let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
- llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
- }*/
- // NOTE: dummy value here since it's never used. FIXME: API should not return a value here?
+ let ptr = self.check_store(val, ptr);
+ self.llbb().add_assignment(None, ptr.dereference(None), val);
- // TODO: handle alignment.
++ // TODO(antoyo): handle align and flags.
++ // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+ self.cx.context.new_rvalue_zero(self.type_i32())
+ }
+
+ fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
- // FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because
++ // TODO(antoyo): handle alignment.
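++ // e.g. an aligned 4-byte store lowers to a call to the GCC builtin
++ // __atomic_store_4(ptr, value, ordering).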
+ let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+
- // FIXME: would be safer if doing the same thing (loop) as gep.
- // TODO: specify inbounds somehow.
++ // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+ // the following cast is required to avoid this error:
+ // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
+ let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+ let value = self.context.new_cast(None, value, int_type);
+ self.llbb()
+ .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+ }
+
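++ // A GEP with several indices is lowered as a chain of array accesses: each step
++ // computes the address of `result[index]` and continues from that address.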
+ fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ let mut result = ptr;
+ for index in indices {
+ result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
+ }
+ result
+ }
+
+ fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
- let array = ptr.dereference(None); // TODO: assert that first index is 0?
++ // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
++ // TODO(antoyo): specify inbounds somehow.
+ match indices.len() {
+ 1 => {
+ self.context.new_array_access(None, ptr, indices[0]).get_address(None)
+ },
+ 2 => {
- // FIXME: it would be better if the API only called this on struct, not on arrays.
++ let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
+ self.context.new_array_access(None, array, indices[1]).get_address(None)
+ },
+ _ => unimplemented!(),
+ }
+ }
+
+ fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- // TODO: check that it indeed truncate the value.
- //println!("trunc: {:?} -> {:?}", value, dest_ty);
++ // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value = ptr.dereference(None).to_rvalue();
+
+ if value_type.is_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, value, index);
+ element.get_address(None)
+ }
+ else if let Some(vector_type) = value_type.is_vector() {
+ let array_type = vector_type.get_element_type().make_pointer();
+ let array = self.bitcast(ptr, array_type);
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, array, index);
+ element.get_address(None)
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+
+ /* Casts */
+ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: check that it indeed sign extend the value.
- //println!("Sext {:?} to {:?}", value, dest_ty);
- //if let Some(vector_type) = value.get_type().is_vector() {
++ // TODO(antoyo): check that it indeed truncates the value.
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: nothing to do as it is only for LLVM?
++ // TODO(antoyo): check that it indeed sign extends the value.
+ if dest_ty.is_vector().is_some() {
- /*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
- println!("Casting {:?} to {:?}", value, dest_type);
- return self.context.new_cast(None, value, dest_type);*/
++ // TODO(antoyo): nothing to do as it is only for LLVM?
+ return value;
- //println!("7: fptoui: {:?} to {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("8");
- ret
- //unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("1: uitofp: {:?} -> {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("2");
- ret
++ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("3: sitofp: {:?} -> {:?}", value, dest_ty);
- let ret = self.context.new_cast(None, value, dest_ty);
- //println!("4");
- ret
++ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- // TODO: make sure it trancates.
++ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
++ // TODO(antoyo): make sure it truncates.
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
+ }
+
+ fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
+ }
+
+ fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.cx.const_bitcast(value, dest_ty)
+ }
+
+ fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+ // NOTE: is_signed is for the value, not for dest_typ.
- //println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
+ self.cx.context.new_cast(None, value, dest_typ)
+ }
+
+ fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
- //self.cx.context.new_cast(None, value, dest_ty)
+ let val_type = value.get_type();
+ match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
+ (false, true) => {
+ // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
+ // a pointer, which is not supported by gccjit.
+ return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
+ },
+ (false, false) => {
+ // When they are not pointers, we want a transmute (or reinterpret_cast).
- // TODO: handle aligns and is_volatile.
+ self.bitcast(value, dest_ty)
+ },
+ (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+ (true, false) => unimplemented!(),
+ }
+ }
+
+ /* Comparisons */
+ fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+ if lhs.get_type() != rhs.get_type() {
+ // NOTE: hack because we can end up trying to cast a vector type to the same vector type,
+ // so only insert a cast when the types differ in their debug representation.
+ if format!("{:?}", lhs.get_type()) != format!("{:?}", rhs.get_type()) {
+ rhs = self.context.new_cast(None, rhs, lhs.get_type());
+ }
+ }
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+
+ fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+
+ /* Miscellaneous instructions */
+ fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+ let val = self.load(src.get_type(), src, src_align);
+ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+ self.store_with_flags(val, ptr, dst_align, flags);
+ return;
+ }
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+ let memcpy = self.context.get_builtin_function("memcpy");
+ let block = self.block.expect("block");
- // TODO: handle is_volatile.
++ // TODO(antoyo): handle aligns and is_volatile.
+ block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+ }
+
+ fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memmove.
+ let val = self.load(src.get_type(), src, src_align);
+ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+ self.store_with_flags(val, ptr, dst_align, flags);
+ return;
+ }
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+
+ let memmove = self.context.get_builtin_function("memmove");
+ let block = self.block.expect("block");
- // TODO: handle aligns and is_volatile.
- //println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
++ // TODO(antoyo): handle is_volatile.
+ block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+ }
+
+ fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let ptr = self.pointercast(ptr, self.type_i8p());
+ let memset = self.context.get_builtin_function("memset");
+ let block = self.block.expect("block");
- //unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
++ // TODO(antoyo): handle align and is_volatile.
+ let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+ let size = self.intcast(size, self.type_size_t(), false);
+ block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+ }
+
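++ // NOTE: there is no direct select/ternary equivalent here, so `select` is lowered
++ // to a diamond of blocks: branch on `cond`, assign the chosen value to a local in
++ // each arm, and read the local back in the join block.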
+ fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+ let func = self.current_func();
+ let variable = func.new_local(None, then_val.get_type(), "selectVar");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+ then_block.add_assignment(None, variable, then_val);
+ then_block.end_with_jump(None, after_block);
+
+ if then_val.get_type() != else_val.get_type() {
+ else_val = self.context.new_cast(None, else_val, then_val.get_type());
+ }
+ else_block.add_assignment(None, variable, else_val);
+ else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+ // state need to be updated.
+ self.block = Some(after_block);
+ *self.cx.current_block.borrow_mut() = Some(after_block);
+
+ variable.to_rvalue()
+ }
+
+ #[allow(dead_code)]
+ fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- //unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
+ }
+
+ fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- let elt_ty = self.cx.val_ty(elt);
- let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
- let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
- let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
- self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
- }*/
+ }
+
+ fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- // FIXME: it would be better if the API only called this on struct, not on arrays.
+ }
+
+ fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- /*assert_eq!(idx as c_uint as u64, idx);
- unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
++ // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ if value_type.is_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, aggregate_value, index);
+ element.get_address(None)
+ }
+ else if value_type.is_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
- // FIXME: it would be better if the API only called this on struct, not on arrays.
+ }
+
+ fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
- // TODO
- /*unsafe {
- llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED)
- }*/
++ // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ let lvalue =
+ if value_type.is_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ self.context.new_array_access(None, aggregate_value, index)
+ }
+ else if value_type.is_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ };
+ self.llbb().add_assignment(None, lvalue, value);
+
+ aggregate_value
+ }
+
+ fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
+ let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
+ let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
+ let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
+ self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
+ .to_rvalue()
- // TODO
- /*unsafe {
- llvm::LLVMSetCleanup(landing_pad, llvm::True);
- }*/
++ // TODO(antoyo): Properly implement unwinding.
++ // The above is just to make compilation work: rustc_codegen_ssa now seems to
++ // call the unwinding builder methods even with panic=abort.
+ }
+
+ fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
- //unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
++ // TODO(antoyo)
+ }
+
+ fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*let name = const_cstr!("cleanuppad");
- let ret = unsafe {
- llvm::LLVMRustBuildCleanupPad(
- self.llbuilder,
- parent,
- args.len() as c_uint,
- args.as_ptr(),
- name.as_ptr(),
- )
- };
- Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
+ }
+
+ fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
- /*let ret =
- unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
- ret.expect("LLVM does not have support for cleanupret")*/
+ }
+
+ fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
+ unimplemented!();
- /*let name = const_cstr!("catchpad");
- let ret = unsafe {
- llvm::LLVMRustBuildCatchPad(
- self.llbuilder,
- parent,
- args.len() as c_uint,
- args.as_ptr(),
- name.as_ptr(),
- )
- };
- Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
+ }
+
+ fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
- /*let name = const_cstr!("catchswitch");
- let ret = unsafe {
- llvm::LLVMRustBuildCatchSwitch(
- self.llbuilder,
- parent,
- unwind,
- num_handlers as c_uint,
- name.as_ptr(),
- )
- };
- ret.expect("LLVM does not have support for catchswitch")*/
+ }
+
+ fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
+ unimplemented!();
- /*unsafe {
- llvm::LLVMRustAddHandler(catch_switch, handler);
- }*/
+ }
+
+ fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
+ unimplemented!();
- // TODO
- /*unsafe {
- llvm::LLVMSetPersonalityFn(self.llfn(), personality);
- }*/
+ }
+
+ fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
- let align = Align::from_bits(64).expect("align"); // TODO: use good align.
++ // TODO(antoyo)
+ }
+
+ // Atomic Operations
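++ // NOTE: GCC's __atomic_compare_exchange builtins update `expected` in place and
++ // return only a success flag, while rustc expects an (old value, success) pair;
++ // hence the temporary local and the manually built pair below.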
+ fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+ let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+ self.llbb().add_assignment(None, expected, cmp);
+ let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
+
+ let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
+ let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
- // TODO: handle when value is not a struct.
++ let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use a proper alignment.
+
+ let value_type = result.to_rvalue().get_type();
+ if let Some(struct_type) = value_type.is_struct() {
+ self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
+ // NOTE: since success contains the call to the intrinsic, it must be stored before
+ // expected so that we store expected after the call.
+ self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
+ }
- // NOTE: not sure why, but we have the wrong type here.
++ // TODO(antoyo): handle when value is not a struct.
+
+ result.to_rvalue()
+ }
+
+ fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = self.cx.int_width(src.get_type()) / 8;
+ let name =
+ match op {
+ AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+ AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+ AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+ AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+ AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+ AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+ AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+ AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ };
+
+
+ let atomic_function = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
- // TODO
- /*unsafe {
- llvm::LLVMSetMetadata(
- load,
- llvm::MD_invariant_load as c_uint,
- llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
- );
- }*/
++ // FIXME(antoyo): not sure why, but we have the wrong type here.
+ let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, new_src_type);
+ let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+ self.context.new_cast(None, res, src.get_type())
+ }
+
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+ let name =
+ match scope {
+ SynchronizationScope::SingleThread => "__atomic_signal_fence",
+ SynchronizationScope::CrossThread => "__atomic_thread_fence",
+ };
+ let thread_fence = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+ }
+
+ fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+ // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
+ self.normal_function_addresses.borrow_mut().insert(load);
- // TODO
- //self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
++ // TODO(antoyo)
+ }
+
+ fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
- // TODO
- //self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
++ // TODO(antoyo)
+ }
+
+ fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
- // FIXME: remove when having a proper API.
++ // TODO(antoyo)
+ }
+
+ fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
- // FIXME: this does not zero-extend.
++ // FIXME(antoyo): remove once there is a proper API.
+ let gcc_func = unsafe { std::mem::transmute(func) };
+ if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
+ self.function_call(func, args, funclet)
+ }
+ else {
+ // If it's not a function that was defined, it's a function pointer.
+ self.function_ptr_call(func, args, funclet)
+ }
+ }
+
+ fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
- // FIXME: hack because base::from_immediate converts i1 to i8.
++ // FIXME(antoyo): this does not zero-extend.
+ if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
- //println!("zext: {:?} -> {:?}", value, dest_typ);
++ // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
+ // Fix the code in codegen_ssa::base::from_immediate.
+ return value;
+ }
- //llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
+ self.context.new_cast(None, value, dest_typ)
+ }
+
+ fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+ self.cx
+ }
+
+ fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+ unimplemented!();
- /*debug!(
- "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
- fn_name, hash, num_counters, index
- );
-
- let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
- let args = &[fn_name, hash, num_counters, index];
- let args = self.check_call("call", llfn, args);
-
- unsafe {
- let _ = llvm::LLVMRustBuildCall(
- self.llbuilder,
- llfn,
- args.as_ptr() as *const &llvm::Value,
- args.len() as c_uint,
- None,
- );
- }*/
+ }
+
+ fn set_span(&mut self, _span: Span) {}
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+ if self.cx().val_ty(val) == self.cx().type_i1() {
+ self.zext(val, self.cx().type_i8())
+ }
+ else {
+ val
+ }
+ }
+
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+ if scalar.is_bool() {
+ return self.trunc(val, self.cx().type_i1());
+ }
+ val
+ }
+
+ fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+ unimplemented!();
- // TODO: check that ordered vs non-ordered is respected.
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
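++ // NOTE: this simply declares the x86 `_mm_shuffle_epi8` intrinsic as an extern
++ // function and calls it; presumably a stopgap that only covers byte shuffles of
++ // 128-bit vectors.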
+ pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+ let return_type = v1.get_type();
+ let params = [
+ self.context.new_parameter(None, return_type, "v1"),
+ self.context.new_parameter(None, return_type, "v2"),
+ self.context.new_parameter(None, mask.get_type(), "mask"),
+ ];
+ let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
+ self.context.new_call(None, shuffle, &[v1, v2, mask])
+ }
+}
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+ // Forward to the `get_static` method of `CodegenCx`
+ self.cx().get_static(def_id)
+ }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ self.cx.param_env()
+ }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.cx.target_spec()
+ }
+}
+
+trait ToGccComp {
+ fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
+ match *self {
+ IntPredicate::IntEQ => ComparisonOp::Equals,
+ IntPredicate::IntNE => ComparisonOp::NotEquals,
+ IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntULT => ComparisonOp::LessThan,
+ IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+ IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntSLT => ComparisonOp::LessThan,
+ IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+ }
+ }
+}
+
+impl ToGccComp for RealPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
- AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same.
++ // TODO(antoyo): check that ordered vs non-ordered is respected.
+ match *self {
+ RealPredicate::RealPredicateFalse => unreachable!(),
+ RealPredicate::RealOEQ => ComparisonOp::Equals,
+ RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealOLT => ComparisonOp::LessThan,
+ RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealONE => ComparisonOp::NotEquals,
+ RealPredicate::RealORD => unreachable!(),
+ RealPredicate::RealUNO => unreachable!(),
+ RealPredicate::RealUEQ => ComparisonOp::Equals,
+ RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealULT => ComparisonOp::LessThan,
+ RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealUNE => ComparisonOp::NotEquals,
+ RealPredicate::RealPredicateTrue => unreachable!(),
+ }
+ }
+}
+
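++// The variants below mirror GCC's __ATOMIC_* constants in declaration order, so
++// `ordering as i32` in to_gcc() yields exactly the integer values the atomic
++// builtins expect (__ATOMIC_RELAXED == 0, ..., __ATOMIC_SEQ_CST == 5).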
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+ __ATOMIC_RELAXED,
+ __ATOMIC_CONSUME,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELEASE,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+ fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+ fn to_gcc(self) -> i32 {
+ use MemOrdering::*;
+
+ let ordering =
+ match self {
- AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same.
++ AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+ AtomicOrdering::Unordered => __ATOMIC_RELAXED,
++ AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+ AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+ AtomicOrdering::Release => __ATOMIC_RELEASE,
+ AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+ AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+ };
+ ordering as i32
+ }
+}
--- /dev/null
- //debug!("get_fn(instance={:?})", instance);
-
+use gccjit::{FunctionType, RValue};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::{Instance, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let tcx = cx.tcx();
+
- //debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym);
+ assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_escaping_bound_vars());
+ assert!(!instance.substs.has_param_types_or_consts());
+
+ if let Some(&func) = cx.instances.borrow().get(&instance) {
+ return func;
+ }
+
+ let sym = tcx.symbol_name(instance).name;
- // TODO
+
+ let fn_abi = FnAbi::of_instance(cx, instance, &[]);
+
- //debug!("get_fn: casting {:?} to {:?}", func, ptrty);
- // TODO
- //cx.const_ptrcast(func, ptrty)
+ let func =
+ if let Some(func) = cx.get_declared_value(&sym) {
+ // Create a fn pointer with the new signature.
+ let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+ // This is subtle and surprising, but sometimes we have to bitcast
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
+ // library, they may not use precisely the same types: for
+ // example, they will probably each declare their own structs,
+ // which are distinct types from LLVM's point of view (nominal
+ // types).
+ //
+ // Now, if those two crates are linked into an application, and
+ // they contain inlined code, you can wind up with a situation
+ // where both of those functions wind up being loaded into this
+ // application simultaneously. In that case, the same function
+ // (from LLVM's point of view) requires two types. But of course
+ // LLVM won't allow one function to have two types.
+ //
+ // What we currently do, therefore, is declare the function with
+ // one of the two types (whichever happens to come first) and then
+ // bitcast as needed when the function is referenced to make sure
+ // it has the type we expect.
+ //
+ // This can occur on either a crate-local or crate-external
+ // reference. It also occurs when testing libcore and in some
+ // other weird situations. Annoying.
+ if cx.val_ty(func) != ptrty {
- //debug!("get_fn: not casting pointer!");
++ // TODO(antoyo): cast the pointer.
+ func
+ }
+ else {
- //cx.linkage.set(FunctionType::Internal);
- //debug!("get_fn: not casting pointer!");
-
- // TODO
- //attributes::from_fn_attrs(cx, func, instance);
-
- //let instance_def_id = instance.def_id();
-
- // TODO
- /*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) {
- unsafe {
- llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport);
- }
- }*/
+ func
+ }
+ }
+ else {
+ cx.linkage.set(FunctionType::Extern);
+ let func = cx.declare_fn(&sym, &fn_abi);
+
++ // TODO(antoyo): set linkage and attributes.
+ func
+ };
+
+ cx.instances.borrow_mut().insert(instance, func);
+
+ func
+}
--- /dev/null
- // TODO: handle null_terminated.
+use std::convert::TryFrom;
+use std::convert::TryInto;
+
+use gccjit::{Block, CType, RValue, Type, ToRValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ MiscMethods,
+ StaticMethods,
+};
+use rustc_middle::bug;
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::{layout::TyAndLayout, ScalarInt};
+use rustc_mir::interpret::{Allocation, GlobalAlloc, Scalar};
+use rustc_span::Symbol;
+use rustc_target::abi::{self, HasDataLayout, LayoutOf, Pointer, Size};
+
+use crate::consts::const_alloc_to_gcc;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
+ bytes_in_context(self, bytes)
+ }
+
+ fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> {
- // TODO: handle non-null-terminated strings.
++ // TODO(antoyo): handle null_terminated.
+ if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) {
+ return value.to_rvalue();
+ }
+
+ let global = self.global_string(&*symbol.as_str());
+
+ self.const_cstr_cache.borrow_mut().insert(symbol, global.dereference(None));
+ global
+ }
+
+ fn global_string(&self, string: &str) -> RValue<'gcc> {
- //llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage);
++ // TODO(antoyo): handle non-null-terminated strings.
+ let string = self.context.new_string_literal(&*string);
+ let sym = self.generate_local_symbol_name("str");
+ // NOTE: TLS is always off for a string literal.
+ // NOTE: string literals do not have a link section.
+ let global = self.define_global(&sym, self.val_ty(string), false, None)
+ .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym));
+ self.global_init_block.add_assignment(None, global.dereference(None), string);
+ global.to_rvalue()
- // TODO: when libgccjit allow casting from pointer to int, remove this.
++ // TODO(antoyo): set linkage.
+ }
+
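++ // libgccjit cannot yet cast directly between pointers and integers, so both
++ // helpers below spill the value to a local and reinterpret the local's address,
++ // conceptually *(dest_ty *)&local.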
+ pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let func = block.get_function();
+ let local = func.new_local(None, value.get_type(), "intLocal");
+ block.add_assignment(None, local, value);
+ let value_address = local.get_address(None);
+
+ let ptr = self.context.new_cast(None, value_address, dest_ty.make_pointer());
+ ptr.dereference(None).to_rvalue()
+ }
+
+ pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-
- /*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> {
- self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements)
- }*/
++ // TODO(antoyo): when libgccjit allows casting from pointer to int, remove this.
+ let func = block.get_function();
+ let local = func.new_local(None, value.get_type(), "ptrLocal");
+ block.add_assignment(None, local, value);
+ let ptr_address = local.get_address(None);
+
+ let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
+ ptr.dereference(None).to_rvalue()
+ }
- // FIXME: workaround for a bug where libgccjit is expecting a constant.
+}
+
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+ let context = &cx.context;
+ let typ = context.new_array_type(None, context.new_type::<u8>(), bytes.len() as i32);
+ let global = cx.declare_unnamed_global(typ);
+ global.global_set_initializer(bytes);
+ global.to_rvalue()
+}
+
+pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
+ typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if type_is_pointer(typ) {
+ self.context.new_null(typ)
+ }
+ else {
+ self.const_int(typ, 0)
+ }
+ }
+
+ fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ let local = self.current_func.borrow().expect("func")
+ .new_local(None, typ, "undefined");
+ if typ.is_struct().is_some() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ let pointer = local.get_address(None);
+ self.structs_as_pointer.borrow_mut().insert(pointer);
+ pointer
+ }
+ else {
+ local.to_rvalue()
+ }
+ }
+
+ fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+ }
+
+ fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+ }
+
+ fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ let num64: Result<i64, _> = num.try_into();
+ if let Ok(num) = num64 {
- // FIXME: use a new function new_rvalue_from_unsigned_long()?
++ // FIXME(antoyo): workaround for a bug where libgccjit is expecting a constant.
+ // The operations >> 64 and | low would make the normal case non-constant.
+ return self.context.new_rvalue_from_long(typ, num as i64);
+ }
+
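++ // Split the u128 into two 64-bit halves and recombine them, so that
++ // conceptually num == (high << 64) | low.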
+ if num >> 64 != 0 {
- //self.const_uint(self.type_i8(), i as u64)
++ // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+ let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+ let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
+
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ (high << sixty_four) | self.context.new_cast(None, low, typ)
+ }
+ else if typ.is_i128(self) {
+ let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+ self.context.new_cast(None, num, typ)
+ }
+ else {
+ self.context.new_rvalue_from_long(typ, num as u64 as i64)
+ }
+ }
+
+ fn const_bool(&self, val: bool) -> RValue<'gcc> {
+ self.const_uint(self.type_i1(), val as u64)
+ }
+
+ fn const_i32(&self, i: i32) -> RValue<'gcc> {
+ self.const_int(self.type_i32(), i as i64)
+ }
+
+ fn const_u32(&self, i: u32) -> RValue<'gcc> {
+ self.const_uint(self.type_u32(), i as u64)
+ }
+
+ fn const_u64(&self, i: u64) -> RValue<'gcc> {
+ self.const_uint(self.type_u64(), i)
+ }
+
+ fn const_usize(&self, i: u64) -> RValue<'gcc> {
+ let bit_size = self.data_layout().pointer_size.bits();
+ if bit_size < 64 {
+ // make sure it doesn't overflow
+ assert!(i < (1 << bit_size));
+ }
+
+ self.const_uint(self.usize_type, i)
+ }
+
+ fn const_u8(&self, _i: u8) -> RValue<'gcc> {
+ unimplemented!();
- //unsafe { llvm::LLVMConstReal(t, val) }
+ }
+
+ fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
+ unimplemented!();
- // TODO: cache the type? It's anonymous, so probably not.
+ }
+
+ fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
+ let len = s.as_str().len();
+ let cs = self.const_ptrcast(self.const_cstr(s, false),
+ self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
+ );
+ (cs, self.const_usize(len as u64))
+ }
+
+ fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+ let fields: Vec<_> = values.iter()
+ .map(|value| value.get_type())
+ .collect();
- // TODO
++ // TODO(antoyo): cache the type? It's anonymous, so probably not.
+ let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
+ let typ = self.type_struct(&fields, packed);
+ let structure = self.global_init_func.new_local(None, typ, &name);
+ let struct_type = typ.is_struct().expect("struct type");
+ for (index, value) in values.iter().enumerate() {
+ let field = struct_type.get_field(index as i32);
+ let field_lvalue = structure.access_field(None, field);
+ self.global_init_block.add_assignment(None, field_lvalue, *value);
+ }
+ self.lvalue_to_rvalue(structure)
+ }
+
+ fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
- //try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
++ // TODO(antoyo)
+ None
- // TODO
+ }
+
+ fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
- /*try_as_const_integral(v).and_then(|v| unsafe {
- let (mut lo, mut hi) = (0u64, 0u64);
- let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
- success.then_some(hi_lo_to_u128(lo, hi))
- })*/
++ // TODO(antoyo)
+ None
- // FIXME: there's some issues with using the u128 code that follows, so hard-code
+ }
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+ let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+ match cv {
+ Scalar::Int(ScalarInt::ZST) => {
+ assert_eq!(0, layout.value.size(self).bytes());
+ self.const_undef(self.type_ix(0))
+ }
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.value.size(self));
+
- // TODO
- //llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
++ // FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
+ // the paths for floating-point values.
+ if ty == self.float_type {
+ return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+ }
+ else if ty == self.double_type {
+ return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
+ }
+
+ let value = self.const_uint_big(self.type_ix(bitsize), data);
+ if layout.value == Pointer {
+ self.inttoptr(self.current_block.borrow().expect("block"), value, ty)
+ } else {
+ self.const_bitcast(value, ty)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts();
+ let base_addr =
+ match self.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let init = const_alloc_to_gcc(self, alloc);
+ let value =
+ match alloc.mutability {
+ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+ _ => self.static_addr_of(init, alloc.align, None),
+ };
+ if !self.sess().fewer_names() {
++ // TODO(antoyo): set value name.
+ }
+ value
+ },
+ GlobalAlloc::Function(fn_instance) => {
+ self.get_fn_addr(fn_instance)
+ },
+ GlobalAlloc::Static(def_id) => {
+ assert!(self.tcx.is_static(def_id));
+ self.get_static(def_id)
+ },
+ };
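++ // Compute base + offset through integer arithmetic: bitcast the address
++ // to usize, add the byte offset, then bitcast back to the pointer type.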
+ let ptr_type = base_addr.get_type();
+ let base_addr = self.const_bitcast(base_addr, self.usize_type);
+ let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+ let ptr = self.const_bitcast(base_addr + offset, ptr_type);
+ let value = ptr.dereference(None);
+ if layout.value != Pointer {
+ self.const_bitcast(value.to_rvalue(), ty)
+ }
+ else {
+ self.const_bitcast(value.get_address(None), ty)
+ }
+ }
+ }
+ }
+
+ fn const_data_from_alloc(&self, alloc: &Allocation) -> Self::Value {
+ const_alloc_to_gcc(self, alloc)
+ }
+
+ fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: &Allocation, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> {
+ assert_eq!(alloc.align, layout.align.abi);
+ let ty = self.type_ptr_to(layout.gcc_type(self, true));
+ let value =
+ if layout.size == Size::ZERO {
+ let value = self.const_usize(alloc.align.bytes());
+ self.context.new_cast(None, value, ty)
+ }
+ else {
+ let init = const_alloc_to_gcc(self, alloc);
+ let base_addr = self.static_addr_of(init, alloc.align, None);
+
+ let array = self.const_bitcast(base_addr, self.type_i8p());
+ let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
+ self.const_bitcast(value, ty)
+ };
+ PlaceRef::new_sized(value, layout)
+ }
+
+ fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, val, ty)
+ }
+}
+
+pub trait SignType<'gcc, 'tcx> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
+ }
+
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
+ }
+
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if self.is_u8(cx) {
+ cx.i8_type
+ }
+ else if self.is_u16(cx) {
+ cx.i16_type
+ }
+ else if self.is_u32(cx) {
+ cx.i32_type
+ }
+ else if self.is_u64(cx) {
+ cx.i64_type
+ }
+ else if self.is_u128(cx) {
+ cx.i128_type
+ }
+ else {
+ self.clone()
+ }
+ }
+}
+
+pub trait TypeReflection<'gcc, 'tcx> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u8_type
+ }
+
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u16_type
+ }
+
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.uint_type
+ }
+
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulong_type
+ }
+
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulonglong_type
+ }
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i8_type
+ }
+
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u8_type
+ }
+
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i16_type
+ }
+
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u16_type
+ }
+
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i32_type
+ }
+
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u32_type
+ }
+
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i64_type
+ }
+
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u64_type
+ }
+
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_c_type(CType::Int128t)
+ }
+
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_c_type(CType::UInt128t)
+ }
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f32>()
+ }
+
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f64>()
+ }
+}
--- /dev/null
- // TODO
- /*unsafe {
- // Upgrade the alignment in cases where the same constant is used with different
- // alignment requirements
- let llalign = align.bytes() as u32;
- if llalign > llvm::LLVMGetAlignment(gv) {
- llvm::LLVMSetAlignment(gv, llalign);
- }
- }*/
+use gccjit::{RValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::{bug, span_bug};
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_mir::interpret::{self, Allocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{self, Align, HasDataLayout, LayoutOf, Primitive, Size};
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::mangled_std_symbols::{ARGC, ARGV, ARGV_INIT_ARRAY};
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
+ if value.get_type() == self.bool_type.make_pointer() {
+ if let Some(pointee) = typ.get_pointee() {
+ if pointee.is_vector().is_some() {
+ panic!()
+ }
+ }
+ }
+ self.context.new_bitcast(None, value, typ)
+ }
+}
+
+impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
+ fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ if let Some(global_value) = self.const_globals.borrow().get(&cv) {
- // TODO
- /*unsafe {
- llvm::LLVMSetGlobalConstant(global_value, True);
- }*/
++ // TODO(antoyo): upgrade alignment.
+ return *global_value;
+ }
+ let global_value = self.static_addr_of_mut(cv, align, kind);
- //val_llty = self.type_i8();
++ // TODO(antoyo): set global constant.
+ self.const_globals.borrow_mut().insert(cv, global_value);
+ global_value
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let name = &*self.tcx.symbol_name(instance).name;
+
+ let (value, alloc) =
+ match codegen_static_initializer(&self, def_id) {
+ Ok(value) => value,
+ // Error has already been reported
+ Err(_) => return,
+ };
+
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let val_llty = self.val_ty(value);
+ let value =
+ if val_llty == self.type_i1() {
- //llvm::LLVMConstZExt(value, val_llty)
+ unimplemented!();
- /*let name = llvm::get_value_name(global).to_vec();
- llvm::set_value_name(global, b"");
-
- let linkage = llvm::LLVMRustGetLinkage(global);
- let visibility = llvm::LLVMRustGetVisibility(global);*/
+ }
+ else {
+ value
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ let global =
+ if val_llty == gcc_type {
+ global
+ }
+ else {
+ // If we created the global with the wrong type,
+ // correct the type.
- /*llvm::LLVMRustSetLinkage(new_global, linkage);
- llvm::LLVMRustSetVisibility(new_global, visibility);*/
-
++ // TODO(antoyo): set value name, linkage and visibility.
+
+ let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section);
+
- // TODO
- //set_global_alignment(&self, global, self.align_of(ty));
- //llvm::LLVMSetInitializer(global, value);
+ // To avoid breaking any invariants, we leave around the old
+ // global for the moment; we'll replace all references to it
+ // with the new global later. (See base::codegen_backend.)
+ //self.statics_to_rauw.borrow_mut().push((global, new_global));
+ new_global
+ };
- // FIXME: correctly support global variable initialization.
++ // TODO(antoyo): set alignment and initializer.
+ let value = self.rvalue_as_lvalue(value);
+ let value = value.get_address(None);
+ let dest_typ = global.get_type();
+ let value = self.context.new_cast(None, value, dest_typ);
+
+ // NOTE: do not init the variables related to argc/argv because it seems we cannot
+ // overwrite those variables.
- // TODO: switch to set_initializer when libgccjit supports that.
++ // FIXME(antoyo): correctly support global variable initialization.
+ let skip_init = [
+ ARGV_INIT_ARRAY,
+ ARGC,
+ ARGV,
+ ];
+ if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) {
- // TODO
- //llvm::LLVMSetGlobalConstant(global, llvm::True);
++ // TODO(antoyo): switch to set_initializer when libgccjit supports that.
+ let memcpy = self.context.get_builtin_function("memcpy");
+ let dst = self.context.new_cast(None, global, self.type_i8p());
+ let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void()));
+ let size = self.context.new_rvalue_from_long(self.sizet_type, alloc.size().bytes() as i64);
+ self.global_init_block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+ }
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable {
+ if self.type_is_freeze(ty) {
- //debuginfo::create_global_var_metadata(&self, def_id, global);
-
++ // TODO(antoyo): set global constant.
+ }
+ }
+
- /*let all_bytes_are_zero = alloc.relocations().is_empty()
- && alloc
- .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
- .iter()
- .all(|&byte| byte == 0);
-
- let sect_name = if all_bytes_are_zero {
- CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
- } else {
- CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
- };*/
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+ // e.g., using load-aligned into a SIMD register.
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+ // This violates the alignment assumption, causing segfaults at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+ // whose values would be `mem::replace`d on initialization.
+ // The implementation of `mem::replace` will use SIMD
+ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+ // which macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+ // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+ // will use load-unaligned instructions instead, and thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.options.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
- //llvm::LLVMSetSection(global, sect_name.as_ptr());
+ unimplemented!();
- /*let section = llvm::LLVMMDStringInContext(
- self.llcx,
- section.as_str().as_ptr().cast(),
- section.as_str().len() as c_uint,
- );
- assert!(alloc.relocations().is_empty());
-
- // The `inspect` method is okay here because we checked relocations, and
- // because we are doing this access to inspect the final interpreter state (not
- // as part of the interpreter execution).
- let bytes =
- alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
- let alloc = llvm::LLVMMDStringInContext(
- self.llcx,
- bytes.as_ptr().cast(),
- bytes.len() as c_uint,
- );
- let data = [section, alloc];
- let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
- llvm::LLVMAddNamedMetadataOperand(
- self.llmod,
- "wasm.custom_sections\0".as_ptr().cast(),
- meta,
- );*/
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+ if let Some(_section) = attrs.link_section {
+ unimplemented!();
- // TODO
- //base::set_link_section(global, &attrs);
+ }
+ } else {
- // TODO
- //let cast = self.context.new_cast(None, global, self.type_i8p());
- //self.used_statics.borrow_mut().push(cast);
++ // TODO(antoyo): set link section.
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+ self.add_used_global(global);
+ }
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, _global: RValue<'gcc>) {
- // TODO: check if it's okay that TLS is off here.
- // TODO: check if it's okay that link_section is None here.
- // TODO: set alignment here as well.
++ // TODO(antoyo)
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ let (name, gv) =
+ match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
- //llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
++ // TODO(antoyo): check if it's okay that TLS is off here.
++ // TODO(antoyo): check if it's okay that link_section is None here.
++ // TODO(antoyo): set alignment here as well.
+ let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", name);
+ });
- // FIXME: I think the name coming from generate_local_symbol_name() above cannot be used
++ // TODO(antoyo): set linkage.
+ (name, gv)
+ }
+ _ => {
+ let index = self.global_gen_sym_counter.get();
+ let name = format!("global_{}_{}", index, self.codegen_unit.name());
+ let typ = self.val_ty(cv).get_aligned(align.bytes());
+ let global = self.define_private_global(typ);
+ (name, global)
+ },
+ };
- //llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
++ // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
+ // globally.
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(cv, name);
+ self.global_init_block.add_assignment(None, gv.dereference(None), cv);
- /*let attrs = self.tcx.codegen_fn_attrs(def_id);
- let name = &*self.tcx.symbol_name(instance).name;
- let name =
- if let Some(linkage) = attrs.linkage {
- // This is to match what happens in check_and_apply_linkage.
- Cow::from(format!("_rust_extern_with_linkage_{}", name))
- }
- else {
- Cow::from(name)
- };
- let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name)
- .get_address(None);
- self.global_names.borrow_mut().insert(global, name.to_string());*/
++ // TODO(antoyo): set unnamed address.
+ gv
+ }
+
+ pub fn get_static(&self, def_id: DefId) -> RValue<'gcc> {
+ let instance = Instance::mono(self.tcx, def_id);
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ if let Some(&global) = self.instances.borrow().get(&instance) {
- //debug!("get_static: sym={} instance={:?}", sym, instance);
-
+ return global;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+
- /*unsafe {
- llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden);
- }*/
+ let global =
+ if let Some(def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let llty = self.layout_of(ty).gcc_type(self, true);
+ // FIXME: refactor this to work without accessing the HIR
+ let global = match self.tcx.hir().get(id) {
+ Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => {
+ if let Some(global) = self.get_declared_value(&sym) {
+ if self.val_ty(global) != self.type_ptr_to(llty) {
+ span_bug!(span, "Conflicting types for static");
+ }
+ }
+
+ let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
- //debug!("get_static: sym={} attrs={:?}", sym, attrs);
-
++ // TODO(antoyo): set visibility.
+ }
+
+ global
+ }
+
+ Node::ForeignItem(&hir::ForeignItem {
+ span,
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ }) => {
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ check_and_apply_linkage(&self, &fn_attrs, ty, sym, span)
+ }
+
+ item => bug!("get_static: expected static, found {:?}", item),
+ };
+
- let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
- // ThinLTO can't handle this workaround in all cases, so we don't
- // emit the attrs. Instead we make them unnecessary by disallowing
- // dynamic linking when linker plugin based LTO is enabled.
- !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/
+ global
+ }
+ else {
+ // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+ //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
+
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let span = self.tcx.def_span(def_id);
+ let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+
- /*unsafe {
- llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
- }*/
++ let needs_dll_storage_attr = false; // TODO(antoyo)
+
+ // If this assertion triggers, there's something wrong with commandline
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.options.is_like_msvc
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unimplemented!();
- /*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
- // For foreign (native) libs we know the exact storage type to use.
- unsafe {
- llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
- }
- }*/
+ }
+ }
+ global
+ };
+
- //debug!("get_static: sym={} linkage={:?}", sym, linkage);
-
++ // TODO(antoyo): set dll storage class.
+
+ self.instances.borrow_mut().insert(instance, global);
+ global
+ }
+}
+
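+// A sketch of the lowering performed below (hypothetical layout): an allocation
+// holding one pointer relocation at offset 0 followed by plain trailing bytes is
+// lowered to a constant struct of the form { <relocated pointer>, <byte array> }.
+// Byte runs between relocations become `const_bytes` entries and each relocation
+// becomes a `scalar_to_backend` pointer entry.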
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: &Allocation) -> RValue<'gcc> {
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ let mut next_offset = 0;
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution). Any undef byte is replaced with
+ // some arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ let ptr_offset =
+ read_target_uint(dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+ llvals.push(cx.scalar_to_backend(
+ InterpScalar::from_pointer(
+ interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+ &cx.tcx,
+ ),
+ &abi::Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
+ cx.type_i8p(),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution). Any undef byte is replaced with some
+ // arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+
+ cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, &'tcx Allocation), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> RValue<'gcc> {
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let llty = cx.layout_of(ty).gcc_type(cx, true);
+ if let Some(linkage) = attrs.linkage {
- //llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage);
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 =
+ if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).gcc_type(cx, true)
+ }
+ else {
+ cx.sess().span_fatal(
+ span,
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ // Declare a symbol `foo` with the desired linkage.
+ let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
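+ //
+ // For example (a hypothetical declaration), given:
+ //     extern "C" { #[linkage = "extern_weak"] static foo: *mut u8; }
+ // this declares `foo` with weak linkage and defines
+ // `_rust_extern_with_linkage_foo` as a pointer that holds the address of
+ // `foo`, or null if `foo` is discarded at link time.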
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(&sym);
+ let global2 =
+ cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| {
+ cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
+ });
- //llvm::LLVMSetInitializer(global2, global1);
++ // TODO(antoyo): set linkage.
+ let lvalue = global2.dereference(None);
+ cx.global_init_block.add_assignment(None, lvalue, global1);
++ // TODO(antoyo): use global_set_initializer() when it will work.
+ global2
+ }
+ else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+ // complains that one object file has a thread-local version of the
+ // symbol and another one doesn't.
+ cx.declare_global(&sym, llty, is_tls, attrs.link_section)
+ }
+}
--- /dev/null
- // TODO: First set it to a dummy block to avoid using Option?
+use std::cell::{Cell, RefCell};
+
+use gccjit::{
+ Block,
+ Context,
+ CType,
+ Function,
+ FunctionType,
+ LValue,
+ RValue,
+ Struct,
+ Type,
+};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout};
+use rustc_session::Session;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+use crate::declare::mangle_name;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+ pub params: Vec<Type<'gcc>>,
+ pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+ pub check_overflow: bool,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+ pub context: &'gcc Context<'gcc>,
+
- // TODO: remove global_names.
++ // TODO(antoyo): First set it to a dummy block to avoid using Option?
+ pub current_block: RefCell<Option<Block<'gcc>>>,
+ pub current_func: RefCell<Option<Function<'gcc>>>,
+ pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+
+ /// The function where globals are initialized.
+ pub global_init_func: Function<'gcc>,
+ pub global_init_block: Block<'gcc>,
+
+ pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+ pub tls_model: gccjit::TlsModel,
+
+ pub bool_type: Type<'gcc>,
+ pub i8_type: Type<'gcc>,
+ pub i16_type: Type<'gcc>,
+ pub i32_type: Type<'gcc>,
+ pub i64_type: Type<'gcc>,
+ pub i128_type: Type<'gcc>,
+ pub isize_type: Type<'gcc>,
+
+ pub u8_type: Type<'gcc>,
+ pub u16_type: Type<'gcc>,
+ pub u32_type: Type<'gcc>,
+ pub u64_type: Type<'gcc>,
+ pub u128_type: Type<'gcc>,
+ pub usize_type: Type<'gcc>,
+
+ pub int_type: Type<'gcc>,
+ pub uint_type: Type<'gcc>,
+ pub long_type: Type<'gcc>,
+ pub ulong_type: Type<'gcc>,
+ pub ulonglong_type: Type<'gcc>,
+ pub sizet_type: Type<'gcc>,
+
+ pub float_type: Type<'gcc>,
+ pub double_type: Type<'gcc>,
+
+ pub linkage: Cell<FunctionType>,
+ pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+ pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+ pub tcx: TyCtxt<'tcx>,
+
+ pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+ pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
+ /// Cache generated vtables
+ pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+ pub init_argv_var: RefCell<String>,
+ pub argv_initialized: Cell<bool>,
+
+ /// Cache of constant strings.
+ pub const_cstr_cache: RefCell<FxHashMap<Symbol, LValue<'gcc>>>,
+
+ /// Cache of globals.
+ pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
- /// FIXME: fix the rustc API to avoid having this hack.
++ // TODO(antoyo): remove global_names.
+ pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+ pub global_gen_sym_counter: Cell<usize>,
+
+ eh_personality: Cell<Option<RValue<'gcc>>>,
+
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+ /// NOTE: a hack is used because the rustc API is not suitable for libgccjit; as such,
+ /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+ /// This set remembers which of these pointers were returned by `const_undef()` so that
+ /// they can be dereferenced later.
- /// FIXME: remove when the API supports more types.
++ /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+ pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+
+ /// Store pointers of different types for safety.
+ /// When casting the values back to their original types, check that they are indeed of that
+ /// type using these sets.
- // TODO: fix this mess. libgccjit seems to return random type when using new_int_type().
- //let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
++ /// FIXME(antoyo): remove when the API supports more types.
+ #[cfg(debug_assertions)]
+ lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ let check_overflow = tcx.sess.overflow_checks();
- //let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
++ // TODO(antoyo): fix this mess. libgccjit seems to return random type when using new_int_type().
+ let isize_type = context.new_c_type(CType::LongLong);
- let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded?
+ let usize_type = context.new_c_type(CType::ULongLong);
+ let bool_type = context.new_type::<bool>();
+ let i8_type = context.new_type::<i8>();
+ let i16_type = context.new_type::<i16>();
+ let i32_type = context.new_type::<i32>();
+ let i64_type = context.new_c_type(CType::LongLong);
- let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded?
++ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ let u8_type = context.new_type::<u8>();
+ let u16_type = context.new_type::<u16>();
+ let u32_type = context.new_type::<u32>();
+ let u64_type = context.new_c_type(CType::ULongLong);
- //debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
++ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+
+ let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+ let float_type = context.new_type::<f32>();
+ let double_type = context.new_type::<f64>();
+
+ let int_type = context.new_c_type(CType::Int);
+ let uint_type = context.new_c_type(CType::UInt);
+ let long_type = context.new_c_type(CType::Long);
+ let ulong_type = context.new_c_type(CType::ULong);
+ let ulonglong_type = context.new_c_type(CType::ULongLong);
+ let sizet_type = context.new_c_type(CType::SizeT);
+
+ assert_eq!(isize_type, i64_type);
+ assert_eq!(usize_type, u64_type);
+
+ let mut functions = FxHashMap::default();
+ let builtins = [
+ "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
+ "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+ "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
+ "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
+ "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
+ "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
+ "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
+ "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+ "__builtin_expect_with_probability",
+ ];
+
+ for builtin in builtins.iter() {
+ functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+ }
+
+ let global_init_func = context.new_function(None, FunctionType::Exported, context.new_type::<()>(), &[],
+ &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
+ let global_init_block = global_init_func.new_block("initial");
+
+ Self {
+ check_overflow,
+ codegen_unit,
+ context,
+ current_block: RefCell::new(None),
+ current_func: RefCell::new(None),
+ normal_function_addresses: Default::default(),
+ functions: RefCell::new(functions),
+ global_init_func,
+ global_init_block,
+
+ tls_model,
+
+ bool_type,
+ i8_type,
+ i16_type,
+ i32_type,
+ i64_type,
+ i128_type,
+ isize_type,
+ usize_type,
+ u8_type,
+ u16_type,
+ u32_type,
+ u64_type,
+ u128_type,
+ int_type,
+ uint_type,
+ long_type,
+ ulong_type,
+ ulonglong_type,
+ sizet_type,
+
+ float_type,
+ double_type,
+
+ linkage: Cell::new(FunctionType::Internal),
+ #[cfg(debug_assertions)]
+ lvalues: Default::default(),
+ instances: Default::default(),
+ vtables: Default::default(),
+ const_globals: Default::default(),
+ init_argv_var: RefCell::new(String::new()),
+ argv_initialized: Cell::new(false),
+ const_cstr_cache: Default::default(),
+ global_names: Default::default(),
+ globals: Default::default(),
+ scalar_types: Default::default(),
+ types: Default::default(),
+ tcx,
+ struct_types: Default::default(),
+ types_with_fields_to_set: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ global_gen_sym_counter: Cell::new(0),
+ eh_personality: Cell::new(None),
+ pointee_infos: Default::default(),
+ structs_as_pointer: Default::default(),
+ }
+ }
+
+ pub fn lvalue_to_rvalue(&self, value: LValue<'gcc>) -> RValue<'gcc> {
+ #[cfg(debug_assertions)]
+ self.lvalues.borrow_mut().insert(value);
+ unsafe { std::mem::transmute(value) }
+ }
+
+ pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+ let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+ debug_assert!(self.functions.borrow().values().find(|value| **value == function).is_some(),
+ "{:?} ({:?}) is not a function", value, value.get_type());
+ function
+ }
+
+ pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
+ let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
- type Funclet = (); // TODO
+ lvalue
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+ type Value = RValue<'gcc>;
+ type Function = RValue<'gcc>;
+
+ type BasicBlock = Block<'gcc>;
+ type Type = Type<'gcc>;
- type DIScope = (); // TODO
- type DILocation = (); // TODO
- type DIVariable = (); // TODO
++ type Funclet = (); // TODO(antoyo)
+
- //let symbol = self.tcx.symbol_name(instance).name;
-
++ type DIScope = (); // TODO(antoyo)
++ type DILocation = (); // TODO(antoyo)
++ type DIVariable = (); // TODO(antoyo)
+}
+
+impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+ &self.vtables
+ }
+
+ fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let func = get_fn(self, instance);
+ *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
+ func
+ }
+
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
- // TODO: don't do this twice: i.e. in declare_fn and here.
- //let fn_abi = FnAbi::of_instance(self, instance, &[]);
- //let (return_type, params, _) = fn_abi.gcc_type(self);
- // FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
- //let pointer_type = ptr.get_type();
+ let func = get_fn(self, instance);
+ let func = self.rvalue_as_function(func);
+ let ptr = func.get_address(None);
+
- // FIXME: this hack should not be needed. That will probably be removed when
++ // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
++ // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+
+ self.normal_function_addresses.borrow_mut().insert(ptr);
+
+ ptr
+ }
+
+ fn eh_personality(&self) -> RValue<'gcc> {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to codegen that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function, but rather we wired it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
+ if let Some(llpersonality) = self.eh_personality.get() {
+ return llpersonality;
+ }
+ let tcx = self.tcx;
+ let llfn = match tcx.lang_items().eh_personality() {
+ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ ty::Instance::resolve(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ tcx.intern_substs(&[]),
+ )
+ .unwrap().unwrap(),
+ ),
+ _ => {
+ let _name = if wants_msvc_seh(self.sess()) {
+ "__CxxFrameHandler3"
+ } else {
+ "rust_eh_personality"
+ };
+ //let func = self.declare_func(name, self.type_i32(), &[], true);
- //attributes::apply_target_cpu_attr(self, llfn);
++ // FIXME(antoyo): this hack should not be needed. That will probably be removed when
+ // unwinding support is added.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+ };
- //&self.used_statics
++ // TODO(antoyo): apply target cpu attributes.
+ self.eh_personality.set(Some(llfn));
+ llfn
+ }
+
+ fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ fn check_overflow(&self) -> bool {
+ self.check_overflow
+ }
+
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+ self.codegen_unit
+ }
+
+ fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+ unimplemented!();
- // TODO
- //attributes::set_frame_pointer_type(self, llfn)
+ }
+
+ fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
- // TODO
- //attributes::apply_target_cpu_attr(self, llfn)
++ // TODO(antoyo)
+ }
+
+ fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
- /*let name = const_cstr!("llvm.used");
- let section = const_cstr!("llvm.metadata");
- let array =
- self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
-
- unsafe {
- let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
- llvm::LLVMSetInitializer(g, array);
- llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
- llvm::LLVMSetSection(g, section.as_ptr());
- }*/
++ // TODO(antoyo)
+ }
+
+ fn create_used_variable(&self) {
+ unimplemented!();
+ }
+
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+ if self.get_declared_value("main").is_none() {
+ Some(self.declare_cfn("main", fn_type))
+ }
+ else {
+ // If the symbol already exists, it is an error: for example, the user wrote
+ // #[no_mangle] extern "C" fn main(..) {..}
+ // instead of #[start]
+ None
+ }
+ }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'gcc, 'tcx> LayoutOf for CodegenCx<'gcc, 'tcx> {
+ type Ty = Ty<'tcx>;
+ type TyAndLayout = TyAndLayout<'tcx>;
+
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
+ self.spanned_layout_of(ty, DUMMY_SP)
+ }
+
+ fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
+ self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
+ if let LayoutError::SizeOverflow(_) = e {
+ self.sess().span_fatal(span, &e.to_string())
+ } else {
+ bug!("failed to get layout for `{}`: {}", ty, e)
+ }
+ })
+ }
+}
+
+impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+ /// Generates a new symbol name with the given prefix. This symbol name must
+ /// only be used for definitions with `internal` or `private` linkage.
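+ ///
+ /// For example (illustrative), successive calls with the prefix `"str"` produce
+ /// `"str.0"`, `"str.1"`, and so on (the suffix is the counter encoded in base 62).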
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+ let idx = self.local_gen_sym_counter.get();
+ self.local_gen_sym_counter.set(idx + 1);
+ // Include a '.' character, so there can be no accidental conflicts with
+ // user-defined names.
+ let mut name = String::with_capacity(prefix.len() + 6);
+ name.push_str(prefix);
+ name.push_str(".");
+ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+ name
+ }
+}
+
+pub fn unit_name<'tcx>(codegen_unit: &CodegenUnit<'tcx>) -> String {
+ let name = &codegen_unit.name().to_string();
+ mangle_name(&name.replace('-', "_"))
+}
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+ match tls_model {
+ TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+ TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+ TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+ TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+ }
+}
--- /dev/null
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "ensuring function source hash is set for instance={:?}; function_source_hash={}",
- instance, function_source_hash,
- );
- let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
- coverage_map
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .set_function_source_hash(function_source_hash);
- true
- } else {
- false
- }*/
+use gccjit::RValue;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::{
+ CodeRegion,
+ CounterValueReference,
+ ExpressionOperandId,
+ InjectedExpressionId,
+ Op,
+};
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn set_function_source_hash(
+ &mut self,
+ _instance: Instance<'tcx>,
+ _function_source_hash: u64,
+ ) -> bool {
+ unimplemented!();
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
- at {:?}",
- instance, function_source_hash, id, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter(function_source_hash, id, region);
- true
- } else {
- false
- }*/
- // TODO
+ }
+
+ fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
- at {:?}",
- instance, id, lhs, op, rhs, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_counter_expression(id, lhs, op, rhs, region);
- true
- } else {
- false
- }*/
- // TODO
++ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!(
- "adding unreachable code to coverage_regions: instance={:?}, at {:?}",
- instance, region,
- );
- let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
- coverage_regions
- .entry(instance)
- .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
- .add_unreachable_region(region);
- true
- } else {
- false
- }*/
- // TODO
++ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
- // TODO
- //mapgen::finalize(self)
++ // TODO(antoyo)
+ false
+ }
+}
+
+impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn coverageinfo_finalize(&self) {
- /*if let Some(coverage_context) = self.coverage_context() {
- debug!("getting pgo_func_name_var for instance={:?}", instance);
- let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
- pgo_func_name_var_map
- .entry(instance)
- .or_insert_with(|| create_pgo_func_name_var(self, instance))
- } else {
- bug!("Could not get the `coverage_context`");
- }*/
++ // TODO(antoyo)
+ }
+
+ fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
+ unimplemented!();
- /*let instance = declare_unused_fn(self, &def_id);
- codegen_unused_fn_and_counter(self, instance);
- add_unused_function_coverage(self, instance, def_id);*/
+ }
+
+ /// Functions with MIR-based coverage are normally codegenned _only_ if
+ /// called. LLVM coverage tools typically expect every function to be
+ /// defined (even if unused), with at least one call to LLVM intrinsic
+ /// `instrprof.increment`.
+ ///
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented.
+ ///
+ /// For used/called functions, the coverageinfo was already added to the
+ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+ /// But in this case, since the unused function was _not_ previously
+ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+ /// them. The first `CodeRegion` is used to add a single counter, with the
+ /// same counter ID used in the injected `instrprof.increment` intrinsic
+ /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// added as `unreachable_region`s.
+ fn define_unused_fn(&self, _def_id: DefId) {
+ unimplemented!();
+ }
+}
--- /dev/null
- /*let cx = self.cx();
-
- // Convert the direct and indirect offsets to address ops.
- // FIXME(eddyb) use `const`s instead of getting the values via FFI,
- // the values should match the ones in the DWARF standard anyway.
- let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
- let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
- let mut addr_ops = SmallVec::<[_; 8]>::new();
-
- if direct_offset.bytes() > 0 {
- addr_ops.push(op_plus_uconst());
- addr_ops.push(direct_offset.bytes() as i64);
- }
- for &offset in indirect_offsets {
- addr_ops.push(op_deref());
- if offset.bytes() > 0 {
- addr_ops.push(op_plus_uconst());
- addr_ops.push(offset.bytes() as i64);
- }
- }
-
- // FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
- // to avoid having to pass it down in both places?
- // NB: `var` doesn't seem to know about the column, so that's a limitation.
- let dbg_loc = cx.create_debug_loc(scope_metadata, span);
- unsafe {
- // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
- llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
- DIB(cx),
- variable_alloca,
- dbg_var,
- addr_ops.as_ptr(),
- addr_ops.len() as c_uint,
- dbg_loc,
- self.llbb(),
- );
- }*/
+use gccjit::{FunctionType, RValue};
+use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::{BuilderMethods, DebugInfoBuilderMethods, DebugInfoMethods};
+use rustc_middle::middle::cstore::CrateDepKind;
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_span::def_id::LOCAL_CRATE;
+use rustc_target::abi::Size;
+use rustc_target::abi::call::FnAbi;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
+ unimplemented!();
- /*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) {
- unimplemented!();
- /*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
-
- let dbg_loc = self.cx().create_debug_loc(scope, span);
-
- unsafe {
- llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
- }*/
- }*/
-
+ }
+
- // TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added:
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
- // FIXME: better way to find if a crate is of proc-macro type?
++ // TODO(antoyo): replace with gcc_jit_context_new_global_with_initializer() if it's added:
+ // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html
+ //
+ // Call the function to initialize global values here.
+ // We assume this is only called for the main function.
+ use std::iter;
+
+ for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) {
- // TODO
- //gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
++ // FIXME(antoyo): better way to find if a crate is of proc-macro type?
+ if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly {
+ // NOTE: proc-macro crates are not included in the executable, so don't call their
+ // initialization routine.
+ let initializer_name = format!("__gccGlobalCrateInit{}", self.cx.tcx.crate_name(crate_num));
+ let codegen_init_func = self.context.new_function(None, FunctionType::Extern, self.context.new_type::<()>(), &[],
+ initializer_name, false);
+ self.llbb().add_eval(None, self.context.new_call(None, codegen_init_func, &[]));
+ }
+ }
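+
+ // For instance (crate names illustrative), with dependencies `core` and `std`,
+ // the loop above emits calls equivalent to:
+ //     __gccGlobalCrateInitcore();
+ //     __gccGlobalCrateInitstd();
+ //     __gccGlobalCrateInit<local crate>();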
+
- // Avoid wasting time if LLVM value names aren't even enabled.
- /*if self.sess().fewer_names() {
- return;
- }
-
- // Only function parameters and instructions are local to a function,
- // don't change the name of anything else (e.g. globals).
- let param_or_inst = unsafe {
- llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
- };
- if !param_or_inst {
- return;
- }
-
- // Avoid replacing the name if it already exists.
- // While we could combine the names somehow, it'd
- // get noisy quick, and the usefulness is dubious.
- if llvm::get_value_name(value).is_empty() {
- llvm::set_value_name(value, name.as_bytes());
- }*/
++ // TODO(antoyo): insert reference to gdb debug scripts section global.
+ }
+
+ fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
+ unimplemented!();
- /*unsafe {
- let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
- llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
- }*/
+ }
+
+ fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
+ unimplemented!();
- //metadata::create_vtable_metadata(self, ty, vtable)
+ }
+}
+
+impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
- // TODO
++ // TODO(antoyo)
+ }
+
+ fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
- //unimplemented!();
++ // TODO(antoyo)
+ None
+ }
+
+ fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
+ unimplemented!();
+ }
+
+ fn debuginfo_finalize(&self) {
- /*let def_id = instance.def_id();
- let containing_scope = get_containing_scope(self, instance);
- let span = self.tcx.def_span(def_id);
- let loc = self.lookup_debug_loc(span.lo());
- let file_metadata = file_metadata(self, &loc.file);
-
- let function_type_metadata = unsafe {
- let fn_signature = get_function_signature(self, fn_abi);
- llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
- };
-
- // Find the enclosing function, in case this is a closure.
- let def_key = self.tcx().def_key(def_id);
- let mut name = def_key.disambiguated_data.data.to_string();
-
- let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
-
- // Get_template_parameters() will append a `<...>` clause to the function
- // name if necessary.
- let generics = self.tcx().generics_of(enclosing_fn_def_id);
- let substs = instance.substs.truncate_to(self.tcx(), generics);
- let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
-
- let linkage_name = &mangled_name_of_instance(self, instance).name;
- // Omit the linkage_name if it is the same as subprogram name.
- let linkage_name = if &name == linkage_name { "" } else { linkage_name };
-
- // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
- let scope_line = loc.line;
-
- let mut flags = DIFlags::FlagPrototyped;
-
- if fn_abi.ret.layout.abi.is_uninhabited() {
- flags |= DIFlags::FlagNoReturn;
- }
-
- let mut spflags = DISPFlags::SPFlagDefinition;
- if is_node_local_to_unit(self, def_id) {
- spflags |= DISPFlags::SPFlagLocalToUnit;
- }
- if self.sess().opts.optimize != config::OptLevel::No {
- spflags |= DISPFlags::SPFlagOptimized;
- }
- if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
- if id.to_def_id() == def_id {
- spflags |= DISPFlags::SPFlagMainSubprogram;
- }
- }
-
- unsafe {
- return llvm::LLVMRustDIBuilderCreateFunction(
- DIB(self),
- containing_scope,
- name.as_ptr().cast(),
- name.len(),
- linkage_name.as_ptr().cast(),
- linkage_name.len(),
- file_metadata,
- loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
- function_type_metadata,
- scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
- flags,
- spflags,
- maybe_definition_llfn,
- template_parameters,
- None,
- );
- }
-
- fn get_function_signature<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
- ) -> &'ll DIArray {
- if cx.sess().opts.debuginfo == DebugInfo::Limited {
- return create_DIArray(DIB(cx), &[]);
- }
-
- let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
-
- // Return type -- llvm::DIBuilder wants this at index 0
- signature.push(if fn_abi.ret.is_ignore() {
- None
- } else {
- Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
- });
-
- // Arguments types
- if cx.sess().target.options.is_like_msvc {
- // FIXME(#42800):
- // There is a bug in MSDIA that leads to a crash when it encounters
- // a fixed-size array of `u8` or something zero-sized in a
- // function-type (see #40477).
- // As a workaround, we replace those fixed-size arrays with a
- // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
- // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
- // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
- // This transformed type is wrong, but these function types are
- // already inaccurate due to ABI adjustments (see #42800).
- signature.extend(fn_abi.args.iter().map(|arg| {
- let t = arg.layout.ty;
- let t = match t.kind() {
- ty::Array(ct, _)
- if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
- {
- cx.tcx.mk_imm_ptr(ct)
- }
- _ => t,
- };
- Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
- }));
- } else {
- signature.extend(
- fn_abi
- .args
- .iter()
- .map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
- );
- }
-
- create_DIArray(DIB(cx), &signature[..])
- }
-
- fn get_template_parameters<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- generics: &ty::Generics,
- substs: SubstsRef<'tcx>,
- name_to_append_suffix_to: &mut String,
- ) -> &'ll DIArray {
- if substs.types().next().is_none() {
- return create_DIArray(DIB(cx), &[]);
- }
-
- name_to_append_suffix_to.push('<');
- for (i, actual_type) in substs.types().enumerate() {
- if i != 0 {
- name_to_append_suffix_to.push(',');
- }
-
- let actual_type =
- cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
- // Add actual type name to <...> clause of function name
- let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
- name_to_append_suffix_to.push_str(&actual_type_name[..]);
- }
- name_to_append_suffix_to.push('>');
-
- // Again, only create type information if full debuginfo is enabled
- let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
- let names = get_parameter_names(cx, generics);
- substs
- .iter()
- .zip(names)
- .filter_map(|(kind, name)| {
- if let GenericArgKind::Type(ty) = kind.unpack() {
- let actual_type =
- cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
- let actual_type_metadata =
- type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
- let name = name.as_str();
- Some(unsafe {
- Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
- DIB(cx),
- None,
- name.as_ptr().cast(),
- name.len(),
- actual_type_metadata,
- ))
- })
- } else {
- None
- }
- })
- .collect()
- } else {
- vec![]
- };
-
- create_DIArray(DIB(cx), &template_params[..])
- }
-
- fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
- let mut names = generics
- .parent
- .map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
- names.extend(generics.params.iter().map(|param| param.name));
- names
- }
-
- fn get_containing_scope<'ll, 'tcx>(
- cx: &CodegenCx<'ll, 'tcx>,
- instance: Instance<'tcx>,
- ) -> &'ll DIScope {
- // First, let's see if this is a method within an inherent impl. Because
- // if yes, we want to make the result subroutine DIE a child of the
- // subroutine's self-type.
- let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
- // If the method does *not* belong to a trait, proceed
- if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
- let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
- instance.substs,
- ty::ParamEnv::reveal_all(),
- &cx.tcx.type_of(impl_def_id),
- );
-
- // Only "class" methods are generally understood by LLVM,
- // so avoid methods on other types (e.g., `<*mut T>::null`).
- match impl_self_ty.kind() {
- ty::Adt(def, ..) if !def.is_box() => {
- // Again, only create type information if full debuginfo is enabled
- if cx.sess().opts.debuginfo == DebugInfo::Full
- && !impl_self_ty.needs_subst()
- {
- Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
- } else {
- Some(namespace::item_namespace(cx, def.did))
- }
- }
- _ => None,
- }
- } else {
- // For trait method impls we still use the "parallel namespace"
- // strategy
- None
- }
- });
-
- self_type.unwrap_or_else(|| {
- namespace::item_namespace(
- cx,
- DefId {
- krate: instance.def_id().krate,
- index: cx
- .tcx
- .def_key(instance.def_id())
- .parent
- .expect("get_containing_scope: missing parent?"),
- },
- )
- })
- }*/
++ // TODO(antoyo)
+ }
+
+ fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
+ unimplemented!();
+ }
+
+ fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
+ unimplemented!();
- /*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
-
- unsafe {
- llvm::LLVMRustDIBuilderCreateDebugLocation(
- utils::debug_context(self).llcontext,
- line.unwrap_or(UNKNOWN_LINE_NUMBER),
- col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
- scope,
- inlined_at,
- )
- }*/
+ }
+
+ fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
+ unimplemented!();
+ }
+}
--- /dev/null
- //debug!("declare_global_with_linkage(name={:?})", name);
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::{CodegenCx, unit_name};
+use crate::intrinsic::llvm;
+use crate::mangled_std_symbols::{ARGV_INIT_ARRAY, ARGV_INIT_WRAPPER};
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
+ if self.globals.borrow().contains_key(name) {
+ let typ = self.globals.borrow().get(name).expect("global").get_type();
+ let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(&link_section.as_str());
+ }
+ global.get_address(None)
+ }
+ else {
+ self.declare_global(name, ty, is_tls, link_section)
+ }
+ }
+
+ pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+ let index = self.global_gen_sym_counter.get();
+ self.global_gen_sym_counter.set(index + 1);
+ let name = format!("global_{}_{}", index, unit_name(&self.codegen_unit));
+ self.context.new_global(None, GlobalKind::Exported, ty, &name)
+ }
+
+ pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
- // FIXME: this is a wrong cast. That requires changing the compiler API.
+ let global = self.context.new_global(None, linkage, ty, name)
+ .get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global);
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(global, name.to_string());
+ global
+ }
+
+ pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
- //debug!("declare_global(name={:?})", name);
- // FIXME: correctly support global variable initialization.
++ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
- // TODO: use the fn_type parameter.
++ // FIXME(antoyo): correctly support global variable initialization.
+ if name.starts_with(ARGV_INIT_ARRAY) {
+ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
+ // name of the variable now to actually declare it later.
+ *self.init_argv_var.borrow_mut() = name.to_string();
+
+ let global = self.context.new_global(None, GlobalKind::Imported, ty, name);
+ if let Some(link_section) = link_section {
+ global.set_link_section(&link_section.as_str());
+ }
+ return global.get_address(None);
+ }
+ let global = self.context.new_global(None, GlobalKind::Exported, ty, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(&link_section.as_str());
+ }
+ let global = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global);
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(global, name.to_string());
+ global
+ }
+
+ pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
- // FIXME: this is a wrong cast. That requires changing the compiler API.
++ // TODO(antoyo): use the fn_type parameter.
+ let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+ let return_type = self.type_i32();
+ let variadic = false;
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
+ // NOTE: the current_func must also be set here, because get_fn() is not called
+ // for the main function.
+ *self.current_func.borrow_mut() = Some(func);
- //debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
++ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+ // NOTE: hack to avoid having to update the names in mangled_std_symbols: we found the name
+ // of the variable earlier, so we declare it now.
+ // Since we don't correctly support initializers yet, we initialize this variable manually
+ // for now.
+ if name.starts_with(ARGV_INIT_WRAPPER) && !self.argv_initialized.get() {
+ let global_name = &*self.init_argv_var.borrow();
+ let return_type = self.type_void();
+ let params = [
+ self.context.new_parameter(None, self.int_type, "argc"),
+ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "argv"),
+ self.context.new_parameter(None, self.u8_type.make_pointer().make_pointer(), "envp"),
+ ];
+ let function = self.context.new_function(None, FunctionType::Extern, return_type, &params, name, false);
+ let initializer = function.get_address(None);
+
+ let param_types = [
+ self.int_type,
+ self.u8_type.make_pointer().make_pointer(),
+ self.u8_type.make_pointer().make_pointer(),
+ ];
+ let ty = self.context.new_function_pointer_type(None, return_type, &param_types, false);
+
+ let global = self.context.new_global(None, GlobalKind::Exported, ty, global_name);
+ global.set_link_section(".init_array.00099");
+ global.global_set_initializer_value(initializer);
+ let global = global.get_address(None);
+ self.globals.borrow_mut().insert(global_name.to_string(), global);
+ // NOTE: global seems to only be global in a module. So save the name instead of the value
+ // to import it later.
+ self.global_names.borrow_mut().insert(global, global_name.to_string());
+ self.argv_initialized.set(true);
+ }
- //fn_abi.apply_attrs_llfn(self, func);
- // FIXME: this is a wrong cast. That requires changing the compiler API.
+ let (return_type, params, variadic) = fn_abi.gcc_type(self);
+ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
- //debug!("get_declared_value(name={:?})", name);
- // TODO: use a different field than globals, because this seems to return a function?
++ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> Option<RValue<'gcc>> {
+ Some(self.get_or_insert_global(name, ty, is_tls, link_section))
+ }
+
+ pub fn define_private_global(&self, ty: Type<'gcc>) -> RValue<'gcc> {
+ let global = self.declare_unnamed_global(ty);
+ global.get_address(None)
+ }
+
+ pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
-
- /*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
- // TODO: gcc does not allow global initialization.
- None
- /*self.get_declared_value(name).and_then(|val| {
- let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
- if !declaration { Some(val) } else { None }
- })*/
- }*/
++ // TODO(antoyo): use a different field than globals, because this seems to return a function?
+ self.globals.borrow().get(name).cloned()
+ }
- //debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
- /*let llfn = unsafe {
- llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
- };*/
-
+}
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
- .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name.
+ if name.starts_with("llvm.") {
+ return llvm::intrinsic(name, cx);
+ }
+ let func =
+ if cx.functions.borrow().contains_key(name) {
+ *cx.functions.borrow().get(name).expect("function")
+ }
+ else {
+ let params: Vec<_> = param_types.into_iter().enumerate()
- //llvm::SetFunctionCallConv(llfn, callconv); // TODO
- // Function addresses in Rust are never significant, allowing functions to
- // be merged.
- //llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO
-
- /*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
- llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
- }*/
-
- //attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
- //attributes::non_lazy_bind(cx.sess(), llfn);
++ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+ .collect();
+ let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
+ cx.functions.borrow_mut().insert(name.to_string(), func);
+ func
+ };
+
- // FIXME: invalid cast.
- // TODO: is this line useful?
- //cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
++ // TODO(antoyo): set function calling convention.
++ // TODO(antoyo): set unnamed address.
++ // TODO(antoyo): set no red zone function attribute.
++ // TODO(antoyo): set attributes for optimisation.
++ // TODO(antoyo): set attributes for non lazy bind.
+
- // FIXME: this is a hack because libgccjit currently only supports alpha, num and _.
++ // FIXME(antoyo): invalid cast.
+ func
+}
+
++// FIXME(antoyo): this is a hack because libgccjit currently only supports alphanumeric characters and `_` in names.
+// Unsupported characters: `$` and `.`.
+pub fn mangle_name(name: &str) -> String {
+ name.replace(|char: char| {
+ if !char.is_alphanumeric() && char != '_' {
+ debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
+ true
+ }
+ else {
+ false
+ }
+ }, "_")
+}
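+
+// A hedged usage sketch for `mangle_name` (the input string is made up for
+// illustration): every `$` and `.` becomes `_`, everything else is kept.
+#[cfg(test)]
+mod mangle_name_tests {
+ use super::mangle_name;
+
+ #[test]
+ fn replaces_unsupported_chars() {
+ assert_eq!(mangle_name("foo$bar.baz"), "foo_bar_baz");
+ }
+}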
--- /dev/null
- // TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+use gccjit::Function;
+
+use crate::context::CodegenCx;
+
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ let _gcc_name =
+ match name {
+ "llvm.x86.xgetbv" => {
+ let gcc_name = "__builtin_trap";
+ let func = cx.context.get_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ return func;
+ },
- println!("Get target builtin");
++ // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+ "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+ "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+ "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name)
+ };
+
- /*let func = cx.context.get_target_builtin_function(gcc_name);
- cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
- func*/
+ unimplemented!();
+}
--- /dev/null
- // FIXME: remove this cast when the API supports function.
+pub mod llvm;
+mod simd;
+
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_target::abi::{HasDataLayout, LayoutOf};
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+
+fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+ let gcc_name = match name {
+ sym::sqrtf32 => "sqrtf",
+ sym::sqrtf64 => "sqrt",
+ sym::powif32 => "__builtin_powif",
+ sym::powif64 => "__builtin_powi",
+ sym::sinf32 => "sinf",
+ sym::sinf64 => "sin",
+ sym::cosf32 => "cosf",
+ sym::cosf64 => "cos",
+ sym::powf32 => "powf",
+ sym::powf64 => "pow",
+ sym::expf32 => "expf",
+ sym::expf64 => "exp",
+ sym::exp2f32 => "exp2f",
+ sym::exp2f64 => "exp2",
+ sym::logf32 => "logf",
+ sym::logf64 => "log",
+ sym::log10f32 => "log10f",
+ sym::log10f64 => "log10",
+ sym::log2f32 => "log2f",
+ sym::log2f64 => "log2",
+ sym::fmaf32 => "fmaf",
+ sym::fmaf64 => "fma",
+ sym::fabsf32 => "fabsf",
+ sym::fabsf64 => "fabs",
+ sym::minnumf32 => "fminf",
+ sym::minnumf64 => "fmin",
+ sym::maxnumf32 => "fmaxf",
+ sym::maxnumf64 => "fmax",
+ sym::copysignf32 => "copysignf",
+ sym::copysignf64 => "copysign",
+ sym::floorf32 => "floorf",
+ sym::floorf64 => "floor",
+ sym::ceilf32 => "ceilf",
+ sym::ceilf64 => "ceil",
+ sym::truncf32 => "truncf",
+ sym::truncf64 => "trunc",
+ sym::rintf32 => "rintf",
+ sym::rintf64 => "rint",
+ sym::nearbyintf32 => "nearbyintf",
+ sym::nearbyintf64 => "nearbyint",
+ sym::roundf32 => "roundf",
+ sym::roundf64 => "round",
+ sym::abort => "abort",
+ _ => return None,
+ };
+ Some(cx.context.get_builtin_function(&gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+ let tcx = self.tcx;
+ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+ let (def_id, substs) = match *callee_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+ _ => bug!("expected fn item type, found {}", callee_ty),
+ };
+
+ let sig = callee_ty.fn_sig(tcx);
+ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = tcx.item_name(def_id);
+ let name_str = &*name.as_str();
+
+ let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let simple = get_simple_intrinsic(self, name);
+ let llval =
+ match name {
+ _ if simple.is_some() => {
- /*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
- self.call(llfn, &[], None)*/
++ // FIXME(antoyo): remove this cast when the API supports function.
+ let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+ self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+ },
+ sym::likely => {
+ self.expect(args[0].immediate(), true)
+ }
+ sym::unlikely => {
+ self.expect(args[0].immediate(), false)
+ }
+ kw::Try => {
+ try_intrinsic(
+ self,
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ llresult,
+ );
+ return;
+ }
+ sym::breakpoint => {
+ unimplemented!();
- /*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
- self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
+ }
+ sym::va_copy => {
+ unimplemented!();
- /*match fn_abi.ret.layout.abi {
- abi::Abi::Scalar(ref scalar) => {
- match scalar.value {
- Primitive::Int(..) => {
- if self.cx().size_of(ret_ty).bytes() < 4 {
- // `va_arg` should not be called on a integer type
- // less than 4 bytes in length. If it is, promote
- // the integer to a `i32` and truncate the result
- // back to the smaller type.
- let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
- self.trunc(promoted_result, llret_ty)
- } else {
- emit_va_arg(self, args[0], ret_ty)
- }
- }
- Primitive::F64 | Primitive::Pointer => {
- emit_va_arg(self, args[0], ret_ty)
- }
- // `va_arg` should never be used with the return type f32.
- Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
- }
- }
- _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
- }*/
+ }
+ sym::va_arg => {
+ unimplemented!();
- // TODO
- /*let align = if name == sym::unaligned_volatile_load {
- 1
- } else {
- self.align_of(tp_ty).bytes() as u32
- };
- unsafe {
- llvm::LLVMSetAlignment(load, align);
- }*/
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ let tp_ty = substs.type_at(0);
+ let mut ptr = args[0].immediate();
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
+ }
+ let load = self.volatile_load(ptr.get_type(), ptr);
- /*let expect = self.get_intrinsic(&("llvm.prefetch"));
- let (rw, cache_type) = match name {
- sym::prefetch_read_data => (0, 1),
- sym::prefetch_write_data => (1, 1),
- sym::prefetch_read_instruction => (0, 0),
- sym::prefetch_write_instruction => (1, 0),
- _ => bug!(),
- };
- self.call(
- expect,
- &[
- args[0].immediate(),
- self.const_i32(rw),
- args[1].immediate(),
- self.const_i32(cache_type),
- ],
- None,
- )*/
++ // TODO(antoyo): set alignment.
+ self.to_immediate(load, self.layout_of(tp_ty))
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.volatile_store(self, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.unaligned_volatile_store(self, dst);
+ return;
+ }
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => {
+ unimplemented!();
-
- /*let y = self.const_bool(false);
- let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
- self.call(llfn, &[args[0].immediate(), y], None)*/
+ }
+ sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctpop
+ | sym::bswap
+ | sym::bitreverse
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::saturating_add
+ | sym::saturating_sub => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, self) {
+ Some((width, signed)) => match name {
+ sym::ctlz | sym::cttz => {
+ let func = self.current_func.borrow().expect("func");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+
+ let arg = args[0].immediate();
+ let result = func.new_local(None, arg.get_type(), "zeros");
+ let zero = self.cx.context.new_rvalue_zero(arg.get_type());
+ let cond = self.cx.context.new_comparison(None, ComparisonOp::Equals, arg, zero);
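+ // NOTE: GCC's __builtin_clz/__builtin_ctz (used by the helpers called
+ // below) are undefined for a zero argument, so the zero case branches
+ // here and yields the full bit width, as the Rust intrinsics require.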
+ self.block.expect("block").end_with_conditional(None, cond, then_block, else_block);
+
+ let zero_result = self.cx.context.new_rvalue_from_long(arg.get_type(), width as i64);
+ then_block.add_assignment(None, result, zero_result);
+ then_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place
+ // count_leading_zeroes() does not expect, the current blocks
+ // in the state need to be updated.
+ *self.current_block.borrow_mut() = Some(else_block);
+ self.block = Some(else_block);
+
+ let zeros =
+ match name {
+ sym::ctlz => self.count_leading_zeroes(width, arg),
+ sym::cttz => self.count_trailing_zeroes(width, arg),
+ _ => unreachable!(),
+ };
+ else_block.add_assignment(None, result, zeros);
+ else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+ // expect, the current blocks in the state need to be updated.
+ *self.current_block.borrow_mut() = Some(after_block);
+ self.block = Some(after_block);
+
+ result.to_rvalue()
- // TODO: check if it's faster to use string literals and a
+ }
+ sym::ctlz_nonzero => {
+ self.count_leading_zeroes(width, args[0].immediate())
+ },
+ sym::cttz_nonzero => {
+ self.count_trailing_zeroes(width, args[0].immediate())
+ }
+ sym::ctpop => self.pop_count(args[0].immediate()),
+ sym::bswap => {
+ if width == 8 {
+ args[0].immediate() // byte swapping a u8/i8 is just a no-op
+ }
+ else {
- // FIXME: this cast should not be necessary. Remove
++ // TODO(antoyo): check if it's faster to use string literals and a
+ // match instead of format!.
+ let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
+ let mut arg = args[0].immediate();
- // TODO: implement using algorithm from:
++ // FIXME(antoyo): this cast should not be necessary. Remove
+ // it once proper sized integer types are available.
+ let param_type = bswap.get_param(0).to_rvalue().get_type();
+ if param_type != arg.get_type() {
+ arg = self.bitcast(arg, param_type);
+ }
+ self.cx.context.new_call(None, bswap, &[arg])
+ }
+ },
+ sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+ sym::rotate_left | sym::rotate_right => {
- let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
++ // TODO(antoyo): implement using the algorithm from:
+ // https://blog.regehr.org/archives/1063
+ // for other platforms.
+ let is_left = name == sym::rotate_left;
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ if is_left {
+ self.rotate_left(val, raw_shift, width)
+ }
+ else {
+ self.rotate_right(val, raw_shift, width)
+ }
+ },
+ sym::saturating_add => {
+ self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ sym::saturating_sub => {
+ self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ tcx.sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::raw_eq => {
+ use rustc_target::abi::Abi::*;
+ let tp_ty = substs.type_at(0);
+ let layout = self.layout_of(tp_ty).layout;
+ let use_integer_compare = match layout.abi {
+ Scalar(_) | ScalarPair(_, _) => true,
+ Uninhabited | Vector { .. } => false,
+ Aggregate { .. } => {
+ // For rusty ABIs, small aggregates are actually passed
+ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+ // so we re-use that same threshold here.
+ layout.size <= self.data_layout().pointer_size * 2
+ }
+ };
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if layout.size.bytes() == 0 {
+ self.const_bool(true)
+ }
+ /*else if use_integer_compare {
- // TODO: switch to asumme when it exists.
++ let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ }*/
+ else {
+ let void_ptr_type = self.context.new_type::<*const ()>();
+ let a_ptr = self.bitcast(a, void_ptr_type);
+ let b_ptr = self.bitcast(b, void_ptr_type);
+ let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
+ let builtin = self.context.get_builtin_function("memcmp");
+ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+ self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+ }
+ }
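+ // In other words, the fallback above lowers `raw_eq` on a size-n type to
+ // `memcmp(a, b, n) == 0`; a plain-Rust model (hypothetical, illustration
+ // only) would be:
+ //
+ //     unsafe fn raw_eq_model(a: *const u8, b: *const u8, n: usize) -> bool {
+ //         libc::memcmp(a.cast(), b.cast(), n) == 0
+ //     }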
+
+ _ if name_str.starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ }
+ else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ let func = self.context.get_builtin_function("abort");
+ let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+ self.call(self.type_void(), func, &[], None);
+ }
+
+ fn assume(&mut self, value: Self::Value) {
- // TODO
- /*let expect = self.context.get_builtin_function("__builtin_expect");
- let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
- self.call(expect, &[cond, self.const_bool(expected)], None)*/
++ // TODO(antoyo): switch to assume when it exists.
+ // Or use something like this:
+ // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+ self.expect(value, true);
+ }
+
+ fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
- // TODO
- /*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
- let fnname = self.get_intrinsic(&("llvm.sideeffect"));
- self.call(fnname, &[], None);
- }*/
++ // TODO(antoyo)
+ cond
+ }
+
+ fn sideeffect(&mut self) {
- /*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
- self.call(intrinsic, &[va_list], None)*/
++ // TODO(antoyo)
+ }
+
+ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- /*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
- self.call(intrinsic, &[va_list], None)*/
+ }
+
+ fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
- // TODO: Refactor with other implementations.
+ }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+
+ fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ arg_abi.store(self, val, dst)
+ }
+
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+ fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ /// Gets the GCC type for a place of the original Rust type of
+ /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ self.layout.gcc_type(cx, true)
+ }
+
+ /// Stores a direct/indirect value described by this ArgAbi into a
+ /// place for the original Rust type of this argument/return.
+ /// Can be used both for storing formal arguments into Rust variables
+ /// and for storing results of call/invoke instructions into their destinations.
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ }
+ else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ }
+ else if let PassMode::Cast(cast) = self.mode {
+ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ }
+ else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+ // Instead, we allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ }
+ else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ let mut next = || {
+ let val = bx.current_func().get_param(*idx as i32);
+ *idx += 1;
+ val.to_rvalue()
+ };
+ match self.mode {
+ PassMode::Ignore => {}
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ }
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ }
+ PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg.to_rvalue(), dst);
+ }
+ }
+ }
+}
+
+fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => Some((
+ match t {
+ rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::IntTy::I8 => 8,
+ rustc_middle::ty::IntTy::I16 => 16,
+ rustc_middle::ty::IntTy::I32 => 32,
+ rustc_middle::ty::IntTy::I64 => 64,
+ rustc_middle::ty::IntTy::I128 => 128,
+ },
+ true,
+ )),
+ ty::Uint(t) => Some((
+ match t {
+ rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::UintTy::U8 => 8,
+ rustc_middle::ty::UintTy::U16 => 16,
+ rustc_middle::ty::UintTy::U32 => 32,
+ rustc_middle::ty::UintTy::U64 => 64,
+ rustc_middle::ty::UintTy::U128 => 128,
+ },
+ false,
+ )),
+ _ => None,
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
+ let typ = value.get_type();
+ let context = &self.cx.context;
+ match width {
+ 8 => {
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
+ let step3 = self.or(left, right);
+
+ step3
+ },
+ 16 => {
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
+ let step4 = self.or(left, right);
+
+ step4
+ },
+ 32 => {
- let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead?
++ // TODO(antoyo): Refactor with other implementations.
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
+ let step4 = self.or(left, right);
+
+ // Fifth step.
+ let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
+ let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
+ let step5 = self.or(left, right);
+
+ step5
+ },
+ 64 => {
+ // First step.
+ let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
+ let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
- // TODO: find a more efficient implementation?
++ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
+ let left = self.xor(step2, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
+ let left = self.or(temp, left);
+ let step3 = self.xor(left, step2);
+
+ // Fourth step.
+ let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
+ let left = self.xor(step3, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
+ let left = self.or(temp, left);
+ let step4 = self.xor(left, step3);
+
+ // Fifth step.
+ let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+ let left = self.xor(step4, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+ let left = self.or(temp, left);
+ let step5 = self.xor(left, step4);
+
+ step5
+ },
+ 128 => {
- // TODO: use width?
++ // TODO(antoyo): find a more efficient implementation?
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, value, self.u64_type);
+
+ let reversed_high = self.bit_reverse(64, high);
+ let reversed_low = self.bit_reverse(64, low);
+
+ let new_low = self.context.new_cast(None, reversed_high, typ);
+ let new_high = self.context.new_cast(None, reversed_low, typ) << sixty_four;
+
+ new_low | new_high
+ },
+ _ => {
+ panic!("cannot bit reverse with width = {}", width);
+ },
+ }
+ }
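+
+ // Plain-Rust reference (illustration only, u8 case) for the
+ // swap-halves/pairs/bits reduction emitted above; not used by the
+ // codegen itself.
+ #[allow(dead_code)]
+ fn bit_reverse_u8_reference(value: u8) -> u8 {
+ let step1 = (value >> 4) | (value << 4);
+ let step2 = ((step1 & 0xCC) >> 2) | ((step1 & 0x33) << 2);
+ ((step2 & 0xAA) >> 1) | ((step2 & 0x55) << 1)
+ }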
+
+ fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: use the optimized version with fewer operations.
++ // TODO(antoyo): use width?
+ let arg_type = arg.get_type();
+ let count_leading_zeroes =
+ if arg_type.is_uint(&self.cx) {
+ "__builtin_clz"
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ "__builtin_clzl"
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ "__builtin_clzll"
+ }
+ else if width == 128 {
+ // Algorithm from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_loading_zeroes_results");
+
+ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[high]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.context.new_cast(None, self.context.new_call(None, clzll, &[low]), arg_type) + sixty_four;
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_high + not_low_and_not_high;
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.context.new_cast(None, res, arg_type);
+ }
+ else {
+ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clz");
+ let arg = self.context.new_cast(None, arg, self.uint_type);
+ let diff = self.int_width(self.uint_type) - self.int_width(arg_type);
+ let diff = self.context.new_rvalue_from_long(self.int_type, diff);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+ return self.context.new_cast(None, res, arg_type);
+ };
+ let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
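+
+ // Plain-Rust model (illustration only) of the branch-free u128 path
+ // above: the two logical negations select one of three precomputed
+ // results without branching, and the entries that would be undefined
+ // for a zero argument are never selected.
+ #[allow(dead_code)]
+ fn clz_u128_reference(value: u128) -> u32 {
+ let high = (value >> 64) as u64;
+ let low = value as u64;
+ let results = [high.leading_zeros(), low.leading_zeros() + 64, 128];
+ results[(high == 0) as usize + ((high == 0) & (low == 0)) as usize]
+ }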
+
+ fn count_trailing_zeroes(&self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ let arg_type = arg.get_type();
+ let (count_trailing_zeroes, expected_type) =
+ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+ // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
+ ("__builtin_ctz", self.cx.uint_type)
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ ("__builtin_ctzl", self.cx.ulong_type)
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ ("__builtin_ctzll", self.cx.ulonglong_type)
+ }
+ else if arg_type.is_u128(&self.cx) {
+ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+ .new_local(None, array_type, "count_loading_zeroes_results");
+
+ let sixty_four = self.context.new_rvalue_from_long(arg_type, 64);
+ let high = self.context.new_cast(None, arg >> sixty_four, self.u64_type);
+ let low = self.context.new_cast(None, arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[low]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.context.new_cast(None, self.context.new_call(None, ctzll, &[high]), arg_type) + sixty_four;
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.context.new_rvalue_from_long(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_low + not_low_and_not_high;
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.context.new_cast(None, res, arg_type);
+ }
+ else {
+ unimplemented!("count_trailing_zeroes for {:?}", arg_type);
+ };
+ let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+ let arg =
+ if arg_type != expected_type {
+ self.context.new_cast(None, arg, expected_type)
+ }
+ else {
+ arg
+ };
+ let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
+
+ fn int_width(&self, typ: Type<'gcc>) -> i64 {
+ self.cx.int_width(typ) as i64
+ }
+
+ fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
- // TODO: implement in the normal algorithm below to have a more efficient
++ // TODO(antoyo): use the optimized version with fewer operations.
+ let value_type = value.get_type();
+
+ if value_type.is_u128(&self.cx) {
- //codegen_msvc_try(bx, try_func, data, catch_func, dest);
++ // TODO(antoyo): handle this case in the normal algorithm below to get a more
+ // efficient implementation (one that does not require a call to __popcountdi2).
+ let popcount = self.context.get_builtin_function("__builtin_popcountll");
+ let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
+ let high = self.context.new_cast(None, value >> sixty_four, self.cx.ulonglong_type);
+ let high = self.context.new_call(None, popcount, &[high]);
+ let low = self.context.new_cast(None, value, self.cx.ulonglong_type);
+ let low = self.context.new_call(None, popcount, &[low]);
+ return high + low;
+ }
+
+ // First step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Second step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Third step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u8(&self.cx) {
+ return value;
+ }
+
+ // Fourth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u16(&self.cx) {
+ return value;
+ }
+
+ // Fifth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u32(&self.cx) {
+ return value;
+ }
+
+ // Sixth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
+ let right = shifted & mask;
+ let value = left + right;
+
+ value
+ }
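+
+ // Plain-Rust reference (illustration only, u32 case) for the
+ // mask-and-add reduction above: each step sums adjacent bit groups of
+ // doubling size until the whole word holds the population count.
+ #[allow(dead_code)]
+ fn pop_count_u32_reference(value: u32) -> u32 {
+ let value = (value & 0x55555555) + ((value >> 1) & 0x55555555);
+ let value = (value & 0x33333333) + ((value >> 2) & 0x33333333);
+ let value = (value & 0x0F0F0F0F) + ((value >> 4) & 0x0F0F0F0F);
+ let value = (value & 0x00FF00FF) + ((value >> 8) & 0x00FF00FF);
+ (value & 0x0000FFFF) + (value >> 16)
+ }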
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
+ fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
+ let shift = shift % max;
+ let lhs = self.shl(value, shift);
+ let result_and =
+ self.and(
+ self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
+ self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+ );
+ let rhs = self.lshr(value, result_and);
+ self.or(lhs, rhs)
+ }
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
+ fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.context.new_rvalue_from_long(shift.get_type(), width as i64);
+ let shift = shift % max;
+ let lhs = self.lshr(value, shift);
+ let result_and =
+ self.and(
+ self.context.new_unary_op(None, UnaryOp::Minus, shift.get_type(), shift),
+ self.context.new_rvalue_from_long(shift.get_type(), width as i64 - 1),
+ );
+ let rhs = self.shl(value, result_and);
+ self.or(lhs, rhs)
+ }
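+
+ // Plain-Rust model (illustration only, u32 case) of the branch-free
+ // rotate above; the `& 31` mask keeps the second shift amount in range,
+ // so the expression is well defined even when `shift % width == 0`.
+ #[allow(dead_code)]
+ fn rotate_left_u32_reference(value: u32, shift: u32) -> u32 {
+ let shift = shift % 32;
+ (value << shift) | (value >> (shift.wrapping_neg() & 31))
+ }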
+
+ fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let func = self.current_func.borrow().expect("func");
+
+ if signed {
+ // Algorithm from: https://stackoverflow.com/a/56531252/389119
+ let after_block = func.new_block("after");
+ let func_name =
+ match width {
+ 8 => "__builtin_add_overflow",
+ 16 => "__builtin_add_overflow",
+ 32 => "__builtin_sadd_overflow",
+ 64 => "__builtin_saddll_overflow",
+ 128 => "__builtin_add_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ let result_type = lhs.get_type();
+ let res = func.new_local(None, result_type, "saturating_sum");
+ let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
+
+ let then_block = func.new_block("then");
+
+ let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
+ let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
+ let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
+ self.context.new_rvalue_from_int(unsigned_type, 0)
+ );
+ let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
+ then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+ // expect, the current blocks in the state need to be updated.
+ *self.current_block.borrow_mut() = Some(after_block);
+ self.block = Some(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+ // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
+ let res = lhs + rhs;
+ let res_type = res.get_type();
+ let cond = self.context.new_comparison(None, ComparisonOp::LessThan, res, lhs);
+ let value = self.context.new_unary_op(None, UnaryOp::Minus, res_type, self.context.new_cast(None, cond, res_type));
+ res | value
+ }
+ }
+
+ // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+ fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ if signed {
+ // Also based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func_name =
+ match width {
+ 8 => "__builtin_sub_overflow",
+ 16 => "__builtin_sub_overflow",
+ 32 => "__builtin_ssub_overflow",
+ 64 => "__builtin_ssubll_overflow",
+ 128 => "__builtin_sub_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ let result_type = lhs.get_type();
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_diff");
+ let overflow = self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None);
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ let unsigned_type = self.context.new_int_type(width as i32 / 8, false);
+ let shifted = self.context.new_cast(None, lhs, unsigned_type) >> self.context.new_rvalue_from_int(unsigned_type, width as i32 - 1);
+ let uint_max = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, unsigned_type,
+ self.context.new_rvalue_from_int(unsigned_type, 0)
+ );
+ let int_max = uint_max >> self.context.new_rvalue_one(unsigned_type);
+ then_block.add_assignment(None, res, self.context.new_cast(None, shifted + int_max, result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.block.expect("block").end_with_conditional(None, overflow, then_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not
+ // expect, the current blocks in the state need to be updated.
+ *self.current_block.borrow_mut() = Some(after_block);
+ self.block = Some(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+ let res = lhs - rhs;
+ let comparison = self.context.new_comparison(None, ComparisonOp::LessThanEquals, res, lhs);
+ let comparison = self.context.new_cast(None, comparison, lhs.get_type());
+ let unary_op = self.context.new_unary_op(None, UnaryOp::Minus, comparison.get_type(), comparison);
+ self.and(res, unary_op)
+ }
+ }
+}
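+
+// Plain-Rust models (illustration only, 32-bit case) of the two saturating
+// lowerings above. On signed overflow the saturated value is i32::MAX when
+// lhs >= 0 and i32::MIN otherwise, which is ((lhs as u32) >> 31) + i32::MAX
+// reinterpreted as i32; the unsigned subtraction clamps to zero with a
+// branch-free mask.
+#[allow(dead_code)]
+fn saturating_add_i32_reference(lhs: i32, rhs: i32) -> i32 {
+ match lhs.checked_add(rhs) {
+ Some(sum) => sum,
+ None => (((lhs as u32) >> 31) + (i32::MAX as u32)) as i32,
+ }
+}
+
+#[allow(dead_code)]
+fn saturating_sub_u32_reference(lhs: u32, rhs: u32) -> u32 {
+ let res = lhs.wrapping_sub(rhs);
+ res & ((res <= lhs) as u32).wrapping_neg()
+}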
+
+fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+ if bx.sess().panic_strategy() == PanicStrategy::Abort {
+ bx.call(bx.type_void(), try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx.data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ }
+ else if wants_msvc_seh(bx.sess()) {
+ unimplemented!();
- //codegen_gnu_try(bx, try_func, data, catch_func, dest);
+ }
+ else {
+ unimplemented!();
-
- // MSVC's definition of the `rust_try` function.
- //
- // This implementation uses the new exception handling instructions in LLVM
- // which have support in LLVM for SEH on MSVC targets. Although these
- // instructions are meant to work for all targets, as of the time of this
- // writing, however, LLVM does not recommend the usage of these new instructions
- // as the old ones are still more optimized.
- /*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
- unimplemented!();
- /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
- bx.set_personality_fn(bx.eh_personality());
- bx.sideeffect();
-
- let mut normal = bx.build_sibling_block("normal");
- let mut catchswitch = bx.build_sibling_block("catchswitch");
- let mut catchpad = bx.build_sibling_block("catchpad");
- let mut caught = bx.build_sibling_block("caught");
-
- let try_func = llvm::get_param(bx.llfn(), 0);
- let data = llvm::get_param(bx.llfn(), 1);
- let catch_func = llvm::get_param(bx.llfn(), 2);
-
- // We're generating an IR snippet that looks like:
- //
- // declare i32 @rust_try(%try_func, %data, %catch_func) {
- // %slot = alloca u8*
- // invoke %try_func(%data) to label %normal unwind label %catchswitch
- //
- // normal:
- // ret i32 0
- //
- // catchswitch:
- // %cs = catchswitch within none [%catchpad] unwind to caller
- //
- // catchpad:
- // %tok = catchpad within %cs [%type_descriptor, 0, %slot]
- // %ptr = load %slot
- // call %catch_func(%data, %ptr)
- // catchret from %tok to label %caught
- //
- // caught:
- // ret i32 1
- // }
- //
- // This structure follows the basic usage of throw/try/catch in LLVM.
- // For example, compile this C++ snippet to see what LLVM generates:
- //
- // #include <stdint.h>
- //
- // struct rust_panic {
- // rust_panic(const rust_panic&);
- // ~rust_panic();
- //
- // uint64_t x[2];
- // };
- //
- // int __rust_try(
- // void (*try_func)(void*),
- // void *data,
- // void (*catch_func)(void*, void*) noexcept
- // ) {
- // try {
- // try_func(data);
- // return 0;
- // } catch(rust_panic& a) {
- // catch_func(data, &a);
- // return 1;
- // }
- // }
- //
- // More information can be found in libstd's seh.rs implementation.
- let ptr_align = bx.tcx().data_layout.pointer_align.abi;
- let slot = bx.alloca(bx.type_i8p(), ptr_align);
- bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
-
- normal.ret(bx.const_i32(0));
-
- let cs = catchswitch.catch_switch(None, None, 1);
- catchswitch.add_handler(cs, catchpad.llbb());
-
- // We can't use the TypeDescriptor defined in libpanic_unwind because it
- // might be in another DLL and the SEH encoding only supports specifying
- // a TypeDescriptor from the current module.
- //
- // However this isn't an issue since the MSVC runtime uses string
- // comparison on the type name to match TypeDescriptors rather than
- // pointer equality.
- //
- // So instead we generate a new TypeDescriptor in each module that uses
- // `try` and let the linker merge duplicate definitions in the same
- // module.
- //
- // When modifying, make sure that the type_name string exactly matches
- // the one used in src/libpanic_unwind/seh.rs.
- let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
- let type_name = bx.const_bytes(b"rust_panic\0");
- let type_info =
- bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
- let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
- unsafe {
- llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
- llvm::SetUniqueComdat(bx.llmod, tydesc);
- llvm::LLVMSetInitializer(tydesc, type_info);
- }
-
- // The flag value of 8 indicates that we are catching the exception by
- // reference instead of by value. We can't use catch by value because
- // that requires copying the exception object, which we don't support
- // since our exception object effectively contains a Box.
- //
- // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
- let flags = bx.const_i32(8);
- let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
- let ptr = catchpad.load(slot, ptr_align);
- catchpad.call(catch_func, &[data, ptr], Some(&funclet));
-
- catchpad.catch_ret(&funclet, caught.llbb());
-
- caught.ret(bx.const_i32(1));
- });
-
- // Note that no invoke is used here because by definition this function
- // can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
- let i32_align = bx.tcx().data_layout.i32_align.abi;
- bx.store(ret, dest, i32_align);*/
- }*/
-
- // Definition of the standard `try` function for Rust using the GNU-like model
- // of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
- // instructions).
- //
- // This codegen is a little surprising because we always call a shim
- // function instead of inlining the call to `invoke` manually here. This is done
- // because in LLVM we're only allowed to have one personality per function
- // definition. The call to the `try` intrinsic is being inlined into the
- // function calling it, and that function may already have other personality
- // functions in play. By calling a shim we're guaranteed that our shim will have
- // the right personality function.
- /*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
- unimplemented!();
- /*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
- // Codegens the shims described above:
- //
- // bx:
- // invoke %try_func(%data) normal %normal unwind %catch
- //
- // normal:
- // ret 0
- //
- // catch:
- // (%ptr, _) = landingpad
- // call %catch_func(%data, %ptr)
- // ret 1
-
- bx.sideeffect();
-
- let mut then = bx.build_sibling_block("then");
- let mut catch = bx.build_sibling_block("catch");
-
- let try_func = llvm::get_param(bx.llfn(), 0);
- let data = llvm::get_param(bx.llfn(), 1);
- let catch_func = llvm::get_param(bx.llfn(), 2);
- bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
- then.ret(bx.const_i32(0));
-
- // Type indicator for the exception being thrown.
- //
- // The first value in this tuple is a pointer to the exception object
- // being thrown. The second value is a "selector" indicating which of
- // the landing pad clauses the exception's type had been matched to.
- // rust_try ignores the selector.
- let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
- let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
- let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
- Some(tydesc) => {
- let tydesc = bx.get_static(tydesc);
- bx.bitcast(tydesc, bx.type_i8p())
- }
- None => bx.const_null(bx.type_i8p()),
- };
- catch.add_clause(vals, tydesc);
- let ptr = catch.extract_value(vals, 0);
- catch.call(catch_func, &[data, ptr], None);
- catch.ret(bx.const_i32(1));
- });
-
- // Note that no invoke is used here because by definition this function
- // can't panic (that's what it's catching).
- let ret = bx.call(llfn, &[try_func, data, catch_func], None);
- let i32_align = bx.tcx().data_layout.i32_align.abi;
- bx.store(ret, dest, i32_align);*/
- }*/
+ }
+}
--- /dev/null
- //println!("Generic simd: {}", name);
-
+use gccjit::{RValue, Type};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+
+use crate::builder::Builder;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
- /*if name == sym::simd_select_bitmask {
- let in_ty = arg_tys[0];
- let m_len = match in_ty.kind() {
- // Note that this `.unwrap()` crashes for isize/usize, that's sort
- // of intentional as there's not currently a use case for that.
- ty::Int(i) => i.bit_width().unwrap(),
- ty::Uint(i) => i.bit_width().unwrap(),
- _ => return_error!("`{}` is not an integral type", in_ty),
- };
- require_simd!(arg_tys[1], "argument");
- let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- require!(
- // Allow masks for vectors with fewer than 8 elements to be
- // represented with a u8 or i8.
- m_len == v_len || (m_len == 8 && v_len < 8),
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
- );
- let i1 = bx.type_i1();
- let im = bx.type_ix(v_len);
- let i1xn = bx.type_vector(i1, v_len);
- let m_im = bx.trunc(args[0].immediate(), im);
- let m_i1s = bx.bitcast(m_im, i1xn);
- return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
- }*/
-
+ // macros for error handling:
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+ let name_str = &*name.as_str();
+
- //let total_len = u128::from(in_len) * 2;
-
+ // every intrinsic below takes a SIMD vector as its first argument
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ out_ty
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if let Some(stripped) = name_str.strip_prefix("simd_shuffle") {
+ let n: u64 = stripped.parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ });
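+ // For example (hypothetical call), a `simd_shuffle4` invocation parses
+ // `n == 4` from the name suffix.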
+
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == out_ty,
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ out_ty
+ );
+
- // TODO:
- /*let indices: Option<Vec<_>> = (0..n)
- .map(|i| {
- let arg_idx = i;
- let val = bx.const_get_vector_element(vector, i as u64);
- match bx.const_to_opt_u128(val, true) {
- None => {
- emit_error!("shuffle index #{} is not a constant", arg_idx);
- None
- }
- Some(idx) if idx >= total_len => {
- emit_error!(
- "shuffle index #{} is out of bounds (limit {})",
- arg_idx,
- total_len
- );
- None
- }
- Some(idx) => Some(bx.const_i32(idx as i32)),
- }
- })
- .collect();
- let indices = match indices {
- Some(i) => i,
- None => return Ok(bx.const_null(llret_ty)),
- };*/
-
+ let vector = args[2].immediate();
+
- /*if name == sym::simd_insert {
- require!(
- in_elem == arg_tys[2],
- "expected inserted type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- arg_tys[2]
- );
- return Ok(bx.insert_element(
- args[0].immediate(),
- args[2].immediate(),
- args[1].immediate(),
- ));
- }
- if name == sym::simd_extract {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
- }
-
- if name == sym::simd_select {
- let m_elem_ty = in_elem;
- let m_len = in_len;
- require_simd!(arg_tys[1], "argument");
- let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- require!(
- m_len == v_len,
- "mismatched lengths: mask length `{}` != other vector length `{}`",
- m_len,
- v_len
- );
- match m_elem_ty.kind() {
- ty::Int(_) => {}
- _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
- }
- // truncate the mask to a vector of i1s
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, m_len as u64);
- let m_i1s = bx.trunc(args[0].immediate(), i1xn);
- return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
- }
-
- if name == sym::simd_bitmask {
- // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
- // vector mask and returns an unsigned integer containing the most
- // significant bit (MSB) of each lane.
-
- // If the vector has less than 8 lanes, an u8 is returned with zeroed
- // trailing bits.
- let expected_int_bits = in_len.max(8);
- match ret_ty.kind() {
- ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
- _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
- }
-
- // Integer vector <i{in_bitwidth} x in_len>:
- let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
- ty::Int(i) => (
- args[0].immediate(),
- i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
- ),
- ty::Uint(i) => (
- args[0].immediate(),
- i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
- ),
- _ => return_error!(
- "vector argument `{}`'s element type `{}`, expected integer element type",
- in_ty,
- in_elem
- ),
- };
-
- // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
- let shift_indices =
- vec![
- bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
- in_len as _
- ];
- let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
- // Truncate vector to an <i1 x N>
- let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
- // Bitcast <i1 x N> to iN:
- let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
- // Zero-extend iN to the bitmask type:
- return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
- }
-
- fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>(
- name: Symbol,
- in_elem: &::rustc_middle::ty::TyS<'_>,
- in_ty: &::rustc_middle::ty::TyS<'_>,
- in_len: u64,
- bx: &mut Builder<'a, 'gcc, 'tcx>,
- span: Span,
- args: &[OperandRef<'tcx, RValue<'gcc>>],
- ) -> Result<RValue<'gcc>, ()> {
- macro_rules! emit_error {
- ($msg: tt) => {
- emit_error!($msg, )
- };
- ($msg: tt, $($fmt: tt)*) => {
- span_invalid_monomorphization_error(
- bx.sess(), span,
- &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
- name, $($fmt)*));
- }
- }
- macro_rules! return_error {
- ($($fmt: tt)*) => {
- {
- emit_error!($($fmt)*);
- return Err(());
- }
- }
- }
-
- let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
- let elem_ty = bx.cx.type_float_from_ty(*f);
- match f.bit_width() {
- 32 => ("f32", elem_ty),
- 64 => ("f64", elem_ty),
- _ => {
- return_error!(
- "unsupported element type `{}` of floating-point vector `{}`",
- f.name_str(),
- in_ty
- );
- }
- }
- } else {
- return_error!("`{}` is not a floating-point type", in_ty);
- };
-
- let vec_ty = bx.type_vector(elem_ty, in_len);
-
- let (intr_name, fn_ty) = match name {
- sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
- sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
- sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
- sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
- sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
- _ => return_error!("unrecognized intrinsic `{}`", name),
- };
- let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
- let f = bx.declare_cfn(&llvm_name, fn_ty);
- let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
- Ok(c)
- }
-
- if std::matches!(
- name,
- sym::simd_ceil
- | sym::simd_fabs
- | sym::simd_fcos
- | sym::simd_fexp2
- | sym::simd_fexp
- | sym::simd_flog10
- | sym::simd_flog2
- | sym::simd_flog
- | sym::simd_floor
- | sym::simd_fma
- | sym::simd_fpow
- | sym::simd_fpowi
- | sym::simd_fsin
- | sym::simd_fsqrt
- | sym::simd_round
- | sym::simd_trunc
- ) {
- return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
- }
-
- // FIXME: use:
- // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
- // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
- fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
- let p0s: String = "p0".repeat(no_pointers);
- match *elem_ty.kind() {
- ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
- ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
- _ => unreachable!(),
- }
- }
-
- fn gcc_vector_ty<'gcc>(
- cx: &CodegenCx<'gcc, '_>,
- elem_ty: Ty<'_>,
- vec_len: u64,
- mut no_pointers: usize,
- ) -> Type<'gcc> {
- // FIXME: use cx.layout_of(ty).llvm_type() ?
- let mut elem_ty = match *elem_ty.kind() {
- ty::Int(v) => cx.type_int_from_ty(v),
- ty::Uint(v) => cx.type_uint_from_ty(v),
- ty::Float(v) => cx.type_float_from_ty(v),
- _ => unreachable!(),
- };
- while no_pointers > 0 {
- elem_ty = cx.type_ptr_to(elem_ty);
- no_pointers -= 1;
- }
- cx.type_vector(elem_ty, vec_len)
- }
-
- if name == sym::simd_gather {
- // simd_gather(values: <N x T>, pointers: <N x *_ T>,
- // mask: <N x i{M}>) -> <N x T>
- // * N: number of elements in the input vectors
- // * T: type of the element to load
- // * M: any integer width is supported, will be truncated to i1
-
- // All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
- require_simd!(ret_ty, "return");
-
- // Of the same length:
- let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
- require!(
- in_len == out_len,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- out_len
- );
- require!(
- in_len == out_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- out_len2
- );
-
- // The return type must match the first argument type
- require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
-
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
- _ => {
- require!(
- false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*_ {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
- );
- unreachable!();
- }
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
-
- // The element type of the third argument must be a signed integer type of any width:
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
- match element_ty2.kind() {
- ty::Int(_) => (),
- _ => {
- require!(
- false,
- "expected element type `{}` of third argument `{}` \
- to be a signed integer type",
- element_ty2,
- arg_tys[2]
- );
- }
- }
-
- // Alignment of T, must be a constant integer value:
- let alignment_ty = bx.type_i32();
- let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
-
- // Truncate the mask vector to a vector of i1s:
- let (mask, mask_ty) = {
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len);
- (bx.trunc(args[2].immediate(), i1xn), i1xn)
- };
-
- // Type of the vector of pointers:
- let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
-
- // Type of the vector of elements:
- let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
-
- let llvm_intrinsic =
- format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(
- &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
- llvm_elem_vec_ty,
- ),
- );
- let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
- return Ok(v);
- }
-
- if name == sym::simd_scatter {
- // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
- // mask: <N x i{M}>) -> ()
- // * N: number of elements in the input vectors
- // * T: type of the element to load
- // * M: any integer width is supported, will be truncated to i1
-
- // All types must be simd vector types
- require_simd!(in_ty, "first");
- require_simd!(arg_tys[1], "second");
- require_simd!(arg_tys[2], "third");
-
- // Of the same length:
- let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
- require!(
- in_len == element_len1,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "second",
- in_len,
- in_ty,
- arg_tys[1],
- element_len1
- );
- require!(
- in_len == element_len2,
- "expected {} argument with length {} (same as input type `{}`), \
- found `{}` with length {}",
- "third",
- in_len,
- in_ty,
- arg_tys[2],
- element_len2
- );
-
- // This counts how many pointers
- fn ptr_count(t: Ty<'_>) -> usize {
- match t.kind() {
- ty::RawPtr(p) => 1 + ptr_count(p.ty),
- _ => 0,
- }
- }
-
- // Non-ptr type
- fn non_ptr(t: Ty<'_>) -> Ty<'_> {
- match t.kind() {
- ty::RawPtr(p) => non_ptr(p.ty),
- _ => t,
- }
- }
-
- // The second argument must be a simd vector with an element type that's a pointer
- // to the element type of the first argument
- let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
- let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
- let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
- let (pointer_count, underlying_ty) = match element_ty1.kind() {
- ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
- (ptr_count(element_ty1), non_ptr(element_ty1))
- }
- _ => {
- require!(
- false,
- "expected element type `{}` of second argument `{}` \
- to be a pointer to the element type `{}` of the first \
- argument `{}`, found `{}` != `*mut {}`",
- element_ty1,
- arg_tys[1],
- in_elem,
- in_ty,
- element_ty1,
- in_elem
- );
- unreachable!();
- }
- };
- assert!(pointer_count > 0);
- assert_eq!(pointer_count - 1, ptr_count(element_ty0));
- assert_eq!(underlying_ty, non_ptr(element_ty0));
-
- // The element type of the third argument must be a signed integer type of any width:
- match element_ty2.kind() {
- ty::Int(_) => (),
- _ => {
- require!(
- false,
- "expected element type `{}` of third argument `{}` \
- be a signed integer type",
- element_ty2,
- arg_tys[2]
- );
- }
- }
-
- // Alignment of T, must be a constant integer value:
- let alignment_ty = bx.type_i32();
- let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
-
- // Truncate the mask vector to a vector of i1s:
- let (mask, mask_ty) = {
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len);
- (bx.trunc(args[2].immediate(), i1xn), i1xn)
- };
-
- let ret_t = bx.type_void();
-
- // Type of the vector of pointers:
- let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
- let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
-
- // Type of the vector of elements:
- let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
- let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
-
- let llvm_intrinsic =
- format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
- );
- let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
- return Ok(v);
- }
-
- macro_rules! arith_red {
- ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
- $identity:expr) => {
- if name == sym::$name {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {
- let r = bx.$integer_reduce(args[0].immediate());
- if $ordered {
- // if overflow occurs, the result is the
- // mathematical result modulo 2^n:
- Ok(bx.$op(args[1].immediate(), r))
- } else {
- Ok(bx.$integer_reduce(args[0].immediate()))
- }
- }
- ty::Float(f) => {
- let acc = if $ordered {
- // ordered arithmetic reductions take an accumulator
- args[1].immediate()
- } else {
- // unordered arithmetic reductions use the identity accumulator
- match f.bit_width() {
- 32 => bx.const_real(bx.type_f32(), $identity),
- 64 => bx.const_real(bx.type_f64(), $identity),
- v => return_error!(
- r#"
- unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
- sym::$name,
- in_ty,
- in_elem,
- v,
- ret_ty
- ),
- }
- };
- Ok(bx.$float_reduce(acc, args[0].immediate()))
- }
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
- arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
- arith_red!(
- simd_reduce_add_unordered: vector_reduce_add,
- vector_reduce_fadd_fast,
- false,
- add,
- 0.0
- );
- arith_red!(
- simd_reduce_mul_unordered: vector_reduce_mul,
- vector_reduce_fmul_fast,
- false,
- mul,
- 1.0
- );
-
- macro_rules! minmax_red {
- ($name:ident: $int_red:ident, $float_red:ident) => {
- if name == sym::$name {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- return match in_elem.kind() {
- ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
- ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
- ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
- minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
-
- minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
- minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
-
- macro_rules! bitwise_red {
- ($name:ident : $red:ident, $boolean:expr) => {
- if name == sym::$name {
- let input = if !$boolean {
- require!(
- ret_ty == in_elem,
- "expected return type `{}` (element of input `{}`), found `{}`",
- in_elem,
- in_ty,
- ret_ty
- );
- args[0].immediate()
- } else {
- match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {}
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- }
-
- // boolean reductions operate on vectors of i1s:
- let i1 = bx.type_i1();
- let i1xn = bx.type_vector(i1, in_len as u64);
- bx.trunc(args[0].immediate(), i1xn)
- };
- return match in_elem.kind() {
- ty::Int(_) | ty::Uint(_) => {
- let r = bx.$red(input);
- Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
- }
- _ => return_error!(
- "unsupported {} from `{}` with element `{}` to `{}`",
- sym::$name,
- in_ty,
- in_elem,
- ret_ty
- ),
- };
- }
- };
- }
-
- bitwise_red!(simd_reduce_and: vector_reduce_and, false);
- bitwise_red!(simd_reduce_or: vector_reduce_or, false);
- bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
- bitwise_red!(simd_reduce_all: vector_reduce_and, true);
- bitwise_red!(simd_reduce_any: vector_reduce_or, true);
-
- if name == sym::simd_cast {
- require_simd!(ret_ty, "return");
- let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
- require!(
- in_len == out_len,
- "expected return type with length {} (same as input type `{}`), \
- found `{}` with length {}",
- in_len,
- in_ty,
- ret_ty,
- out_len
- );
- // casting cares about nominal type, not just structural type
- if in_elem == out_elem {
- return Ok(args[0].immediate());
- }
-
- enum Style {
- Float,
- Int(/* is signed? */ bool),
- Unsupported,
- }
-
- let (in_style, in_width) = match in_elem.kind() {
- // vectors of pointer-sized integers should've been
- // disallowed before here, so this unwrap is safe.
- ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::Float(f) => (Style::Float, f.bit_width()),
- _ => (Style::Unsupported, 0),
- };
- let (out_style, out_width) = match out_elem.kind() {
- ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
- ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
- ty::Float(f) => (Style::Float, f.bit_width()),
- _ => (Style::Unsupported, 0),
- };
-
- match (in_style, out_style) {
- (Style::Int(in_is_signed), Style::Int(_)) => {
- return Ok(match in_width.cmp(&out_width) {
- Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
- Ordering::Equal => args[0].immediate(),
- Ordering::Less => {
- if in_is_signed {
- bx.sext(args[0].immediate(), llret_ty)
- } else {
- bx.zext(args[0].immediate(), llret_ty)
- }
- }
- });
- }
- (Style::Int(in_is_signed), Style::Float) => {
- return Ok(if in_is_signed {
- bx.sitofp(args[0].immediate(), llret_ty)
- } else {
- bx.uitofp(args[0].immediate(), llret_ty)
- });
- }
- (Style::Float, Style::Int(out_is_signed)) => {
- return Ok(if out_is_signed {
- bx.fptosi(args[0].immediate(), llret_ty)
- } else {
- bx.fptoui(args[0].immediate(), llret_ty)
- });
- }
- (Style::Float, Style::Float) => {
- return Ok(match in_width.cmp(&out_width) {
- Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
- Ordering::Equal => args[0].immediate(),
- Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
- });
- }
- _ => { /* Unsupported. Fallthrough. */ }
- }
- require!(
- false,
- "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
- in_ty,
- in_elem,
- ret_ty,
- out_elem
- );
- }*/
-
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ vector,
+ ));
+ }
+
- simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors.
+ macro_rules! arith_binary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
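+
+ // As an illustration, the `simd_add: Uint, Int => add, Float => fadd;` entry below
+ // expands to roughly the following (hand-expanded sketch, not compiler output):
+ //
+ //     if name == sym::simd_add {
+ //         match in_elem.kind() {
+ //             ty::Uint(_) | ty::Int(_) => {
+ //                 return Ok(bx.add(args[0].immediate(), args[1].immediate()))
+ //             }
+ //             ty::Float(_) => {
+ //                 return Ok(bx.fadd(args[0].immediate(), args[1].immediate()))
+ //             }
+ //             _ => {},
+ //         }
+ //         require!(false, "unsupported operation on `{}` with element `{}`", in_ty, in_elem)
+ //     }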
+
+ arith_binary! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
- /*simd_fmax: Float => maxnum;
- simd_fmin: Float => minnum;*/
++ simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
+ simd_xor: Uint, Int => xor;
- /*macro_rules! arith_unary {
- ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
- $(if name == sym::$name {
- match in_elem.kind() {
- $($(ty::$p(_))|* => {
- return Ok(bx.$call(args[0].immediate()))
- })*
- _ => {},
- }
- require!(false,
- "unsupported operation on `{}` with element `{}`",
- in_ty,
- in_elem)
- })*
- }
- }
-
- arith_unary! {
- simd_neg: Int => neg, Float => fneg;
- }
-
- if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
- let lhs = args[0].immediate();
- let rhs = args[1].immediate();
- let is_add = name == sym::simd_saturating_add;
- let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
- let (signed, elem_width, elem_ty) = match *in_elem.kind() {
- ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
- ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
- _ => {
- return_error!(
- "expected element type `{}` of vector type `{}` \
- to be a signed or unsigned integer type",
- arg_tys[0].simd_size_and_type(bx.tcx()).1,
- arg_tys[0]
- );
- }
- };
- let llvm_intrinsic = &format!(
- "llvm.{}{}.sat.v{}i{}",
- if signed { 's' } else { 'u' },
- if is_add { "add" } else { "sub" },
- in_len,
- elem_width
- );
- let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
-
- let f = bx.declare_cfn(
- &llvm_intrinsic,
- bx.type_func(&[vec_ty, vec_ty], vec_ty),
- );
- let v = bx.call(f, &[lhs, rhs], None);
- return Ok(v);
- }*/
-
+ }
+
-
- //span_bug!(span, "unknown SIMD intrinsic");
+ unimplemented!("simd {}", name);
+}
--- /dev/null
- * TODO: support #[inline] attributes.
- * TODO: support LTO.
+/*
- * TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh.
- * TODO: remove the object dependency.
- * TODO: remove the patches.
++ * TODO(antoyo): support #[inline] attributes.
++ * TODO(antoyo): support LTO.
+ *
- /*extern crate flate2;
- extern crate libc;*/
++ * TODO(antoyo): remove the patches.
+ */
+
+#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
+#![allow(broken_intra_doc_links)]
+#![recursion_limit="256"]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+
- //extern crate rustc_fs_util;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
- mod va_arg;
+extern crate rustc_hir;
+extern crate rustc_metadata;
+extern crate rustc_middle;
+extern crate rustc_mir;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_symbol_mangling;
+extern crate rustc_target;
+extern crate snap;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+mod abi;
+mod allocator;
+mod archive;
+mod asm;
+mod back;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod mangled_std_symbols;
+mod mono_item;
+mod type_;
+mod type_of;
- // TODO: remove when global initializer work without calling a function at runtime.
+
+use std::any::Any;
+use std::sync::Arc;
+
+use gccjit::{Block, Context, FunctionType, OptimizationLevel};
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::target_features::supported_target_features;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorReported, Handler};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::cstore::EncodedMetadata;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{CrateType, Lto, OptLevel, OutputFilenames};
+use rustc_session::Session;
+use rustc_span::Symbol;
+use rustc_span::fatal_error::FatalError;
+
+use crate::context::unit_name;
+
+pub struct PrintOnPanic<F: Fn() -> String>(pub F);
+
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
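+
+// Hypothetical usage: `let _guard = PrintOnPanic(|| format!("codegen of `{}`", name));`
+// prints the message only if the thread unwinds while the guard is alive.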
+
+#[derive(Clone)]
+pub struct GccCodegenBackend;
+
+impl CodegenBackend for GccCodegenBackend {
+ fn init(&self, sess: &Session) {
+ if sess.lto() != Lto::No {
+ sess.warn("LTO is not supported. You may get a linker error.");
+ }
+ }
+
+ fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
+ let target_cpu = target_cpu(tcx.sess);
+ let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
+
+ rustc_symbol_mangling::test::report_symbol_names(tcx);
+
+ Box::new(res)
+ }
+
+ fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorReported> {
+ let (codegen_results, work_products) = ongoing_codegen
+ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
+ .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
+ .join(sess);
+
+ Ok((codegen_results, work_products))
+ }
+
+ fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> {
+ use rustc_codegen_ssa::back::link::link_binary;
+ if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) {
- // TODO: set opt level.
++ // TODO(antoyo): remove when global initializers work without calling a function at runtime.
+ // HACK: since this codegen adds some symbols (e.g. __gccGlobalCrateInit) and the UI
+ // tests load libstd.so as a dynamic library, and rustc uses a version script to specify
+ // the symbols' visibility, we add * to export all symbols.
+ // It seems other symbols from libstd/libcore are causing some issues here as well.
+ symbols.push("*".to_string());
+ }
+
+ link_binary::<crate::archive::ArArchiveBuilder<'_>>(
+ sess,
+ &codegen_results,
+ outputs,
+ )
+ }
+
+ fn target_features(&self, sess: &Session) -> Vec<Symbol> {
+ target_features(sess)
+ }
+}
+
+impl ExtraBackendMethods for GccCodegenBackend {
+ fn new_metadata<'tcx>(&self, _tcx: TyCtxt<'tcx>, _mod_name: &str) -> Self::Module {
+ GccContext {
+ context: Context::default(),
+ }
+ }
+
+ fn write_compressed_metadata<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: &EncodedMetadata, gcc_module: &mut Self::Module) {
+ base::write_compressed_metadata(tcx, metadata, gcc_module)
+ }
+
+ fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, mods: &mut Self::Module, kind: AllocatorKind, has_alloc_error_handler: bool) {
+ unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) }
+ }
+
+ fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
+ base::compile_codegen_unit(tcx, cgu_name)
+ }
+
+ fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
- // TODO
- //llvm_util::tune_cpu(sess)
++ // TODO(antoyo): set opt level.
+ Arc::new(|_| {
+ Ok(())
+ })
+ }
+
+ fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
+ unimplemented!();
+ }
+
+ fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
+ None
- // FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
++ // TODO(antoyo)
+ }
+}
+
+pub struct ModuleBuffer;
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct ThinBuffer;
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct GccContext {
+ context: Context<'static>,
+}
+
+unsafe impl Send for GccContext {}
- // TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
++// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
+unsafe impl Sync for GccContext {}
+
+impl WriteBackendMethods for GccCodegenBackend {
+ type Module = GccContext;
+ type TargetMachine = ();
+ type ModuleBuffer = ModuleBuffer;
+ type Context = ();
+ type ThinData = ();
+ type ThinBuffer = ThinBuffer;
+
+ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
- /*info!("pushing serialized module {:?}", name);
- let buffer = SerializedModule::Local(buffer);
- serialized_modules.push((buffer, CString::new(name).unwrap()));*/
++ // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
+ // NOTE: implemented elsewhere.
+ let module =
+ match modules.remove(0) {
+ FatLTOInput::InMemory(module) => module,
+ FatLTOInput::Serialized { .. } => {
+ unimplemented!();
- //if cgcx.lto == Lto::Fat {
- //module.module_llvm.context.add_driver_option("-flto");
- //}
+ }
+ };
+ Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
+ }
+
+ fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+ unimplemented!();
+ }
+
+ fn print_pass_timings(&self) {
+ unimplemented!();
+ }
+
+ unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
- // TODO
+ module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+ Ok(())
+ }
+
+ unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: &mut ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ unimplemented!();
+ }
+
+ unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ back::write::codegen(cgcx, diag_handler, module, config)
+ }
+
+ fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+ unimplemented!();
+ }
+
+ fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+ unimplemented!();
+ }
+
+ fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
- /*fn target_triple(sess: &Session) -> target_lexicon::Triple {
- sess.target.llvm_target.parse().unwrap()
- }*/
-
++ // TODO(antoyo)
+ Ok(())
+ }
+
+ fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::write::link(cgcx, diag_handler, modules)
+ }
+}
+
- /*unsafe {
- let mut len = 0;
- let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
- str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
- }*/
+/// This is the entrypoint for a hot-plugged rustc_codegen_gccjit.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(GccCodegenBackend)
+}
+
+fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
+ match optlevel {
+ None => OptimizationLevel::None,
+ Some(level) => {
+ match level {
+ OptLevel::No => OptimizationLevel::None,
+ OptLevel::Less => OptimizationLevel::Limited,
+ OptLevel::Default => OptimizationLevel::Standard,
+ OptLevel::Aggressive => OptimizationLevel::Aggressive,
+ OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
+ }
+ },
+ }
+}
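+// NOTE: the gccjit `OptimizationLevel` enum appears to offer only None, Limited,
+// Standard and Aggressive (-O0 to -O3), so `Size`/`SizeMin` are approximated with
+// `Limited` above.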
+
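+// Emits, into `block`, one call per codegen unit to that unit's extern
+// `__gccGlobalInit*` function, so that every unit's global initializers run at startup.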
+fn create_function_calling_initializers<'gcc, 'tcx>(tcx: TyCtxt<'tcx>, context: &Context<'gcc>, block: Block<'gcc>) {
+ let codegen_units = tcx.collect_and_partition_mono_items(()).1;
+ for codegen_unit in codegen_units {
+ let codegen_init_func = context.new_function(None, FunctionType::Extern, context.new_type::<()>(), &[],
+ &format!("__gccGlobalInit{}", unit_name(&codegen_unit)), false);
+ block.add_eval(None, context.new_call(None, codegen_init_func, &[]));
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return name;
+ }
+
+ unimplemented!();
- /*if feature.starts_with("sse") {
- return true;
- }*/
- // TODO: implement a way to get enabled feature in libgccjit.
- //println!("Feature: {}", feature);
- /*let llvm_feature = to_llvm_feature(sess, feature);
- let cstr = CString::new(llvm_feature).unwrap();
- unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ let name = sess.opts.cg.target_cpu.as_ref().unwrap_or(&sess.target.cpu);
+ handle_native(name)
+}
+
+pub fn target_features(sess: &Session) -> Vec<Symbol> {
+ supported_target_features(sess)
+ .iter()
+ .filter_map(
+ |&(feature, gate)| {
+ if sess.is_nightly_build() || gate.is_none() { Some(feature) } else { None }
+ },
+ )
+ .filter(|_feature| {
++ // TODO(antoyo): implement a way to get the enabled features in libgccjit.
+ false
+ })
+ .map(|feature| Symbol::intern(feature))
+ .collect()
+}
--- /dev/null
- // TODO
- /*unsafe {
- llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage));
- llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility));
- }*/
-
+use rustc_codegen_ssa::traits::PreDefineMethods;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::{self, Instance, TypeFoldable};
+use rustc_middle::ty::layout::FnAbiExt;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::LayoutOf;
+use rustc_target::abi::call::FnAbi;
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section).unwrap_or_else(|| {
+ self.sess().span_fatal(
+ self.tcx.def_span(def_id),
+ &format!("symbol `{}` is already defined", symbol_name),
+ )
+ });
+
- // TODO: call set_link_section() to allow initializing argc/argv.
- //base::set_link_section(decl, &attrs);
- /*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
- llvm::SetUniqueComdat(self.llmod, decl);
- }*/
-
- //debug!("predefine_fn: instance = {:?}", instance);
-
- // TODO: use inline attribute from there in linkage.set() above:
- //attributes::from_fn_attrs(self, decl, instance);
-
- //self.instances.borrow_mut().insert(instance, decl);
++ // TODO(antoyo): set linkage and visibility.
+ self.instances.borrow_mut().insert(instance, global);
+ }
+
+ fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ assert!(!instance.substs.needs_infer() && !instance.substs.has_param_types_or_consts());
+
+ let fn_abi = FnAbi::of_instance(self, instance, &[]);
+ self.linkage.set(base::linkage_to_gcc(linkage));
+ let _decl = self.declare_fn(symbol_name, &fn_abi);
+ //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+
++ // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
++ // TODO(antoyo): set unique comdat.
++ // TODO(antoyo): use inline attribute from there in linkage.set() above.
+ }
+}
--- /dev/null
- /*
- let bytes = (num_bits / 8).next_power_of_two() as i32;
- println!("num_bits: {}, bytes: {}", num_bits, bytes);
- self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer.
- */
+use std::convert::TryInto;
+
+use gccjit::{RValue, Struct, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+ // gcc only supports 1, 2, 4 or 8-byte integers.
++ // FIXME(antoyo): it is misleading to use the next power of two here, as
++ // rustc_codegen_ssa sometimes uses 96-bit integers and the code below will then
++ // return an integer of a different size.
+ let bytes = (num_bits / 8).next_power_of_two() as i32;
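+ // For example, num_bits == 96 gives bytes == 16, so a 128-bit integer is
+ // returned instead of a 96-bit one.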
+ match bytes {
+ 1 => self.i8_type,
+ 2 => self.i16_type,
+ 4 => self.i32_type,
+ 8 => self.i64_type,
+ 16 => self.i128_type,
+ _ => panic!("unexpected num_bits: {}", num_bits),
+ }
- /*pub fn type_bool(&self) -> Type<'gcc> {
- self.bool_type
- }*/
-
+ }
+
-
- /*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
- match t {
- ty::IntTy::Isize => self.type_isize(),
- ty::IntTy::I8 => self.type_i8(),
- ty::IntTy::I16 => self.type_i16(),
- ty::IntTy::I32 => self.type_i32(),
- ty::IntTy::I64 => self.type_i64(),
- ty::IntTy::I128 => self.type_i128(),
- }
- }
-
- pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
- match t {
- ty::UintTy::Usize => self.type_isize(),
- ty::UintTy::U8 => self.type_i8(),
- ty::UintTy::U16 => self.type_i16(),
- ty::UintTy::U32 => self.type_i32(),
- ty::UintTy::U64 => self.type_i64(),
- ty::UintTy::U128 => self.type_i128(),
- }
- }
-
- pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
- match t {
- ty::FloatTy::F32 => self.type_f32(),
- ty::FloatTy::F64 => self.type_f64(),
- }
- }
-
- pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
- self.context.new_vector_type(ty, len)
- }*/
+ pub fn type_void(&self) -> Type<'gcc> {
+ self.context.new_type::<()>()
+ }
+
+ pub fn type_size_t(&self) -> Type<'gcc> {
+ self.context.new_type::<usize>()
+ }
+
+ pub fn type_u8(&self) -> Type<'gcc> {
+ self.u8_type
+ }
+
+ pub fn type_u16(&self) -> Type<'gcc> {
+ self.u16_type
+ }
+
+ pub fn type_u32(&self) -> Type<'gcc> {
+ self.u32_type
+ }
+
+ pub fn type_u64(&self) -> Type<'gcc> {
+ self.u64_type
+ }
+
+ pub fn type_u128(&self) -> Type<'gcc> {
+ self.u128_type
+ }
+
+ pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
- // TODO: use packed.
- //let name = types.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
- //let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type();
+}
+
+impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn type_i1(&self) -> Type<'gcc> {
+ self.bool_type
+ }
+
+ fn type_i8(&self) -> Type<'gcc> {
+ self.i8_type
+ }
+
+ fn type_i16(&self) -> Type<'gcc> {
+ self.i16_type
+ }
+
+ fn type_i32(&self) -> Type<'gcc> {
+ self.i32_type
+ }
+
+ fn type_i64(&self) -> Type<'gcc> {
+ self.i64_type
+ }
+
+ fn type_i128(&self) -> Type<'gcc> {
+ self.i128_type
+ }
+
+ fn type_isize(&self) -> Type<'gcc> {
+ self.isize_type
+ }
+
+ fn type_f32(&self) -> Type<'gcc> {
+ self.context.new_type::<f32>()
+ }
+
+ fn type_f64(&self) -> Type<'gcc> {
+ self.context.new_type::<f64>()
+ }
+
+ fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
+ self.context.new_function_pointer_type(None, return_type, params, false)
+ }
+
+ fn type_struct(&self, fields: &[Type<'gcc>], _packed: bool) -> Type<'gcc> {
+ let types = fields.to_vec();
+ if let Some(typ) = self.struct_types.borrow().get(fields) {
+ return typ.clone();
+ }
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
+ .collect();
- // TODO
++ // TODO(antoyo): use packed.
+ let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
+ self.struct_types.borrow_mut().insert(types, typ);
+ typ
+ }
+
+ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+ if typ.is_integral() {
+ TypeKind::Integer
+ }
+ else if typ.is_vector().is_some() {
+ TypeKind::Vector
+ }
+ else {
- // TODO
- /*assert_ne!(self.type_kind(ty), TypeKind::Function,
- "don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead"
- );*/
++ // TODO(antoyo): support other types.
+ TypeKind::Void
+ }
+ }
+
+ fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
- // TODO: use address_space
+ ty.make_pointer()
+ }
+
+ fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
- //unsafe { llvm::LLVMGetVectorSize(ty) as usize }
++ // TODO(antoyo): use address_space
+ ty.make_pointer()
+ }
+
+ fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ if let Some(typ) = ty.is_array() {
+ typ
+ }
+ else if let Some(vector_type) = ty.is_vector() {
+ vector_type.get_element_type()
+ }
+ else if let Some(typ) = ty.get_pointee() {
+ typ
+ }
+ else {
+ unreachable!()
+ }
+ }
+
+ fn vector_length(&self, _ty: Type<'gcc>) -> usize {
+ unimplemented!();
- // TODO: support other sizes.
- /*match self.type_kind(ty) {
- TypeKind::Float => 32,
- TypeKind::Double => 64,
- TypeKind::X86_FP80 => 80,
- TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
- _ => bug!("llvm_float_width called on a non-float type"),
- }*/
+ }
+
+ fn float_width(&self, typ: Type<'gcc>) -> usize {
+ let f32 = self.context.new_type::<f32>();
+ let f64 = self.context.new_type::<f64>();
+ if typ == f32 {
+ 32
+ }
+ else if typ == f64 {
+ 64
+ }
+ else {
+ panic!("Cannot get width of float type {:?}", typ);
+ }
- // TODO: use packed.
++ // TODO(antoyo): support other sizes.
+ }
+
+ fn int_width(&self, typ: Type<'gcc>) -> u64 {
+ if typ.is_i8(self) || typ.is_u8(self) {
+ 8
+ }
+ else if typ.is_i16(self) || typ.is_u16(self) {
+ 16
+ }
+ else if typ.is_i32(self) || typ.is_u32(self) {
+ 32
+ }
+ else if typ.is_i64(self) || typ.is_u64(self) {
+ 64
+ }
+ else if typ.is_i128(self) || typ.is_u128(self) {
+ 128
+ }
+ else {
+ panic!("Cannot get width of int type {:?}", typ);
+ }
+ }
+
+ fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
+ value.get_type()
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
+ let unit = Integer::approximate_align(self, align);
+ let size = size.bytes();
+ let unit_size = unit.size().bytes();
+ assert_eq!(size % unit_size, 0);
+ self.type_array(self.type_from_integer(unit), size / unit_size)
+ }
+
+ pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
- /*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
- // TODO: use packed.
- let fields: Vec<_> = fields.iter().enumerate()
- .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
- .collect();
- return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type();
- }*/
-
++ // TODO(antoyo): use packed.
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
+ .collect();
+ typ.set_fields(None, &fields);
+ }
+
- // FIXME: fix gccjit API.
+ pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
+ self.context.new_opaque_struct_type(None, name)
+ }
+
+ pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+ if let Some(struct_type) = ty.is_struct() {
+ if struct_type.get_field_count() == 0 {
+ // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+ // size of usize::MAX in test_binary_search, we work around this by setting the size to
+ // zero for ZSTs.
- //debug!("struct_fields: {:#?}", layout);
++ // FIXME(antoyo): fix gccjit API.
+ len = 0;
+ }
+ }
+
+ // NOTE: see note above. Some other test uses usize::MAX.
+ if len == u64::MAX {
+ len = 0;
+ }
+
+ let len: i32 = len.try_into().expect("array len");
+
+ self.context.new_array_type(None, ty, len)
+ }
+}
+
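+// Builds the GCC field list for `layout`, interleaving explicit padding fillers so that
+// every field lands at its target offset: each field contributes a (padding, field) pair
+// and sized layouts get one trailing filler, giving the `1 + field_count * 2` length
+// asserted below. For example, a hypothetical layout with a `u8` at offset 0 and a `u32`
+// at offset 4 yields [filler(0 bytes), u8, filler(3 bytes), u32, trailing filler].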
+pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
- /*debug!(
- "struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \
- effective_field_align: {}",
- i,
- field,
- offset,
- target_offset,
- effective_field_align.bytes()
- );*/
+ let field_count = layout.fields.count();
+
+ let mut packed = false;
+ let mut offset = Size::ZERO;
+ let mut prev_effective_align = layout.align.abi;
+ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+ for i in layout.fields.index_by_increasing_offset() {
+ let target_offset = layout.fields.offset(i as usize);
+ let field = layout.field(cx, i);
+ let effective_field_align =
+ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+ packed |= effective_field_align < field.align.abi;
+
- //debug!(" padding before: {:?}", padding);
+ assert!(target_offset >= offset);
+ let padding = target_offset - offset;
+ let padding_align = prev_effective_align.min(effective_field_align);
+ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+ result.push(cx.type_padding_filler(padding, padding_align));
- result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box<Type>.
+
- /*debug!(
- "struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}",
- padding, offset, layout.size
- );*/
++ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
+ offset = target_offset + field.size;
+ prev_effective_align = effective_field_align;
+ }
+ if !layout.is_unsized() && field_count > 0 {
+ if offset > layout.size {
+ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+ }
+ let padding = layout.size - offset;
+ let padding_align = prev_effective_align;
+ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
- } else {
- //debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size);
+ result.push(cx.type_padding_filler(padding, padding_align));
+ assert_eq!(result.len(), 1 + field_count * 2);
+ }
+
+ (result, packed)
+}
--- /dev/null
- // FIXME: I don't think that's true for libgccjit.
+use std::fmt::Write;
+
+use gccjit::{Struct, Type};
+use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Ty, TypeFoldable};
+use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, LayoutOf, Pointer, PointeeInfo, Size, TyAndLayoutMethods, Variants};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+
+use crate::abi::{FnAbiGccExt, GccType};
+use crate::context::CodegenCx;
+use crate::type_::struct_fields;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
+ use Integer::*;
+ match i {
+ I8 => self.type_u8(),
+ I16 => self.type_u16(),
+ I32 => self.type_u32(),
+ I64 => self.type_u64(),
+ I128 => self.type_u128(),
+ }
+ }
+}
+
+pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
+ match layout.abi {
+ Abi::Scalar(_) => bug!("handled elsewhere"),
+ Abi::Vector { ref element, count } => {
+ let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
+ return cx.context.new_vector_type(element, count);
+ },
+ Abi::ScalarPair(..) => {
+ return cx.type_struct(
+ &[
+ layout.scalar_pair_element_gcc_type(cx, 0, false),
+ layout.scalar_pair_element_gcc_type(cx, 1, false),
+ ],
+ false,
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ }
+
+ let name = match layout.ty.kind() {
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::Dynamic(..) |
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ if !cx.sess().fewer_names() =>
+ {
+ let mut name = with_no_trimmed_paths(|| layout.ty.to_string());
+ if let (&ty::Adt(def, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ if def.is_enum() && !def.variants.is_empty() {
+ write!(&mut name, "::{}", def.variants[index].ident).unwrap();
+ }
+ }
+ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ }
+ Some(name)
+ }
+ ty::Adt(..) => {
+ // If `Some` is returned then a named struct is created in LLVM. Name collisions are
+ // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
+ // can improve perf.
- //debug!("gcc_type({:#?})", self);
-
++ // FIXME(antoyo): I don't think that's true for libgccjit.
+ Some(String::new())
+ }
+ _ => None,
+ };
+
+ match layout.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+ let packed = false;
+ match name {
+ None => cx.type_struct(&[fill], packed),
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
+ cx.set_struct_body(gcc_type, &[fill], packed);
+ gcc_type.as_type()
+ },
+ }
+ }
+ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
+ FieldsShape::Arbitrary { .. } =>
+ match name {
+ None => {
+ let (gcc_fields, packed) = struct_fields(cx, layout);
+ cx.type_struct(&gcc_fields, packed)
+ },
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
+ *defer = Some((gcc_type, layout));
+ gcc_type.as_type()
+ },
+ },
+ }
+}
+
+pub trait LayoutGccExt<'tcx> {
+ fn is_gcc_immediate(&self) -> bool;
+ fn is_gcc_scalar_pair(&self) -> bool;
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+ fn gcc_field_index(&self, index: usize) -> u64;
+ fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
+ fn is_gcc_immediate(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::Vector { .. } => true,
+ Abi::ScalarPair(..) => false,
+ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ }
+ }
+
+ fn is_gcc_scalar_pair(&self) -> bool {
+ match self.abi {
+ Abi::ScalarPair(..) => true,
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ }
+ }
+
+ /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+ /// The pointee type of the pointer in `PlaceRef` is always this type.
+ /// For sized types, it is also the right LLVM type for an `alloca`
+ /// containing a value of that type, and most immediates (except `bool`).
+ /// Unsized types, however, are represented by a "minimal unit", e.g.
+ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+ /// If the type is an unsized struct, the regular layout is generated,
+ /// with the inner-most trailing unsized field using the "minimal unit"
+ /// of that field's type - this is useful for taking the address of
+ /// that field and ensuring the struct has the right alignment.
++ // TODO(antoyo): do we still need the set_fields parameter?
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ // Use a different cache for scalars because pointers to DSTs
+ // can be either fat or thin (data pointers of fat pointers).
+ if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
+ return ty;
+ }
+ let ty =
+ match *self.ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
+ }
+ ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
+ _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+ };
+ cx.scalar_types.borrow_mut().insert(self.ty, ty);
+ return ty;
+ }
+
+ // Check the cache.
+ let variant_index =
+ match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+ let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
+ if let Some(ty) = cached_type {
+ let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
+ if let Some((struct_type, layout)) = type_to_set_fields {
+ // Since we might be trying to generate a type containing another type which is not
+ // completely generated yet, we deferred setting the fields until now.
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(struct_type, &fields, packed);
+ }
+ return ty;
+ }
+
- //debug!("--> mapped {:#?} to ty={:?}", self, ty);
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+ // Make sure lifetimes are erased, to avoid generating distinct LLVM
+ // types for Rust types that only differ in the choice of lifetimes.
+ let normal_ty = cx.tcx.erase_regions(self.ty);
+
+ let mut defer = None;
+ let ty =
+ if self.ty != normal_ty {
+ let mut layout = cx.layout_of(normal_ty);
+ if let Some(v) = variant_index {
+ layout = layout.for_variant(cx, v);
+ }
+ layout.gcc_type(cx, true)
+ }
+ else {
+ uncached_gcc_type(cx, *self, &mut defer)
+ };
- //TODO: do we still need this conditions and the set_fields parameter?
- //if set_fields {
- let (fields, packed) = struct_fields(cx, layout);
- cx.set_struct_body(ty, &fields, packed);
- /*}
- else {
- // Since we might be trying to generate a type containing another type which is not
- // completely generated yet, we don't set the fields right now, but we save the
- // type to set the fields later.
- cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout));
- }*/
+
+ cx.types.borrow_mut().insert((self.ty, variant_index), ty);
+
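+ // If `uncached_gcc_type` deferred filling in the fields of a named struct (to break
+ // cycles with types that are still being generated), set them now that the type has
+ // been cached.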
+ if let Some((ty, layout)) = defer {
- // TODO: remove llvm hack:
++ let (fields, packed) = struct_fields(cx, layout);
++ cx.set_struct_body(ty, &fields, packed);
+ }
+
+ ty
+ }
+
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+ }
+ self.gcc_type(cx, true)
+ }
+
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
+ match scalar.value {
+ Int(i, true) => cx.type_from_integer(i),
+ Int(i, false) => cx.type_from_unsigned_integer(i),
+ F32 => cx.type_f32(),
+ F64 => cx.type_f64(),
+ Pointer => {
+ // If we know the alignment, pick something better than i8.
+ let pointee =
+ if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ cx.type_pointee_for_align(pointee.align)
+ }
+ else {
+ cx.type_i8()
+ };
+ cx.type_ptr_to(pointee)
+ }
+ }
+ }
+
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
- // TODO: this bugs certainly don't happen in this case since the bool type is used instead of i1.
- if /*immediate &&*/ scalar.is_bool() {
++ // TODO(antoyo): remove llvm hack:
+ // HACK(eddyb) special-case fat pointers until LLVM removes
+ // pointee types, to avoid bitcasting every `OperandRef::deref`.
+ match self.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(_) => {
+ return self.field(cx, index).gcc_type(cx, true);
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
+ }
+ _ => {}
+ }
+
+ let (a, b) = match self.abi {
+ Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyAndLayout::scalar_pair_element_gcc_type({:?}): not applicable", self),
+ };
+ let scalar = [a, b][index];
+
+ // Make sure to return the same type `immediate_gcc_type` would when
+ // dealing with an immediate pair. This means that `(bool, bool)` is
+ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+ // immediate, just like `bool` is typically `i8` in memory and only `i1`
+ // when immediate. We need to load/store `bool` as `i8` to avoid
+ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
- //ty.gcc_type(self)
++        // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
++ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+
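+        // The second element starts at the end of the first, rounded up to the
+        // second's ABI alignment: e.g. for a (u8, u32) pair, b is at offset 4.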
+ let offset =
+ if index == 0 {
+ Size::ZERO
+ }
+ else {
+ a.value.size(cx).align_to(b.value.align(cx).abi)
+ };
+ self.scalar_gcc_type_at(cx, scalar, offset)
+ }
+
+ fn gcc_field_index(&self, index: usize) -> u64 {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+ _ => {}
+ }
+ match self.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+
+ FieldsShape::Array { .. } => index as u64,
+
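+            // struct_fields interleaves a padding field before every real field,
+            // so the field at memory index i lives at GCC field index 1 + i * 2.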
+ FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+ }
+ }
+
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+ return pointee;
+ }
+
+ let result = Ty::pointee_info_at(*self, cx, offset);
+
+ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+ result
+ }
+}
+
+impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.gcc_type(self, true)
+ }
+
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.immediate_gcc_type(self)
+ }
+
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_immediate()
+ }
+
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_scalar_pair()
+ }
+
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+ layout.gcc_field_index(index)
+ }
+
+ fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+ layout.scalar_pair_element_gcc_type(self, index, immediate)
+ }
+
+ fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+ ty.gcc_type(self)
+ }
+
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ fn_abi.ptr_to_gcc_type(self)
+ }
+
+ fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+ unimplemented!();
- // FIXME: return correct type.
+ }
+
+ fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
- //fn_abi.gcc_type(self)
++ // FIXME(antoyo): return correct type.
+ self.type_void()
+ }
+}
--- /dev/null
- # TODO: rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed?
+#!/bin/bash
+
- #set -x
++# TODO(antoyo): rewrite with cargo-make (or just, or a similar tool) so that the sysroot is only rebuilt when needed.
+
- #if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- #echo "[JIT] mini_core_hello_world"
- #CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
- #else
- #echo "[JIT] mini_core_hello_world (skipped)"
- #fi
-
+set -e
+
+export GCC_PATH=$(cat gcc_path)
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$1" == "--release" ]]; then
+ export CHANNEL='release'
+ CARGO_INCREMENTAL=1 cargo rustc --release
+else
+ echo $LD_LIBRARY_PATH
+ export CHANNEL='debug'
+ cargo rustc
+fi
+
+source config.sh
+
+rm -r target/out || true
+mkdir -p target/out/gccjit
+
+echo "[BUILD] mini_core"
+$RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
+
+echo "[BUILD] example"
+$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
+
- # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
+echo "[AOT] mini_core_hello_world"
+$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
- # FIXME: this requires linking an additional lib for __popcountdi2
- #echo "[AOT] alloc_example"
- #$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
- #$RUN_WRAPPER ./target/out/alloc_example
-
- #if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
- #echo "[JIT] std_example"
- #CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE
- #else
- #echo "[JIT] std_example (skipped)"
- #fi
+
+echo "[BUILD] sysroot"
+time ./build_sysroot/build_sysroot.sh
+
+echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+$RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+echo "[AOT] alloc_system"
+$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
- # FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
++echo "[AOT] alloc_example"
++$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
++$RUN_WRAPPER ./target/out/alloc_example
+
+echo "[AOT] dst_field_align"
- # FIXME: this requires linking an additional lib for __popcountdi2
- #echo "[BUILD] mod_bench"
- #$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
++# FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
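+# On failure, print the exit code before failing the script.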
+$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+echo "[AOT] std_example"
+$RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE
+
+echo "[AOT] subslice-patterns-const-eval"
+$RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+echo "[AOT] track-caller-attribute"
+$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+$RUN_WRAPPER ./target/out/track-caller-attribute
+
- # FIXME linker gives multiple definitions error on Linux
++echo "[BUILD] mod_bench"
++$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+
- #git apply ../rust_lang.patch
-
-
++# FIXME(antoyo): linker gives multiple definitions error on Linux
+#echo "[BUILD] sysroot in release mode"
+#./build_sysroot/build_sysroot.sh --release
+
++# TODO(antoyo): uncomment when it works.
+#pushd simple-raytracer
+#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ #echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ #hyperfine --runs ${RUN_RUNS:-10} --warmup 1 --prepare "rm -r target/*/debug || true" \
+ #"RUSTFLAGS='' cargo build --target $TARGET_TRIPLE" \
+ #"../cargo.sh build"
+
+ #echo "[BENCH RUN] ebobby/simple-raytracer"
+ #cp ./target/*/debug/main ./raytracer_cg_gccjit
+ #hyperfine --runs ${RUN_RUNS:-10} ./raytracer_cg_llvm ./raytracer_cg_gccjit
+#else
+ #echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+ #echo "[COMPILE] ebobby/simple-raytracer"
+ #../cargo.sh build
+ #echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+#fi
+#popd
+
+pushd build_sysroot/sysroot_src/library/core/tests
+echo "[TEST] libcore"
+rm -r ./target || true
+../../../../../cargo.sh test
+popd
+
++# TODO(antoyo): uncomment when it works.
+#pushd regex
+#echo "[TEST] rust-lang/regex example shootout-regex-dna"
+#../cargo.sh clean
+## Make sure `[codegen mono items] start` doesn't poison the diff
+#../cargo.sh build --example shootout-regex-dna
+#cat examples/regexdna-input.txt | ../cargo.sh run --example shootout-regex-dna | grep -v "Spawned thread" > res.txt
+#diff -u res.txt examples/regexdna-output.txt
+
+#echo "[TEST] rust-lang/regex tests"
+#../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options
+#popd
+
+#echo
+#echo "[BENCH COMPILE] mod_bench"
+
+#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
+#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort"
+
+## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow.
+#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
+
+#echo
+#echo "[BENCH RUN] mod_bench"
+#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_*
+
+echo
+echo "[TEST] rust-lang/rust"
+
+rust_toolchain=$(cat rust-toolchain)
+
+git clone https://github.com/rust-lang/rust.git || true
+cd rust
+git fetch
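+# `rustc -V` prints something like `rustc 1.56.0-nightly (abcdef123 2021-08-24)`,
+# so the third field with the opening parenthesis stripped is the commit hash;
+# this checks out the exact sources this rustc was built from.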
+git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
+export RUSTFLAGS=
+
- rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO: Enable back this test if I ever implement the llvm_asm! macro.
- #rm src/test/ui/consts/const-size_of-cycle.rs || true # Error file path difference
- #rm src/test/ui/impl-trait/impl-generic-mismatch.rs || true # ^
- #rm src/test/ui/type_length_limit.rs || true
- #rm src/test/ui/issues/issue-50993.rs || true # Target `thumbv7em-none-eabihf` is not supported
- #rm src/test/ui/macros/same-sequence-span.rs || true # Proc macro .rustc section not found?
- #rm src/test/ui/suggestions/issue-61963.rs || true # ^
+rm config.toml || true
+
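+# Minimal bootstrap config: reuse the installed cargo/rustc as stage0
+# (local-rebuild) and build none of rust's own codegen backends.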
+cat > config.toml <<EOF
+[rust]
+codegen-backends = []
+
+[build]
+cargo = "$(which cargo)"
+local-rebuild = true
+rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
+EOF
+
+git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test
+
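+# Remove UI tests that cannot work with this backend: tests carrying error
+# annotations (`//~` or `//[revision]~`), `// error-pattern:`,
+# `// build-fail` / `// run-fail` directives, or LLVM-specific -Cllvm-args.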
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+done
+
+git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
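+# Remove whole test directories that exercise features this backend does not
+# support yet (panics/unwinding, threads, SIMD, LTO, proc macros, ...).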
+rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,simd*,borrowck/,test*,*lto*.rs} || true
+for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
+ rm $test
+done
+git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
+git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
- # TODO: remove excluded tests when they stop stalling.
- COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" --exclude src/test/ui/numbers-arithmetic/saturating-float-casts.rs --exclude src/test/ui/issues/issue-50811.rs
++rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Re-enable this test if I ever implement the llvm_asm! macro.
+
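+# Flags for running the UI tests through this backend: -Zcodegen-backend loads
+# the backend built above, and -Cpanic=abort is used since unwinding is not
+# supported.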
+RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
+
+echo "[TEST] rustc test suite"
++COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"